xref: /linux/drivers/gpu/drm/i915/gt/intel_workarounds.c (revision e04e2b760ddbe3d7b283a05898c3a029085cd8cd)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2014-2018 Intel Corporation
4  */
5 
6 #include "i915_drv.h"
7 #include "i915_reg.h"
8 #include "intel_context.h"
9 #include "intel_engine_pm.h"
10 #include "intel_engine_regs.h"
11 #include "intel_gpu_commands.h"
12 #include "intel_gt.h"
13 #include "intel_gt_ccs_mode.h"
14 #include "intel_gt_mcr.h"
15 #include "intel_gt_print.h"
16 #include "intel_gt_regs.h"
17 #include "intel_ring.h"
18 #include "intel_workarounds.h"
19 
20 #include "display/intel_fbc_regs.h"
21 
22 /**
23  * DOC: Hardware workarounds
24  *
25  * Hardware workarounds are register programming documented to be executed in
26  * the driver that falls outside of the normal programming sequences for a
27  * platform. There are some basic categories of workarounds, depending on
28  * how/when they are applied:
29  *
30  * - Context workarounds: workarounds that touch registers that are
31  *   saved/restored to/from the HW context image. The list is emitted (via Load
32  *   Register Immediate commands) once when initializing the device and saved in
33  *   the default context. That default context is then used on every context
34  *   creation to have a "primed golden context", i.e. a context image that
35  *   already contains the changes needed to all the registers.
36  *
37  *   Context workarounds should be implemented in the \*_ctx_workarounds_init()
38  *   variants respective to the targeted platforms.
39  *
40  * - Engine workarounds: the list of these WAs is applied whenever the specific
41  *   engine is reset. It's also possible that a set of engine classes share a
42  *   common power domain and are reset together. This happens on some
43  *   platforms with render and compute engines. In this case (at least) one of
44  *   them needs to keep the workaround programming: the approach taken in the
45  *   driver is to tie those workarounds to the first compute/render engine that
46  *   is registered.  When executing with GuC submission, engine resets are
47  *   outside of kernel driver control, hence the list of registers involved is
48  *   written once, on engine initialization, and then passed to GuC, which
49  *   saves/restores their values before/after the reset takes place. See
50  *   ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
51  *
52  *   Workarounds for registers specific to RCS and CCS should be implemented in
53  *   rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
54  *   registers belonging to BCS, VCS or VECS should be implemented in
55  *   xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
56  *   engine's MMIO range but that are part of the common RCS/CCS reset domain
57  *   should be implemented in general_render_compute_wa_init(). The settings
58  *   about the CCS load balancing should be added in ccs_engine_wa_mode().
59  *
60  * - GT workarounds: the list of these WAs is applied whenever these registers
61  *   revert to their default values: on GPU reset, suspend/resume [1]_, etc.
62  *
63  *   GT workarounds should be implemented in the \*_gt_workarounds_init()
64  *   variants respective to the targeted platforms.
65  *
66  * - Register whitelist: some workarounds need to be implemented in userspace,
67  *   but need to touch privileged registers. The whitelist in the kernel
68  *   instructs the hardware to allow the access to happen. From the kernel side,
69  *   this is just a special case of a MMIO workaround (as we write the list of
70  * these to-be-whitelisted registers to some special HW registers).
71  *
72  *   Register whitelisting should be done in the \*_whitelist_build() variants
73  *   respective to the targeted platforms.
74  *
75  * - Workaround batchbuffers: buffers that get executed automatically by the
76  *   hardware on every HW context restore. These buffers are created and
77  * programmed in the default context so the hardware always goes through those
78  *   programming sequences when switching contexts. The support for workaround
79  * batchbuffers is enabled by these hardware mechanisms:
80  *
81  *   #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
82  *      context, pointing the hardware to jump to that location when that offset
83  *      is reached in the context restore. The workaround batchbuffer in the driver
84  *      currently uses this mechanism for all platforms.
85  *
86  *   #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
87  *      pointing the hardware to a buffer to continue executing after the
88  *      engine registers are restored in a context restore sequence. This is
89  *      currently not used in the driver.
90  *
91  * - Other: There are WAs that, due to their nature, cannot be applied from a
92  *   central place. Those are peppered around the rest of the code, as needed.
93  *   Workarounds related to the display IP are the main example.
94  *
95  * .. [1] Technically, some registers are power-context saved & restored, so they
96  *    survive a suspend/resume. In practice, writing them again is not too
97  *    costly and simplifies things, so it's the approach taken in the driver.
98  */
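
/*
 * Illustrative sketch (not upstream documentation) of how every list in this
 * file is built; names match the helpers defined below, and
 * __intel_engine_init_ctx_wa() shows the real dispatch:
 *
 *	wa_init_start(wal, engine->gt, "context", engine->name);
 *	gen9_ctx_workarounds_init(engine, wal);
 *	wa_init_finish(wal);
 */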
99 
100 static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
101 			  const char *name, const char *engine_name)
102 {
103 	wal->gt = gt;
104 	wal->name = name;
105 	wal->engine_name = engine_name;
106 }
107 
108 #define WA_LIST_CHUNK (1 << 4)
109 
110 static void wa_init_finish(struct i915_wa_list *wal)
111 {
112 	/* Trim unused entries. */
113 	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
114 		struct i915_wa *list = kmemdup(wal->list,
115 					       wal->count * sizeof(*list),
116 					       GFP_KERNEL);
117 
118 		if (list) {
119 			kfree(wal->list);
120 			wal->list = list;
121 		}
122 	}
123 
124 	if (!wal->count)
125 		return;
126 
127 	gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
128 	       wal->wa_count, wal->name, wal->engine_name);
129 }
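
/*
 * Illustrative example of the trim in wa_init_finish() above: with
 * WA_LIST_CHUNK == 16, a list that ended up holding 19 entries was last
 * grown to a capacity of 32; the kmemdup() shrinks the allocation back
 * down to exactly 19 entries.
 */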
130 
131 static enum forcewake_domains
132 wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
133 {
134 	enum forcewake_domains fw = 0;
135 	struct i915_wa *wa;
136 	unsigned int i;
137 
138 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
139 		fw |= intel_uncore_forcewake_for_reg(uncore,
140 						     wa->reg,
141 						     FW_REG_READ |
142 						     FW_REG_WRITE);
143 
144 	return fw;
145 }
146 
147 static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
148 {
149 	unsigned int addr = i915_mmio_reg_offset(wa->reg);
150 	struct drm_i915_private *i915 = wal->gt->i915;
151 	unsigned int start = 0, end = wal->count;
152 	const unsigned int grow = WA_LIST_CHUNK;
153 	struct i915_wa *wa_;
154 
155 	GEM_BUG_ON(!is_power_of_2(grow));
156 
157 	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
158 		struct i915_wa *list;
159 
160 		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
161 				     GFP_KERNEL);
162 		if (!list) {
163 			drm_err(&i915->drm, "No space for workaround init!\n");
164 			return;
165 		}
166 
167 		if (wal->list) {
168 			memcpy(list, wal->list, sizeof(*wa) * wal->count);
169 			kfree(wal->list);
170 		}
171 
172 		wal->list = list;
173 	}
174 
175 	while (start < end) {
176 		unsigned int mid = start + (end - start) / 2;
177 
178 		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
179 			start = mid + 1;
180 		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
181 			end = mid;
182 		} else {
183 			wa_ = &wal->list[mid];
184 
185 			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
186 				drm_err(&i915->drm,
187 					"Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
188 					i915_mmio_reg_offset(wa_->reg),
189 					wa_->clr, wa_->set);
190 
191 				wa_->set &= ~wa->clr;
192 			}
193 
194 			wal->wa_count++;
195 			wa_->set |= wa->set;
196 			wa_->clr |= wa->clr;
197 			wa_->read |= wa->read;
198 			return;
199 		}
200 	}
201 
202 	wal->wa_count++;
203 	wa_ = &wal->list[wal->count++];
204 	*wa_ = *wa;
205 
206 	while (wa_-- > wal->list) {
207 		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
208 			   i915_mmio_reg_offset(wa_[1].reg));
209 		if (i915_mmio_reg_offset(wa_[1].reg) >
210 		    i915_mmio_reg_offset(wa_[0].reg))
211 			break;
212 
213 		swap(wa_[1], wa_[0]);
214 	}
215 }
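
/*
 * Illustrative example of the merge performed in _wa_add() above: adding
 * clr/set = (0x000f, 0x0003) and later (0x00f0, 0x0030) for the same
 * register coalesces into a single entry with clr = 0x00ff and
 * set = 0x0033, applied as one read-modify-write.
 */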
216 
217 static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
218 		   u32 clear, u32 set, u32 read_mask, bool masked_reg)
219 {
220 	struct i915_wa wa = {
221 		.reg  = reg,
222 		.clr  = clear,
223 		.set  = set,
224 		.read = read_mask,
225 		.masked_reg = masked_reg,
226 	};
227 
228 	_wa_add(wal, &wa);
229 }
230 
231 static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
232 		       u32 clear, u32 set, u32 read_mask, bool masked_reg)
233 {
234 	struct i915_wa wa = {
235 		.mcr_reg = reg,
236 		.clr  = clear,
237 		.set  = set,
238 		.read = read_mask,
239 		.masked_reg = masked_reg,
240 		.is_mcr = 1,
241 	};
242 
243 	_wa_add(wal, &wa);
244 }
245 
246 static void
247 wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
248 {
249 	wa_add(wal, reg, clear, set, clear | set, false);
250 }
251 
252 static void
253 wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
254 {
255 	wa_mcr_add(wal, reg, clear, set, clear | set, false);
256 }
257 
258 static void
259 wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
260 {
261 	wa_write_clr_set(wal, reg, ~0, set);
262 }
263 
264 static void
265 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
266 {
267 	wa_write_clr_set(wal, reg, set, set);
268 }
269 
270 static void
271 wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
272 {
273 	wa_mcr_write_clr_set(wal, reg, set, set);
274 }
275 
276 static void
277 wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
278 {
279 	wa_write_clr_set(wal, reg, clr, 0);
280 }
281 
282 static void
283 wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
284 {
285 	wa_mcr_write_clr_set(wal, reg, clr, 0);
286 }
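
/*
 * Illustrative summary of the wa_write* helpers above (example values; the
 * wa_mcr_* variants behave identically on steered registers):
 *
 *	wa_write(wal, reg, 0x12345678);         full overwrite (clr == ~0)
 *	wa_write_or(wal, reg, BIT(3));          RMW: set bit 3, keep the rest
 *	wa_write_clr(wal, reg, BIT(3));         RMW: clear bit 3, keep the rest
 *	wa_write_clr_set(wal, reg, 0xff, 0x12); RMW: low byte becomes 0x12
 */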
287 
288 /*
289  * WA operations on "masked register". A masked register has the upper 16 bits
290  * documented as "masked" in b-spec. Its purpose is to allow writing to just a
291  * portion of the register without an rmw: you simply write in the upper 16 bits
292  * the mask of bits you are going to modify.
293  *
294  * The wa_masked_* family of functions already does the necessary operations to
295  * calculate the mask based on the parameters passed, so user only has to
296  * provide the lower 16 bits of that register.
297  */
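
/*
 * Illustrative expansion (see the _MASKED_BIT_ENABLE() definition in the
 * register headers): wa_masked_en(wal, reg, BIT(2)) queues a write of
 * _MASKED_BIT_ENABLE(BIT(2)) == 0x00040004: the upper half tells the
 * hardware which bit is being updated, the lower half supplies its new
 * value, so no read-modify-write cycle is needed.
 */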
298 
299 static void
300 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
301 {
302 	wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
303 }
304 
305 static void
306 wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
307 {
308 	wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
309 }
310 
311 static void
312 wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
313 {
314 	wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
315 }
316 
317 static void
318 wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
319 {
320 	wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
321 }
322 
323 static void
324 wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
325 		    u32 mask, u32 val)
326 {
327 	wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
328 }
329 
330 static void
331 wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
332 			u32 mask, u32 val)
333 {
334 	wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
335 }
336 
337 static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
338 				      struct i915_wa_list *wal)
339 {
340 	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
341 }
342 
343 static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
344 				      struct i915_wa_list *wal)
345 {
346 	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
347 }
348 
349 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
350 				      struct i915_wa_list *wal)
351 {
352 	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
353 
354 	/* WaDisableAsyncFlipPerfMode:bdw,chv */
355 	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
356 
357 	/* WaDisablePartialInstShootdown:bdw,chv */
358 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
359 			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
360 
361 	/* Use Force Non-Coherent whenever executing a 3D context. This is a
362 	 * workaround for a possible hang in the unlikely event a TLB
363 	 * invalidation occurs during a PSD flush.
364 	 */
365 	/* WaForceEnableNonCoherent:bdw,chv */
366 	/* WaHdcDisableFetchWhenMasked:bdw,chv */
367 	wa_masked_en(wal, HDC_CHICKEN0,
368 		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
369 		     HDC_FORCE_NON_COHERENT);
370 
371 	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
372 	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
373 	 *  polygons in the same 8x4 pixel/sample area to be processed without
374 	 *  stalling waiting for the earlier ones to write to Hierarchical Z
375 	 *  buffer."
376 	 *
377 	 * This optimization is off by default for BDW and CHV; turn it on.
378 	 */
379 	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
380 
381 	/* Wa4x4STCOptimizationDisable:bdw,chv */
382 	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
383 
384 	/*
385 	 * BSpec recommends 8x4 when MSAA is used,
386 	 * however in practice 16x4 seems fastest.
387 	 *
388 	 * Note that PS/WM thread counts depend on the WIZ hashing
389 	 * disable bit, which we don't touch here, but it's good
390 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
391 	 */
392 	wa_masked_field_set(wal, GEN7_GT_MODE,
393 			    GEN6_WIZ_HASHING_MASK,
394 			    GEN6_WIZ_HASHING_16x4);
395 }
396 
397 static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
398 				     struct i915_wa_list *wal)
399 {
400 	struct drm_i915_private *i915 = engine->i915;
401 
402 	gen8_ctx_workarounds_init(engine, wal);
403 
404 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
405 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
406 
407 	/* WaDisableDopClockGating:bdw
408 	 *
409 	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
410 	 * to disable EUTC clock gating.
411 	 */
412 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
413 			 DOP_CLOCK_GATING_DISABLE);
414 
415 	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
416 			 GEN8_SAMPLER_POWER_BYPASS_DIS);
417 
418 	wa_masked_en(wal, HDC_CHICKEN0,
419 		     /* WaForceContextSaveRestoreNonCoherent:bdw */
420 		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
421 		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
422 		     (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
423 }
424 
425 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
426 				     struct i915_wa_list *wal)
427 {
428 	gen8_ctx_workarounds_init(engine, wal);
429 
430 	/* WaDisableThreadStallDopClockGating:chv */
431 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
432 
433 	/* Improve HiZ throughput on CHV. */
434 	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
435 }
436 
437 static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
438 				      struct i915_wa_list *wal)
439 {
440 	struct drm_i915_private *i915 = engine->i915;
441 
442 	if (HAS_LLC(i915)) {
443 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
444 		 *
445 		 * Must match Display Engine. See
446 		 * WaCompressedResourceDisplayNewHashMode.
447 		 */
448 		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
449 			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
450 		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
451 				 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
452 	}
453 
454 	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
455 	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
456 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
457 			 FLOW_CONTROL_ENABLE |
458 			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
459 
460 	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
461 	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
462 	wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
463 			 GEN9_ENABLE_YV12_BUGFIX |
464 			 GEN9_ENABLE_GPGPU_PREEMPTION);
465 
466 	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
467 	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
468 	wa_masked_en(wal, CACHE_MODE_1,
469 		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
470 		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
471 
472 	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
473 	wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
474 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
475 
476 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
477 	wa_masked_en(wal, HDC_CHICKEN0,
478 		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
479 		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
480 
481 	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
482 	 * both tied to WaForceContextSaveRestoreNonCoherent
483 	 * in some hsds for skl. We keep the tie for all gen9. The
484 	 * documentation is a bit hazy and so we want to get common behaviour,
485 	 * even though there is no clear evidence we would need both on kbl/bxt.
486 	 * This area has been source of system hangs so we play it safe
487 	 * and mimic the skl regardless of what bspec says.
488 	 *
489 	 * Use Force Non-Coherent whenever executing a 3D context. This
490 	 * is a workaround for a possible hang in the unlikely event
491 	 * a TLB invalidation occurs during a PSD flush.
492 	 */
493 
494 	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
495 	wa_masked_en(wal, HDC_CHICKEN0,
496 		     HDC_FORCE_NON_COHERENT);
497 
498 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
499 	if (IS_SKYLAKE(i915) ||
500 	    IS_KABYLAKE(i915) ||
501 	    IS_COFFEELAKE(i915) ||
502 	    IS_COMETLAKE(i915))
503 		wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
504 				 GEN8_SAMPLER_POWER_BYPASS_DIS);
505 
506 	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
507 	wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
508 
509 	/*
510 	 * Supporting preemption with fine-granularity requires changes in the
511 	 * batch buffer programming. Since we can't break old userspace, we
512 	 * need to set our default preemption level to safe value. Userspace is
513 	 * still able to use more fine-grained preemption levels, since in
514 	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
515 	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
516 	 * not real HW workarounds, but merely a way to start using preemption
517 	 * while maintaining old contract with userspace.
518 	 */
519 
520 	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
521 	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
522 
523 	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
524 	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
525 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
526 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
527 
528 	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
529 	if (IS_GEN9_LP(i915))
530 		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
531 }
532 
533 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
534 				struct i915_wa_list *wal)
535 {
536 	struct intel_gt *gt = engine->gt;
537 	u8 vals[3] = { 0, 0, 0 };
538 	unsigned int i;
539 
540 	for (i = 0; i < 3; i++) {
541 		u8 ss;
542 
543 		/*
544 		 * Only consider slices where one, and only one, subslice has 7
545 		 * EUs
546 		 */
547 		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
548 			continue;
549 
550 		/*
551 		 * subslice_7eu[i] != 0 (because of the check above) and
552 		 * ss_max == 4 (maximum number of subslices possible per slice)
553 		 *
554 		 * ->    0 <= ss <= 3;
555 		 */
556 		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
557 		vals[i] = 3 - ss;
558 	}
559 
560 	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
561 		return;
562 
563 	/* Tune IZ hashing. See intel_device_info_runtime_init() */
564 	wa_masked_field_set(wal, GEN7_GT_MODE,
565 			    GEN9_IZ_HASHING_MASK(2) |
566 			    GEN9_IZ_HASHING_MASK(1) |
567 			    GEN9_IZ_HASHING_MASK(0),
568 			    GEN9_IZ_HASHING(2, vals[2]) |
569 			    GEN9_IZ_HASHING(1, vals[1]) |
570 			    GEN9_IZ_HASHING(0, vals[0]));
571 }
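
/*
 * Illustrative example for skl_tune_iz_hashing() above: if slice 0 has
 * exactly one subslice with 7 EUs at index 1 (subslice_7eu[0] == 0x2),
 * then ss = ffs(0x2) - 1 = 1 and vals[0] = 3 - 1 = 2, selecting
 * GEN9_IZ_HASHING(0, 2) in the GEN7_GT_MODE update.
 */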
572 
573 static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
574 				     struct i915_wa_list *wal)
575 {
576 	gen9_ctx_workarounds_init(engine, wal);
577 	skl_tune_iz_hashing(engine, wal);
578 }
579 
580 static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
581 				     struct i915_wa_list *wal)
582 {
583 	gen9_ctx_workarounds_init(engine, wal);
584 
585 	/* WaDisableThreadStallDopClockGating:bxt */
586 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
587 			 STALL_DOP_GATING_DISABLE);
588 
589 	/* WaToEnableHwFixForPushConstHWBug:bxt */
590 	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
591 		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
592 }
593 
594 static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
595 				     struct i915_wa_list *wal)
596 {
597 	struct drm_i915_private *i915 = engine->i915;
598 
599 	gen9_ctx_workarounds_init(engine, wal);
600 
601 	/* WaToEnableHwFixForPushConstHWBug:kbl */
602 	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
603 		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
604 			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
605 
606 	/* WaDisableSbeCacheDispatchPortSharing:kbl */
607 	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
608 			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
609 }
610 
611 static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
612 				     struct i915_wa_list *wal)
613 {
614 	gen9_ctx_workarounds_init(engine, wal);
615 
616 	/* WaToEnableHwFixForPushConstHWBug:glk */
617 	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
618 		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
619 }
620 
621 static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
622 				     struct i915_wa_list *wal)
623 {
624 	gen9_ctx_workarounds_init(engine, wal);
625 
626 	/* WaToEnableHwFixForPushConstHWBug:cfl */
627 	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
628 		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
629 
630 	/* WaDisableSbeCacheDispatchPortSharing:cfl */
631 	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
632 			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
633 }
634 
635 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
636 				     struct i915_wa_list *wal)
637 {
638 	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
639 	wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
640 
641 	/* WaForceEnableNonCoherent:icl
642 	 * This is not the same workaround as in early Gen9 platforms, where
643 	 * lacking this could cause system hangs, but coherency performance
644 	 * overhead is high and only a few compute workloads really need it
645 	 * (the register is whitelisted in hardware now, so UMDs can opt in
646 	 * for coherency if they have a good reason).
647 	 */
648 	wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
649 
650 	/* WaEnableFloatBlendOptimization:icl */
651 	wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
652 		   _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
653 		   0 /* write-only, so skip validation */,
654 		   true);
655 
656 	/* WaDisableGPGPUMidThreadPreemption:icl */
657 	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
658 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
659 			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
660 
661 	/* allow headerless messages for preemptible GPGPU context */
662 	wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
663 			 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
664 
665 	/* Wa_1604278689:icl,ehl */
666 	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
667 	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
668 			 0,
669 			 0xFFFFFFFF);
670 
671 	/* Wa_1406306137:icl,ehl */
672 	wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
673 }
674 
675 /*
676  * These settings aren't actually workarounds, but general tuning settings that
677  * need to be programmed on the dg2 platform.
678  */
679 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
680 				   struct i915_wa_list *wal)
681 {
682 	wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
683 	wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
684 			     REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
685 	wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
686 			     FF_MODE2_TDS_TIMER_128);
687 }
688 
689 static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
690 				       struct i915_wa_list *wal)
691 {
692 	struct drm_i915_private *i915 = engine->i915;
693 
694 	/*
695 	 * Wa_1409142259:tgl,dg1,adl-p
696 	 * Wa_1409347922:tgl,dg1,adl-p
697 	 * Wa_1409252684:tgl,dg1,adl-p
698 	 * Wa_1409217633:tgl,dg1,adl-p
699 	 * Wa_1409207793:tgl,dg1,adl-p
700 	 * Wa_1409178076:tgl,dg1,adl-p
701 	 * Wa_1408979724:tgl,dg1,adl-p
702 	 * Wa_14010443199:tgl,rkl,dg1,adl-p
703 	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
704 	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
705 	 */
706 	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
707 		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
708 
709 	/* WaDisableGPGPUMidThreadPreemption:gen12 */
710 	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
711 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
712 			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
713 
714 	/*
715 	 * Wa_16011163337 - GS_TIMER
716 	 *
717 	 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
718 	 * need to program it even on those that don't explicitly list that
719 	 * workaround.
720 	 *
721 	 * Note that the programming of GEN12_FF_MODE2 is further modified
722 	 * according to the FF_MODE2 guidance given by Wa_1608008084.
723 	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
724 	 * value when read from the CPU.
725 	 *
726 	 * The default value for this register is zero for all fields.
727 	 * So instead of doing a RMW we should just write the desired values
728 	 * for TDS and GS timers. Note that since the readback can't be trusted,
729 	 * the clear mask is just set to ~0 to make sure other bits are not
730 	 * inadvertently set. For the same reason read verification is ignored.
731 	 */
732 	wa_add(wal,
733 	       GEN12_FF_MODE2,
734 	       ~0,
735 	       FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
736 	       0, false);
737 
738 	if (!IS_DG1(i915)) {
739 		/* Wa_1806527549 */
740 		wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
741 
742 		/* Wa_1606376872 */
743 		wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
744 	}
745 }
746 
747 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
748 				     struct i915_wa_list *wal)
749 {
750 	gen12_ctx_workarounds_init(engine, wal);
751 
752 	/* Wa_1409044764 */
753 	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
754 		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
755 
756 	/* Wa_22010493298 */
757 	wa_masked_en(wal, HIZ_CHICKEN,
758 		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
759 }
760 
761 static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
762 				     struct i915_wa_list *wal)
763 {
764 	dg2_ctx_gt_tuning_init(engine, wal);
765 
766 	/* Wa_16013271637:dg2 */
767 	wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
768 			 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
769 
770 	/* Wa_14014947963:dg2 */
771 	wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
772 
773 	/* Wa_18018764978:dg2 */
774 	wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
775 
776 	/* Wa_18019271663:dg2 */
777 	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
778 
779 	/* Wa_14019877138:dg2 */
780 	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
781 }
782 
783 static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
784 				     struct i915_wa_list *wal)
785 {
786 	struct intel_gt *gt = engine->gt;
787 
788 	dg2_ctx_gt_tuning_init(engine, wal);
789 
790 	/*
791 	 * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
792 	 * gen12_emit_indirect_ctx_rcs() rather than here on some early
793 	 * steppings.
794 	 */
795 	if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
796 	      IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
797 		wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
798 }
799 
800 static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
801 				       struct i915_wa_list *wal)
802 {
803 	struct intel_gt *gt = engine->gt;
804 
805 	xelpg_ctx_gt_tuning_init(engine, wal);
806 
807 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
808 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
809 		/* Wa_14014947963 */
810 		wa_masked_field_set(wal, VF_PREEMPTION,
811 				    PREEMPTION_VERTEX_COUNT, 0x4000);
812 
813 		/* Wa_16013271637 */
814 		wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
815 				 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
816 
817 		/* Wa_18019627453 */
818 		wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
819 
820 		/* Wa_18018764978 */
821 		wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
822 	}
823 
824 	/* Wa_18019271663 */
825 	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
826 
827 	/* Wa_14019877138 */
828 	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
829 }
830 
831 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
832 					 struct i915_wa_list *wal)
833 {
834 	/*
835 	 * This is a "fake" workaround defined by software to ensure we
836 	 * maintain reliable, backward-compatible behavior for userspace with
837 	 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
838 	 *
839 	 * The per-context setting of MI_MODE[12] determines whether the bits
840 	 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
841 	 * in the traditional manner or whether they should instead use a new
842 	 * tgl+ meaning that breaks backward compatibility, but allows nesting
843 	 * into 3rd-level batchbuffers.  When this new capability was first
844 	 * added in TGL, it remained off by default unless a context
845 	 * intentionally opted in to the new behavior.  However Xe_HPG now
846 	 * flips this on by default and requires that we explicitly opt out if
847 	 * we don't want the new behavior.
848 	 *
849 	 * From a SW perspective, we want to maintain the backward-compatible
850 	 * behavior for userspace, so we'll apply a fake workaround to set it
851 	 * back to the legacy behavior on platforms where the hardware default
852 	 * is to break compatibility.  At the moment there is no Linux
853 	 * userspace that utilizes third-level batchbuffers, so this will avoid
854  * userspace needing to make any changes; using the legacy
855 	 * meaning is the correct thing to do.  If/when we have userspace
856 	 * consumers that want to utilize third-level batch nesting, we can
857 	 * provide a context parameter to allow them to opt-in.
858 	 */
859 	wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
860 }
861 
862 static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
863 				   struct i915_wa_list *wal)
864 {
865 	u8 mocs;
866 
867 	/*
868  * Some blitter commands do not have a field for MOCS; those
869  * commands will use the MOCS index pointed to by BLIT_CCTL.
870  * The BLIT_CCTL register therefore needs to be programmed to un-cached.
871 	 */
872 	if (engine->class == COPY_ENGINE_CLASS) {
873 		mocs = engine->gt->mocs.uc_index;
874 		wa_write_clr_set(wal,
875 				 BLIT_CCTL(engine->mmio_base),
876 				 BLIT_CCTL_MASK,
877 				 BLIT_CCTL_MOCS(mocs, mocs));
878 	}
879 }
880 
881 /*
882  * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
883  * defined by the hardware team, but rather general context registers.
884  * Adding this context register programming to the context workaround list
885  * allows us to use the wa framework for proper application and validation.
886  */
887 static void
888 gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
889 			  struct i915_wa_list *wal)
890 {
891 	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
892 		fakewa_disable_nestedbb_mode(engine, wal);
893 
894 	gen12_ctx_gt_mocs_init(engine, wal);
895 }
896 
897 static void
898 __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
899 			   struct i915_wa_list *wal,
900 			   const char *name)
901 {
902 	struct drm_i915_private *i915 = engine->i915;
903 
904 	wa_init_start(wal, engine->gt, name, engine->name);
905 
906 	/* Applies to all engines */
907 	/*
908 	 * Fake workarounds are not actual workarounds but
909 	 * programming of context registers using the workaround framework.
910 	 */
911 	if (GRAPHICS_VER(i915) >= 12)
912 		gen12_ctx_gt_fake_wa_init(engine, wal);
913 
914 	if (engine->class != RENDER_CLASS)
915 		goto done;
916 
917 	if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
918 		xelpg_ctx_workarounds_init(engine, wal);
919 	else if (IS_DG2(i915))
920 		dg2_ctx_workarounds_init(engine, wal);
921 	else if (IS_DG1(i915))
922 		dg1_ctx_workarounds_init(engine, wal);
923 	else if (GRAPHICS_VER(i915) == 12)
924 		gen12_ctx_workarounds_init(engine, wal);
925 	else if (GRAPHICS_VER(i915) == 11)
926 		icl_ctx_workarounds_init(engine, wal);
927 	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
928 		cfl_ctx_workarounds_init(engine, wal);
929 	else if (IS_GEMINILAKE(i915))
930 		glk_ctx_workarounds_init(engine, wal);
931 	else if (IS_KABYLAKE(i915))
932 		kbl_ctx_workarounds_init(engine, wal);
933 	else if (IS_BROXTON(i915))
934 		bxt_ctx_workarounds_init(engine, wal);
935 	else if (IS_SKYLAKE(i915))
936 		skl_ctx_workarounds_init(engine, wal);
937 	else if (IS_CHERRYVIEW(i915))
938 		chv_ctx_workarounds_init(engine, wal);
939 	else if (IS_BROADWELL(i915))
940 		bdw_ctx_workarounds_init(engine, wal);
941 	else if (GRAPHICS_VER(i915) == 7)
942 		gen7_ctx_workarounds_init(engine, wal);
943 	else if (GRAPHICS_VER(i915) == 6)
944 		gen6_ctx_workarounds_init(engine, wal);
945 	else if (GRAPHICS_VER(i915) < 8)
946 		;
947 	else
948 		MISSING_CASE(GRAPHICS_VER(i915));
949 
950 done:
951 	wa_init_finish(wal);
952 }
953 
954 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
955 {
956 	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
957 }
958 
959 int intel_engine_emit_ctx_wa(struct i915_request *rq)
960 {
961 	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
962 	struct intel_uncore *uncore = rq->engine->uncore;
963 	enum forcewake_domains fw;
964 	unsigned long flags;
965 	struct i915_wa *wa;
966 	unsigned int i;
967 	u32 *cs;
968 	int ret;
969 
970 	if (wal->count == 0)
971 		return 0;
972 
973 	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
974 	if (ret)
975 		return ret;
976 
977 	cs = intel_ring_begin(rq, (wal->count * 2 + 2));
978 	if (IS_ERR(cs))
979 		return PTR_ERR(cs);
980 
981 	fw = wal_get_fw_for_rmw(uncore, wal);
982 
983 	intel_gt_mcr_lock(wal->gt, &flags);
984 	spin_lock(&uncore->lock);
985 	intel_uncore_forcewake_get__locked(uncore, fw);
986 
987 	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
988 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
989 		u32 val;
990 
991 		/* Skip reading the register if it's not really needed */
992 		if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
993 			val = wa->set;
994 		} else {
995 			val = wa->is_mcr ?
996 				intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
997 				intel_uncore_read_fw(uncore, wa->reg);
998 			val &= ~wa->clr;
999 			val |= wa->set;
1000 		}
1001 
1002 		*cs++ = i915_mmio_reg_offset(wa->reg);
1003 		*cs++ = val;
1004 	}
1005 	*cs++ = MI_NOOP;
1006 
1007 	intel_uncore_forcewake_put__locked(uncore, fw);
1008 	spin_unlock(&uncore->lock);
1009 	intel_gt_mcr_unlock(wal->gt, flags);
1010 
1011 	intel_ring_advance(rq, cs);
1012 
1013 	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
1014 	if (ret)
1015 		return ret;
1016 
1017 	return 0;
1018 }
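
/*
 * Illustrative ring contents emitted by intel_engine_emit_ctx_wa() above
 * for a two-entry list:
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	<reg A offset> <value A>
 *	<reg B offset> <value B>
 *	MI_NOOP
 */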
1019 
1020 static void
1021 gen4_gt_workarounds_init(struct intel_gt *gt,
1022 			 struct i915_wa_list *wal)
1023 {
1024 	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
1025 	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
1026 }
1027 
1028 static void
1029 g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1030 {
1031 	gen4_gt_workarounds_init(gt, wal);
1032 
1033 	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
1034 	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
1035 }
1036 
1037 static void
1038 ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1039 {
1040 	g4x_gt_workarounds_init(gt, wal);
1041 
1042 	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
1043 }
1044 
1045 static void
1046 snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1047 {
1048 }
1049 
1050 static void
1051 ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1052 {
1053 	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
1054 	wa_masked_dis(wal,
1055 		      GEN7_COMMON_SLICE_CHICKEN1,
1056 		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
1057 
1058 	/* WaApplyL3ControlAndL3ChickenMode:ivb */
1059 	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
1060 	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
1061 
1062 	/* WaForceL3Serialization:ivb */
1063 	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1064 }
1065 
1066 static void
1067 vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1068 {
1069 	/* WaForceL3Serialization:vlv */
1070 	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1071 
1072 	/*
1073 	 * WaIncreaseL3CreditsForVLVB0:vlv
1074 	 * This is the hardware default actually.
1075 	 */
1076 	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
1077 }
1078 
1079 static void
1080 hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1081 {
1082 	/* L3 caching of data atomics doesn't work -- disable it. */
1083 	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
1084 
1085 	wa_add(wal,
1086 	       HSW_ROW_CHICKEN3, 0,
1087 	       _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
1088 	       0 /* XXX does this reg exist? */, true);
1089 
1090 	/* WaVSRefCountFullforceMissDisable:hsw */
1091 	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
1092 }
1093 
1094 static void
1095 gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
1096 {
1097 	const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
1098 	unsigned int slice, subslice;
1099 	u32 mcr, mcr_mask;
1100 
1101 	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
1102 
1103 	/*
1104 	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
1105 	 * Before any MMIO read into slice/subslice specific registers, MCR
1106 	 * packet control register needs to be programmed to point to any
1107 	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
1108 	 * This means each subsequent MMIO read will be forwarded to a
1109 	 * specific s/ss combination, but this is OK since these registers
1110 	 * are consistent across s/ss in almost all cases. In the rare
1111 	 * occasions, such as INSTDONE, where this value is dependent
1112 	 * on s/ss combo, the read should be done with read_subslice_reg.
1113 	 */
1114 	slice = ffs(sseu->slice_mask) - 1;
1115 	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
1116 	subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
1117 	GEM_BUG_ON(!subslice);
1118 	subslice--;
1119 
1120 	/*
1121 	 * We use GEN8_MCR..() macros to calculate the |mcr| value for
1122 	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
1123 	 */
1124 	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
1125 	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
1126 
1127 	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
1128 
1129 	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
1130 }
1131 
1132 static void
1133 gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1134 {
1135 	struct drm_i915_private *i915 = gt->i915;
1136 
1137 	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
1138 	gen9_wa_init_mcr(i915, wal);
1139 
1140 	/* WaDisableKillLogic:bxt,skl,kbl */
1141 	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
1142 		wa_write_or(wal,
1143 			    GAM_ECOCHK,
1144 			    ECOCHK_DIS_TLB);
1145 
1146 	if (HAS_LLC(i915)) {
1147 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
1148 		 *
1149 		 * Must match Display Engine. See
1150 		 * WaCompressedResourceDisplayNewHashMode.
1151 		 */
1152 		wa_write_or(wal,
1153 			    MMCD_MISC_CTRL,
1154 			    MMCD_PCLA | MMCD_HOTSPOT_EN);
1155 	}
1156 
1157 	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
1158 	wa_write_or(wal,
1159 		    GAM_ECOCHK,
1160 		    BDW_DISABLE_HDC_INVALIDATION);
1161 }
1162 
1163 static void
1164 skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1165 {
1166 	gen9_gt_workarounds_init(gt, wal);
1167 
1168 	/* WaDisableGafsUnitClkGating:skl */
1169 	wa_write_or(wal,
1170 		    GEN7_UCGCTL4,
1171 		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1172 
1173 	/* WaInPlaceDecompressionHang:skl */
1174 	if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
1175 		wa_write_or(wal,
1176 			    GEN9_GAMT_ECO_REG_RW_IA,
1177 			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1178 }
1179 
1180 static void
1181 kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1182 {
1183 	gen9_gt_workarounds_init(gt, wal);
1184 
1185 	/* WaDisableDynamicCreditSharing:kbl */
1186 	if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
1187 		wa_write_or(wal,
1188 			    GAMT_CHKN_BIT_REG,
1189 			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1190 
1191 	/* WaDisableGafsUnitClkGating:kbl */
1192 	wa_write_or(wal,
1193 		    GEN7_UCGCTL4,
1194 		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1195 
1196 	/* WaInPlaceDecompressionHang:kbl */
1197 	wa_write_or(wal,
1198 		    GEN9_GAMT_ECO_REG_RW_IA,
1199 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1200 }
1201 
1202 static void
1203 glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1204 {
1205 	gen9_gt_workarounds_init(gt, wal);
1206 }
1207 
1208 static void
1209 cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1210 {
1211 	gen9_gt_workarounds_init(gt, wal);
1212 
1213 	/* WaDisableGafsUnitClkGating:cfl */
1214 	wa_write_or(wal,
1215 		    GEN7_UCGCTL4,
1216 		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1217 
1218 	/* WaInPlaceDecompressionHang:cfl */
1219 	wa_write_or(wal,
1220 		    GEN9_GAMT_ECO_REG_RW_IA,
1221 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1222 }
1223 
1224 static void __set_mcr_steering(struct i915_wa_list *wal,
1225 			       i915_reg_t steering_reg,
1226 			       unsigned int slice, unsigned int subslice)
1227 {
1228 	u32 mcr, mcr_mask;
1229 
1230 	mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
1231 	mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
1232 
1233 	wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
1234 }
1235 
1236 static void debug_dump_steering(struct intel_gt *gt)
1237 {
1238 	struct drm_printer p = drm_dbg_printer(&gt->i915->drm, DRM_UT_DRIVER,
1239 					       "MCR Steering:");
1240 
1241 	if (drm_debug_enabled(DRM_UT_DRIVER))
1242 		intel_gt_mcr_report_steering(&p, gt, false);
1243 }
1244 
1245 static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
1246 			 unsigned int slice, unsigned int subslice)
1247 {
1248 	__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
1249 
1250 	gt->default_steering.groupid = slice;
1251 	gt->default_steering.instanceid = subslice;
1252 
1253 	debug_dump_steering(gt);
1254 }
1255 
1256 static void
1257 icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1258 {
1259 	const struct sseu_dev_info *sseu = &gt->info.sseu;
1260 	unsigned int subslice;
1261 
1262 	GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
1263 	GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
1264 
1265 	/*
1266 	 * Although a platform may have subslices, we need to always steer
1267 	 * reads to the lowest instance that isn't fused off.  When Render
1268 	 * Power Gating is enabled, grabbing forcewake will only power up a
1269 	 * single subslice (the "minconfig") if there isn't a real workload
1270 	 * that needs to be run; this means that if we steer register reads to
1271 	 * one of the higher subslices, we run the risk of reading back 0's or
1272 	 * random garbage.
1273 	 */
1274 	subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
1275 
1276 	/*
1277 	 * If the subslice we picked above also steers us to a valid L3 bank,
1278 	 * then we can just rely on the default steering and won't need to
1279 	 * worry about explicitly re-steering L3BANK reads later.
1280 	 */
1281 	if (gt->info.l3bank_mask & BIT(subslice))
1282 		gt->steering_table[L3BANK] = NULL;
1283 
1284 	__add_mcr_wa(gt, wal, 0, subslice);
1285 }
1286 
1287 static void
1288 xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1289 {
1290 	const struct sseu_dev_info *sseu = &gt->info.sseu;
1291 	unsigned long slice, subslice = 0, slice_mask = 0;
1292 	u32 lncf_mask = 0;
1293 	int i;
1294 
1295 	/*
1296 	 * On Xe_HP the steering increases in complexity. There are now several
1297 	 * more units that require steering and we're not guaranteed to be able
1298 	 * to find a common setting for all of them. These are:
1299 	 * - GSLICE (fusable)
1300 	 * - DSS (sub-unit within gslice; fusable)
1301 	 * - L3 Bank (fusable)
1302 	 * - MSLICE (fusable)
1303 	 * - LNCF (sub-unit within mslice; always present if mslice is present)
1304 	 *
1305 	 * We'll do our default/implicit steering based on GSLICE (in the
1306 	 * sliceid field) and DSS (in the subsliceid field).  If we can
1307 	 * find overlap between the valid MSLICE and/or LNCF values with
1308 	 * a suitable GSLICE, then we can just re-use the default value and
1309 	 * skip any explicit steering at runtime.
1310 	 *
1311 	 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
1312 	 * a valid sliceid value.  DSS steering is the only type of steering
1313 	 * that utilizes the 'subsliceid' bits.
1314 	 *
1315 	 * Also note that, even though the steering domain is called "GSlice"
1316 	 * and it is encoded in the register using the gslice format, the spec
1317 	 * says that the combined (geometry | compute) fuse should be used to
1318 	 * select the steering.
1319 	 */
1320 
1321 	/* Find the potential gslice candidates */
1322 	slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
1323 						       GEN_DSS_PER_GSLICE);
1324 
1325 	/*
1326 	 * Find the potential LNCF candidates.  Either LNCF within a valid
1327 	 * mslice is fine.
1328 	 */
1329 	for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
1330 		lncf_mask |= (0x3 << (i * 2));
1331 
1332 	/*
1333 	 * Are there any sliceid values that work for both GSLICE and LNCF
1334 	 * steering?
1335 	 */
1336 	if (slice_mask & lncf_mask) {
1337 		slice_mask &= lncf_mask;
1338 		gt->steering_table[LNCF] = NULL;
1339 	}
1340 
1341 	/* How about sliceid values that also work for MSLICE steering? */
1342 	if (slice_mask & gt->info.mslice_mask) {
1343 		slice_mask &= gt->info.mslice_mask;
1344 		gt->steering_table[MSLICE] = NULL;
1345 	}
1346 
1347 	slice = __ffs(slice_mask);
1348 	subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
1349 		GEN_DSS_PER_GSLICE;
1350 
1351 	__add_mcr_wa(gt, wal, slice, subslice);
1352 
1353 	/*
1354 	 * SQIDI ranges are special because they use different steering
1355 	 * registers than everything else we work with.  On XeHP SDV and
1356 	 * DG2-G10, any value in the steering registers will work fine since
1357 	 * all instances are present, but DG2-G11 only has SQIDI instances at
1358 	 * ID's 2 and 3, so we need to steer to one of those.  For simplicity
1359 	 * we'll just steer to a hardcoded "2" since that value will work
1360 	 * everywhere.
1361 	 */
1362 	__set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
1363 	__set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
1364 
1365 	/*
1366 	 * On DG2, GAM registers have a dedicated steering control register
1367 	 * and must always be programmed to a hardcoded groupid of "1."
1368 	 */
1369 	if (IS_DG2(gt->i915))
1370 		__set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
1371 }
1372 
1373 static void
1374 icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1375 {
1376 	struct drm_i915_private *i915 = gt->i915;
1377 
1378 	icl_wa_init_mcr(gt, wal);
1379 
1380 	/* WaModifyGamTlbPartitioning:icl */
1381 	wa_write_clr_set(wal,
1382 			 GEN11_GACB_PERF_CTRL,
1383 			 GEN11_HASH_CTRL_MASK,
1384 			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
1385 
1386 	/* Wa_1405766107:icl
1387 	 * Formerly known as WaCL2SFHalfMaxAlloc
1388 	 */
1389 	wa_write_or(wal,
1390 		    GEN11_LSN_UNSLCVC,
1391 		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
1392 		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
1393 
1394 	/* Wa_220166154:icl
1395 	 * Formerly known as WaDisCtxReload
1396 	 */
1397 	wa_write_or(wal,
1398 		    GEN8_GAMW_ECO_DEV_RW_IA,
1399 		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
1400 
1401 	/* Wa_1406463099:icl
1402 	 * Formerly known as WaGamTlbPendError
1403 	 */
1404 	wa_write_or(wal,
1405 		    GAMT_CHKN_BIT_REG,
1406 		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
1407 
1408 	/*
1409 	 * Wa_1408615072:icl,ehl  (vsunit)
1410 	 * Wa_1407596294:icl,ehl  (hsunit)
1411 	 */
1412 	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1413 		    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
1414 
1415 	/* Wa_1407352427:icl,ehl */
1416 	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
1417 		    PSDUNIT_CLKGATE_DIS);
1418 
1419 	/* Wa_1406680159:icl,ehl */
1420 	wa_mcr_write_or(wal,
1421 			GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1422 			GWUNIT_CLKGATE_DIS);
1423 
1424 	/* Wa_1607087056:icl,ehl,jsl */
1425 	if (IS_ICELAKE(i915) ||
1426 		((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
1427 		IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
1428 		wa_write_or(wal,
1429 			    GEN11_SLICE_UNIT_LEVEL_CLKGATE,
1430 			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
1431 
1432 	/*
1433 	 * This is not a documented workaround, but rather an optimization
1434 	 * to reduce sampler power.
1435 	 */
1436 	wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1437 }
1438 
1439 /*
1440  * Though there are per-engine instances of these registers,
1441  * they retain their value through engine resets and should
1442  * only be provided on the GT workaround list rather than
1443  * the engine-specific workaround list.
1444  */
1445 static void
1446 wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
1447 {
1448 	struct intel_engine_cs *engine;
1449 	int id;
1450 
1451 	for_each_engine(engine, gt, id) {
1452 		if (engine->class != VIDEO_DECODE_CLASS ||
1453 		    (engine->instance % 2))
1454 			continue;
1455 
1456 		wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
1457 			    IECPUNIT_CLKGATE_DIS);
1458 	}
1459 }
1460 
1461 static void
1462 gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1463 {
1464 	icl_wa_init_mcr(gt, wal);
1465 
1466 	/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
1467 	wa_14011060649(gt, wal);
1468 
1469 	/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
1470 	wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1471 
1472 	/*
1473 	 * Wa_14015795083
1474 	 *
1475 	 * Firmware on some gen12 platforms locks the MISCCPCTL register,
1476 	 * preventing i915 from modifying it for this workaround.  Skip the
1477 	 * readback verification for this workaround on debug builds; if the
1478 	 * workaround doesn't stick due to firmware behavior, it's not an error
1479 	 * that we want CI to flag.
1480 	 */
1481 	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1482 	       0, 0, false);
1483 }
1484 
1485 static void
1486 dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1487 {
1488 	gen12_gt_workarounds_init(gt, wal);
1489 
1490 	/* Wa_1409420604:dg1 */
1491 	wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
1492 			CPSSUNIT_CLKGATE_DIS);
1493 
1494 	/* Wa_1408615072:dg1 */
1495 	/* Empirical testing shows this register is unaffected by engine reset. */
1496 	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
1497 }
1498 
1499 static void
1500 dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1501 {
1502 	xehp_init_mcr(gt, wal);
1503 
1504 	/* Wa_14011060649:dg2 */
1505 	wa_14011060649(gt, wal);
1506 
1507 	if (IS_DG2_G10(gt->i915)) {
1508 		/* Wa_22010523718:dg2 */
1509 		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1510 			    CG3DDISCFEG_CLKGATE_DIS);
1511 
1512 		/* Wa_14011006942:dg2 */
1513 		wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1514 				DSS_ROUTER_CLKGATE_DIS);
1515 	}
1516 
1517 	/* Wa_14014830051:dg2 */
1518 	wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1519 
1520 	/*
1521 	 * Wa_14015795083
1522 	 * Skip verification for possibly locked register.
1523 	 */
1524 	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1525 	       0, 0, false);
1526 
1527 	/* Wa_18018781329 */
1528 	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1529 	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1530 	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1531 	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
1532 
1533 	/* Wa_1509235366:dg2 */
1534 	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
1535 			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
1536 
1537 	/* Wa_14010648519:dg2 */
1538 	wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
1539 }
1540 
1541 static void
1542 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1543 {
1544 	/* Wa_14018575942 / Wa_18018781329 */
1545 	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1546 	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1547 
1548 	/* Wa_22016670082 */
1549 	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1550 
1551 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
1552 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
1553 		/* Wa_14014830051 */
1554 		wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1555 
1556 		/* Wa_14015795083 */
1557 		wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1558 	}
1559 
1560 	/*
1561 	 * Unlike older platforms, we no longer set up implicit steering here;
1562 	 * all MCR accesses are explicitly steered.
1563 	 */
1564 	debug_dump_steering(gt);
1565 }
1566 
1567 static void
1568 wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
1569 {
1570 	struct intel_engine_cs *engine;
1571 	int id;
1572 
1573 	for_each_engine(engine, gt, id)
1574 		if (engine->class == VIDEO_DECODE_CLASS)
1575 			wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
1576 				    MFXPIPE_CLKGATE_DIS);
1577 }
1578 
1579 static void
1580 xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1581 {
1582 	wa_16021867713(gt, wal);
1583 
1584 	/*
1585 	 * Wa_14018778641
1586 	 * Wa_18018781329
1587 	 *
1588 	 * Note that although these registers are MCR on the primary
1589 	 * GT, the media GT's versions are regular singleton registers.
1590 	 */
1591 	wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
1592 
1593 	/*
1594 	 * Wa_14018575942
1595 	 *
1596 	 * The issue is seen on media KPI tests running on the VDBOX engine,
1597 	 * especially VP9 encoding workloads.
1598 	 */
1599 	wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1600 
1601 	/* Wa_22016670082 */
1602 	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1603 
1604 	debug_dump_steering(gt);
1605 }
1606 
1607 /*
1608  * The bspec performance guide has recommended MMIO tuning settings.  These
1609  * aren't truly "workarounds" but we want to program them through the
1610  * workaround infrastructure to make sure they're (re)applied at the proper
1611  * times.
1612  *
1613  * The programming in this function is for settings that persist through
1614  * engine resets and also are not part of any engine's register state context.
1615  * I.e., settings that only need to be re-applied in the event of a full GT
1616  * reset.
1617  */
1618 static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
1619 {
1620 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
1621 		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1622 		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1623 	}
1624 
1625 	if (IS_DG2(gt->i915)) {
1626 		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1627 		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1628 	}
1629 }
1630 
1631 static void
1632 gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
1633 {
1634 	struct drm_i915_private *i915 = gt->i915;
1635 
1636 	gt_tuning_settings(gt, wal);
1637 
1638 	if (gt->type == GT_MEDIA) {
1639 		if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
1640 			xelpmp_gt_workarounds_init(gt, wal);
1641 		else
1642 			MISSING_CASE(MEDIA_VER_FULL(i915));
1643 
1644 		return;
1645 	}
1646 
1647 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
1648 		xelpg_gt_workarounds_init(gt, wal);
1649 	else if (IS_DG2(i915))
1650 		dg2_gt_workarounds_init(gt, wal);
1651 	else if (IS_DG1(i915))
1652 		dg1_gt_workarounds_init(gt, wal);
1653 	else if (GRAPHICS_VER(i915) == 12)
1654 		gen12_gt_workarounds_init(gt, wal);
1655 	else if (GRAPHICS_VER(i915) == 11)
1656 		icl_gt_workarounds_init(gt, wal);
1657 	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
1658 		cfl_gt_workarounds_init(gt, wal);
1659 	else if (IS_GEMINILAKE(i915))
1660 		glk_gt_workarounds_init(gt, wal);
1661 	else if (IS_KABYLAKE(i915))
1662 		kbl_gt_workarounds_init(gt, wal);
1663 	else if (IS_BROXTON(i915))
1664 		gen9_gt_workarounds_init(gt, wal);
1665 	else if (IS_SKYLAKE(i915))
1666 		skl_gt_workarounds_init(gt, wal);
1667 	else if (IS_HASWELL(i915))
1668 		hsw_gt_workarounds_init(gt, wal);
1669 	else if (IS_VALLEYVIEW(i915))
1670 		vlv_gt_workarounds_init(gt, wal);
1671 	else if (IS_IVYBRIDGE(i915))
1672 		ivb_gt_workarounds_init(gt, wal);
1673 	else if (GRAPHICS_VER(i915) == 6)
1674 		snb_gt_workarounds_init(gt, wal);
1675 	else if (GRAPHICS_VER(i915) == 5)
1676 		ilk_gt_workarounds_init(gt, wal);
1677 	else if (IS_G4X(i915))
1678 		g4x_gt_workarounds_init(gt, wal);
1679 	else if (GRAPHICS_VER(i915) == 4)
1680 		gen4_gt_workarounds_init(gt, wal);
1681 	else if (GRAPHICS_VER(i915) <= 8)
1682 		;
1683 	else
1684 		MISSING_CASE(GRAPHICS_VER(i915));
1685 }
1686 
1687 void intel_gt_init_workarounds(struct intel_gt *gt)
1688 {
1689 	struct i915_wa_list *wal = &gt->wa_list;
1690 
1691 	wa_init_start(wal, gt, "GT", "global");
1692 	gt_init_workarounds(gt, wal);
1693 	wa_init_finish(wal);
1694 }
1695 
1696 static bool
1697 wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
1698 	  const char *name, const char *from)
1699 {
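	/*
	 * Compare only the bits covered by the readback mask; wa->read
	 * excludes write-only bits and bits we deliberately skip verifying.
	 */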
1700 	if ((cur ^ wa->set) & wa->read) {
1701 		gt_err(gt,
1702 		       "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
1703 		       name, from, i915_mmio_reg_offset(wa->reg),
1704 		       cur, cur & wa->read, wa->set & wa->read);
1705 
1706 		return false;
1707 	}
1708 
1709 	return true;
1710 }
1711 
1712 static void wa_list_apply(const struct i915_wa_list *wal)
1713 {
1714 	struct intel_gt *gt = wal->gt;
1715 	struct intel_uncore *uncore = gt->uncore;
1716 	enum forcewake_domains fw;
1717 	unsigned long flags;
1718 	struct i915_wa *wa;
1719 	unsigned int i;
1720 
1721 	if (!wal->count)
1722 		return;
1723 
1724 	fw = wal_get_fw_for_rmw(uncore, wal);
1725 
1726 	intel_gt_mcr_lock(gt, &flags);
1727 	spin_lock(&uncore->lock);
1728 	intel_uncore_forcewake_get__locked(uncore, fw);
1729 
1730 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1731 		u32 val, old = 0;
1732 
1733 		/* open-coded rmw due to steering */
1734 		if (wa->clr)
1735 			old = wa->is_mcr ?
1736 				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1737 				intel_uncore_read_fw(uncore, wa->reg);
1738 		val = (old & ~wa->clr) | wa->set;
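		/*
		 * Entries without a clr mask are plain writes and are applied
		 * unconditionally; rmw entries skip the write when the value
		 * is already correct.
		 */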
1739 		if (val != old || !wa->clr) {
1740 			if (wa->is_mcr)
1741 				intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
1742 			else
1743 				intel_uncore_write_fw(uncore, wa->reg, val);
1744 		}
1745 
1746 		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1747 			u32 val = wa->is_mcr ?
1748 				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1749 				intel_uncore_read_fw(uncore, wa->reg);
1750 
1751 			wa_verify(gt, wa, val, wal->name, "application");
1752 		}
1753 	}
1754 
1755 	intel_uncore_forcewake_put__locked(uncore, fw);
1756 	spin_unlock(&uncore->lock);
1757 	intel_gt_mcr_unlock(gt, flags);
1758 }
1759 
1760 void intel_gt_apply_workarounds(struct intel_gt *gt)
1761 {
1762 	wa_list_apply(&gt->wa_list);
1763 }
1764 
1765 static bool wa_list_verify(struct intel_gt *gt,
1766 			   const struct i915_wa_list *wal,
1767 			   const char *from)
1768 {
1769 	struct intel_uncore *uncore = gt->uncore;
1770 	struct i915_wa *wa;
1771 	enum forcewake_domains fw;
1772 	unsigned long flags;
1773 	unsigned int i;
1774 	bool ok = true;
1775 
1776 	fw = wal_get_fw_for_rmw(uncore, wal);
1777 
1778 	intel_gt_mcr_lock(gt, &flags);
1779 	spin_lock(&uncore->lock);
1780 	intel_uncore_forcewake_get__locked(uncore, fw);
1781 
1782 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1783 		ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
1784 				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1785 				intel_uncore_read_fw(uncore, wa->reg),
1786 				wal->name, from);
1787 
1788 	intel_uncore_forcewake_put__locked(uncore, fw);
1789 	spin_unlock(&uncore->lock);
1790 	intel_gt_mcr_unlock(gt, flags);
1791 
1792 	return ok;
1793 }
1794 
1795 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
1796 {
1797 	return wa_list_verify(gt, &gt->wa_list, from);
1798 }
1799 
1800 __maybe_unused
1801 static bool is_nonpriv_flags_valid(u32 flags)
1802 {
1803 	/* Check only valid flag bits are set */
1804 	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
1805 		return false;
1806 
1807 	/* NB: Only 3 out of 4 enum values are valid for access field */
1808 	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
1809 	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
1810 		return false;
1811 
1812 	return true;
1813 }
1814 
1815 static void
1816 whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
1817 {
1818 	struct i915_wa wa = {
1819 		.reg = reg
1820 	};
1821 
1822 	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1823 		return;
1824 
1825 	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1826 		return;
1827 
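	/*
	 * A RING_FORCE_TO_NONPRIV slot encodes the register address and the
	 * access/range flags in a single dword, so fold the flags into bits
	 * of the offset that the address does not use.
	 */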
1828 	wa.reg.reg |= flags;
1829 	_wa_add(wal, &wa);
1830 }
1831 
1832 static void
1833 whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
1834 {
1835 	struct i915_wa wa = {
1836 		.mcr_reg = reg,
1837 		.is_mcr = 1,
1838 	};
1839 
1840 	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1841 		return;
1842 
1843 	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1844 		return;
1845 
1846 	wa.mcr_reg.reg |= flags;
1847 	_wa_add(wal, &wa);
1848 }
1849 
1850 static void
1851 whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
1852 {
1853 	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
1854 }
1855 
1856 static void
1857 whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
1858 {
1859 	whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
1860 }
1861 
1862 static void gen9_whitelist_build(struct i915_wa_list *w)
1863 {
1864 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
1865 	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
1866 
1867 	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
1868 	whitelist_reg(w, GEN8_CS_CHICKEN1);
1869 
1870 	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
1871 	whitelist_reg(w, GEN8_HDC_CHICKEN1);
1872 
1873 	/* WaSendPushConstantsFromMMIO:skl,bxt */
1874 	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
1875 }
1876 
1877 static void skl_whitelist_build(struct intel_engine_cs *engine)
1878 {
1879 	struct i915_wa_list *w = &engine->whitelist;
1880 
1881 	if (engine->class != RENDER_CLASS)
1882 		return;
1883 
1884 	gen9_whitelist_build(w);
1885 
1886 	/* WaDisableLSQCROPERFforOCL:skl */
1887 	whitelist_mcr_reg(w, GEN8_L3SQCREG4);
1888 }
1889 
1890 static void bxt_whitelist_build(struct intel_engine_cs *engine)
1891 {
1892 	if (engine->class != RENDER_CLASS)
1893 		return;
1894 
1895 	gen9_whitelist_build(&engine->whitelist);
1896 }
1897 
1898 static void kbl_whitelist_build(struct intel_engine_cs *engine)
1899 {
1900 	struct i915_wa_list *w = &engine->whitelist;
1901 
1902 	if (engine->class != RENDER_CLASS)
1903 		return;
1904 
1905 	gen9_whitelist_build(w);
1906 
1907 	/* WaDisableLSQCROPERFforOCL:kbl */
1908 	whitelist_mcr_reg(w, GEN8_L3SQCREG4);
1909 }
1910 
1911 static void glk_whitelist_build(struct intel_engine_cs *engine)
1912 {
1913 	struct i915_wa_list *w = &engine->whitelist;
1914 
1915 	if (engine->class != RENDER_CLASS)
1916 		return;
1917 
1918 	gen9_whitelist_build(w);
1919 
1920 	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
1921 	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1922 }
1923 
1924 static void cfl_whitelist_build(struct intel_engine_cs *engine)
1925 {
1926 	struct i915_wa_list *w = &engine->whitelist;
1927 
1928 	if (engine->class != RENDER_CLASS)
1929 		return;
1930 
1931 	gen9_whitelist_build(w);
1932 
1933 	/*
1934 	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
1935 	 *
1936 	 * This covers 4 registers which are next to one another:
1937 	 *   - PS_INVOCATION_COUNT
1938 	 *   - PS_INVOCATION_COUNT_UDW
1939 	 *   - PS_DEPTH_COUNT
1940 	 *   - PS_DEPTH_COUNT_UDW
1941 	 */
1942 	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1943 			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
1944 			  RING_FORCE_TO_NONPRIV_RANGE_4);
1945 }
1946 
1947 static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
1948 {
1949 	struct i915_wa_list *w = &engine->whitelist;
1950 
1951 	if (engine->class != RENDER_CLASS)
1952 		whitelist_reg_ext(w,
1953 				  RING_CTX_TIMESTAMP(engine->mmio_base),
1954 				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
1955 }
1956 
1957 static void cml_whitelist_build(struct intel_engine_cs *engine)
1958 {
1959 	allow_read_ctx_timestamp(engine);
1960 
1961 	cfl_whitelist_build(engine);
1962 }
1963 
1964 static void icl_whitelist_build(struct intel_engine_cs *engine)
1965 {
1966 	struct i915_wa_list *w = &engine->whitelist;
1967 
1968 	allow_read_ctx_timestamp(engine);
1969 
1970 	switch (engine->class) {
1971 	case RENDER_CLASS:
1972 		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
1973 		whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);
1974 
1975 		/* WaAllowUMDToModifySamplerMode:icl */
1976 		whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);
1977 
1978 		/* WaEnableStateCacheRedirectToCS:icl */
1979 		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1980 
1981 		/*
1982 		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
1983 		 *
1984 		 * This covers 4 registers which are next to one another:
1985 		 *   - PS_INVOCATION_COUNT
1986 		 *   - PS_INVOCATION_COUNT_UDW
1987 		 *   - PS_DEPTH_COUNT
1988 		 *   - PS_DEPTH_COUNT_UDW
1989 		 */
1990 		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1991 				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
1992 				  RING_FORCE_TO_NONPRIV_RANGE_4);
1993 		break;
1994 
1995 	case VIDEO_DECODE_CLASS:
1996 		/* hucStatusRegOffset */
1997 		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
1998 				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
1999 		/* hucUKernelHdrInfoRegOffset */
2000 		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
2001 				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
2002 		/* hucStatus2RegOffset */
2003 		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
2004 				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
2005 		break;
2006 
2007 	default:
2008 		break;
2009 	}
2010 }
2011 
2012 static void tgl_whitelist_build(struct intel_engine_cs *engine)
2013 {
2014 	struct i915_wa_list *w = &engine->whitelist;
2015 
2016 	allow_read_ctx_timestamp(engine);
2017 
2018 	switch (engine->class) {
2019 	case RENDER_CLASS:
2020 		/*
2021 		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
2022 		 * Wa_1408556865:tgl
2023 		 *
2024 		 * This covers 4 registers which are next to one another:
2025 		 *   - PS_INVOCATION_COUNT
2026 		 *   - PS_INVOCATION_COUNT_UDW
2027 		 *   - PS_DEPTH_COUNT
2028 		 *   - PS_DEPTH_COUNT_UDW
2029 		 */
2030 		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2031 				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
2032 				  RING_FORCE_TO_NONPRIV_RANGE_4);
2033 
2034 		/*
2035 		 * Wa_1808121037:tgl
2036 		 * Wa_14012131227:dg1
2037 		 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
2038 		 */
2039 		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2040 
2041 		/* Wa_1806527549:tgl */
2042 		whitelist_reg(w, HIZ_CHICKEN);
2043 
2044 		/* Required by recommended tuning setting (not a workaround) */
2045 		whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
2046 
2047 		break;
2048 	default:
2049 		break;
2050 	}
2051 }
2052 
2053 static void dg2_whitelist_build(struct intel_engine_cs *engine)
2054 {
2055 	struct i915_wa_list *w = &engine->whitelist;
2056 
2057 	switch (engine->class) {
2058 	case RENDER_CLASS:
2059 		/* Required by recommended tuning setting (not a workaround) */
2060 		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2061 
2062 		break;
2063 	default:
2064 		break;
2065 	}
2066 }
2067 
2068 static void xelpg_whitelist_build(struct intel_engine_cs *engine)
2069 {
2070 	struct i915_wa_list *w = &engine->whitelist;
2071 
2072 	switch (engine->class) {
2073 	case RENDER_CLASS:
2074 		/* Required by recommended tuning setting (not a workaround) */
2075 		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2076 
2077 		break;
2078 	default:
2079 		break;
2080 	}
2081 }
2082 
2083 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
2084 {
2085 	struct drm_i915_private *i915 = engine->i915;
2086 	struct i915_wa_list *w = &engine->whitelist;
2087 
2088 	wa_init_start(w, engine->gt, "whitelist", engine->name);
2089 
2090 	if (engine->gt->type == GT_MEDIA)
2091 		; /* none yet */
2092 	else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
2093 		xelpg_whitelist_build(engine);
2094 	else if (IS_DG2(i915))
2095 		dg2_whitelist_build(engine);
2096 	else if (GRAPHICS_VER(i915) == 12)
2097 		tgl_whitelist_build(engine);
2098 	else if (GRAPHICS_VER(i915) == 11)
2099 		icl_whitelist_build(engine);
2100 	else if (IS_COMETLAKE(i915))
2101 		cml_whitelist_build(engine);
2102 	else if (IS_COFFEELAKE(i915))
2103 		cfl_whitelist_build(engine);
2104 	else if (IS_GEMINILAKE(i915))
2105 		glk_whitelist_build(engine);
2106 	else if (IS_KABYLAKE(i915))
2107 		kbl_whitelist_build(engine);
2108 	else if (IS_BROXTON(i915))
2109 		bxt_whitelist_build(engine);
2110 	else if (IS_SKYLAKE(i915))
2111 		skl_whitelist_build(engine);
2112 	else if (GRAPHICS_VER(i915) <= 8)
2113 		;
2114 	else
2115 		MISSING_CASE(GRAPHICS_VER(i915));
2116 
2117 	wa_init_finish(w);
2118 }
2119 
2120 void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
2121 {
2122 	const struct i915_wa_list *wal = &engine->whitelist;
2123 	struct intel_uncore *uncore = engine->uncore;
2124 	const u32 base = engine->mmio_base;
2125 	struct i915_wa *wa;
2126 	unsigned int i;
2127 
2128 	if (!wal->count)
2129 		return;
2130 
2131 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
2132 		intel_uncore_write(uncore,
2133 				   RING_FORCE_TO_NONPRIV(base, i),
2134 				   i915_mmio_reg_offset(wa->reg));
2135 
2136 	/* And clear the rest just in case of garbage */
2137 	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
2138 		intel_uncore_write(uncore,
2139 				   RING_FORCE_TO_NONPRIV(base, i),
2140 				   i915_mmio_reg_offset(RING_NOPID(base)));
2141 }
2142 
2143 /*
2144  * engine_fake_wa_init(), a placeholder to program registers that
2145  * are not part of an official workaround defined by the hardware
2146  * team.
2147  * Adding the programming of those registers to a workaround list
2148  * lets the wa framework handle their application and verification.
2149  */
2150 static void
2151 engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2152 {
2153 	u8 mocs_w, mocs_r;
2154 
2155 	/*
2156 	 * RING_CMD_CCTL specifies the default MOCS entry that will be used
2157 	 * by the command streamer when executing commands that don't have
2158 	 * a way to explicitly specify a MOCS setting.  The default should
2159 	 * usually reference whichever MOCS entry corresponds to uncached
2160 	 * behavior, although use of a WB cached entry is recommended by the
2161 	 * spec in certain circumstances on specific platforms.
2162 	 */
2163 	if (GRAPHICS_VER(engine->i915) >= 12) {
2164 		mocs_r = engine->gt->mocs.uc_index;
2165 		mocs_w = engine->gt->mocs.uc_index;
2166 
2167 		if (HAS_L3_CCS_READ(engine->i915) &&
2168 		    engine->class == COMPUTE_CLASS) {
2169 			mocs_r = engine->gt->mocs.wb_index;
2170 
2171 			/*
2172 			 * Even on the few platforms where MOCS 0 is a
2173 			 * legitimate table entry, it's never the correct
2174 			 * setting to use here; we can assume the MOCS init
2175 			 * just forgot to initialize wb_index.
2176 			 */
2177 			drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
2178 		}
2179 
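		/*
		 * wa_masked_field_set() relies on this being a masked
		 * register: the upper 16 bits select which bits to update,
		 * so no read-modify-write of the other fields is needed.
		 */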
2180 		wa_masked_field_set(wal,
2181 				    RING_CMD_CCTL(engine->mmio_base),
2182 				    CMD_CCTL_MOCS_MASK,
2183 				    CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
2184 	}
2185 }
2186 
2187 static void
2188 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2189 {
2190 	struct drm_i915_private *i915 = engine->i915;
2191 	struct intel_gt *gt = engine->gt;
2192 
2193 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2194 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
2195 		/* Wa_22014600077 */
2196 		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2197 				 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
2198 	}
2199 
2200 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2201 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2202 	    IS_DG2(i915)) {
2203 		/* Wa_1509727124 */
2204 		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2205 				 SC_DISABLE_POWER_OPTIMIZATION_EBB);
2206 	}
2207 
2208 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2209 	    IS_DG2(i915)) {
2210 		/* Wa_22012856258 */
2211 		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2212 				 GEN12_DISABLE_READ_SUPPRESSION);
2213 	}
2214 
2215 	if (IS_DG2(i915)) {
2216 		/*
2217 		 * Wa_22010960976:dg2
2218 		 * Wa_14013347512:dg2
2219 		 */
2220 		wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
2221 				  LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
2222 	}
2223 
2224 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
2225 	    IS_DG2(i915)) {
2226 		/* Wa_14015150844 */
2227 		wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
2228 			   _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
2229 			   0, true);
2230 	}
2231 
2232 	if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2233 	    IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2234 		/*
2235 		 * Wa_1606700617:tgl,dg1,adl-p
2236 		 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
2237 		 * Wa_14010826681:tgl,dg1,rkl,adl-p
2238 		 * Wa_18019627453:dg2
2239 		 */
2240 		wa_masked_en(wal,
2241 			     GEN9_CS_DEBUG_MODE1,
2242 			     FF_DOP_CLOCK_GATE_DISABLE);
2243 	}
2244 
2245 	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
2246 	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2247 		/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
2248 		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
2249 
2250 		/*
2251 		 * Wa_1407928979:tgl A*
2252 		 * Wa_18011464164:tgl[B0+],dg1[B0+]
2253 		 * Wa_22010931296:tgl[B0+],dg1[B0+]
2254 		 * Wa_14010919138:rkl,dg1,adl-s,adl-p
2255 		 */
2256 		wa_write_or(wal, GEN7_FF_THREAD_MODE,
2257 			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2258 
2259 		/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
2260 		wa_mcr_masked_en(wal,
2261 				 GEN10_SAMPLER_MODE,
2262 				 ENABLE_SMALLPL);
2263 	}
2264 
2265 	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2266 	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2267 		/* Wa_1409804808 */
2268 		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2269 				 GEN12_PUSH_CONST_DEREF_HOLD_DIS);
2270 
2271 		/* Wa_14010229206 */
2272 		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
2273 	}
2274 
2275 	if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
2276 		/*
2277 		 * Wa_1607297627
2278 		 *
2279 		 * On TGL and RKL there are multiple entries for this WA in the
2280 		 * BSpec; some indicate this is an A0-only WA, others indicate
2281 		 * it applies to all steppings, so we trust the "all steppings" ones.
2282 		 */
2283 		wa_masked_en(wal,
2284 			     RING_PSMI_CTL(RENDER_RING_BASE),
2285 			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
2286 			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
2287 	}
2288 
2289 	if (GRAPHICS_VER(i915) == 11) {
2290 		/* This is not a Wa; enable for better image quality. */
2291 		wa_masked_en(wal,
2292 			     _3D_CHICKEN3,
2293 			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
2294 
2295 		/*
2296 		 * Wa_1405543622:icl
2297 		 * Formerly known as WaGAPZPriorityScheme
2298 		 */
2299 		wa_write_or(wal,
2300 			    GEN8_GARBCNTL,
2301 			    GEN11_ARBITRATION_PRIO_ORDER_MASK);
2302 
2303 		/*
2304 		 * Wa_1604223664:icl
2305 		 * Formerly known as WaL3BankAddressHashing
2306 		 */
2307 		wa_write_clr_set(wal,
2308 				 GEN8_GARBCNTL,
2309 				 GEN11_HASH_CTRL_EXCL_MASK,
2310 				 GEN11_HASH_CTRL_EXCL_BIT0);
2311 		wa_write_clr_set(wal,
2312 				 GEN11_GLBLINVL,
2313 				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
2314 				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
2315 
2316 		/*
2317 		 * Wa_1405733216:icl
2318 		 * Formerly known as WaDisableCleanEvicts
2319 		 */
2320 		wa_mcr_write_or(wal,
2321 				GEN8_L3SQCREG4,
2322 				GEN11_LQSC_CLEAN_EVICT_DISABLE);
2323 
2324 		/* Wa_1606682166:icl */
2325 		wa_write_or(wal,
2326 			    GEN7_SARCHKMD,
2327 			    GEN7_DISABLE_SAMPLER_PREFETCH);
2328 
2329 		/* Wa_1409178092:icl */
2330 		wa_mcr_write_clr_set(wal,
2331 				     GEN11_SCRATCH2,
2332 				     GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
2333 				     0);
2334 
2335 		/* WaEnable32PlaneMode:icl */
2336 		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
2337 			     GEN11_ENABLE_32_PLANE_MODE);
2338 
2339 		/*
2340 		 * Wa_1408767742:icl[a2..forever],ehl[all]
2341 		 * Wa_1605460711:icl[a0..c0]
2342 		 */
2343 		wa_write_or(wal,
2344 			    GEN7_FF_THREAD_MODE,
2345 			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2346 
2347 		/* Wa_22010271021 */
2348 		wa_masked_en(wal,
2349 			     GEN9_CS_DEBUG_MODE1,
2350 			     FF_DOP_CLOCK_GATE_DISABLE);
2351 	}
2352 
2353 	/*
2354 	 * Intel platforms that support fine-grained preemption (i.e., gen9 and
2355 	 * beyond) allow the kernel-mode driver to choose between two different
2356 	 * options for controlling preemption granularity and behavior.
2357 	 *
2358 	 * Option 1 (hardware default):
2359 	 *   Preemption settings are controlled in a global manner via
2360 	 *   kernel-only register CS_DEBUG_MODE1 (0x20EC).  Any granularity
2361 	 *   and settings chosen by the kernel-mode driver will apply to all
2362 	 *   userspace clients.
2363 	 *
2364 	 * Option 2:
2365 	 *   Preemption settings are controlled on a per-context basis via
2366 	 *   register CS_CHICKEN1 (0x2580).  CS_CHICKEN1 is saved/restored on
2367 	 *   context switch and is writable by userspace (e.g., via
2368 	 *   MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
2369 	 *   which allows different userspace drivers/clients to select
2370 	 *   different settings, or to change those settings on the fly in
2371 	 *   response to runtime needs.  This option was known by name
2372 	 *   "FtrPerCtxtPreemptionGranularityControl" at one time, although
2373 	 *   that name is somewhat misleading as other non-granularity
2374 	 *   preemption settings are also impacted by this decision.
2375 	 *
2376 	 * On Linux, our policy has always been to let userspace drivers
2377 	 * control preemption granularity/settings (Option 2).  This was
2378 	 * originally mandatory on gen9 to prevent ABI breakage (old gen9
2379 	 * userspace developed before object-level preemption was enabled would
2380 	 * not behave well if i915 were to go with Option 1 and enable that
2381 	 * preemption in a global manner).  On gen9 each context would have
2382 	 * object-level preemption disabled by default (see
2383 	 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
2384 	 * userspace drivers could opt-in to object-level preemption as they
2385 	 * saw fit.  For post-gen9 platforms, we continue to utilize Option 2;
2386 	 * even though it is no longer necessary for ABI compatibility when
2387 	 * enabling a new platform, it does ensure that userspace will be able
2388 	 * to implement any workarounds that show up requiring temporary
2389 	 * adjustments to preemption behavior at runtime.
2390 	 *
2391 	 * Notes/Workarounds:
2392 	 *  - Wa_14015141709:  On DG2 and early steppings of MTL,
2393 	 *      CS_CHICKEN1[0] does not disable object-level preemption as
2394 	 *      it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
2395 	 *      using Option 1).  Effectively this means userspace is unable
2396 	 *      to disable object-level preemption on these platforms/steppings
2397 	 *      despite the setting here.
2398 	 *
2399 	 *  - Wa_16013994831:  May require that userspace program
2400 	 *      CS_CHICKEN1[10] when certain runtime conditions are true.
2401 	 *      Userspace requires Option 2 to be in effect for their update of
2402 	 *      CS_CHICKEN1[10] to be effective.
2403 	 *
2404 	 * Other workarounds may appear in the future that will also require
2405 	 * Option 2 behavior to allow proper userspace implementation.
2406 	 */
2407 	if (GRAPHICS_VER(i915) >= 9)
2408 		wa_masked_en(wal,
2409 			     GEN7_FF_SLICE_CS_CHICKEN1,
2410 			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
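	/*
	 * Purely as an illustration of Option 2 (this is userspace's job,
	 * not the kernel's): with per-context control selected above, a UMD
	 * could adjust its own preemption settings from a batch buffer with
	 * a masked-register write along the lines of:
	 *
	 *   bb[i++] = MI_LOAD_REGISTER_IMM(1);
	 *   bb[i++] = 0x2580;            // CS_CHICKEN1
	 *   bb[i++] = (bit << 16) | bit; // mask high word, value low word
	 *
	 * where "bit" stands for whichever preemption control is being
	 * changed.
	 */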
2411 
2412 	if (IS_SKYLAKE(i915) ||
2413 	    IS_KABYLAKE(i915) ||
2414 	    IS_COFFEELAKE(i915) ||
2415 	    IS_COMETLAKE(i915)) {
2416 		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
2417 		wa_write_or(wal,
2418 			    GEN8_GARBCNTL,
2419 			    GEN9_GAPS_TSV_CREDIT_DISABLE);
2420 	}
2421 
2422 	if (IS_BROXTON(i915)) {
2423 		/* WaDisablePooledEuLoadBalancingFix:bxt */
2424 		wa_masked_en(wal,
2425 			     FF_SLICE_CS_CHICKEN2,
2426 			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
2427 	}
2428 
2429 	if (GRAPHICS_VER(i915) == 9) {
2430 		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
2431 		wa_masked_en(wal,
2432 			     GEN9_CSFE_CHICKEN1_RCS,
2433 			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
2434 
2435 		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
2436 		wa_mcr_write_or(wal,
2437 				BDW_SCRATCH1,
2438 				GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
2439 
2440 		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
2441 		if (IS_GEN9_LP(i915))
2442 			wa_mcr_write_clr_set(wal,
2443 					     GEN8_L3SQCREG1,
2444 					     L3_PRIO_CREDITS_MASK,
2445 					     L3_GENERAL_PRIO_CREDITS(62) |
2446 					     L3_HIGH_PRIO_CREDITS(2));
2447 
2448 		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
2449 		wa_mcr_write_or(wal,
2450 				GEN8_L3SQCREG4,
2451 				GEN8_LQSC_FLUSH_COHERENT_LINES);
2452 
2453 		/* Disable atomics in L3 to prevent unrecoverable hangs */
2454 		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
2455 				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2456 		wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
2457 				     GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2458 		wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
2459 				     EVICTION_PERF_FIX_ENABLE, 0);
2460 	}
2461 
2462 	if (IS_HASWELL(i915)) {
2463 		/* WaSampleCChickenBitEnable:hsw */
2464 		wa_masked_en(wal,
2465 			     HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
2466 
2467 		wa_masked_dis(wal,
2468 			      CACHE_MODE_0_GEN7,
2469 			      /* enable HiZ Raw Stall Optimization */
2470 			      HIZ_RAW_STALL_OPT_DISABLE);
2471 	}
2472 
2473 	if (IS_VALLEYVIEW(i915)) {
2474 		/* WaDisableEarlyCull:vlv */
2475 		wa_masked_en(wal,
2476 			     _3D_CHICKEN3,
2477 			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2478 
2479 		/*
2480 		 * WaVSThreadDispatchOverride:ivb,vlv
2481 		 *
2482 		 * This actually overrides the dispatch
2483 		 * mode for all thread types.
2484 		 */
2485 		wa_write_clr_set(wal,
2486 				 GEN7_FF_THREAD_MODE,
2487 				 GEN7_FF_SCHED_MASK,
2488 				 GEN7_FF_TS_SCHED_HW |
2489 				 GEN7_FF_VS_SCHED_HW |
2490 				 GEN7_FF_DS_SCHED_HW);
2491 
2492 		/* WaPsdDispatchEnable:vlv */
2493 		/* WaDisablePSDDualDispatchEnable:vlv */
2494 		wa_masked_en(wal,
2495 			     GEN7_HALF_SLICE_CHICKEN1,
2496 			     GEN7_MAX_PS_THREAD_DEP |
2497 			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2498 	}
2499 
2500 	if (IS_IVYBRIDGE(i915)) {
2501 		/* WaDisableEarlyCull:ivb */
2502 		wa_masked_en(wal,
2503 			     _3D_CHICKEN3,
2504 			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2505 
2506 		if (0) { /* causes HiZ corruption on ivb:gt1 */
2507 			/* enable HiZ Raw Stall Optimization */
2508 			wa_masked_dis(wal,
2509 				      CACHE_MODE_0_GEN7,
2510 				      HIZ_RAW_STALL_OPT_DISABLE);
2511 		}
2512 
2513 		/*
2514 		 * WaVSThreadDispatchOverride:ivb,vlv
2515 		 *
2516 		 * This actually overrides the dispatch
2517 		 * mode for all thread types.
2518 		 */
2519 		wa_write_clr_set(wal,
2520 				 GEN7_FF_THREAD_MODE,
2521 				 GEN7_FF_SCHED_MASK,
2522 				 GEN7_FF_TS_SCHED_HW |
2523 				 GEN7_FF_VS_SCHED_HW |
2524 				 GEN7_FF_DS_SCHED_HW);
2525 
2526 		/* WaDisablePSDDualDispatchEnable:ivb */
2527 		if (IS_IVB_GT1(i915))
2528 			wa_masked_en(wal,
2529 				     GEN7_HALF_SLICE_CHICKEN1,
2530 				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2531 	}
2532 
2533 	if (GRAPHICS_VER(i915) == 7) {
2534 		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
2535 		wa_masked_en(wal,
2536 			     RING_MODE_GEN7(RENDER_RING_BASE),
2537 			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
2538 
2539 		/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
2540 		wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
2541 
2542 		/*
2543 		 * BSpec says this must be set, even though
2544 		 * WaDisable4x2SubspanOptimization:ivb,hsw
2545 		 * is not listed for VLV.
2546 		 */
2547 		wa_masked_en(wal,
2548 			     CACHE_MODE_1,
2549 			     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
2550 
2551 		/*
2552 		 * BSpec recommends 8x4 when MSAA is used,
2553 		 * however in practice 16x4 seems fastest.
2554 		 *
2555 		 * Note that PS/WM thread counts depend on the WIZ hashing
2556 		 * disable bit, which we don't touch here, but it's good
2557 		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2558 		 */
2559 		wa_masked_field_set(wal,
2560 				    GEN7_GT_MODE,
2561 				    GEN6_WIZ_HASHING_MASK,
2562 				    GEN6_WIZ_HASHING_16x4);
2563 	}
2564 
2565 	if (IS_GRAPHICS_VER(i915, 6, 7))
2566 		/*
2567 		 * We need to disable the AsyncFlip performance optimisations in
2568 		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
2569 		 * already be programmed to '1' on all products.
2570 		 *
2571 		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
2572 		 */
2573 		wa_masked_en(wal,
2574 			     RING_MI_MODE(RENDER_RING_BASE),
2575 			     ASYNC_FLIP_PERF_DISABLE);
2576 
2577 	if (GRAPHICS_VER(i915) == 6) {
2578 		/*
2579 		 * Required for the hardware to program scanline values for
2580 		 * waiting
2581 		 * WaEnableFlushTlbInvalidationMode:snb
2582 		 */
2583 		wa_masked_en(wal,
2584 			     GFX_MODE,
2585 			     GFX_TLB_INVALIDATE_EXPLICIT);
2586 
2587 		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
2588 		wa_masked_en(wal,
2589 			     _3D_CHICKEN,
2590 			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
2591 
2592 		wa_masked_en(wal,
2593 			     _3D_CHICKEN3,
2594 			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
2595 			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
2596 			     /*
2597 			      * Bspec says:
2598 			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
2599 			      * to normal and 3DSTATE_SF number of SF output attributes
2600 			      * is more than 16."
2601 			      */
2602 			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
2603 
2604 		/*
2605 		 * BSpec recommends 8x4 when MSAA is used,
2606 		 * however in practice 16x4 seems fastest.
2607 		 *
2608 		 * Note that PS/WM thread counts depend on the WIZ hashing
2609 		 * disable bit, which we don't touch here, but it's good
2610 		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2611 		 */
2612 		wa_masked_field_set(wal,
2613 				    GEN6_GT_MODE,
2614 				    GEN6_WIZ_HASHING_MASK,
2615 				    GEN6_WIZ_HASHING_16x4);
2616 
2617 		/* WaDisable_RenderCache_OperationalFlush:snb */
2618 		wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
2619 
2620 		/*
2621 		 * From the Sandybridge PRM, volume 1 part 3, page 24:
2622 		 * "If this bit is set, STCunit will have LRA as replacement
2623 		 *  policy. [...] This bit must be reset. LRA replacement
2624 		 *  policy is not supported."
2625 		 */
2626 		wa_masked_dis(wal,
2627 			      CACHE_MODE_0,
2628 			      CM0_STC_EVICT_DISABLE_LRA_SNB);
2629 	}
2630 
2631 	if (IS_GRAPHICS_VER(i915, 4, 6))
2632 		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
2633 		wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
2634 		       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
2635 		       /* XXX bit doesn't stick on Broadwater */
2636 		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
2637 
2638 	if (GRAPHICS_VER(i915) == 4)
2639 		/*
2640 		 * Disable CONSTANT_BUFFER before it is loaded from the context
2641 		 * image. As soon as it is loaded, it is executed and the stored
2642 		 * address may no longer be valid, leading to a GPU hang.
2643 		 *
2644 		 * This imposes the requirement that userspace reload their
2645 		 * CONSTANT_BUFFER on every batch, fortunately a requirement
2646 		 * they are already accustomed to from before contexts were
2647 		 * enabled.
2648 		 */
2649 		wa_add(wal, ECOSKPD(RENDER_RING_BASE),
2650 		       0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
2651 		       0 /* XXX bit doesn't stick on Broadwater */,
2652 		       true);
2653 }
2654 
2655 static void
2656 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2657 {
2658 	struct drm_i915_private *i915 = engine->i915;
2659 
2660 	/* WaKBLVECSSemaphoreWaitPoll:kbl */
2661 	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
2662 		wa_write(wal,
2663 			 RING_SEMA_WAIT_POLL(engine->mmio_base),
2664 			 1);
2665 	}
2666 	/* Wa_16018031267, Wa_16018063123 */
2667 	if (NEEDS_FASTCOLOR_BLT_WABB(engine))
2668 		wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
2669 				    XEHP_BLITTER_SCHEDULING_MODE_MASK,
2670 				    XEHP_BLITTER_ROUND_ROBIN_MODE);
2671 }
2672 
2673 static void
2674 ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2675 {
2676 	/* boilerplate for any CCS engine workaround */
2677 }
2678 
2679 /*
2680  * The bspec performance guide has recommended MMIO tuning settings.  These
2681  * aren't truly "workarounds" but we want to program them with the same
2682  * workaround infrastructure to ensure that they're automatically added to
2683  * the GuC save/restore lists, re-applied at the right times, and checked for
2684  * any conflicting programming requested by real workarounds.
2685  *
2686  * Programming settings should be added here only if their registers are not
2687  * part of an engine's register state context.  If a register is part of a
2688  * context, then any tuning settings should be programmed in an appropriate
2689  * function invoked by __intel_engine_init_ctx_wa().
2690  */
2691 static void
2692 add_render_compute_tuning_settings(struct intel_gt *gt,
2693 				   struct i915_wa_list *wal)
2694 {
2695 	struct drm_i915_private *i915 = gt->i915;
2696 
2697 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
2698 		wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
2699 
2700 	/*
2701 	 * This tuning setting proves beneficial only on ATS-M designs; the
2702 	 * default "age based" setting is optimal on regular DG2 and other
2703 	 * platforms.
2704 	 */
2705 	if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
2706 		wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
2707 					THREAD_EX_ARB_MODE_RR_AFTER_DEP);
2708 
2709 	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
2710 		wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
2711 }
2712 
2713 static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2714 {
2715 	struct intel_gt *gt = engine->gt;
2716 	u32 mode;
2717 
2718 	if (!IS_DG2(gt->i915))
2719 		return;
2720 
2721 	/*
2722 	 * Wa_14019159160: This workaround, along with others, leads to
2723 	 * significant challenges in utilizing load balancing among the
2724 	 * CCS slices. Consequently, an architectural decision has been
2725 	 * made to completely disable automatic CCS load balancing.
2726 	 */
2727 	wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
2728 
2729 	/*
2730 	 * After having disabled automatic load balancing we need to
2731 	 * assign all slices to a single CCS. We will call it CCS mode 1.
2732 	 */
2733 	mode = intel_gt_apply_ccs_mode(gt);
2734 	wa_masked_en(wal, XEHP_CCS_MODE, mode);
2735 }
2736 
2737 /*
2738  * The workarounds in this function apply to shared registers in
2739  * the general render reset domain that aren't tied to a
2740  * specific engine.  Since all render+compute engines get reset
2741  * together, and the contents of these registers are lost during
2742  * the shared render domain reset, we'll define such workarounds
2743  * here and then add them to just a single RCS or CCS engine's
2744  * workaround list (whichever has I915_ENGINE_FIRST_RENDER_COMPUTE set).
2745  */
2746 static void
2747 general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2748 {
2749 	struct drm_i915_private *i915 = engine->i915;
2750 	struct intel_gt *gt = engine->gt;
2751 
2752 	add_render_compute_tuning_settings(gt, wal);
2753 
2754 	if (GRAPHICS_VER(i915) >= 11) {
2755 		/* This is not a Wa (although referred to as
2756 		 * WaSetInidrectStateOverride in places); it allows
2757 		 * applications that reference sampler states through
2758 		 * the BindlessSamplerStateBaseAddress to have their
2759 		 * border color relative to DynamicStateBaseAddress
2760 		 * rather than BindlessSamplerStateBaseAddress.
2761 		 *
2762 		 * Otherwise SAMPLER_STATE border colors have to be
2763 		 * copied in multiple heaps (DynamicStateBaseAddress &
2764 		 * BindlessSamplerStateBaseAddress)
2765 		 *
2766 		 * BSpec: 46052
2767 		 */
2768 		wa_mcr_masked_en(wal,
2769 				 GEN10_SAMPLER_MODE,
2770 				 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
2771 	}
2772 
2773 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
2774 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
2775 	    IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) {
2776 		/* Wa_14017856879 */
2777 		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
2778 
2779 		/* Wa_14020495402 */
2780 		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING);
2781 	}
2782 
2783 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2784 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2785 		/*
2786 		 * Wa_14017066071
2787 		 * Wa_14017654203
2788 		 */
2789 		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2790 				 MTL_DISABLE_SAMPLER_SC_OOO);
2791 
2792 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2793 		/* Wa_22015279794 */
2794 		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2795 				 DISABLE_PREFETCH_INTO_IC);
2796 
2797 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2798 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2799 	    IS_DG2(i915)) {
2800 		/* Wa_22013037850 */
2801 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
2802 				DISABLE_128B_EVICTION_COMMAND_UDW);
2803 
2804 		/* Wa_18017747507 */
2805 		wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
2806 	}
2807 
2808 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2809 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2810 	    IS_DG2(i915)) {
2811 		/* Wa_22014226127 */
2812 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
2813 	}
2814 
2815 	if (IS_DG2(i915)) {
2816 		/* Wa_14015227452:dg2,pvc */
2817 		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
2818 
2819 		/*
2820 		 * Wa_16011620976:dg2_g11
2821 		 * Wa_22015475538:dg2
2822 		 */
2823 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
2824 
2825 		/* Wa_18028616096 */
2826 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
2827 	}
2828 
2829 	if (IS_DG2_G11(i915)) {
2830 		/*
2831 		 * Wa_22012826095:dg2
2832 		 * Wa_22013059131:dg2
2833 		 */
2834 		wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
2835 				     MAXREQS_PER_BANK,
2836 				     REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
2837 
2838 		/* Wa_22013059131:dg2 */
2839 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
2840 				FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
2841 
2842 		/*
2843 		 * Wa_22012654132
2844 		 *
2845 		 * Note that register 0xE420 is write-only and cannot be read
2846 		 * back for verification on DG2 (due to Wa_14012342262), so
2847 		 * we need to explicitly skip the readback.
2848 		 */
2849 		wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
2850 			   _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
2851 			   0 /* write-only, so skip validation */,
2852 			   true);
2853 	}
2854 }
2855 
2856 static void
2857 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2858 {
2859 	if (GRAPHICS_VER(engine->i915) < 4)
2860 		return;
2861 
2862 	engine_fake_wa_init(engine, wal);
2863 
2864 	/*
2865 	 * These are common workarounds that just need to be applied
2866 	 * to a single RCS/CCS engine's workaround list since the
2867 	 * registers are reset as part of the general render domain reset.
2868 	 */
2869 	if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
2870 		general_render_compute_wa_init(engine, wal);
2871 		ccs_engine_wa_mode(engine, wal);
2872 	}
2873 
2874 	if (engine->class == COMPUTE_CLASS)
2875 		ccs_engine_wa_init(engine, wal);
2876 	else if (engine->class == RENDER_CLASS)
2877 		rcs_engine_wa_init(engine, wal);
2878 	else
2879 		xcs_engine_wa_init(engine, wal);
2880 }
2881 
2882 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
2883 {
2884 	struct i915_wa_list *wal = &engine->wa_list;
2885 
2886 	wa_init_start(wal, engine->gt, "engine", engine->name);
2887 	engine_init_workarounds(engine, wal);
2888 	wa_init_finish(wal);
2889 }
2890 
2891 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
2892 {
2893 	wa_list_apply(&engine->wa_list);
2894 }
2895 
2896 static const struct i915_range mcr_ranges_gen8[] = {
2897 	{ .start = 0x5500, .end = 0x55ff },
2898 	{ .start = 0x7000, .end = 0x7fff },
2899 	{ .start = 0x9400, .end = 0x97ff },
2900 	{ .start = 0xb000, .end = 0xb3ff },
2901 	{ .start = 0xe000, .end = 0xe7ff },
2902 	{},
2903 };
2904 
2905 static const struct i915_range mcr_ranges_gen12[] = {
2906 	{ .start =  0x8150, .end =  0x815f },
2907 	{ .start =  0x9520, .end =  0x955f },
2908 	{ .start =  0xb100, .end =  0xb3ff },
2909 	{ .start =  0xde80, .end =  0xe8ff },
2910 	{ .start = 0x24a00, .end = 0x24a7f },
2911 	{},
2912 };
2913 
2914 static const struct i915_range mcr_ranges_xehp[] = {
2915 	{ .start =  0x4000, .end =  0x4aff },
2916 	{ .start =  0x5200, .end =  0x52ff },
2917 	{ .start =  0x5400, .end =  0x7fff },
2918 	{ .start =  0x8140, .end =  0x815f },
2919 	{ .start =  0x8c80, .end =  0x8dff },
2920 	{ .start =  0x94d0, .end =  0x955f },
2921 	{ .start =  0x9680, .end =  0x96ff },
2922 	{ .start =  0xb000, .end =  0xb3ff },
2923 	{ .start =  0xc800, .end =  0xcfff },
2924 	{ .start =  0xd800, .end =  0xd8ff },
2925 	{ .start =  0xdc00, .end =  0xffff },
2926 	{ .start = 0x17000, .end = 0x17fff },
2927 	{ .start = 0x24a00, .end = 0x24a7f },
2928 	{},
2929 };
2930 
2931 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
2932 {
2933 	const struct i915_range *mcr_ranges;
2934 	int i;
2935 
2936 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
2937 		mcr_ranges = mcr_ranges_xehp;
2938 	else if (GRAPHICS_VER(i915) >= 12)
2939 		mcr_ranges = mcr_ranges_gen12;
2940 	else if (GRAPHICS_VER(i915) >= 8)
2941 		mcr_ranges = mcr_ranges_gen8;
2942 	else
2943 		return false;
2944 
2945 	/*
2946 	 * Registers in these ranges are affected by the MCR selector
2947 	 * which only controls CPU-initiated MMIO. Routing does not
2948 	 * work for CS access so we cannot verify them on this path.
2949 	 */
2950 	for (i = 0; mcr_ranges[i].start; i++)
2951 		if (offset >= mcr_ranges[i].start &&
2952 		    offset <= mcr_ranges[i].end)
2953 			return true;
2954 
2955 	return false;
2956 }
2957 
2958 static int
2959 wa_list_srm(struct i915_request *rq,
2960 	    const struct i915_wa_list *wal,
2961 	    struct i915_vma *vma)
2962 {
2963 	struct drm_i915_private *i915 = rq->i915;
2964 	unsigned int i, count = 0;
2965 	const struct i915_wa *wa;
2966 	u32 srm, *cs;
2967 
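	/*
	 * On gen8+ MI_STORE_REGISTER_MEM takes a 64-bit address and is one
	 * dword longer, hence the bump of the command's length field.
	 */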
2968 	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
2969 	if (GRAPHICS_VER(i915) >= 8)
2970 		srm++;
2971 
2972 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
2973 		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
2974 			count++;
2975 	}
2976 
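	/*
	 * Each SRM takes 4 dwords: command, register offset, GGTT address,
	 * and the upper address dword (MI_NOOP padding before gen8).
	 */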
2977 	cs = intel_ring_begin(rq, 4 * count);
2978 	if (IS_ERR(cs))
2979 		return PTR_ERR(cs);
2980 
2981 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
2982 		u32 offset = i915_mmio_reg_offset(wa->reg);
2983 
2984 		if (mcr_range(i915, offset))
2985 			continue;
2986 
2987 		*cs++ = srm;
2988 		*cs++ = offset;
2989 		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
2990 		*cs++ = 0;
2991 	}
2992 	intel_ring_advance(rq, cs);
2993 
2994 	return 0;
2995 }
2996 
2997 static int engine_wa_list_verify(struct intel_context *ce,
2998 				 const struct i915_wa_list * const wal,
2999 				 const char *from)
3000 {
3001 	const struct i915_wa *wa;
3002 	struct i915_request *rq;
3003 	struct i915_vma *vma;
3004 	struct i915_gem_ww_ctx ww;
3005 	unsigned int i;
3006 	u32 *results;
3007 	int err;
3008 
3009 	if (!wal->count)
3010 		return 0;
3011 
3012 	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
3013 					   wal->count * sizeof(u32));
3014 	if (IS_ERR(vma))
3015 		return PTR_ERR(vma);
3016 
3017 	intel_engine_pm_get(ce->engine);
3018 	i915_gem_ww_ctx_init(&ww, false);
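	/*
	 * Standard ww locking dance: if any acquisition below returns
	 * -EDEADLK we back off and restart from the retry label.
	 */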
3019 retry:
3020 	err = i915_gem_object_lock(vma->obj, &ww);
3021 	if (err == 0)
3022 		err = intel_context_pin_ww(ce, &ww);
3023 	if (err)
3024 		goto err_pm;
3025 
3026 	err = i915_vma_pin_ww(vma, &ww, 0, 0,
3027 			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
3028 	if (err)
3029 		goto err_unpin;
3030 
3031 	rq = i915_request_create(ce);
3032 	if (IS_ERR(rq)) {
3033 		err = PTR_ERR(rq);
3034 		goto err_vma;
3035 	}
3036 
3037 	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
3038 	if (err == 0)
3039 		err = wa_list_srm(rq, wal, vma);
3040 
3041 	i915_request_get(rq);
3042 	if (err)
3043 		i915_request_set_error_once(rq, err);
3044 	i915_request_add(rq);
3045 
3046 	if (err)
3047 		goto err_rq;
3048 
3049 	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
3050 		err = -ETIME;
3051 		goto err_rq;
3052 	}
3053 
3054 	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
3055 	if (IS_ERR(results)) {
3056 		err = PTR_ERR(results);
3057 		goto err_rq;
3058 	}
3059 
3060 	err = 0;
3061 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3062 		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
3063 			continue;
3064 
3065 		if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
3066 			err = -ENXIO;
3067 	}
3068 
3069 	i915_gem_object_unpin_map(vma->obj);
3070 
3071 err_rq:
3072 	i915_request_put(rq);
3073 err_vma:
3074 	i915_vma_unpin(vma);
3075 err_unpin:
3076 	intel_context_unpin(ce);
3077 err_pm:
3078 	if (err == -EDEADLK) {
3079 		err = i915_gem_ww_ctx_backoff(&ww);
3080 		if (!err)
3081 			goto retry;
3082 	}
3083 	i915_gem_ww_ctx_fini(&ww);
3084 	intel_engine_pm_put(ce->engine);
3085 	i915_vma_put(vma);
3086 	return err;
3087 }
3088 
3089 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
3090 				    const char *from)
3091 {
3092 	return engine_wa_list_verify(engine->kernel_context,
3093 				     &engine->wa_list,
3094 				     from);
3095 }
3096 
3097 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3098 #include "selftest_workarounds.c"
3099 #endif
3100