xref: /linux/drivers/gpu/drm/i915/gt/intel_workarounds.c (revision 53597deca0e38c30e6cd4ba2114fa42d2bcd85bb)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2014-2018 Intel Corporation
4  */
5 
6 #include <drm/intel/intel_gmd_misc_regs.h>
7 
8 #include "i915_drv.h"
9 #include "i915_reg.h"
10 #include "i915_mmio_range.h"
11 #include "intel_context.h"
12 #include "intel_engine_pm.h"
13 #include "intel_engine_regs.h"
14 #include "intel_gpu_commands.h"
15 #include "intel_gt.h"
16 #include "intel_gt_ccs_mode.h"
17 #include "intel_gt_mcr.h"
18 #include "intel_gt_print.h"
19 #include "intel_gt_regs.h"
20 #include "intel_ring.h"
21 #include "intel_workarounds.h"
22 
23 #include "display/intel_fbc_regs.h"
24 
25 /**
26  * DOC: Hardware workarounds
27  *
28  * Hardware workarounds are register programming documented to be executed in
29  * the driver that falls outside of the normal programming sequences for a
30  * platform. There are some basic categories of workarounds, depending on
31  * how/when they are applied:
32  *
33  * - Context workarounds: workarounds that touch registers that are
34  *   saved/restored to/from the HW context image. The list is emitted (via Load
35  *   Register Immediate commands) once when initializing the device and saved in
36  *   the default context. That default context is then used on every context
37  *   creation to have a "primed golden context", i.e. a context image that
38  *   already contains the changes needed to all the registers.
39  *
40  *   Context workarounds should be implemented in the \*_ctx_workarounds_init()
41  *   variants respective to the targeted platforms; a sketch follows this list.
42  *
43  * - Engine workarounds: the list of these WAs is applied whenever the specific
44  *   engine is reset. It's also possible that a set of engine classes share a
45  *   common power domain and they are reset together. This happens on some
46  *   platforms with render and compute engines. In this case (at least) one of
47  *   them needs to keep the workaround programming: the approach taken in the
48  *   driver is to tie those workarounds to the first compute/render engine that
49  *   is registered.  When executing with GuC submission, engine resets are
50  *   outside of kernel driver control, hence the list of registers involved is
51  *   written once, on engine initialization, and then passed to GuC, which
52  *   saves/restores their values before/after the reset takes place. See
53  *   ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
54  *
55  *   Workarounds for registers specific to RCS and CCS should be implemented in
56  *   rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
57  *   registers belonging to BCS, VCS or VECS should be implemented in
58  *   xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
59  *   engine's MMIO range but that are part of the common RCS/CCS reset domain
60  *   should be implemented in general_render_compute_wa_init(). The settings
61  *   for CCS load balancing should be added in ccs_engine_wa_mode().
62  *
63  * - GT workarounds: the list of these WAs is applied whenever these registers
64  *   revert to their default values: on GPU reset, suspend/resume [1]_, etc.
65  *
66  *   GT workarounds should be implemented in the \*_gt_workarounds_init()
67  *   variants respective to the targeted platforms.
68  *
69  * - Register whitelist: some workarounds need to be implemented in userspace,
70  *   but need to touch privileged registers. The whitelist in the kernel
71  *   instructs the hardware to allow the access to happen. From the kernel side,
72  *   this is just a special case of an MMIO workaround (as we write the list of
73  *   these to-be-whitelisted registers to some special HW registers).
74  *
75  *   Register whitelisting should be done in the \*_whitelist_build() variants
76  *   respective to the targeted platforms.
77  *
78  * - Workaround batchbuffers: buffers that get executed automatically by the
79  *   hardware on every HW context restore. These buffers are created and
80  *   programmed in the default context so the hardware always goes through those
81  *   programming sequences when switching contexts. The support for workaround
82  *   batchbuffers is enabled via these hardware mechanisms:
83  *
84  *   #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
85  *      context, pointing the hardware to jump to that location when that offset
86  *      is reached in the context restore. The workaround batchbuffer in the
87  *      driver currently uses this mechanism for all platforms.
88  *
89  *   #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
90  *      pointing the hardware to a buffer to continue executing after the
91  *      engine registers are restored in a context restore sequence. This is
92  *      currently not used in the driver.
93  *
94  * - Other:  There are WAs that, due to their nature, cannot be applied from a
95  *   central place. Those are peppered around the rest of the code, as needed.
96  *   Workarounds related to the display IP are the main example.
97  *
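 * As a minimal sketch of how a platform hooks into this framework
 * (hypothetical platform "xyz" and chicken bit; wa_masked_en() and
 * CACHE_MODE_1 are the real helper and register used later in this file),
 * a context workaround variant is just a list builder::
 *
 *     static void xyz_ctx_workarounds_init(struct intel_engine_cs *engine,
 *                                          struct i915_wa_list *wal)
 *     {
 *             wa_masked_en(wal, CACHE_MODE_1, XYZ_CHICKEN_BIT);
 *     }
 *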
98  * .. [1] Technically, some registers are power-context saved & restored, so they
99  *    survive a suspend/resume. In practice, writing them again is not too
100  *    costly and simplifies things, so it's the approach taken in the driver.
101  */
102 
103 static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
104 			  const char *name, const char *engine_name)
105 {
106 	wal->gt = gt;
107 	wal->name = name;
108 	wal->engine_name = engine_name;
109 }
110 
111 #define WA_LIST_CHUNK (1 << 4)
112 
113 static void wa_init_finish(struct i915_wa_list *wal)
114 {
115 	/* Trim unused entries. */
116 	if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
117 		struct i915_wa *list = kmemdup_array(wal->list, wal->count,
118 						     sizeof(*list), GFP_KERNEL);
119 
120 		if (list) {
121 			kfree(wal->list);
122 			wal->list = list;
123 		}
124 	}
125 
126 	if (!wal->count)
127 		return;
128 
129 	gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
130 	       wal->wa_count, wal->name, wal->engine_name);
131 }
132 
133 static enum forcewake_domains
134 wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
135 {
136 	enum forcewake_domains fw = 0;
137 	struct i915_wa *wa;
138 	unsigned int i;
139 
140 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
141 		fw |= intel_uncore_forcewake_for_reg(uncore,
142 						     wa->reg,
143 						     FW_REG_READ |
144 						     FW_REG_WRITE);
145 
146 	return fw;
147 }
148 
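/*
 * The list is kept sorted by register offset and grown in WA_LIST_CHUNK
 * sized steps. The binary search below either merges a new entry into an
 * existing one for the same register (reporting when an earlier entry's
 * programming would be overwritten) or finds the insertion point; the
 * trailing swap loop then restores ordering after the new entry is appended.
 */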
149 static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
150 {
151 	unsigned int addr = i915_mmio_reg_offset(wa->reg);
152 	struct drm_i915_private *i915 = wal->gt->i915;
153 	unsigned int start = 0, end = wal->count;
154 	const unsigned int grow = WA_LIST_CHUNK;
155 	struct i915_wa *wa_;
156 
157 	GEM_BUG_ON(!is_power_of_2(grow));
158 
159 	if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
160 		struct i915_wa *list;
161 
162 		list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*list), GFP_KERNEL);
163 		if (!list) {
164 			drm_err(&i915->drm, "No space for workaround init!\n");
165 			return;
166 		}
167 
168 		if (wal->list) {
169 			memcpy(list, wal->list, sizeof(*wa) * wal->count);
170 			kfree(wal->list);
171 		}
172 
173 		wal->list = list;
174 	}
175 
176 	while (start < end) {
177 		unsigned int mid = start + (end - start) / 2;
178 
179 		if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
180 			start = mid + 1;
181 		} else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
182 			end = mid;
183 		} else {
184 			wa_ = &wal->list[mid];
185 
186 			if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
187 				drm_err(&i915->drm,
188 					"Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
189 					i915_mmio_reg_offset(wa_->reg),
190 					wa_->clr, wa_->set);
191 
192 				wa_->set &= ~wa->clr;
193 			}
194 
195 			wal->wa_count++;
196 			wa_->set |= wa->set;
197 			wa_->clr |= wa->clr;
198 			wa_->read |= wa->read;
199 			return;
200 		}
201 	}
202 
203 	wal->wa_count++;
204 	wa_ = &wal->list[wal->count++];
205 	*wa_ = *wa;
206 
207 	while (wa_-- > wal->list) {
208 		GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
209 			   i915_mmio_reg_offset(wa_[1].reg));
210 		if (i915_mmio_reg_offset(wa_[1].reg) >
211 		    i915_mmio_reg_offset(wa_[0].reg))
212 			break;
213 
214 		swap(wa_[1], wa_[0]);
215 	}
216 }
217 
218 static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
219 		   u32 clear, u32 set, u32 read_mask, bool masked_reg)
220 {
221 	struct i915_wa wa = {
222 		.reg  = reg,
223 		.clr  = clear,
224 		.set  = set,
225 		.read = read_mask,
226 		.masked_reg = masked_reg,
227 	};
228 
229 	_wa_add(wal, &wa);
230 }
231 
232 static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
233 		       u32 clear, u32 set, u32 read_mask, bool masked_reg)
234 {
235 	struct i915_wa wa = {
236 		.mcr_reg = reg,
237 		.clr  = clear,
238 		.set  = set,
239 		.read = read_mask,
240 		.masked_reg = masked_reg,
241 		.is_mcr = 1,
242 	};
243 
244 	_wa_add(wal, &wa);
245 }
246 
247 static void
248 wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
249 {
250 	wa_add(wal, reg, clear, set, clear | set, false);
251 }
252 
253 static void
254 wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
255 {
256 	wa_mcr_add(wal, reg, clear, set, clear | set, false);
257 }
258 
259 static void
260 wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
261 {
262 	wa_write_clr_set(wal, reg, ~0, set);
263 }
264 
265 static void
266 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
267 {
268 	wa_write_clr_set(wal, reg, set, set);
269 }
270 
271 static void
272 wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
273 {
274 	wa_mcr_write_clr_set(wal, reg, set, set);
275 }
276 
277 static void
278 wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
279 {
280 	wa_write_clr_set(wal, reg, clr, 0);
281 }
282 
283 static void
284 wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
285 {
286 	wa_mcr_write_clr_set(wal, reg, clr, 0);
287 }
288 
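/*
 * E.g. (illustrative values): wa_write_or(wal, reg, BIT(5)) records
 * clr = set = BIT(5), so bit 5 is ORed into the current register value
 * when the list is applied, and the same bit doubles as the read-back
 * verification mask.
 */
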
289 /*
290  * WA operations on "masked register". A masked register has the upper 16 bits
291  * documented as "masked" in b-spec. Its purpose is to allow writing to just a
292  * portion of the register without an rmw: you simply write in the upper 16 bits
293  * the mask of bits you are going to modify.
294  *
295  * The wa_masked_* family of functions already does the necessary operations to
296  * calculate the mask based on the parameters passed, so the user only has to
297  * provide the lower 16 bits of that register.
298  */
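
/*
 * For example (illustrative values): wa_masked_en(wal, reg, BIT(3)) records
 * set = 0x00080008 -- BIT(3) in the low 16 bits plus the matching mask bit
 * in the upper 16 bits -- so the hardware updates only bit 3 of the register
 * and leaves everything else untouched.
 */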
299 
300 static void
301 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
302 {
303 	wa_add(wal, reg, 0, REG_MASKED_FIELD_ENABLE(val), val, true);
304 }
305 
306 static void
307 wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
308 {
309 	wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD_ENABLE(val), val, true);
310 }
311 
312 static void
313 wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
314 {
315 	wa_add(wal, reg, 0, REG_MASKED_FIELD_DISABLE(val), val, true);
316 }
317 
318 static void
319 wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
320 {
321 	wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD_DISABLE(val), val, true);
322 }
323 
324 static void
325 wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
326 		    u32 mask, u32 val)
327 {
328 	wa_add(wal, reg, 0, REG_MASKED_FIELD(mask, val), mask, true);
329 }
330 
331 static void
332 wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
333 			u32 mask, u32 val)
334 {
335 	wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD(mask, val), mask, true);
336 }
337 
338 static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
339 				      struct i915_wa_list *wal)
340 {
341 	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
342 
343 	/* WaDisable_RenderCache_OperationalFlush:snb */
344 	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
345 }
346 
347 static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
348 				      struct i915_wa_list *wal)
349 {
350 	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
351 	/* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
352 	wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
353 
354 	/*
355 	 * WaDisable4x2SubspanOptimization:ivb,hsw
356 	 *
357 	 * BSpec says this must be set, even though it isn't listed for VLV.
358 	 */
359 	wa_masked_en(wal,
360 		     CACHE_MODE_1,
361 		     PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
362 }
363 
364 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
365 				      struct i915_wa_list *wal)
366 {
367 	wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
368 
369 	/* WaDisableAsyncFlipPerfMode:bdw,chv */
370 	wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
371 
372 	/* WaDisablePartialInstShootdown:bdw,chv */
373 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
374 			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
375 
376 	/* Use Force Non-Coherent whenever executing a 3D context. This is a
377 	 * workaround for a possible hang in the unlikely event a TLB
378 	 * invalidation occurs during a PSD flush.
379 	 */
380 	/* WaForceEnableNonCoherent:bdw,chv */
381 	/* WaHdcDisableFetchWhenMasked:bdw,chv */
382 	wa_masked_en(wal, HDC_CHICKEN0,
383 		     HDC_DONOT_FETCH_MEM_WHEN_MASKED |
384 		     HDC_FORCE_NON_COHERENT);
385 
386 	/* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
387 	 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
388 	 *  polygons in the same 8x4 pixel/sample area to be processed without
389 	 *  stalling waiting for the earlier ones to write to Hierarchical Z
390 	 *  buffer."
391 	 *
392 	 * This optimization is off by default for BDW and CHV; turn it on.
393 	 */
394 	wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
395 
396 	/* Wa4x4STCOptimizationDisable:bdw,chv */
397 	wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
398 
399 	/*
400 	 * BSpec recommends 8x4 when MSAA is used,
401 	 * however in practice 16x4 seems fastest.
402 	 *
403 	 * Note that PS/WM thread counts depend on the WIZ hashing
404 	 * disable bit, which we don't touch here, but it's good
405 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
406 	 */
407 	wa_masked_field_set(wal, GEN7_GT_MODE,
408 			    GEN6_WIZ_HASHING_MASK,
409 			    GEN6_WIZ_HASHING_16x4);
410 }
411 
412 static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
413 				     struct i915_wa_list *wal)
414 {
415 	struct drm_i915_private *i915 = engine->i915;
416 
417 	gen8_ctx_workarounds_init(engine, wal);
418 
419 	/* WaDisableThreadStallDopClockGating:bdw (pre-production) */
420 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
421 
422 	/* WaDisableDopClockGating:bdw
423 	 *
424 	 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
425 	 * to disable EUTC clock gating.
426 	 */
427 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
428 			 DOP_CLOCK_GATING_DISABLE);
429 
430 	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
431 			 GEN8_SAMPLER_POWER_BYPASS_DIS);
432 
433 	wa_masked_en(wal, HDC_CHICKEN0,
434 		     /* WaForceContextSaveRestoreNonCoherent:bdw */
435 		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
436 		     /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
437 		     (INTEL_INFO(i915)->gt == 3 ? HDC_FENCE_DEST_SLM_DISABLE : 0));
438 }
439 
440 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
441 				     struct i915_wa_list *wal)
442 {
443 	gen8_ctx_workarounds_init(engine, wal);
444 
445 	/* WaDisableThreadStallDopClockGating:chv */
446 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
447 
448 	/* Improve HiZ throughput on CHV. */
449 	wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
450 }
451 
452 static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
453 				      struct i915_wa_list *wal)
454 {
455 	struct drm_i915_private *i915 = engine->i915;
456 
457 	if (HAS_LLC(i915)) {
458 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
459 		 *
460 		 * Must match Display Engine. See
461 		 * WaCompressedResourceDisplayNewHashMode.
462 		 */
463 		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
464 			     GEN9_PBE_COMPRESSED_HASH_SELECTION);
465 		wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
466 				 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
467 	}
468 
469 	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
470 	/* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
471 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
472 			 FLOW_CONTROL_ENABLE |
473 			 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
474 
475 	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
476 	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
477 	wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
478 			 GEN9_ENABLE_YV12_BUGFIX |
479 			 GEN9_ENABLE_GPGPU_PREEMPTION);
480 
481 	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
482 	/* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
483 	wa_masked_en(wal, CACHE_MODE_1,
484 		     GEN8_4x4_STC_OPTIMIZATION_DISABLE |
485 		     GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
486 
487 	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
488 	wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
489 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
490 
491 	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
492 	wa_masked_en(wal, HDC_CHICKEN0,
493 		     HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
494 		     HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
495 
496 	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
497 	 * both tied to WaForceContextSaveRestoreNonCoherent
498 	 * in some hsds for skl. We keep the tie for all gen9. The
499 	 * documentation is a bit hazy and so we want to get common behaviour,
500 	 * even though there is no clear evidence we would need both on kbl/bxt.
501 	 * This area has been a source of system hangs so we play it safe
502 	 * and mimic the skl regardless of what bspec says.
503 	 *
504 	 * Use Force Non-Coherent whenever executing a 3D context. This
505 	 * is a workaround for a possible hang in the unlikely event
506 	 * a TLB invalidation occurs during a PSD flush.
507 	 */
508 
509 	/* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
510 	wa_masked_en(wal, HDC_CHICKEN0,
511 		     HDC_FORCE_NON_COHERENT);
512 
513 	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
514 	if (IS_SKYLAKE(i915) ||
515 	    IS_KABYLAKE(i915) ||
516 	    IS_COFFEELAKE(i915) ||
517 	    IS_COMETLAKE(i915))
518 		wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
519 				 GEN8_SAMPLER_POWER_BYPASS_DIS);
520 
521 	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
522 	wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
523 
524 	/*
525 	 * Supporting preemption with fine-granularity requires changes in the
526 	 * batch buffer programming. Since we can't break old userspace, we
527 	 * need to set our default preemption level to safe value. Userspace is
528 	 * still able to use more fine-grained preemption levels, since in
529 	 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
530 	 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
531 	 * not real HW workarounds, but merely a way to start using preemption
532 	 * while maintaining old contract with userspace.
533 	 */
534 
535 	/* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
536 	wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
537 
538 	/* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
539 	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
540 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
541 			    GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
542 
543 	/* WaClearHIZ_WM_CHICKEN3:bxt,glk */
544 	if (IS_GEN9_LP(i915))
545 		wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
546 }
547 
548 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
549 				struct i915_wa_list *wal)
550 {
551 	struct intel_gt *gt = engine->gt;
552 	u8 vals[3] = { 0, 0, 0 };
553 	unsigned int i;
554 
555 	for (i = 0; i < 3; i++) {
556 		u8 ss;
557 
558 		/*
559 		 * Only consider slices where one, and only one, subslice has 7
560 		 * EUs
561 		 */
562 		if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
563 			continue;
564 
565 		/*
566 		 * subslice_7eu[i] != 0 (because of the check above) and
567 		 * ss_max == 4 (maximum number of subslices possible per slice)
568 		 *
569 		 * ->    0 <= ss <= 3;
570 		 */
571 		ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
572 		vals[i] = 3 - ss;
573 	}
574 
575 	if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
576 		return;
577 
578 	/* Tune IZ hashing. See intel_device_info_runtime_init() */
579 	wa_masked_field_set(wal, GEN7_GT_MODE,
580 			    GEN9_IZ_HASHING_MASK(2) |
581 			    GEN9_IZ_HASHING_MASK(1) |
582 			    GEN9_IZ_HASHING_MASK(0),
583 			    GEN9_IZ_HASHING(2, vals[2]) |
584 			    GEN9_IZ_HASHING(1, vals[1]) |
585 			    GEN9_IZ_HASHING(0, vals[0]));
586 }
587 
588 static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
589 				     struct i915_wa_list *wal)
590 {
591 	gen9_ctx_workarounds_init(engine, wal);
592 	skl_tune_iz_hashing(engine, wal);
593 }
594 
595 static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
596 				     struct i915_wa_list *wal)
597 {
598 	gen9_ctx_workarounds_init(engine, wal);
599 
600 	/* WaDisableThreadStallDopClockGating:bxt */
601 	wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
602 			 STALL_DOP_GATING_DISABLE);
603 
604 	/* WaToEnableHwFixForPushConstHWBug:bxt */
605 	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
606 		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
607 }
608 
609 static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
610 				     struct i915_wa_list *wal)
611 {
612 	struct drm_i915_private *i915 = engine->i915;
613 
614 	gen9_ctx_workarounds_init(engine, wal);
615 
616 	/* WaToEnableHwFixForPushConstHWBug:kbl */
617 	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
618 		wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
619 			     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
620 
621 	/* WaDisableSbeCacheDispatchPortSharing:kbl */
622 	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
623 			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
624 }
625 
626 static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
627 				     struct i915_wa_list *wal)
628 {
629 	gen9_ctx_workarounds_init(engine, wal);
630 
631 	/* WaToEnableHwFixForPushConstHWBug:glk */
632 	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
633 		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
634 }
635 
636 static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
637 				     struct i915_wa_list *wal)
638 {
639 	gen9_ctx_workarounds_init(engine, wal);
640 
641 	/* WaToEnableHwFixForPushConstHWBug:cfl */
642 	wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
643 		     GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
644 
645 	/* WaDisableSbeCacheDispatchPortSharing:cfl */
646 	wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
647 			 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
648 }
649 
650 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
651 				     struct i915_wa_list *wal)
652 {
653 	struct drm_i915_private *i915 = engine->i915;
654 
655 	/* Wa_1406697149 (WaDisableBankHangMode:icl) */
656 	wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
657 
658 	/* WaForceEnableNonCoherent:icl
659 	 * This is not the same workaround as in early Gen9 platforms, where
660 	 * lacking this could cause system hangs, but coherency performance
661 	 * overhead is high and only a few compute workloads really need it
662 	 * (the register is whitelisted in hardware now, so UMDs can opt in
663 	 * for coherency if they have a good reason).
664 	 */
665 	wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
666 
667 	/* WaEnableFloatBlendOptimization:icl */
668 	wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
669 		   REG_MASKED_FIELD_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
670 		   0 /* write-only, so skip validation */,
671 		   true);
672 
673 	/* WaDisableGPGPUMidThreadPreemption:icl */
674 	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
675 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
676 			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
677 
678 	/* allow headerless messages for preemptible GPGPU context */
679 	wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
680 			 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
681 
682 	/* Wa_1604278689:icl,ehl */
683 	wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
684 	wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
685 			 0, /* write-only register; skip validation */
686 			 0xFFFFFFFF);
687 
688 	/* Wa_1406306137:icl,ehl */
689 	wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
690 
691 	if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) {
692 		/*
693 		 * Disable Repacking for Compression (masked R/W access)
694 		 * before rendering compressed surfaces for display.
695 		 */
696 		wa_masked_en(wal, CACHE_MODE_0_GEN7,
697 			     DISABLE_REPACKING_FOR_COMPRESSION);
698 	}
699 }
700 
701 /*
702  * These settings aren't actually workarounds, but general tuning settings that
703  * need to be programmed on the dg2 platform.
704  */
705 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
706 				   struct i915_wa_list *wal)
707 {
708 	wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
709 	wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
710 			     REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
711 	wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
712 			     FF_MODE2_TDS_TIMER_128);
713 }
714 
715 static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
716 				       struct i915_wa_list *wal)
717 {
718 	struct drm_i915_private *i915 = engine->i915;
719 
720 	/*
721 	 * Wa_1409142259:tgl,dg1,adl-p,adl-n
722 	 * Wa_1409347922:tgl,dg1,adl-p
723 	 * Wa_1409252684:tgl,dg1,adl-p
724 	 * Wa_1409217633:tgl,dg1,adl-p
725 	 * Wa_1409207793:tgl,dg1,adl-p
726 	 * Wa_1409178076:tgl,dg1,adl-p,adl-n
727 	 * Wa_1408979724:tgl,dg1,adl-p,adl-n
728 	 * Wa_14010443199:tgl,rkl,dg1,adl-p,adl-n
729 	 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p,adl-n
730 	 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p,adl-n
731 	 * Wa_22010465259:tgl,rkl,dg1,adl-s,adl-p,adl-n
732 	 */
733 	wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
734 		     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
735 
736 	/* WaDisableGPGPUMidThreadPreemption:gen12 */
737 	wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
738 			    GEN9_PREEMPT_GPGPU_LEVEL_MASK,
739 			    GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
740 
741 	/*
742 	 * Wa_16011163337 - GS_TIMER
743 	 *
744 	 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
745 	 * need to program it even on those that don't explicitly list that
746 	 * workaround.
747 	 *
748 	 * Note that the programming of GEN12_FF_MODE2 is further modified
749 	 * according to the FF_MODE2 guidance given by Wa_1608008084.
750 	 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
751 	 * value when read from the CPU.
752 	 *
753 	 * The default value for this register is zero for all fields.
754 	 * So instead of doing an RMW we should just write the desired values
755 	 * for TDS and GS timers. Note that since the readback can't be trusted,
756 	 * the clear mask is just set to ~0 to make sure other bits are not
757 	 * inadvertently set. For the same reason read verification is ignored.
758 	 */
759 	wa_add(wal,
760 	       GEN12_FF_MODE2,
761 	       ~0,
762 	       FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
763 	       0, false);
764 
765 	if (!IS_DG1(i915)) {
766 		/* Wa_1806527549 */
767 		wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
768 
769 		/* Wa_1606376872 */
770 		wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
771 	}
772 
773 	/*
774 	 * This bit must be set to enable performance optimization for fast
775 	 * clears.
776 	 */
777 	wa_mcr_write_or(wal, GEN8_WM_CHICKEN2, WAIT_ON_DEPTH_STALL_DONE_DISABLE);
778 }
779 
780 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
781 				     struct i915_wa_list *wal)
782 {
783 	gen12_ctx_workarounds_init(engine, wal);
784 
785 	/* Wa_1409044764 */
786 	wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
787 		      DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
788 
789 	/* Wa_22010493298 */
790 	wa_masked_en(wal, HIZ_CHICKEN,
791 		     DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
792 }
793 
794 static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
795 				     struct i915_wa_list *wal)
796 {
797 	dg2_ctx_gt_tuning_init(engine, wal);
798 
799 	/* Wa_16013271637:dg2 */
800 	wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
801 			 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
802 
803 	/* Wa_14014947963:dg2 */
804 	wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
805 
806 	/* Wa_18018764978:dg2 */
807 	wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
808 
809 	/* Wa_18019271663:dg2 */
810 	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
811 
812 	/* Wa_14019877138:dg2 */
813 	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
814 }
815 
816 static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
817 				     struct i915_wa_list *wal)
818 {
819 	struct intel_gt *gt = engine->gt;
820 
821 	dg2_ctx_gt_tuning_init(engine, wal);
822 
823 	/*
824 	 * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
825 	 * gen12_emit_indirect_ctx_rcs() rather than here on some early
826 	 * steppings.
827 	 */
828 	if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
829 	      IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
830 		wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
831 }
832 
833 static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
834 				       struct i915_wa_list *wal)
835 {
836 	struct intel_gt *gt = engine->gt;
837 
838 	xelpg_ctx_gt_tuning_init(engine, wal);
839 
840 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
841 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
842 		/* Wa_14014947963 */
843 		wa_masked_field_set(wal, VF_PREEMPTION,
844 				    PREEMPTION_VERTEX_COUNT, 0x4000);
845 
846 		/* Wa_16013271637 */
847 		wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
848 				 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
849 
850 		/* Wa_18019627453 */
851 		wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
852 
853 		/* Wa_18018764978 */
854 		wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
855 	}
856 
857 	/* Wa_18019271663 */
858 	wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
859 
860 	/* Wa_14019877138 */
861 	wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
862 }
863 
864 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
865 					 struct i915_wa_list *wal)
866 {
867 	/*
868 	 * This is a "fake" workaround defined by software to ensure we
869 	 * maintain reliable, backward-compatible behavior for userspace with
870 	 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
871 	 *
872 	 * The per-context setting of MI_MODE[12] determines whether the bits
873 	 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
874 	 * in the traditional manner or whether they should instead use a new
875 	 * tgl+ meaning that breaks backward compatibility, but allows nesting
876 	 * into 3rd-level batchbuffers.  When this new capability was first
877 	 * added in TGL, it remained off by default unless a context
878 	 * intentionally opted in to the new behavior.  However Xe_HPG now
879 	 * flips this on by default and requires that we explicitly opt out if
880 	 * we don't want the new behavior.
881 	 *
882 	 * From a SW perspective, we want to maintain the backward-compatible
883 	 * behavior for userspace, so we'll apply a fake workaround to set it
884 	 * back to the legacy behavior on platforms where the hardware default
885 	 * is to break compatibility.  At the moment there is no Linux
886 	 * userspace that utilizes third-level batchbuffers, so this will keep
887 	 * userspace from needing to make any changes.  Using the legacy
888 	 * meaning is the correct thing to do.  If/when we have userspace
889 	 * consumers that want to utilize third-level batch nesting, we can
890 	 * provide a context parameter to allow them to opt in.
891 	 */
892 	wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
893 }
894 
895 static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
896 				   struct i915_wa_list *wal)
897 {
898 	u8 mocs;
899 
900 	/*
901 	 * Some blitter commands do not have a field for MOCS; those
902 	 * commands use the MOCS index pointed to by BLIT_CCTL, so the
903 	 * BLIT_CCTL register needs to be programmed to an un-cached index.
904 	 */
905 	if (engine->class == COPY_ENGINE_CLASS) {
906 		mocs = engine->gt->mocs.uc_index;
907 		wa_write_clr_set(wal,
908 				 BLIT_CCTL(engine->mmio_base),
909 				 BLIT_CCTL_MASK,
910 				 BLIT_CCTL_MOCS(mocs, mocs));
911 	}
912 }
913 
914 /*
915  * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
916  * defined by the hardware team; rather, it programs general context
917  * registers. Handling that programming via the context workaround list
918  * allows us to use the wa framework for proper application and validation.
919  */
920 static void
921 gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
922 			  struct i915_wa_list *wal)
923 {
924 	if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
925 		fakewa_disable_nestedbb_mode(engine, wal);
926 
927 	gen12_ctx_gt_mocs_init(engine, wal);
928 }
929 
930 static void
931 __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
932 			   struct i915_wa_list *wal,
933 			   const char *name)
934 {
935 	struct drm_i915_private *i915 = engine->i915;
936 
937 	wa_init_start(wal, engine->gt, name, engine->name);
938 
939 	/* Applies to all engines */
940 	/*
941 	 * Fake workarounds are not actual workarounds, but rather
942 	 * programming of context registers using the workaround framework.
943 	 */
944 	if (GRAPHICS_VER(i915) >= 12)
945 		gen12_ctx_gt_fake_wa_init(engine, wal);
946 
947 	if (engine->class != RENDER_CLASS)
948 		goto done;
949 
950 	if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
951 		xelpg_ctx_workarounds_init(engine, wal);
952 	else if (IS_DG2(i915))
953 		dg2_ctx_workarounds_init(engine, wal);
954 	else if (IS_DG1(i915))
955 		dg1_ctx_workarounds_init(engine, wal);
956 	else if (GRAPHICS_VER(i915) == 12)
957 		gen12_ctx_workarounds_init(engine, wal);
958 	else if (GRAPHICS_VER(i915) == 11)
959 		icl_ctx_workarounds_init(engine, wal);
960 	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
961 		cfl_ctx_workarounds_init(engine, wal);
962 	else if (IS_GEMINILAKE(i915))
963 		glk_ctx_workarounds_init(engine, wal);
964 	else if (IS_KABYLAKE(i915))
965 		kbl_ctx_workarounds_init(engine, wal);
966 	else if (IS_BROXTON(i915))
967 		bxt_ctx_workarounds_init(engine, wal);
968 	else if (IS_SKYLAKE(i915))
969 		skl_ctx_workarounds_init(engine, wal);
970 	else if (IS_CHERRYVIEW(i915))
971 		chv_ctx_workarounds_init(engine, wal);
972 	else if (IS_BROADWELL(i915))
973 		bdw_ctx_workarounds_init(engine, wal);
974 	else if (GRAPHICS_VER(i915) == 7)
975 		gen7_ctx_workarounds_init(engine, wal);
976 	else if (GRAPHICS_VER(i915) == 6)
977 		gen6_ctx_workarounds_init(engine, wal);
978 	else if (GRAPHICS_VER(i915) < 8)
979 		;
980 	else
981 		MISSING_CASE(GRAPHICS_VER(i915));
982 
983 done:
984 	wa_init_finish(wal);
985 }
986 
987 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
988 {
989 	__intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
990 }
991 
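/*
 * Emit the engine's context workaround list into the ring as a single
 * MI_LOAD_REGISTER_IMM packet. Current register values are read back under
 * forcewake so that clr/set pairs behave as read-modify-writes.
 */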
992 int intel_engine_emit_ctx_wa(struct i915_request *rq)
993 {
994 	struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
995 	struct intel_uncore *uncore = rq->engine->uncore;
996 	enum forcewake_domains fw;
997 	unsigned long flags;
998 	struct i915_wa *wa;
999 	unsigned int i;
1000 	u32 *cs;
1001 	int ret;
1002 
1003 	if (wal->count == 0)
1004 		return 0;
1005 
1006 	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
1007 	if (ret)
1008 		return ret;
1009 
1010 	if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
1011 	     IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS)
1012 		cs = intel_ring_begin(rq, (wal->count * 2 + 6));
1013 	else
1014 		cs = intel_ring_begin(rq, (wal->count * 2 + 2));
1015 
1016 	if (IS_ERR(cs))
1017 		return PTR_ERR(cs);
1018 
1019 	fw = wal_get_fw_for_rmw(uncore, wal);
1020 
1021 	intel_gt_mcr_lock(wal->gt, &flags);
1022 	spin_lock(&uncore->lock);
1023 	intel_uncore_forcewake_get__locked(uncore, fw);
1024 
1025 	*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
1026 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1027 		u32 val;
1028 
1029 		/* Skip reading the register if it's not really needed */
1030 		if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
1031 			val = wa->set;
1032 		} else {
1033 			val = wa->is_mcr ?
1034 				intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
1035 				intel_uncore_read_fw(uncore, wa->reg);
1036 			val &= ~wa->clr;
1037 			val |= wa->set;
1038 		}
1039 
1040 		*cs++ = i915_mmio_reg_offset(wa->reg);
1041 		*cs++ = val;
1042 	}
1043 	*cs++ = MI_NOOP;
1044 
1045 	/* Wa_14019789679 */
1046 	if ((IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
1047 	     IS_DG2(rq->i915)) && rq->engine->class == RENDER_CLASS) {
1048 		*cs++ = CMD_3DSTATE_MESH_CONTROL;
1049 		*cs++ = 0;
1050 		*cs++ = 0;
1051 		*cs++ = MI_NOOP;
1052 	}
1053 
1054 	intel_uncore_forcewake_put__locked(uncore, fw);
1055 	spin_unlock(&uncore->lock);
1056 	intel_gt_mcr_unlock(wal->gt, flags);
1057 
1058 	intel_ring_advance(rq, cs);
1059 
1060 	ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
1061 	if (ret)
1062 		return ret;
1063 
1064 	return 0;
1065 }
1066 
1067 static void
1068 gen4_gt_workarounds_init(struct intel_gt *gt,
1069 			 struct i915_wa_list *wal)
1070 {
1071 	/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
1072 	wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
1073 }
1074 
1075 static void
1076 g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1077 {
1078 	gen4_gt_workarounds_init(gt, wal);
1079 
1080 	/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
1081 	wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
1082 }
1083 
1084 static void
1085 ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1086 {
1087 	g4x_gt_workarounds_init(gt, wal);
1088 
1089 	wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
1090 }
1091 
1092 static void
1093 snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1094 {
1095 }
1096 
1097 static void
1098 ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1099 {
1100 	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
1101 	wa_masked_dis(wal,
1102 		      GEN7_COMMON_SLICE_CHICKEN1,
1103 		      GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
1104 
1105 	/* WaApplyL3ControlAndL3ChickenMode:ivb */
1106 	wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
1107 	wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
1108 
1109 	/* WaForceL3Serialization:ivb */
1110 	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1111 }
1112 
1113 static void
1114 vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1115 {
1116 	/* WaForceL3Serialization:vlv */
1117 	wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1118 
1119 	/*
1120 	 * WaIncreaseL3CreditsForVLVB0:vlv
1121 	 * This is the hardware default actually.
1122 	 */
1123 	wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
1124 }
1125 
1126 static void
1127 hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1128 {
1129 	/* L3 caching of data atomics doesn't work -- disable it. */
1130 	wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
1131 
1132 	wa_add(wal,
1133 	       HSW_ROW_CHICKEN3, 0,
1134 	       REG_MASKED_FIELD_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
1135 	       0 /* XXX does this reg exist? */, true);
1136 
1137 	/* WaVSRefCountFullforceMissDisable:hsw */
1138 	wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
1139 }
1140 
1141 static void
1142 gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
1143 {
1144 	const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
1145 	unsigned int slice, subslice;
1146 	u32 mcr, mcr_mask;
1147 
1148 	GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
1149 
1150 	/*
1151 	 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
1152 	 * Before any MMIO read into slice/subslice specific registers, MCR
1153 	 * packet control register needs to be programmed to point to any
1154 	 * enabled s/ss pair. Otherwise, incorrect values will be returned.
1155 	 * This means each subsequent MMIO read will be forwarded to a
1156 	 * specific s/ss combination, but this is OK since these registers
1157 	 * are consistent across s/ss in almost all cases. On the rare
1158 	 * occasions, such as INSTDONE, where this value depends on the
1159 	 * s/ss combo, the read should be done with read_subslice_reg.
1160 	 */
1161 	slice = ffs(sseu->slice_mask) - 1;
1162 	GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
1163 	subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
1164 	GEM_BUG_ON(!subslice);
1165 	subslice--;
1166 
1167 	/*
1168 	 * We use GEN8_MCR..() macros to calculate the |mcr| value for
1169 	 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
1170 	 */
1171 	mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
1172 	mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
1173 
1174 	drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
1175 
1176 	wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
1177 }
1178 
1179 static void
1180 gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1181 {
1182 	struct drm_i915_private *i915 = gt->i915;
1183 
1184 	/* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
1185 	gen9_wa_init_mcr(i915, wal);
1186 
1187 	/* WaDisableKillLogic:bxt,skl,kbl */
1188 	if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
1189 		wa_write_or(wal,
1190 			    GAM_ECOCHK,
1191 			    ECOCHK_DIS_TLB);
1192 
1193 	if (HAS_LLC(i915)) {
1194 		/* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
1195 		 *
1196 		 * Must match Display Engine. See
1197 		 * WaCompressedResourceDisplayNewHashMode.
1198 		 */
1199 		wa_write_or(wal,
1200 			    MMCD_MISC_CTRL,
1201 			    MMCD_PCLA | MMCD_HOTSPOT_EN);
1202 	}
1203 
1204 	/* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
1205 	wa_write_or(wal,
1206 		    GAM_ECOCHK,
1207 		    BDW_DISABLE_HDC_INVALIDATION);
1208 }
1209 
1210 static void
1211 skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1212 {
1213 	gen9_gt_workarounds_init(gt, wal);
1214 
1215 	/* WaDisableGafsUnitClkGating:skl */
1216 	wa_write_or(wal,
1217 		    GEN7_UCGCTL4,
1218 		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1219 
1220 	/* WaInPlaceDecompressionHang:skl */
1221 	if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
1222 		wa_write_or(wal,
1223 			    GEN9_GAMT_ECO_REG_RW_IA,
1224 			    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1225 }
1226 
1227 static void
1228 kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1229 {
1230 	gen9_gt_workarounds_init(gt, wal);
1231 
1232 	/* WaDisableDynamicCreditSharing:kbl */
1233 	if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
1234 		wa_write_or(wal,
1235 			    GAMT_CHKN_BIT_REG,
1236 			    GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1237 
1238 	/* WaDisableGafsUnitClkGating:kbl */
1239 	wa_write_or(wal,
1240 		    GEN7_UCGCTL4,
1241 		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1242 
1243 	/* WaInPlaceDecompressionHang:kbl */
1244 	wa_write_or(wal,
1245 		    GEN9_GAMT_ECO_REG_RW_IA,
1246 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1247 }
1248 
1249 static void
1250 glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1251 {
1252 	gen9_gt_workarounds_init(gt, wal);
1253 }
1254 
1255 static void
1256 cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1257 {
1258 	gen9_gt_workarounds_init(gt, wal);
1259 
1260 	/* WaDisableGafsUnitClkGating:cfl */
1261 	wa_write_or(wal,
1262 		    GEN7_UCGCTL4,
1263 		    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1264 
1265 	/* WaInPlaceDecompressionHang:cfl */
1266 	wa_write_or(wal,
1267 		    GEN9_GAMT_ECO_REG_RW_IA,
1268 		    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1269 }
1270 
1271 static void __set_mcr_steering(struct i915_wa_list *wal,
1272 			       i915_reg_t steering_reg,
1273 			       unsigned int slice, unsigned int subslice)
1274 {
1275 	u32 mcr, mcr_mask;
1276 
1277 	mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
1278 	mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
1279 
1280 	wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
1281 }
1282 
1283 static void debug_dump_steering(struct intel_gt *gt)
1284 {
1285 	struct drm_printer p = drm_dbg_printer(&gt->i915->drm, DRM_UT_DRIVER,
1286 					       "MCR Steering:");
1287 
1288 	if (drm_debug_enabled(DRM_UT_DRIVER))
1289 		intel_gt_mcr_report_steering(&p, gt, false);
1290 }
1291 
1292 static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
1293 			 unsigned int slice, unsigned int subslice)
1294 {
1295 	__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
1296 
1297 	gt->default_steering.groupid = slice;
1298 	gt->default_steering.instanceid = subslice;
1299 
1300 	debug_dump_steering(gt);
1301 }
1302 
1303 static void
1304 icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1305 {
1306 	const struct sseu_dev_info *sseu = &gt->info.sseu;
1307 	unsigned int subslice;
1308 
1309 	GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
1310 	GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
1311 
1312 	/*
1313 	 * Although a platform may have subslices, we need to always steer
1314 	 * reads to the lowest instance that isn't fused off.  When Render
1315 	 * Power Gating is enabled, grabbing forcewake will only power up a
1316 	 * single subslice (the "minconfig") if there isn't a real workload
1317 	 * that needs to be run; this means that if we steer register reads to
1318 	 * one of the higher subslices, we run the risk of reading back 0's or
1319 	 * random garbage.
1320 	 */
1321 	subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
1322 
1323 	/*
1324 	 * If the subslice we picked above also steers us to a valid L3 bank,
1325 	 * then we can just rely on the default steering and won't need to
1326 	 * worry about explicitly re-steering L3BANK reads later.
1327 	 */
1328 	if (gt->info.l3bank_mask & BIT(subslice))
1329 		gt->steering_table[L3BANK] = NULL;
1330 
1331 	__add_mcr_wa(gt, wal, 0, subslice);
1332 }
1333 
1334 static void
1335 xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1336 {
1337 	const struct sseu_dev_info *sseu = &gt->info.sseu;
1338 	unsigned long slice, subslice = 0, slice_mask = 0;
1339 	u32 lncf_mask = 0;
1340 	int i;
1341 
1342 	/*
1343 	 * On Xe_HP the steering increases in complexity. There are now several
1344 	 * more units that require steering and we're not guaranteed to be able
1345 	 * to find a common setting for all of them. These are:
1346 	 * - GSLICE (fusable)
1347 	 * - DSS (sub-unit within gslice; fusable)
1348 	 * - L3 Bank (fusable)
1349 	 * - MSLICE (fusable)
1350 	 * - LNCF (sub-unit within mslice; always present if mslice is present)
1351 	 *
1352 	 * We'll do our default/implicit steering based on GSLICE (in the
1353 	 * sliceid field) and DSS (in the subsliceid field).  If we can
1354 	 * find overlap between the valid MSLICE and/or LNCF values with
1355 	 * a suitable GSLICE, then we can just reuse the default value and
1356 	 * skip any explicit steering at runtime.
1357 	 *
1358 	 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
1359 	 * a valid sliceid value.  DSS steering is the only type of steering
1360 	 * that utilizes the 'subsliceid' bits.
1361 	 *
1362 	 * Also note that, even though the steering domain is called "GSlice"
1363 	 * and it is encoded in the register using the gslice format, the spec
1364 	 * says that the combined (geometry | compute) fuse should be used to
1365 	 * select the steering.
1366 	 */
1367 
1368 	/* Find the potential gslice candidates */
1369 	slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
1370 						       GEN_DSS_PER_GSLICE);
1371 
1372 	/*
1373 	 * Find the potential LNCF candidates.  Either LNCF within a valid
1374 	 * mslice is fine.
1375 	 */
1376 	for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
1377 		lncf_mask |= (0x3 << (i * 2));
1378 
1379 	/*
1380 	 * Are there any sliceid values that work for both GSLICE and LNCF
1381 	 * steering?
1382 	 */
1383 	if (slice_mask & lncf_mask) {
1384 		slice_mask &= lncf_mask;
1385 		gt->steering_table[LNCF] = NULL;
1386 	}
1387 
1388 	/* How about sliceid values that also work for MSLICE steering? */
1389 	if (slice_mask & gt->info.mslice_mask) {
1390 		slice_mask &= gt->info.mslice_mask;
1391 		gt->steering_table[MSLICE] = NULL;
1392 	}
1393 
1394 	slice = __ffs(slice_mask);
1395 	subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
1396 		GEN_DSS_PER_GSLICE;
1397 
1398 	__add_mcr_wa(gt, wal, slice, subslice);
1399 
1400 	/*
1401 	 * SQIDI ranges are special because they use different steering
1402 	 * registers than everything else we work with.  On XeHP SDV and
1403 	 * DG2-G10, any value in the steering registers will work fine since
1404 	 * all instances are present, but DG2-G11 only has SQIDI instances at
1405 	 * ID's 2 and 3, so we need to steer to one of those.  For simplicity
1406 	 * we'll just steer to a hardcoded "2" since that value will work
1407 	 * everywhere.
1408 	 */
1409 	__set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
1410 	__set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
1411 
1412 	/*
1413 	 * On DG2, GAM registers have a dedicated steering control register
1414 	 * and must always be programmed to a hardcoded groupid of "1."
1415 	 */
1416 	if (IS_DG2(gt->i915))
1417 		__set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
1418 }
1419 
1420 static void
1421 icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1422 {
1423 	struct drm_i915_private *i915 = gt->i915;
1424 
1425 	icl_wa_init_mcr(gt, wal);
1426 
1427 	/* WaModifyGamTlbPartitioning:icl */
1428 	wa_write_clr_set(wal,
1429 			 GEN11_GACB_PERF_CTRL,
1430 			 GEN11_HASH_CTRL_MASK,
1431 			 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
1432 
1433 	/* Wa_1405766107:icl
1434 	 * Formerly known as WaCL2SFHalfMaxAlloc
1435 	 */
1436 	wa_write_or(wal,
1437 		    GEN11_LSN_UNSLCVC,
1438 		    GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
1439 		    GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
1440 
1441 	/* Wa_220166154:icl
1442 	 * Formerly known as WaDisCtxReload
1443 	 */
1444 	wa_write_or(wal,
1445 		    GEN8_GAMW_ECO_DEV_RW_IA,
1446 		    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
1447 
1448 	/* Wa_1406463099:icl
1449 	 * Formerly known as WaGamTlbPendError
1450 	 */
1451 	wa_write_or(wal,
1452 		    GAMT_CHKN_BIT_REG,
1453 		    GAMT_CHKN_DISABLE_L3_COH_PIPE);
1454 
1455 	/*
1456 	 * Wa_1408615072:icl,ehl  (vsunit)
1457 	 * Wa_1407596294:icl,ehl  (hsunit)
1458 	 */
1459 	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1460 		    VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
1461 
1462 	/* Wa_1407352427:icl,ehl */
1463 	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
1464 		    PSDUNIT_CLKGATE_DIS);
1465 
1466 	/* Wa_1406680159:icl,ehl */
1467 	wa_mcr_write_or(wal,
1468 			GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1469 			GWUNIT_CLKGATE_DIS);
1470 
1471 	/* Wa_1607087056:icl,ehl,jsl */
1472 	if (IS_ICELAKE(i915) ||
1473 		((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
1474 		IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
1475 		wa_write_or(wal,
1476 			    GEN11_SLICE_UNIT_LEVEL_CLKGATE,
1477 			    L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
1478 
1479 	/*
1480 	 * This is not a documented workaround, but rather an optimization
1481 	 * to reduce sampler power.
1482 	 */
1483 	wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1484 }
1485 
1486 /*
1487  * Though there are per-engine instances of these registers,
1488  * they retain their value through engine resets and should
1489  * only be provided on the GT workaround list rather than
1490  * the engine-specific workaround list.
1491  */
1492 static void
1493 wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
1494 {
1495 	struct intel_engine_cs *engine;
1496 	int id;
1497 
1498 	for_each_engine(engine, gt, id) {
1499 		if (engine->class != VIDEO_DECODE_CLASS ||
1500 		    (engine->instance % 2))
1501 			continue;
1502 
1503 		wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
1504 			    IECPUNIT_CLKGATE_DIS);
1505 	}
1506 }
1507 
1508 static void
1509 gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1510 {
1511 	icl_wa_init_mcr(gt, wal);
1512 
1513 	/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
1514 	wa_14011060649(gt, wal);
1515 
1516 	/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
1517 	wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1518 
1519 	/*
1520 	 * Wa_14015795083
1521 	 *
1522 	 * Firmware on some gen12 platforms locks the MISCCPCTL register,
1523 	 * preventing i915 from modifying it for this workaround.  Skip the
1524 	 * readback verification for this workaround on debug builds; if the
1525 	 * workaround doesn't stick due to firmware behavior, it's not an error
1526 	 * that we want CI to flag.
1527 	 */
1528 	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1529 	       0, 0, false);
1530 }
1531 
1532 static void
1533 dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1534 {
1535 	gen12_gt_workarounds_init(gt, wal);
1536 
1537 	/* Wa_1409420604:dg1 */
1538 	wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
1539 			CPSSUNIT_CLKGATE_DIS);
1540 
1541 	/* Wa_1408615072:dg1 */
1542 	/* Empirical testing shows this register is unaffected by engine reset. */
1543 	wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
1544 }
1545 
1546 static void
1547 dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1548 {
1549 	xehp_init_mcr(gt, wal);
1550 
1551 	/* Wa_14011060649:dg2 */
1552 	wa_14011060649(gt, wal);
1553 
1554 	if (IS_DG2_G10(gt->i915)) {
1555 		/* Wa_22010523718:dg2 */
1556 		wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1557 			    CG3DDISCFEG_CLKGATE_DIS);
1558 
1559 		/* Wa_14011006942:dg2 */
1560 		wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1561 				DSS_ROUTER_CLKGATE_DIS);
1562 	}
1563 
1564 	/* Wa_14014830051:dg2 */
1565 	wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1566 
1567 	/*
1568 	 * Wa_14015795083
1569 	 * Skip verification for possibly locked register.
1570 	 */
1571 	wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1572 	       0, 0, false);
1573 
1574 	/* Wa_18018781329 */
1575 	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1576 	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1577 	wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1578 	wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
1579 
1580 	/* Wa_1509235366:dg2 */
1581 	wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
1582 			INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
1583 
1584 	/* Wa_14010648519:dg2 */
1585 	wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
1586 }
1587 
1588 static void
1589 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1590 {
1591 	/* Wa_14018575942 / Wa_18018781329 */
1592 	wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1593 	wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1594 
1595 	/* Wa_22016670082 */
1596 	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1597 
1598 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
1599 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
1600 		/* Wa_14014830051 */
1601 		wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1602 
1603 		/* Wa_14015795083 */
1604 		wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1605 	}
1606 
1607 	/*
1608 	 * Unlike older platforms, we no longer set up implicit steering here;
1609 	 * all MCR accesses are explicitly steered.
1610 	 */
1611 	debug_dump_steering(gt);
1612 }
1613 
1614 static void
1615 wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
1616 {
1617 	struct intel_engine_cs *engine;
1618 	int id;
1619 
1620 	for_each_engine(engine, gt, id)
1621 		if (engine->class == VIDEO_DECODE_CLASS)
1622 			wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
1623 				    MFXPIPE_CLKGATE_DIS);
1624 }
1625 
1626 static void
1627 xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1628 {
1629 	wa_16021867713(gt, wal);
1630 
1631 	/*
1632 	 * Wa_14018778641
1633 	 * Wa_18018781329
1634 	 *
1635 	 * Note that although these registers are MCR on the primary
1636 	 * GT, the media GT's versions are regular singleton registers.
1637 	 */
1638 	wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
1639 
1640 	/*
1641 	 * Wa_14018575942
1642 	 *
1643 	 * The issue is seen in media KPI tests running on the VDBOX
1644 	 * engine, especially VP9 encoding workloads.
1645 	 */
1646 	wa_write_or(wal, XELPMP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1647 
1648 	/* Wa_22016670082 */
1649 	wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1650 
1651 	debug_dump_steering(gt);
1652 }
1653 
1654 /*
1655  * The bspec performance guide has recommended MMIO tuning settings.  These
1656  * aren't truly "workarounds" but we want to program them through the
1657  * workaround infrastructure to make sure they're (re)applied at the proper
1658  * times.
1659  *
1660  * The programming in this function is for settings that persist through
1661  * engine resets and also are not part of any engine's register state context.
1662  * I.e., settings that only need to be re-applied in the event of a full GT
1663  * reset.
1664  */
1665 static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
1666 {
1667 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
1668 		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1669 		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1670 	}
1671 
1672 	if (IS_DG2(gt->i915)) {
1673 		wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1674 		wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1675 	}
1676 }
1677 
1678 static void
1679 gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
1680 {
1681 	struct drm_i915_private *i915 = gt->i915;
1682 
1683 	gt_tuning_settings(gt, wal);
1684 
1685 	if (gt->type == GT_MEDIA) {
1686 		if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
1687 			xelpmp_gt_workarounds_init(gt, wal);
1688 		else
1689 			MISSING_CASE(MEDIA_VER_FULL(i915));
1690 
1691 		return;
1692 	}
1693 
1694 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
1695 		xelpg_gt_workarounds_init(gt, wal);
1696 	else if (IS_DG2(i915))
1697 		dg2_gt_workarounds_init(gt, wal);
1698 	else if (IS_DG1(i915))
1699 		dg1_gt_workarounds_init(gt, wal);
1700 	else if (GRAPHICS_VER(i915) == 12)
1701 		gen12_gt_workarounds_init(gt, wal);
1702 	else if (GRAPHICS_VER(i915) == 11)
1703 		icl_gt_workarounds_init(gt, wal);
1704 	else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
1705 		cfl_gt_workarounds_init(gt, wal);
1706 	else if (IS_GEMINILAKE(i915))
1707 		glk_gt_workarounds_init(gt, wal);
1708 	else if (IS_KABYLAKE(i915))
1709 		kbl_gt_workarounds_init(gt, wal);
1710 	else if (IS_BROXTON(i915))
1711 		gen9_gt_workarounds_init(gt, wal);
1712 	else if (IS_SKYLAKE(i915))
1713 		skl_gt_workarounds_init(gt, wal);
1714 	else if (IS_HASWELL(i915))
1715 		hsw_gt_workarounds_init(gt, wal);
1716 	else if (IS_VALLEYVIEW(i915))
1717 		vlv_gt_workarounds_init(gt, wal);
1718 	else if (IS_IVYBRIDGE(i915))
1719 		ivb_gt_workarounds_init(gt, wal);
1720 	else if (GRAPHICS_VER(i915) == 6)
1721 		snb_gt_workarounds_init(gt, wal);
1722 	else if (GRAPHICS_VER(i915) == 5)
1723 		ilk_gt_workarounds_init(gt, wal);
1724 	else if (IS_G4X(i915))
1725 		g4x_gt_workarounds_init(gt, wal);
1726 	else if (GRAPHICS_VER(i915) == 4)
1727 		gen4_gt_workarounds_init(gt, wal);
1728 	else if (GRAPHICS_VER(i915) <= 8)
1729 		;
1730 	else
1731 		MISSING_CASE(GRAPHICS_VER(i915));
1732 }
1733 
1734 void intel_gt_init_workarounds(struct intel_gt *gt)
1735 {
1736 	struct i915_wa_list *wal = &gt->wa_list;
1737 
1738 	wa_init_start(wal, gt, "GT", "global");
1739 	gt_init_workarounds(gt, wal);
1740 	wa_init_finish(wal);
1741 }
1742 
1743 static bool
1744 wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
1745 	  const char *name, const char *from)
1746 {
1747 	if ((cur ^ wa->set) & wa->read) {
1748 		gt_err(gt,
1749 		       "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
1750 		       name, from, i915_mmio_reg_offset(wa->reg),
1751 		       cur, cur & wa->read, wa->set & wa->read);
1752 
1753 		return false;
1754 	}
1755 
1756 	return true;
1757 }
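
/*
 * Worked example for the check above (illustrative numbers only): with
 * wa->set == 0x4 and wa->read == 0x4, a readback of cur == 0x10 gives
 * (0x10 ^ 0x04) & 0x04 == 0x04, so a relevant bit differs and the
 * workaround is reported as lost.  Bits outside wa->read never cause a
 * failure, which is how write-only or firmware-owned bits are excluded
 * from verification.
 */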
1758 
1759 static void wa_list_apply(const struct i915_wa_list *wal)
1760 {
1761 	struct intel_gt *gt = wal->gt;
1762 	struct intel_uncore *uncore = gt->uncore;
1763 	enum forcewake_domains fw;
1764 	unsigned long flags;
1765 	struct i915_wa *wa;
1766 	unsigned int i;
1767 
1768 	if (!wal->count)
1769 		return;
1770 
1771 	fw = wal_get_fw_for_rmw(uncore, wal);
1772 
1773 	intel_gt_mcr_lock(gt, &flags);
1774 	spin_lock(&uncore->lock);
1775 	intel_uncore_forcewake_get__locked(uncore, fw);
1776 
1777 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1778 		u32 val, old = 0;
1779 
1780 		/* open-coded rmw due to steering */
1781 		if (wa->clr)
1782 			old = wa->is_mcr ?
1783 				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1784 				intel_uncore_read_fw(uncore, wa->reg);
1785 		val = (old & ~wa->clr) | wa->set;
1786 		if (val != old || !wa->clr) {
1787 			if (wa->is_mcr)
1788 				intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
1789 			else
1790 				intel_uncore_write_fw(uncore, wa->reg, val);
1791 		}
1792 
1793 		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1794 			u32 val = wa->is_mcr ?
1795 				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1796 				intel_uncore_read_fw(uncore, wa->reg);
1797 
1798 			wa_verify(gt, wa, val, wal->name, "application");
1799 		}
1800 	}
1801 
1802 	intel_uncore_forcewake_put__locked(uncore, fw);
1803 	spin_unlock(&uncore->lock);
1804 	intel_gt_mcr_unlock(gt, flags);
1805 }
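
/*
 * For reference, a sketch (not driver code) of the plain RMW that the
 * loop above open-codes:
 *
 *	old = intel_uncore_read_fw(uncore, wa->reg);
 *	intel_uncore_write_fw(uncore, wa->reg, (old & ~wa->clr) | wa->set);
 *
 * MCR registers cannot use a single helper for this because the read must
 * be steered to one valid register instance while the write is multicast
 * to all instances, hence the is_mcr branches above.
 */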
1806 
1807 void intel_gt_apply_workarounds(struct intel_gt *gt)
1808 {
1809 	wa_list_apply(&gt->wa_list);
1810 }
1811 
1812 static bool wa_list_verify(struct intel_gt *gt,
1813 			   const struct i915_wa_list *wal,
1814 			   const char *from)
1815 {
1816 	struct intel_uncore *uncore = gt->uncore;
1817 	struct i915_wa *wa;
1818 	enum forcewake_domains fw;
1819 	unsigned long flags;
1820 	unsigned int i;
1821 	bool ok = true;
1822 
1823 	fw = wal_get_fw_for_rmw(uncore, wal);
1824 
1825 	intel_gt_mcr_lock(gt, &flags);
1826 	spin_lock(&uncore->lock);
1827 	intel_uncore_forcewake_get__locked(uncore, fw);
1828 
1829 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1830 		ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
1831 				intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1832 				intel_uncore_read_fw(uncore, wa->reg),
1833 				wal->name, from);
1834 
1835 	intel_uncore_forcewake_put__locked(uncore, fw);
1836 	spin_unlock(&uncore->lock);
1837 	intel_gt_mcr_unlock(gt, flags);
1838 
1839 	return ok;
1840 }
1841 
1842 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
1843 {
1844 	return wa_list_verify(gt, &gt->wa_list, from);
1845 }
1846 
1847 __maybe_unused
1848 static bool is_nonpriv_flags_valid(u32 flags)
1849 {
1850 	/* Check only valid flag bits are set */
1851 	if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
1852 		return false;
1853 
1854 	/* NB: Only 3 out of 4 enum values are valid for the access field */
1855 	if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
1856 	    RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
1857 		return false;
1858 
1859 	return true;
1860 }
1861 
1862 static void
1863 whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
1864 {
1865 	struct i915_wa wa = {
1866 		.reg = reg
1867 	};
1868 
1869 	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1870 		return;
1871 
1872 	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1873 		return;
1874 
1875 	wa.reg.reg |= flags;
1876 	_wa_add(wal, &wa);
1877 }
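
/*
 * Note on the OR above: a RING_FORCE_TO_NONPRIV slot holds the register
 * offset and its access/range flags packed into one dword, so folding the
 * flags into wa.reg lets intel_engine_apply_whitelist() later program a
 * whole slot with a single write of i915_mmio_reg_offset(wa->reg).
 */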
1878 
1879 static void
1880 whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
1881 {
1882 	struct i915_wa wa = {
1883 		.mcr_reg = reg,
1884 		.is_mcr = 1,
1885 	};
1886 
1887 	if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1888 		return;
1889 
1890 	if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1891 		return;
1892 
1893 	wa.mcr_reg.reg |= flags;
1894 	_wa_add(wal, &wa);
1895 }
1896 
1897 static void
1898 whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
1899 {
1900 	whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
1901 }
1902 
1903 static void
1904 whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
1905 {
1906 	whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
1907 }
1908 
1909 static void gen9_whitelist_build(struct i915_wa_list *w)
1910 {
1911 	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
1912 	whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
1913 
1914 	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
1915 	whitelist_reg(w, GEN8_CS_CHICKEN1);
1916 
1917 	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
1918 	whitelist_reg(w, GEN8_HDC_CHICKEN1);
1919 
1920 	/* WaSendPushConstantsFromMMIO:skl,bxt */
1921 	whitelist_reg(w, COMMON_SLICE_CHICKEN2);
1922 }
1923 
1924 static void skl_whitelist_build(struct intel_engine_cs *engine)
1925 {
1926 	struct i915_wa_list *w = &engine->whitelist;
1927 
1928 	if (engine->class != RENDER_CLASS)
1929 		return;
1930 
1931 	gen9_whitelist_build(w);
1932 
1933 	/* WaDisableLSQCROPERFforOCL:skl */
1934 	whitelist_mcr_reg(w, GEN8_L3SQCREG4);
1935 }
1936 
1937 static void bxt_whitelist_build(struct intel_engine_cs *engine)
1938 {
1939 	if (engine->class != RENDER_CLASS)
1940 		return;
1941 
1942 	gen9_whitelist_build(&engine->whitelist);
1943 }
1944 
1945 static void kbl_whitelist_build(struct intel_engine_cs *engine)
1946 {
1947 	struct i915_wa_list *w = &engine->whitelist;
1948 
1949 	if (engine->class != RENDER_CLASS)
1950 		return;
1951 
1952 	gen9_whitelist_build(w);
1953 
1954 	/* WaDisableLSQCROPERFforOCL:kbl */
1955 	whitelist_mcr_reg(w, GEN8_L3SQCREG4);
1956 }
1957 
1958 static void glk_whitelist_build(struct intel_engine_cs *engine)
1959 {
1960 	struct i915_wa_list *w = &engine->whitelist;
1961 
1962 	if (engine->class != RENDER_CLASS)
1963 		return;
1964 
1965 	gen9_whitelist_build(w);
1966 
1967 	/* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
1968 	whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
1969 }
1970 
1971 static void cfl_whitelist_build(struct intel_engine_cs *engine)
1972 {
1973 	struct i915_wa_list *w = &engine->whitelist;
1974 
1975 	if (engine->class != RENDER_CLASS)
1976 		return;
1977 
1978 	gen9_whitelist_build(w);
1979 
1980 	/*
1981 	 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
1982 	 *
1983 	 * This covers 4 registers which are next to one another:
1984 	 *   - PS_INVOCATION_COUNT
1985 	 *   - PS_INVOCATION_COUNT_UDW
1986 	 *   - PS_DEPTH_COUNT
1987 	 *   - PS_DEPTH_COUNT_UDW
1988 	 */
1989 	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
1990 			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
1991 			  RING_FORCE_TO_NONPRIV_RANGE_4);
1992 }
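
/*
 * Illustrative note: RING_FORCE_TO_NONPRIV_RANGE_4 extends a single
 * whitelist slot to four consecutive dwords starting at the named
 * register (reg, reg + 4, reg + 8, reg + 0xc), which is how the one
 * PS_INVOCATION_COUNT entry above covers all four counters listed.
 */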
1993 
1994 static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
1995 {
1996 	struct i915_wa_list *w = &engine->whitelist;
1997 
1998 	if (engine->class != RENDER_CLASS)
1999 		whitelist_reg_ext(w,
2000 				  RING_CTX_TIMESTAMP(engine->mmio_base),
2001 				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
2002 }
2003 
2004 static void cml_whitelist_build(struct intel_engine_cs *engine)
2005 {
2006 	allow_read_ctx_timestamp(engine);
2007 
2008 	cfl_whitelist_build(engine);
2009 }
2010 
2011 static void icl_whitelist_build(struct intel_engine_cs *engine)
2012 {
2013 	struct i915_wa_list *w = &engine->whitelist;
2014 
2015 	allow_read_ctx_timestamp(engine);
2016 
2017 	switch (engine->class) {
2018 	case RENDER_CLASS:
2019 		/* WaAllowUMDToModifyHalfSliceChicken7:icl */
2020 		whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);
2021 
2022 		/* WaAllowUMDToModifySamplerMode:icl */
2023 		whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);
2024 
2025 		/* WaEnableStateCacheRedirectToCS:icl */
2026 		whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2027 
2028 		/*
2029 		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
2030 		 *
2031 		 * This covers 4 registers which are next to one another:
2032 		 *   - PS_INVOCATION_COUNT
2033 		 *   - PS_INVOCATION_COUNT_UDW
2034 		 *   - PS_DEPTH_COUNT
2035 		 *   - PS_DEPTH_COUNT_UDW
2036 		 */
2037 		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2038 				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
2039 				  RING_FORCE_TO_NONPRIV_RANGE_4);
2040 		break;
2041 
2042 	case VIDEO_DECODE_CLASS:
2043 		/* hucStatusRegOffset */
2044 		whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
2045 				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
2046 		/* hucUKernelHdrInfoRegOffset */
2047 		whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
2048 				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
2049 		/* hucStatus2RegOffset */
2050 		whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
2051 				  RING_FORCE_TO_NONPRIV_ACCESS_RD);
2052 		break;
2053 
2054 	default:
2055 		break;
2056 	}
2057 }
2058 
2059 static void tgl_whitelist_build(struct intel_engine_cs *engine)
2060 {
2061 	struct i915_wa_list *w = &engine->whitelist;
2062 
2063 	allow_read_ctx_timestamp(engine);
2064 
2065 	switch (engine->class) {
2066 	case RENDER_CLASS:
2067 		/*
2068 		 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
2069 		 * Wa_1408556865:tgl
2070 		 *
2071 		 * This covers 4 registers which are next to one another:
2072 		 *   - PS_INVOCATION_COUNT
2073 		 *   - PS_INVOCATION_COUNT_UDW
2074 		 *   - PS_DEPTH_COUNT
2075 		 *   - PS_DEPTH_COUNT_UDW
2076 		 */
2077 		whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2078 				  RING_FORCE_TO_NONPRIV_ACCESS_RD |
2079 				  RING_FORCE_TO_NONPRIV_RANGE_4);
2080 
2081 		/*
2082 		 * Wa_1808121037:tgl
2083 		 * Wa_14012131227:dg1
2084 		 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
2085 		 */
2086 		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2087 
2088 		/* Wa_1806527549:tgl */
2089 		whitelist_reg(w, HIZ_CHICKEN);
2090 
2091 		/* Required by recommended tuning setting (not a workaround) */
2092 		whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
2093 
2094 		break;
2095 	default:
2096 		break;
2097 	}
2098 }
2099 
2100 static void dg2_whitelist_build(struct intel_engine_cs *engine)
2101 {
2102 	struct i915_wa_list *w = &engine->whitelist;
2103 
2104 	switch (engine->class) {
2105 	case RENDER_CLASS:
2106 		/* Required by recommended tuning setting (not a workaround) */
2107 		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2108 		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2109 		break;
2110 	default:
2111 		break;
2112 	}
2113 }
2114 
2115 static void xelpg_whitelist_build(struct intel_engine_cs *engine)
2116 {
2117 	struct i915_wa_list *w = &engine->whitelist;
2118 
2119 	switch (engine->class) {
2120 	case RENDER_CLASS:
2121 		/* Required by recommended tuning setting (not a workaround) */
2122 		whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2123 		whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2124 		break;
2125 	default:
2126 		break;
2127 	}
2128 }
2129 
2130 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
2131 {
2132 	struct drm_i915_private *i915 = engine->i915;
2133 	struct i915_wa_list *w = &engine->whitelist;
2134 
2135 	wa_init_start(w, engine->gt, "whitelist", engine->name);
2136 
2137 	if (engine->gt->type == GT_MEDIA)
2138 		; /* none yet */
2139 	else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
2140 		xelpg_whitelist_build(engine);
2141 	else if (IS_DG2(i915))
2142 		dg2_whitelist_build(engine);
2143 	else if (GRAPHICS_VER(i915) == 12)
2144 		tgl_whitelist_build(engine);
2145 	else if (GRAPHICS_VER(i915) == 11)
2146 		icl_whitelist_build(engine);
2147 	else if (IS_COMETLAKE(i915))
2148 		cml_whitelist_build(engine);
2149 	else if (IS_COFFEELAKE(i915))
2150 		cfl_whitelist_build(engine);
2151 	else if (IS_GEMINILAKE(i915))
2152 		glk_whitelist_build(engine);
2153 	else if (IS_KABYLAKE(i915))
2154 		kbl_whitelist_build(engine);
2155 	else if (IS_BROXTON(i915))
2156 		bxt_whitelist_build(engine);
2157 	else if (IS_SKYLAKE(i915))
2158 		skl_whitelist_build(engine);
2159 	else if (GRAPHICS_VER(i915) <= 8)
2160 		;
2161 	else
2162 		MISSING_CASE(GRAPHICS_VER(i915));
2163 
2164 	wa_init_finish(w);
2165 }
2166 
2167 void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
2168 {
2169 	const struct i915_wa_list *wal = &engine->whitelist;
2170 	struct intel_uncore *uncore = engine->uncore;
2171 	const u32 base = engine->mmio_base;
2172 	struct i915_wa *wa;
2173 	unsigned int i;
2174 
2175 	if (!wal->count)
2176 		return;
2177 
2178 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
2179 		intel_uncore_write(uncore,
2180 				   RING_FORCE_TO_NONPRIV(base, i),
2181 				   i915_mmio_reg_offset(wa->reg));
2182 
2183 	/* And clear the rest just in case of garbage */
2184 	for (; i < RING_MAX_NONPRIV_SLOTS; i++)
2185 		intel_uncore_write(uncore,
2186 				   RING_FORCE_TO_NONPRIV(base, i),
2187 				   i915_mmio_reg_offset(RING_NOPID(base)));
2188 }
2189 
2190 /*
2191  * engine_fake_wa_init(), a placeholder to program registers that are
2192  * not part of an official workaround defined by the hardware team.
2193  * Adding the programming of those registers to a workaround list lets
2194  * the wa framework handle their application and verification properly.
2196  */
2197 static void
2198 engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2199 {
2200 	u8 mocs_w, mocs_r;
2201 
2202 	/*
2203 	 * RING_CMD_CCTL specifies the default MOCS entry that will be used
2204 	 * by the command streamer when executing commands that don't have
2205 	 * a way to explicitly specify a MOCS setting.  The default should
2206 	 * usually reference whichever MOCS entry corresponds to uncached
2207 	 * behavior, although use of a WB cached entry is recommended by the
2208 	 * spec in certain circumstances on specific platforms.
2209 	 */
2210 	if (GRAPHICS_VER(engine->i915) >= 12) {
2211 		mocs_r = engine->gt->mocs.uc_index;
2212 		mocs_w = engine->gt->mocs.uc_index;
2213 
2214 		if (HAS_L3_CCS_READ(engine->i915) &&
2215 		    engine->class == COMPUTE_CLASS) {
2216 			mocs_r = engine->gt->mocs.wb_index;
2217 
2218 			/*
2219 			 * Even on the few platforms where MOCS 0 is a
2220 			 * legitimate table entry, it's never the correct
2221 			 * setting to use here; we can assume the MOCS init
2222 			 * just forgot to initialize wb_index.
2223 			 */
2224 			drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
2225 		}
2226 
2227 		wa_masked_field_set(wal,
2228 				    RING_CMD_CCTL(engine->mmio_base),
2229 				    CMD_CCTL_MOCS_MASK,
2230 				    CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
2231 	}
2232 }
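
/*
 * Background note (assumes the usual masked-register encoding):
 * RING_CMD_CCTL is a masked register, so wa_masked_field_set() writes the
 * new field value in the low 16 bits together with the corresponding mask
 * in the high 16 bits; e.g. setting a one-bit field at bit 0 writes
 * 0x00010001, and only the masked bits take effect on the hardware.
 */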
2233 
2234 static void
2235 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2236 {
2237 	struct drm_i915_private *i915 = engine->i915;
2238 	struct intel_gt *gt = engine->gt;
2239 
2240 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2241 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
2242 		/* Wa_22014600077 */
2243 		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2244 				 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
2245 	}
2246 
2247 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2248 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2249 	    IS_DG2(i915)) {
2250 		/* Wa_1509727124 */
2251 		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2252 				 SC_DISABLE_POWER_OPTIMIZATION_EBB);
2253 	}
2254 
2255 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2256 	    IS_DG2(i915)) {
2257 		/* Wa_22012856258 */
2258 		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2259 				 GEN12_DISABLE_READ_SUPPRESSION);
2260 	}
2261 
2262 	if (IS_DG2(i915)) {
2263 		/*
2264 		 * Wa_22010960976:dg2
2265 		 * Wa_14013347512:dg2
2266 		 */
2267 		wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
2268 				  LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
2269 	}
2270 
2271 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
2272 	    IS_DG2(i915)) {
2273 		/* Wa_14015150844 */
2274 		wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
2275 			   REG_MASKED_FIELD_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
2276 			   0, true);
2277 	}
2278 
2279 	if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2280 	    IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2281 		/*
2282 		 * Wa_1606700617:tgl,dg1,adl-p
2283 		 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
2284 		 * Wa_14010826681:tgl,dg1,rkl,adl-p
2285 		 * Wa_18019627453:dg2
2286 		 */
2287 		wa_masked_en(wal,
2288 			     GEN9_CS_DEBUG_MODE1,
2289 			     FF_DOP_CLOCK_GATE_DISABLE);
2290 	}
2291 
2292 	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
2293 	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2294 		/* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
2295 		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
2296 
2297 		/*
2298 		 * Wa_1407928979:tgl A*
2299 		 * Wa_18011464164:tgl[B0+],dg1[B0+]
2300 		 * Wa_22010931296:tgl[B0+],dg1[B0+]
2301 		 * Wa_14010919138:rkl,dg1,adl-s,adl-p
2302 		 */
2303 		wa_write_or(wal, GEN7_FF_THREAD_MODE,
2304 			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2305 
2306 		/* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
2307 		wa_mcr_masked_en(wal,
2308 				 GEN10_SAMPLER_MODE,
2309 				 ENABLE_SMALLPL);
2310 	}
2311 
2312 	if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2313 	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2314 		/* Wa_1409804808 */
2315 		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2316 				 GEN12_PUSH_CONST_DEREF_HOLD_DIS);
2317 
2318 		/* Wa_14010229206 */
2319 		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
2320 	}
2321 
2322 	if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
2323 		/*
2324 		 * Wa_1607297627
2325 		 *
2326 		 * On TGL and RKL there are multiple entries for this WA in the
2327 		 * BSpec; some indicate this is an A0-only WA, others indicate
2328 		 * it applies to all steppings so we trust the "all steppings."
2329 		 */
2330 		wa_masked_en(wal,
2331 			     RING_PSMI_CTL(RENDER_RING_BASE),
2332 			     GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
2333 			     GEN8_RC_SEMA_IDLE_MSG_DISABLE);
2334 	}
2335 
2336 	if (GRAPHICS_VER(i915) == 11) {
2337 		/* This is not a Wa; enable it for better image quality. */
2338 		wa_masked_en(wal,
2339 			     _3D_CHICKEN3,
2340 			     _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
2341 
2342 		/*
2343 		 * Wa_1405543622:icl
2344 		 * Formerly known as WaGAPZPriorityScheme
2345 		 */
2346 		wa_write_or(wal,
2347 			    GEN8_GARBCNTL,
2348 			    GEN11_ARBITRATION_PRIO_ORDER_MASK);
2349 
2350 		/*
2351 		 * Wa_1604223664:icl
2352 		 * Formerly known as WaL3BankAddressHashing
2353 		 */
2354 		wa_write_clr_set(wal,
2355 				 GEN8_GARBCNTL,
2356 				 GEN11_HASH_CTRL_EXCL_MASK,
2357 				 GEN11_HASH_CTRL_EXCL_BIT0);
2358 		wa_write_clr_set(wal,
2359 				 GEN11_GLBLINVL,
2360 				 GEN11_BANK_HASH_ADDR_EXCL_MASK,
2361 				 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
2362 
2363 		/*
2364 		 * Wa_1405733216:icl
2365 		 * Formerly known as WaDisableCleanEvicts
2366 		 */
2367 		wa_mcr_write_or(wal,
2368 				GEN8_L3SQCREG4,
2369 				GEN11_LQSC_CLEAN_EVICT_DISABLE);
2370 
2371 		/* Wa_1606682166:icl */
2372 		wa_write_or(wal,
2373 			    GEN7_SARCHKMD,
2374 			    GEN7_DISABLE_SAMPLER_PREFETCH);
2375 
2376 		/* Wa_1409178092:icl */
2377 		wa_mcr_write_clr_set(wal,
2378 				     GEN11_SCRATCH2,
2379 				     GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
2380 				     0);
2381 
2382 		/* WaEnable32PlaneMode:icl */
2383 		wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
2384 			     GEN11_ENABLE_32_PLANE_MODE);
2385 
2386 		/*
2387 		 * Wa_1408767742:icl[a2..forever],ehl[all]
2388 		 * Wa_1605460711:icl[a0..c0]
2389 		 */
2390 		wa_write_or(wal,
2391 			    GEN7_FF_THREAD_MODE,
2392 			    GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2393 
2394 		/* Wa_22010271021 */
2395 		wa_masked_en(wal,
2396 			     GEN9_CS_DEBUG_MODE1,
2397 			     FF_DOP_CLOCK_GATE_DISABLE);
2398 	}
2399 
2400 	/*
2401 	 * Intel platforms that support fine-grained preemption (i.e., gen9 and
2402 	 * beyond) allow the kernel-mode driver to choose between two different
2403 	 * options for controlling preemption granularity and behavior.
2404 	 *
2405 	 * Option 1 (hardware default):
2406 	 *   Preemption settings are controlled in a global manner via
2407 	 *   kernel-only register CS_DEBUG_MODE1 (0x20EC).  Any granularity
2408 	 *   and settings chosen by the kernel-mode driver will apply to all
2409 	 *   userspace clients.
2410 	 *
2411 	 * Option 2:
2412 	 *   Preemption settings are controlled on a per-context basis via
2413 	 *   register CS_CHICKEN1 (0x2580).  CS_CHICKEN1 is saved/restored on
2414 	 *   context switch and is writable by userspace (e.g., via
2415 	 *   MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
2416 	 *   which allows different userspace drivers/clients to select
2417 	 *   different settings, or to change those settings on the fly in
2418 	 *   response to runtime needs.  This option was known by the name
2419 	 *   "FtrPerCtxtPreemptionGranularityControl" at one time, although
2420 	 *   that name is somewhat misleading as other non-granularity
2421 	 *   preemption settings are also impacted by this decision.
2422 	 *
2423 	 * On Linux, our policy has always been to let userspace drivers
2424 	 * control preemption granularity/settings (Option 2).  This was
2425 	 * originally mandatory on gen9 to prevent ABI breakage (old gen9
2426 	 * userspace developed before object-level preemption was enabled would
2427 	 * not behave well if i915 were to go with Option 1 and enable that
2428 	 * preemption in a global manner).  On gen9 each context would have
2429 	 * object-level preemption disabled by default (see
2430 	 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
2431 	 * userspace drivers could opt-in to object-level preemption as they
2432 	 * saw fit.  For post-gen9 platforms, we continue to utilize Option 2;
2433 	 * even though it is no longer necessary for ABI compatibility when
2434 	 * enabling a new platform, it does ensure that userspace will be able
2435 	 * to implement any workarounds that show up requiring temporary
2436 	 * adjustments to preemption behavior at runtime.
2437 	 *
2438 	 * Notes/Workarounds:
2439 	 *  - Wa_14015141709:  On DG2 and early steppings of MTL,
2440 	 *      CS_CHICKEN1[0] does not disable object-level preemption as
2441 	 *      it is supposed to (nor would CS_DEBUG_MODE1[0] if we were
2442 	 *      using Option 1).  Effectively this means userspace is unable
2443 	 *      to disable object-level preemption on these platforms/steppings
2444 	 *      despite the setting here.
2445 	 *
2446 	 *  - Wa_16013994831:  May require that userspace program
2447 	 *      CS_CHICKEN1[10] when certain runtime conditions are true.
2448 	 *      Userspace requires Option 2 to be in effect for their update of
2449 	 *      CS_CHICKEN1[10] to be effective.
2450 	 *
2451 	 * Other workarounds may appear in the future that will also require
2452 	 * Option 2 behavior to allow proper userspace implementation.
2453 	 */
2454 	if (GRAPHICS_VER(i915) >= 9)
2455 		wa_masked_en(wal,
2456 			     GEN7_FF_SLICE_CS_CHICKEN1,
2457 			     GEN9_FFSC_PERCTX_PREEMPT_CTRL);
2458 
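	/*
	 * Illustrative only, not driver code: with Option 2 selected above,
	 * userspace can retune CS_CHICKEN1 from a batch buffer by emitting
	 * a masked-register write, along the lines of:
	 *
	 *	*cs++ = MI_LOAD_REGISTER_IMM(1);
	 *	*cs++ = 0x2580;         (the CS_CHICKEN1 offset noted above)
	 *	*cs++ = 0x00010001;     (example value: enable masked bit 0)
	 */
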
2459 	if (IS_SKYLAKE(i915) ||
2460 	    IS_KABYLAKE(i915) ||
2461 	    IS_COFFEELAKE(i915) ||
2462 	    IS_COMETLAKE(i915)) {
2463 		/* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
2464 		wa_write_or(wal,
2465 			    GEN8_GARBCNTL,
2466 			    GEN9_GAPS_TSV_CREDIT_DISABLE);
2467 	}
2468 
2469 	if (IS_BROXTON(i915)) {
2470 		/* WaDisablePooledEuLoadBalancingFix:bxt */
2471 		wa_masked_en(wal,
2472 			     FF_SLICE_CS_CHICKEN2,
2473 			     GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
2474 	}
2475 
2476 	if (GRAPHICS_VER(i915) == 9) {
2477 		/* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
2478 		wa_masked_en(wal,
2479 			     GEN9_CSFE_CHICKEN1_RCS,
2480 			     GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
2481 
2482 		/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
2483 		wa_mcr_write_or(wal,
2484 				BDW_SCRATCH1,
2485 				GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
2486 
2487 		/* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
2488 		if (IS_GEN9_LP(i915))
2489 			wa_mcr_write_clr_set(wal,
2490 					     GEN8_L3SQCREG1,
2491 					     L3_PRIO_CREDITS_MASK,
2492 					     L3_GENERAL_PRIO_CREDITS(62) |
2493 					     L3_HIGH_PRIO_CREDITS(2));
2494 
2495 		/* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
2496 		wa_mcr_write_or(wal,
2497 				GEN8_L3SQCREG4,
2498 				GEN8_LQSC_FLUSH_COHERENT_LINES);
2499 
2500 		/* Disable atomics in L3 to prevent unrecoverable hangs */
2501 		wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
2502 				 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2503 		wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
2504 				     GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2505 		wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
2506 				     EVICTION_PERF_FIX_ENABLE, 0);
2507 	}
2508 
2509 	if (IS_HASWELL(i915)) {
2510 		/* WaSampleCChickenBitEnable:hsw */
2511 		wa_masked_en(wal,
2512 			     HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
2513 
2514 		wa_masked_dis(wal,
2515 			      CACHE_MODE_0_GEN7,
2516 			      /* enable HiZ Raw Stall Optimization */
2517 			      HIZ_RAW_STALL_OPT_DISABLE);
2518 	}
2519 
2520 	if (IS_VALLEYVIEW(i915)) {
2521 		/* WaDisableEarlyCull:vlv */
2522 		wa_masked_en(wal,
2523 			     _3D_CHICKEN3,
2524 			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2525 
2526 		/*
2527 		 * WaVSThreadDispatchOverride:ivb,vlv
2528 		 *
2529 		 * This actually overrides the dispatch
2530 		 * mode for all thread types.
2531 		 */
2532 		wa_write_clr_set(wal,
2533 				 GEN7_FF_THREAD_MODE,
2534 				 GEN7_FF_SCHED_MASK,
2535 				 GEN7_FF_TS_SCHED_HW |
2536 				 GEN7_FF_VS_SCHED_HW |
2537 				 GEN7_FF_DS_SCHED_HW);
2538 
2539 		/* WaPsdDispatchEnable:vlv */
2540 		/* WaDisablePSDDualDispatchEnable:vlv */
2541 		wa_masked_en(wal,
2542 			     GEN7_HALF_SLICE_CHICKEN1,
2543 			     GEN7_MAX_PS_THREAD_DEP |
2544 			     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2545 	}
2546 
2547 	if (IS_IVYBRIDGE(i915)) {
2548 		/* WaDisableEarlyCull:ivb */
2549 		wa_masked_en(wal,
2550 			     _3D_CHICKEN3,
2551 			     _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2552 
2553 		if (0) { /* causes HiZ corruption on ivb:gt1 */
2554 			/* enable HiZ Raw Stall Optimization */
2555 			wa_masked_dis(wal,
2556 				      CACHE_MODE_0_GEN7,
2557 				      HIZ_RAW_STALL_OPT_DISABLE);
2558 		}
2559 
2560 		/*
2561 		 * WaVSThreadDispatchOverride:ivb,vlv
2562 		 *
2563 		 * This actually overrides the dispatch
2564 		 * mode for all thread types.
2565 		 */
2566 		wa_write_clr_set(wal,
2567 				 GEN7_FF_THREAD_MODE,
2568 				 GEN7_FF_SCHED_MASK,
2569 				 GEN7_FF_TS_SCHED_HW |
2570 				 GEN7_FF_VS_SCHED_HW |
2571 				 GEN7_FF_DS_SCHED_HW);
2572 
2573 		/* WaDisablePSDDualDispatchEnable:ivb */
2574 		if (INTEL_INFO(i915)->gt == 1)
2575 			wa_masked_en(wal,
2576 				     GEN7_HALF_SLICE_CHICKEN1,
2577 				     GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2578 	}
2579 
2580 	if (GRAPHICS_VER(i915) == 7) {
2581 		/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
2582 		wa_masked_en(wal,
2583 			     RING_MODE_GEN7(RENDER_RING_BASE),
2584 			     GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
2585 
2586 		/*
2587 		 * BSpec recommends 8x4 when MSAA is used,
2588 		 * however in practice 16x4 seems fastest.
2589 		 *
2590 		 * Note that PS/WM thread counts depend on the WIZ hashing
2591 		 * disable bit, which we don't touch here, but it's good
2592 		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2593 		 */
2594 		wa_masked_field_set(wal,
2595 				    GEN7_GT_MODE,
2596 				    GEN6_WIZ_HASHING_MASK,
2597 				    GEN6_WIZ_HASHING_16x4);
2598 	}
2599 
2600 	if (IS_GRAPHICS_VER(i915, 6, 7))
2601 		/*
2602 		 * We need to disable the AsyncFlip performance optimisations in
2603 		 * order to use MI_WAIT_FOR_EVENT within the CS. It should
2604 		 * already be programmed to '1' on all products.
2605 		 *
2606 		 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
2607 		 */
2608 		wa_masked_en(wal,
2609 			     RING_MI_MODE(RENDER_RING_BASE),
2610 			     ASYNC_FLIP_PERF_DISABLE);
2611 
2612 	if (GRAPHICS_VER(i915) == 6) {
2613 		/*
2614 		 * Required for the hardware to program scanline values for
2615 		 * waiting
2616 		 * WaEnableFlushTlbInvalidationMode:snb
2617 		 */
2618 		wa_masked_en(wal,
2619 			     GFX_MODE,
2620 			     GFX_TLB_INVALIDATE_EXPLICIT);
2621 
2622 		/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
2623 		wa_masked_en(wal,
2624 			     _3D_CHICKEN,
2625 			     _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
2626 
2627 		wa_masked_en(wal,
2628 			     _3D_CHICKEN3,
2629 			     /* WaStripsFansDisableFastClipPerformanceFix:snb */
2630 			     _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
2631 			     /*
2632 			      * Bspec says:
2633 			      * "This bit must be set if 3DSTATE_CLIP clip mode is set
2634 			      * to normal and 3DSTATE_SF number of SF output attributes
2635 			      * is more than 16."
2636 			      */
2637 			     _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
2638 
2639 		/*
2640 		 * BSpec recommends 8x4 when MSAA is used,
2641 		 * however in practice 16x4 seems fastest.
2642 		 *
2643 		 * Note that PS/WM thread counts depend on the WIZ hashing
2644 		 * disable bit, which we don't touch here, but it's good
2645 		 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2646 		 */
2647 		wa_masked_field_set(wal,
2648 				    GEN6_GT_MODE,
2649 				    GEN6_WIZ_HASHING_MASK,
2650 				    GEN6_WIZ_HASHING_16x4);
2651 
2652 		/*
2653 		 * From the Sandybridge PRM, volume 1 part 3, page 24:
2654 		 * "If this bit is set, STCunit will have LRA as replacement
2655 		 *  policy. [...] This bit must be reset. LRA replacement
2656 		 *  policy is not supported."
2657 		 */
2658 		wa_masked_dis(wal,
2659 			      CACHE_MODE_0,
2660 			      CM0_STC_EVICT_DISABLE_LRA_SNB);
2661 	}
2662 
2663 	if (IS_GRAPHICS_VER(i915, 4, 6))
2664 		/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
2665 		wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
2666 		       0, REG_MASKED_FIELD_ENABLE(VS_TIMER_DISPATCH),
2667 		       /* XXX bit doesn't stick on Broadwater */
2668 		       IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
2669 
2670 	if (GRAPHICS_VER(i915) == 4)
2671 		/*
2672 		 * Disable CONSTANT_BUFFER before it is loaded from the context
2673 		 * image. As soon as it is loaded, it is executed and the stored
2674 		 * address may no longer be valid, leading to a GPU hang.
2675 		 *
2676 		 * This imposes the requirement that userspace reload their
2677 		 * CONSTANT_BUFFER on every batch, fortunately a requirement
2678 		 * they are already accustomed to from before contexts were
2679 		 * enabled.
2680 		 */
2681 		wa_add(wal, ECOSKPD(RENDER_RING_BASE),
2682 		       0, REG_MASKED_FIELD_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
2683 		       0 /* XXX bit doesn't stick on Broadwater */,
2684 		       true);
2685 }
2686 
2687 static void
2688 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2689 {
2690 	struct drm_i915_private *i915 = engine->i915;
2691 
2692 	/* WaKBLVECSSemaphoreWaitPoll:kbl */
2693 	if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
2694 		wa_write(wal,
2695 			 RING_SEMA_WAIT_POLL(engine->mmio_base),
2696 			 1);
2697 	}
2698 	/* Wa_16018031267, Wa_16018063123 */
2699 	if (NEEDS_FASTCOLOR_BLT_WABB(engine))
2700 		wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
2701 				    XEHP_BLITTER_SCHEDULING_MODE_MASK,
2702 				    XEHP_BLITTER_ROUND_ROBIN_MODE);
2703 }
2704 
2705 static void
2706 ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2707 {
2708 	/* boilerplate for any CCS engine workaround */
2709 }
2710 
2711 /*
2712  * The bspec performance guide has recommended MMIO tuning settings.  These
2713  * aren't truly "workarounds" but we want to program them with the same
2714  * workaround infrastructure to ensure that they're automatically added to
2715  * the GuC save/restore lists, re-applied at the right times, and checked for
2716  * any conflicting programming requested by real workarounds.
2717  *
2718  * Programming settings should be added here only if their registers are not
2719  * part of an engine's register state context.  If a register is part of a
2720  * context, then any tuning settings should be programmed in an appropriate
2721  * function invoked by __intel_engine_init_ctx_wa().
2722  */
2723 static void
2724 add_render_compute_tuning_settings(struct intel_gt *gt,
2725 				   struct i915_wa_list *wal)
2726 {
2727 	struct drm_i915_private *i915 = gt->i915;
2728 
2729 	if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
2730 		wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
2731 
2732 	/*
2733 	 * This tuning setting proves beneficial only on ATS-M designs; the
2734 	 * default "age based" setting is optimal on regular DG2 and other
2735 	 * platforms.
2736 	 */
2737 	if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
2738 		wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
2739 					THREAD_EX_ARB_MODE_RR_AFTER_DEP);
2740 
2741 	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 55))
2742 		wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
2743 }
2744 
2745 static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2746 {
2747 	struct intel_gt *gt = engine->gt;
2748 	u32 mode;
2749 
2750 	if (!IS_DG2(gt->i915))
2751 		return;
2752 
2753 	/*
2754 	 * Wa_14019159160: This workaround, along with others, leads to
2755 	 * significant challenges in utilizing load balancing among the
2756 	 * CCS slices. Consequently, an architectural decision has been
2757 	 * made to completely disable automatic CCS load balancing.
2758 	 */
2759 	wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
2760 
2761 	/*
2762 	 * After having disabled automatic load balancing, we need to
2763 	 * assign all slices to a single CCS. We will call it CCS mode 1.
2764 	 */
2765 	mode = intel_gt_apply_ccs_mode(gt);
2766 	wa_masked_en(wal, XEHP_CCS_MODE, mode);
2767 }
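
/*
 * Illustrative note: the mode value returned by intel_gt_apply_ccs_mode()
 * above is the XEHP_CCS_MODE routing that funnels every compute slice to
 * a single CCS engine (the "CCS mode 1" described above); programming it
 * as a masked workaround keeps that routing in place across resets.
 */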
2768 
2769 /*
2770  * The workarounds in this function apply to shared registers in
2771  * the general render reset domain that aren't tied to a
2772  * specific engine.  Since all render+compute engines get reset
2773  * together, and the contents of these registers are lost during
2774  * the shared render domain reset, we'll define such workarounds
2775  * here and then add them to just a single RCS or CCS engine's
2776  * workaround list (whichever engine has the I915_ENGINE_FIRST_RENDER_COMPUTE flag).
2777  */
2778 static void
2779 general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2780 {
2781 	struct drm_i915_private *i915 = engine->i915;
2782 	struct intel_gt *gt = engine->gt;
2783 
2784 	add_render_compute_tuning_settings(gt, wal);
2785 
2786 	if (GRAPHICS_VER(i915) >= 11) {
2787 		/* This is not a Wa (although referred to as
2788 		 * WaSetInidrectStateOverride in places); it allows
2789 		 * applications that reference sampler states through
2790 		 * the BindlessSamplerStateBaseAddress to have their
2791 		 * border color relative to DynamicStateBaseAddress
2792 		 * rather than BindlessSamplerStateBaseAddress.
2793 		 *
2794 		 * Otherwise SAMPLER_STATE border colors have to be
2795 		 * copied in multiple heaps (DynamicStateBaseAddress &
2796 		 * BindlessSamplerStateBaseAddress)
2797 		 *
2798 		 * BSpec: 46052
2799 		 */
2800 		wa_mcr_masked_en(wal,
2801 				 GEN10_SAMPLER_MODE,
2802 				 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
2803 	}
2804 
2805 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
2806 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
2807 	    IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74))) {
2808 		/* Wa_14017856879 */
2809 		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
2810 
2811 		/* Wa_14020495402 */
2812 		wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, XELPG_DISABLE_TDL_SVHS_GATING);
2813 	}
2814 
2815 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2816 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2817 		/*
2818 		 * Wa_14017066071
2819 		 * Wa_14017654203
2820 		 */
2821 		wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2822 				 MTL_DISABLE_SAMPLER_SC_OOO);
2823 
2824 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2825 		/* Wa_22015279794 */
2826 		wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2827 				 DISABLE_PREFETCH_INTO_IC);
2828 
2829 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2830 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2831 	    IS_DG2(i915)) {
2832 		/* Wa_22013037850 */
2833 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
2834 				DISABLE_128B_EVICTION_COMMAND_UDW);
2835 
2836 		/* Wa_18017747507 */
2837 		wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
2838 	}
2839 
2840 	if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2841 	    IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2842 	    IS_DG2(i915)) {
2843 		/* Wa_22014226127 */
2844 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
2845 	}
2846 
2847 	if (IS_DG2(i915)) {
2848 		/* Wa_14015227452:dg2,pvc */
2849 		wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
2850 
2851 		/*
2852 		 * Wa_16011620976:dg2_g11
2853 		 * Wa_22015475538:dg2
2854 		 */
2855 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
2856 
2857 		/* Wa_18028616096 */
2858 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
2859 	}
2860 
2861 	if (IS_DG2_G11(i915)) {
2862 		/*
2863 		 * Wa_22012826095:dg2
2864 		 * Wa_22013059131:dg2
2865 		 */
2866 		wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
2867 				     MAXREQS_PER_BANK,
2868 				     REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
2869 
2870 		/* Wa_22013059131:dg2 */
2871 		wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
2872 				FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
2873 
2874 		/*
2875 		 * Wa_22012654132
2876 		 *
2877 		 * Note that register 0xE420 is write-only and cannot be read
2878 		 * back for verification on DG2 (due to Wa_14012342262), so
2879 		 * we need to explicitly skip the readback.
2880 		 */
2881 		wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
2882 			   REG_MASKED_FIELD_ENABLE(ENABLE_PREFETCH_INTO_IC),
2883 			   0 /* write-only, so skip validation */,
2884 			   true);
2885 	}
2886 }
2887 
2888 static void
2889 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2890 {
2891 	if (GRAPHICS_VER(engine->i915) < 4)
2892 		return;
2893 
2894 	engine_fake_wa_init(engine, wal);
2895 
2896 	/*
2897 	 * These are common workarounds that just need to be applied
2898 	 * to a single RCS/CCS engine's workaround list since
2899 	 * they're reset as part of the general render domain reset.
2900 	 */
2901 	if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
2902 		general_render_compute_wa_init(engine, wal);
2903 		ccs_engine_wa_mode(engine, wal);
2904 	}
2905 
2906 	if (engine->class == COMPUTE_CLASS)
2907 		ccs_engine_wa_init(engine, wal);
2908 	else if (engine->class == RENDER_CLASS)
2909 		rcs_engine_wa_init(engine, wal);
2910 	else
2911 		xcs_engine_wa_init(engine, wal);
2912 }
2913 
2914 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
2915 {
2916 	struct i915_wa_list *wal = &engine->wa_list;
2917 
2918 	wa_init_start(wal, engine->gt, "engine", engine->name);
2919 	engine_init_workarounds(engine, wal);
2920 	wa_init_finish(wal);
2921 }
2922 
2923 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
2924 {
2925 	wa_list_apply(&engine->wa_list);
2926 }
2927 
2928 static const struct i915_mmio_range mcr_ranges_gen8[] = {
2929 	{ .start = 0x5500, .end = 0x55ff },
2930 	{ .start = 0x7000, .end = 0x7fff },
2931 	{ .start = 0x9400, .end = 0x97ff },
2932 	{ .start = 0xb000, .end = 0xb3ff },
2933 	{ .start = 0xe000, .end = 0xe7ff },
2934 	{},
2935 };
2936 
2937 static const struct i915_mmio_range mcr_ranges_gen12[] = {
2938 	{ .start =  0x8150, .end =  0x815f },
2939 	{ .start =  0x9520, .end =  0x955f },
2940 	{ .start =  0xb100, .end =  0xb3ff },
2941 	{ .start =  0xde80, .end =  0xe8ff },
2942 	{ .start = 0x24a00, .end = 0x24a7f },
2943 	{},
2944 };
2945 
2946 static const struct i915_mmio_range mcr_ranges_xehp[] = {
2947 	{ .start =  0x4000, .end =  0x4aff },
2948 	{ .start =  0x5200, .end =  0x52ff },
2949 	{ .start =  0x5400, .end =  0x7fff },
2950 	{ .start =  0x8140, .end =  0x815f },
2951 	{ .start =  0x8c80, .end =  0x8dff },
2952 	{ .start =  0x94d0, .end =  0x955f },
2953 	{ .start =  0x9680, .end =  0x96ff },
2954 	{ .start =  0xb000, .end =  0xb3ff },
2955 	{ .start =  0xc800, .end =  0xcfff },
2956 	{ .start =  0xd800, .end =  0xd8ff },
2957 	{ .start =  0xdc00, .end =  0xffff },
2958 	{ .start = 0x17000, .end = 0x17fff },
2959 	{ .start = 0x24a00, .end = 0x24a7f },
2960 	{},
2961 };
2962 
2963 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
2964 {
2965 	const struct i915_mmio_range *mcr_ranges;
2966 	int i;
2967 
2968 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55))
2969 		mcr_ranges = mcr_ranges_xehp;
2970 	else if (GRAPHICS_VER(i915) >= 12)
2971 		mcr_ranges = mcr_ranges_gen12;
2972 	else if (GRAPHICS_VER(i915) >= 8)
2973 		mcr_ranges = mcr_ranges_gen8;
2974 	else
2975 		return false;
2976 
2977 	/*
2978 	 * Registers in these ranges are affected by the MCR selector
2979 	 * which only controls CPU initiated MMIO. Routing does not
2980 	 * work for CS access so we cannot verify them on this path.
2981 	 */
2982 	for (i = 0; mcr_ranges[i].start; i++)
2983 		if (offset >= mcr_ranges[i].start &&
2984 		    offset <= mcr_ranges[i].end)
2985 			return true;
2986 
2987 	return false;
2988 }
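
/*
 * Worked example (illustrative): GEN10_CACHE_MODE_SS lives at 0xe420,
 * inside the gen12 0xde80-0xe8ff range (and the xehp 0xdc00-0xffff range)
 * above, so mcr_range() returns true for it and the SRM-based
 * verification below skips it rather than sampling a potentially
 * mis-steered instance.
 */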
2989 
2990 static int
2991 wa_list_srm(struct i915_request *rq,
2992 	    const struct i915_wa_list *wal,
2993 	    struct i915_vma *vma)
2994 {
2995 	struct drm_i915_private *i915 = rq->i915;
2996 	unsigned int i, count = 0;
2997 	const struct i915_wa *wa;
2998 	u32 srm, *cs;
2999 
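	/*
	 * Descriptive note, assuming the usual MI command encoding: gen8+
	 * takes a 64-bit store address, making MI_STORE_REGISTER_MEM one
	 * dword longer; the dword length is carried in the low bits of the
	 * opcode, hence the bare increment below.
	 */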
3000 	srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
3001 	if (GRAPHICS_VER(i915) >= 8)
3002 		srm++;
3003 
3004 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3005 		if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
3006 			count++;
3007 	}
3008 
3009 	cs = intel_ring_begin(rq, 4 * count);
3010 	if (IS_ERR(cs))
3011 		return PTR_ERR(cs);
3012 
3013 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3014 		u32 offset = i915_mmio_reg_offset(wa->reg);
3015 
3016 		if (mcr_range(i915, offset))
3017 			continue;
3018 
3019 		*cs++ = srm;
3020 		*cs++ = offset;
3021 		*cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
3022 		*cs++ = 0;
3023 	}
3024 	intel_ring_advance(rq, cs);
3025 
3026 	return 0;
3027 }
3028 
3029 static int engine_wa_list_verify(struct intel_context *ce,
3030 				 const struct i915_wa_list * const wal,
3031 				 const char *from)
3032 {
3033 	const struct i915_wa *wa;
3034 	struct i915_request *rq;
3035 	struct i915_vma *vma;
3036 	struct i915_gem_ww_ctx ww;
3037 	unsigned int i;
3038 	u32 *results;
3039 	int err;
3040 
3041 	if (!wal->count)
3042 		return 0;
3043 
3044 	vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
3045 					   wal->count * sizeof(u32));
3046 	if (IS_ERR(vma))
3047 		return PTR_ERR(vma);
3048 
3049 	intel_engine_pm_get(ce->engine);
3050 	i915_gem_ww_ctx_init(&ww, false);
3051 retry:
3052 	err = i915_gem_object_lock(vma->obj, &ww);
3053 	if (err == 0)
3054 		err = intel_context_pin_ww(ce, &ww);
3055 	if (err)
3056 		goto err_pm;
3057 
3058 	err = i915_vma_pin_ww(vma, &ww, 0, 0,
3059 			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
3060 	if (err)
3061 		goto err_unpin;
3062 
3063 	rq = i915_request_create(ce);
3064 	if (IS_ERR(rq)) {
3065 		err = PTR_ERR(rq);
3066 		goto err_vma;
3067 	}
3068 
3069 	err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
3070 	if (err == 0)
3071 		err = wa_list_srm(rq, wal, vma);
3072 
3073 	i915_request_get(rq);
3074 	if (err)
3075 		i915_request_set_error_once(rq, err);
3076 	i915_request_add(rq);
3077 
3078 	if (err)
3079 		goto err_rq;
3080 
3081 	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
3082 		err = -ETIME;
3083 		goto err_rq;
3084 	}
3085 
3086 	results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
3087 	if (IS_ERR(results)) {
3088 		err = PTR_ERR(results);
3089 		goto err_rq;
3090 	}
3091 
3092 	err = 0;
3093 	for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3094 		if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
3095 			continue;
3096 
3097 		if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
3098 			err = -ENXIO;
3099 	}
3100 
3101 	i915_gem_object_unpin_map(vma->obj);
3102 
3103 err_rq:
3104 	i915_request_put(rq);
3105 err_vma:
3106 	i915_vma_unpin(vma);
3107 err_unpin:
3108 	intel_context_unpin(ce);
3109 err_pm:
3110 	if (err == -EDEADLK) {
3111 		err = i915_gem_ww_ctx_backoff(&ww);
3112 		if (!err)
3113 			goto retry;
3114 	}
3115 	i915_gem_ww_ctx_fini(&ww);
3116 	intel_engine_pm_put(ce->engine);
3117 	i915_vma_put(vma);
3118 	return err;
3119 }
3120 
3121 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
3122 				    const char *from)
3123 {
3124 	return engine_wa_list_verify(engine->kernel_context,
3125 				     &engine->wa_list,
3126 				     from);
3127 }
3128 
3129 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3130 #include "selftest_workarounds.c"
3131 #endif
3132