// SPDX-License-Identifier: MIT
/*
 * Copyright © 2015 Intel Corporation
 */

#include "i915_drv.h"

#include "intel_engine.h"
#include "intel_gt.h"
#include "intel_gt_mcr.h"
#include "intel_gt_regs.h"
#include "intel_mocs.h"
#include "intel_ring.h"

/* structures required */
struct drm_i915_mocs_entry {
        u32 control_value;
        u16 l3cc_value;
        u16 used;
};

struct drm_i915_mocs_table {
        unsigned int size;
        unsigned int n_entries;
        const struct drm_i915_mocs_entry *table;
        u8 uc_index;
        u8 wb_index; /* Only used on HAS_L3_CCS_READ() platforms */
        u8 unused_entries_index;
};
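
/*
 * A note on the bookkeeping above: size is the number of slots defined in
 * a platform's entry array (its ARRAY_SIZE), while n_entries is the number
 * of hardware MOCS registers that are actually programmed; any register
 * without a defined entry is filled from the entry at unused_entries_index
 * when the table is written out below.
 */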

/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
#define _LE_CACHEABILITY(value) ((value) << 0)
#define _LE_TGT_CACHE(value) ((value) << 2)
#define LE_LRUM(value) ((value) << 4)
#define LE_AOM(value) ((value) << 6)
#define LE_RSC(value) ((value) << 7)
#define LE_SCC(value) ((value) << 8)
#define LE_PFM(value) ((value) << 11)
#define LE_SCF(value) ((value) << 14)
#define LE_COS(value) ((value) << 15)
#define LE_SSE(value) ((value) << 17)

/* Defines for the tables (GLOB_MOCS_0 - GLOB_MOCS_16) */
#define _L4_CACHEABILITY(value) ((value) << 2)
#define IG_PAT(value) ((value) << 8)

/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
#define L3_ESC(value) ((value) << 0)
#define L3_SCC(value) ((value) << 1)
#define _L3_CACHEABILITY(value) ((value) << 4)
#define L3_GLBGO(value) ((value) << 6)
#define L3_LKUP(value) ((value) << 7)
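
/*
 * As a worked example of the field encodings above, the common gen11/gen12
 * "L3 + LLC" control value LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) is
 * 0x3 | (0x1 << 2) | (0x3 << 4) == 0x37, and the matching l3cc value
 * L3_3_WB is 0x3 << 4 == 0x30.
 */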
53
54 /* Helper defines */
55 #define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
56 #define MTL_NUM_MOCS_ENTRIES 16
57
58 /* (e)LLC caching options */
59 /*
60 * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
61 * the same as LE_UC
62 */
63 #define LE_0_PAGETABLE _LE_CACHEABILITY(0)
64 #define LE_1_UC _LE_CACHEABILITY(1)
65 #define LE_2_WT _LE_CACHEABILITY(2)
66 #define LE_3_WB _LE_CACHEABILITY(3)
67
68 /* Target cache */
69 #define LE_TC_0_PAGETABLE _LE_TGT_CACHE(0)
70 #define LE_TC_1_LLC _LE_TGT_CACHE(1)
71 #define LE_TC_2_LLC_ELLC _LE_TGT_CACHE(2)
72 #define LE_TC_3_LLC_ELLC_ALT _LE_TGT_CACHE(3)
73
74 /* L3 caching options */
75 #define L3_0_DIRECT _L3_CACHEABILITY(0)
76 #define L3_1_UC _L3_CACHEABILITY(1)
77 #define L3_2_RESERVED _L3_CACHEABILITY(2)
78 #define L3_3_WB _L3_CACHEABILITY(3)
79
80 /* L4 caching options */
81 #define L4_0_WB _L4_CACHEABILITY(0)
82 #define L4_1_WT _L4_CACHEABILITY(1)
83 #define L4_2_RESERVED _L4_CACHEABILITY(2)
84 #define L4_3_UC _L4_CACHEABILITY(3)
85
86 #define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
87 [__idx] = { \
88 .control_value = __control_value, \
89 .l3cc_value = __l3cc_value, \
90 .used = 1, \
91 }
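
/*
 * For illustration, MOCS_ENTRY(2, LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), L3_3_WB)
 * expands to the designated initializer
 *
 *      [2] = {
 *              .control_value = LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
 *              .l3cc_value = L3_3_WB,
 *              .used = 1,
 *      }
 *
 * so any index that is not explicitly listed in a table stays
 * zero-initialized with .used == 0 and is treated as unused.
 */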

/*
 * MOCS tables
 *
 * These are the MOCS tables that are programmed across all the rings.
 * The control value is programmed to all the rings that support the
 * MOCS registers, while the l3cc_values are only programmed to the
 * LNCFCMOCS0 - LNCFCMOCS31 registers.
 *
 * These tables are intended to be kept reasonably consistent across
 * HW platforms, and for ICL+, be identical across OSes. To achieve
 * that, for Icelake and above, the list of entries is published as
 * part of bspec.
 *
 * Entries not part of the following tables are undefined as far as
 * userspace is concerned and shouldn't be relied upon. For Gen < 12
 * they will be initialized to PTE. Gen >= 12 has no setting for PTE,
 * so on those platforms, except TGL/RKL, unused entries will be
 * initialized to L3 WB to catch accidental use of reserved and unused
 * MOCS indices.
 *
 * The last few entries are reserved by the hardware. For ICL+ they
 * should be initialized according to bspec and never used, for older
 * platforms they should never be written to.
 *
 * NOTE1: These tables are part of bspec and defined as part of the
 * hardware interface for ICL+. For older platforms, they are part of
 * the kernel ABI. It is expected that, for a specific hardware
 * platform, existing entries will remain constant and the table will
 * only be updated by adding new entries, filling unused positions.
 *
 * NOTE2: For Gen >= 12, except TGL and RKL, reserved and unspecified
 * MOCS indices have been set to L3 WB. These reserved entries should
 * never be used; they may be changed to lower-performance variants
 * with better coherency in the future if more entries are needed.
 * For TGL/RKL, all unspecified MOCS indices are mapped to L3 UC.
 */
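
/*
 * The policy above for unused indices is implemented by
 * get_entry_control()/get_entry_l3cc() further down: any index without a
 * used entry is programmed with the values of the entry at the table's
 * unused_entries_index (I915_MOCS_PTE unless a platform overrides it).
 */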
#define GEN9_MOCS_ENTRIES \
        MOCS_ENTRY(I915_MOCS_UNCACHED, \
                   LE_1_UC | LE_TC_2_LLC_ELLC, \
                   L3_1_UC), \
        MOCS_ENTRY(I915_MOCS_PTE, \
                   LE_0_PAGETABLE | LE_TC_0_PAGETABLE | LE_LRUM(3), \
                   L3_3_WB)

static const struct drm_i915_mocs_entry skl_mocs_table[] = {
        GEN9_MOCS_ENTRIES,
        MOCS_ENTRY(I915_MOCS_CACHED,
                   LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3),
                   L3_3_WB),

        /*
         * mocs:63
         * - used by the L3 for all of its evictions.
         *   Thus it is expected to allow LLC cacheability to enable coherent
         *   flows to be maintained.
         * - used to force L3 uncacheable cycles.
         *   Thus it is expected to make the surface L3 uncacheable.
         */
        MOCS_ENTRY(63,
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
                   L3_1_UC)
};

/* NOTE: the LE_TGT_CACHE is not used on Broxton */
static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
        GEN9_MOCS_ENTRIES,
        MOCS_ENTRY(I915_MOCS_CACHED,
                   LE_1_UC | LE_TC_2_LLC_ELLC | LE_LRUM(3),
                   L3_3_WB)
};

#define GEN11_MOCS_ENTRIES \
        /* Entries 0 and 1 are defined per-platform */ \
        /* Base - L3 + LLC */ \
        MOCS_ENTRY(2, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
                   L3_3_WB), \
        /* Base - Uncached */ \
        MOCS_ENTRY(3, \
                   LE_1_UC | LE_TC_1_LLC, \
                   L3_1_UC), \
        /* Base - L3 */ \
        MOCS_ENTRY(4, \
                   LE_1_UC | LE_TC_1_LLC, \
                   L3_3_WB), \
        /* Base - LLC */ \
        MOCS_ENTRY(5, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
                   L3_1_UC), \
        /* Age 0 - LLC */ \
        MOCS_ENTRY(6, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
                   L3_1_UC), \
        /* Age 0 - L3 + LLC */ \
        MOCS_ENTRY(7, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1), \
                   L3_3_WB), \
        /* Age: Don't Chg. - LLC */ \
        MOCS_ENTRY(8, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
                   L3_1_UC), \
        /* Age: Don't Chg. - L3 + LLC */ \
        MOCS_ENTRY(9, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2), \
                   L3_3_WB), \
        /* No AOM - LLC */ \
        MOCS_ENTRY(10, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
                   L3_1_UC), \
        /* No AOM - L3 + LLC */ \
        MOCS_ENTRY(11, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1), \
                   L3_3_WB), \
        /* No AOM; Age 0 - LLC */ \
        MOCS_ENTRY(12, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
                   L3_1_UC), \
        /* No AOM; Age 0 - L3 + LLC */ \
        MOCS_ENTRY(13, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1), \
                   L3_3_WB), \
        /* No AOM; Age:DC - LLC */ \
        MOCS_ENTRY(14, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
                   L3_1_UC), \
        /* No AOM; Age:DC - L3 + LLC */ \
        MOCS_ENTRY(15, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1), \
                   L3_3_WB), \
        /* Bypass LLC - Uncached (EHL+) */ \
        MOCS_ENTRY(16, \
                   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
                   L3_1_UC), \
        /* Bypass LLC - L3 (Read-Only) (EHL+) */ \
        MOCS_ENTRY(17, \
                   LE_1_UC | LE_TC_1_LLC | LE_SCF(1), \
                   L3_3_WB), \
        /* Self-Snoop - L3 + LLC */ \
        MOCS_ENTRY(18, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3), \
                   L3_3_WB), \
        /* Skip Caching - L3 + LLC(12.5%) */ \
        MOCS_ENTRY(19, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7), \
                   L3_3_WB), \
        /* Skip Caching - L3 + LLC(25%) */ \
        MOCS_ENTRY(20, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3), \
                   L3_3_WB), \
        /* Skip Caching - L3 + LLC(50%) */ \
        MOCS_ENTRY(21, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1), \
                   L3_3_WB), \
        /* Skip Caching - L3 + LLC(75%) */ \
        MOCS_ENTRY(22, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3), \
                   L3_3_WB), \
        /* Skip Caching - L3 + LLC(87.5%) */ \
        MOCS_ENTRY(23, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7), \
                   L3_3_WB), \
        /* HW Reserved - SW program but never use */ \
        MOCS_ENTRY(62, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
                   L3_1_UC), \
        /* HW Reserved - SW program but never use */ \
        MOCS_ENTRY(63, \
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), \
                   L3_1_UC)

static const struct drm_i915_mocs_entry tgl_mocs_table[] = {
        /*
         * NOTE:
         * Reserved and unspecified MOCS indices have been set to L3 UC.
         * These reserved entries should never be used; they may be changed
         * to lower-performance variants with better coherency in the future
         * if more entries are needed. We are programming index
         * I915_MOCS_PTE(1) only; __init_mocs_table() takes care of
         * programming the unused indices with this entry.
         */
        MOCS_ENTRY(I915_MOCS_PTE,
                   LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
                   L3_1_UC),
        GEN11_MOCS_ENTRIES,

        /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
        MOCS_ENTRY(48,
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
                   L3_3_WB),
        /* Implicitly enable L1 - HDC:L1 + L3 */
        MOCS_ENTRY(49,
                   LE_1_UC | LE_TC_1_LLC,
                   L3_3_WB),
        /* Implicitly enable L1 - HDC:L1 + LLC */
        MOCS_ENTRY(50,
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
                   L3_1_UC),
        /* Implicitly enable L1 - HDC:L1 */
        MOCS_ENTRY(51,
                   LE_1_UC | LE_TC_1_LLC,
                   L3_1_UC),
        /* HW Special Case (CCS) */
        MOCS_ENTRY(60,
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
                   L3_1_UC),
        /* HW Special Case (Displayable) */
        MOCS_ENTRY(61,
                   LE_1_UC | LE_TC_1_LLC,
                   L3_3_WB),
};

static const struct drm_i915_mocs_entry icl_mocs_table[] = {
        /* Base - Uncached (Deprecated) */
        MOCS_ENTRY(I915_MOCS_UNCACHED,
                   LE_1_UC | LE_TC_1_LLC,
                   L3_1_UC),
        /* Base - L3 + LeCC:PAT (Deprecated) */
        MOCS_ENTRY(I915_MOCS_PTE,
                   LE_0_PAGETABLE | LE_TC_0_PAGETABLE,
                   L3_3_WB),

        GEN11_MOCS_ENTRIES
};

static const struct drm_i915_mocs_entry dg1_mocs_table[] = {

        /* UC */
        MOCS_ENTRY(1, 0, L3_1_UC),
        /* WB - L3 */
        MOCS_ENTRY(5, 0, L3_3_WB),
        /* WB - L3 50% */
        MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
        /* WB - L3 25% */
        MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
        /* WB - L3 12.5% */
        MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),

        /* HDC:L1 + L3 */
        MOCS_ENTRY(48, 0, L3_3_WB),
        /* HDC:L1 */
        MOCS_ENTRY(49, 0, L3_1_UC),

        /* HW Reserved */
        MOCS_ENTRY(60, 0, L3_1_UC),
        MOCS_ENTRY(61, 0, L3_1_UC),
        MOCS_ENTRY(62, 0, L3_1_UC),
        MOCS_ENTRY(63, 0, L3_1_UC),
};

static const struct drm_i915_mocs_entry gen12_mocs_table[] = {
        GEN11_MOCS_ENTRIES,
        /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
        MOCS_ENTRY(48,
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
                   L3_3_WB),
        /* Implicitly enable L1 - HDC:L1 + L3 */
        MOCS_ENTRY(49,
                   LE_1_UC | LE_TC_1_LLC,
                   L3_3_WB),
        /* Implicitly enable L1 - HDC:L1 + LLC */
        MOCS_ENTRY(50,
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
                   L3_1_UC),
        /* Implicitly enable L1 - HDC:L1 */
        MOCS_ENTRY(51,
                   LE_1_UC | LE_TC_1_LLC,
                   L3_1_UC),
        /* HW Special Case (CCS) */
        MOCS_ENTRY(60,
                   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
                   L3_1_UC),
        /* HW Special Case (Displayable) */
        MOCS_ENTRY(61,
                   LE_1_UC | LE_TC_1_LLC,
                   L3_3_WB),
};

static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
        /* UC - Coherent; GO:L3 */
        MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
        /* UC - Coherent; GO:Memory */
        MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
        /* UC - Non-Coherent; GO:Memory */
        MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),

        /* WB - LC */
        MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};

static const struct drm_i915_mocs_entry mtl_mocs_table[] = {
        /* Error - Reserved for Non-Use */
        MOCS_ENTRY(0,
                   IG_PAT(0),
                   L3_LKUP(1) | L3_3_WB),
        /* Cached - L3 + L4 */
        MOCS_ENTRY(1,
                   IG_PAT(1),
                   L3_LKUP(1) | L3_3_WB),
        /* L4 - GO:L3 */
        MOCS_ENTRY(2,
                   IG_PAT(1),
                   L3_LKUP(1) | L3_1_UC),
        /* Uncached - GO:L3 */
        MOCS_ENTRY(3,
                   IG_PAT(1) | L4_3_UC,
                   L3_LKUP(1) | L3_1_UC),
        /* L4 - GO:Mem */
        MOCS_ENTRY(4,
                   IG_PAT(1),
                   L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
        /* Uncached - GO:Mem */
        MOCS_ENTRY(5,
                   IG_PAT(1) | L4_3_UC,
                   L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
        /* L4 - L3:NoLKUP; GO:L3 */
        MOCS_ENTRY(6,
                   IG_PAT(1),
                   L3_1_UC),
        /* Uncached - L3:NoLKUP; GO:L3 */
        MOCS_ENTRY(7,
                   IG_PAT(1) | L4_3_UC,
                   L3_1_UC),
        /* L4 - L3:NoLKUP; GO:Mem */
        MOCS_ENTRY(8,
                   IG_PAT(1),
                   L3_GLBGO(1) | L3_1_UC),
        /* Uncached - L3:NoLKUP; GO:Mem */
        MOCS_ENTRY(9,
                   IG_PAT(1) | L4_3_UC,
                   L3_GLBGO(1) | L3_1_UC),
        /* Display - L3; L4:WT */
        MOCS_ENTRY(14,
                   IG_PAT(1) | L4_1_WT,
                   L3_LKUP(1) | L3_3_WB),
        /* CCS - Non-Displayable */
        MOCS_ENTRY(15,
                   IG_PAT(1),
                   L3_GLBGO(1) | L3_1_UC),
};

enum {
        HAS_GLOBAL_MOCS = BIT(0),
        HAS_ENGINE_MOCS = BIT(1),
        HAS_RENDER_L3CC = BIT(2),
};
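
/*
 * get_mocs_settings() below returns a mask of these flags: with
 * HAS_GLOBAL_MOCS the control values are written once to the global MOCS
 * registers from intel_mocs_init(), while HAS_ENGINE_MOCS platforms get a
 * per-engine copy via intel_mocs_init_engine(). HAS_RENDER_L3CC requests
 * programming of the l3cc half of the table into the LNCFCMOCS registers.
 */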

static bool has_l3cc(const struct drm_i915_private *i915)
{
        return true;
}

static bool has_global_mocs(const struct drm_i915_private *i915)
{
        return HAS_GLOBAL_MOCS_REGISTERS(i915);
}

static bool has_mocs(const struct drm_i915_private *i915)
{
        return !IS_DGFX(i915);
}

static unsigned int get_mocs_settings(struct drm_i915_private *i915,
                                      struct drm_i915_mocs_table *table)
{
        unsigned int flags;

        memset(table, 0, sizeof(struct drm_i915_mocs_table));

        table->unused_entries_index = I915_MOCS_PTE;
        if (IS_GFX_GT_IP_RANGE(to_gt(i915), IP_VER(12, 70), IP_VER(12, 74))) {
                table->size = ARRAY_SIZE(mtl_mocs_table);
                table->table = mtl_mocs_table;
                table->n_entries = MTL_NUM_MOCS_ENTRIES;
                table->uc_index = 9;
                table->unused_entries_index = 1;
        } else if (IS_DG2(i915)) {
                table->size = ARRAY_SIZE(dg2_mocs_table);
                table->table = dg2_mocs_table;
                table->uc_index = 1;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->unused_entries_index = 3;
        } else if (IS_DG1(i915)) {
                table->size = ARRAY_SIZE(dg1_mocs_table);
                table->table = dg1_mocs_table;
                table->uc_index = 1;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->unused_entries_index = 5;
        } else if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
                /* For TGL/RKL, can't be changed now for ABI reasons */
                table->size = ARRAY_SIZE(tgl_mocs_table);
                table->table = tgl_mocs_table;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->uc_index = 3;
        } else if (GRAPHICS_VER(i915) >= 12) {
                table->size = ARRAY_SIZE(gen12_mocs_table);
                table->table = gen12_mocs_table;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->uc_index = 3;
                table->unused_entries_index = 2;
        } else if (GRAPHICS_VER(i915) == 11) {
                table->size = ARRAY_SIZE(icl_mocs_table);
                table->table = icl_mocs_table;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
        } else if (IS_GEN9_BC(i915)) {
                table->size = ARRAY_SIZE(skl_mocs_table);
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->table = skl_mocs_table;
        } else if (IS_GEN9_LP(i915)) {
                table->size = ARRAY_SIZE(broxton_mocs_table);
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->table = broxton_mocs_table;
        } else {
                drm_WARN_ONCE(&i915->drm, GRAPHICS_VER(i915) >= 9,
                              "Platform that should have a MOCS table does not.\n");
                return 0;
        }

        if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
                return 0;

        /* WaDisableSkipCaching:skl,bxt,kbl,glk */
        if (GRAPHICS_VER(i915) == 9) {
                int i;

                for (i = 0; i < table->size; i++)
                        if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
                                              (L3_ESC(1) | L3_SCC(0x7))))
                                return 0;
        }

        flags = 0;
        if (has_mocs(i915)) {
                if (has_global_mocs(i915))
                        flags |= HAS_GLOBAL_MOCS;
                else
                        flags |= HAS_ENGINE_MOCS;
        }
        if (has_l3cc(i915))
                flags |= HAS_RENDER_L3CC;

        return flags;
}

/*
 * Get the control_value from a MOCS entry. If the requested index is out of
 * range or the entry is not used, fall back to the entry at
 * unused_entries_index (which defaults to I915_MOCS_PTE).
 */
static u32 get_entry_control(const struct drm_i915_mocs_table *table,
                             unsigned int index)
{
        if (index < table->size && table->table[index].used)
                return table->table[index].control_value;
        return table->table[table->unused_entries_index].control_value;
}

#define for_each_mocs(mocs, t, i) \
        for (i = 0; \
             i < (t)->n_entries ? (mocs = get_entry_control((t), i)), 1 : 0;\
             i++)
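
/*
 * Note the comma operator in the loop condition above: while i is still
 * below n_entries, mocs is refreshed with the (possibly fallback) control
 * value for index i before each iteration of the body; once i reaches
 * n_entries the ternary evaluates to 0 and the loop terminates.
 */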

static void __init_mocs_table(struct intel_uncore *uncore,
                              const struct drm_i915_mocs_table *table,
                              u32 addr)
{
        unsigned int i;
        u32 mocs;

        drm_WARN_ONCE(&uncore->i915->drm, !table->unused_entries_index,
                      "Unused entries index should have been defined\n");
        for_each_mocs(mocs, table, i)
                intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
}
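
/*
 * The MOCS control registers form a contiguous block of 32-bit MMIO
 * registers, so entry i is written at addr + i * 4, with addr supplied by
 * either mocs_offset() (per-engine registers) or global_mocs_offset()
 * (platforms with global MOCS registers).
 */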

static u32 mocs_offset(const struct intel_engine_cs *engine)
{
        static const u32 offset[] = {
                [RCS0] = __GEN9_RCS0_MOCS0,
                [VCS0] = __GEN9_VCS0_MOCS0,
                [VCS1] = __GEN9_VCS1_MOCS0,
                [VECS0] = __GEN9_VECS0_MOCS0,
                [BCS0] = __GEN9_BCS0_MOCS0,
                [VCS2] = __GEN11_VCS2_MOCS0,
        };

        GEM_BUG_ON(engine->id >= ARRAY_SIZE(offset));
        return offset[engine->id];
}

static void init_mocs_table(struct intel_engine_cs *engine,
                            const struct drm_i915_mocs_table *table)
{
        __init_mocs_table(engine->uncore, table, mocs_offset(engine));
}

/*
 * Get the l3cc_value from a MOCS entry. If the requested index is out of
 * range or the entry is not used, fall back to the entry at
 * unused_entries_index (which defaults to I915_MOCS_PTE).
 */
static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
                          unsigned int index)
{
        if (index < table->size && table->table[index].used)
                return table->table[index].l3cc_value;
        return table->table[table->unused_entries_index].l3cc_value;
}

static u32 l3cc_combine(u16 low, u16 high)
{
        return low | (u32)high << 16;
}

#define for_each_l3cc(l3cc, t, i) \
        for (i = 0; \
             i < ((t)->n_entries + 1) / 2 ? \
             (l3cc = l3cc_combine(get_entry_l3cc((t), 2 * i), \
                                  get_entry_l3cc((t), 2 * i + 1))), 1 : \
             0; \
             i++)
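
/*
 * l3cc values are packed two per register: for_each_l3cc() pairs entries
 * 2 * i (low 16 bits) and 2 * i + 1 (high 16 bits) via l3cc_combine(), so
 * only (n_entries + 1) / 2 LNCFCMOCS registers need to be written.
 */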

static void init_l3cc_table(struct intel_gt *gt,
                            const struct drm_i915_mocs_table *table)
{
        unsigned long flags;
        unsigned int i;
        u32 l3cc;

        intel_gt_mcr_lock(gt, &flags);
        for_each_l3cc(l3cc, table, i)
                if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 55))
                        intel_gt_mcr_multicast_write_fw(gt, XEHP_LNCFCMOCS(i), l3cc);
                else
                        intel_uncore_write_fw(gt->uncore, GEN9_LNCFCMOCS(i), l3cc);
        intel_gt_mcr_unlock(gt, flags);
}

void intel_mocs_init_engine(struct intel_engine_cs *engine)
{
        struct drm_i915_mocs_table table;
        unsigned int flags;

        /* Called under a blanket forcewake */
        assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);

        flags = get_mocs_settings(engine->i915, &table);
        if (!flags)
                return;

        /* Platforms with global MOCS do not need per-engine initialization. */
        if (flags & HAS_ENGINE_MOCS)
                init_mocs_table(engine, &table);

        if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
                init_l3cc_table(engine->gt, &table);
}

static u32 global_mocs_offset(void)
{
        return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
}

void intel_set_mocs_index(struct intel_gt *gt)
{
        struct drm_i915_mocs_table table;

        get_mocs_settings(gt->i915, &table);
        gt->mocs.uc_index = table.uc_index;
        if (HAS_L3_CCS_READ(gt->i915))
                gt->mocs.wb_index = table.wb_index;
}

void intel_mocs_init(struct intel_gt *gt)
{
        struct drm_i915_mocs_table table;
        unsigned int flags;

        /*
         * LLC and eDRAM control values are not applicable to dgfx
         */
        flags = get_mocs_settings(gt->i915, &table);
        if (flags & HAS_GLOBAL_MOCS)
                __init_mocs_table(gt->uncore, &table, global_mocs_offset());

        /*
         * Initialize the L3CC table as part of MOCS initialization to make
         * sure the LNCFCMOCSx registers are programmed for the subsequent
         * memory transactions, including GuC transactions.
         */
        if (flags & HAS_RENDER_L3CC)
                init_l3cc_table(gt, &table);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_mocs.c"
#endif