xref: /linux/drivers/gpu/drm/xe/xe_mocs.c (revision aa66c93d5f69d48809468c4e2124e408e31fa931)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_mocs.h"
7 
8 #include "regs/xe_gt_regs.h"
9 #include "xe_bo.h"
10 #include "xe_device.h"
11 #include "xe_exec_queue.h"
12 #include "xe_gt.h"
13 #include "xe_gt_mcr.h"
14 #include "xe_mmio.h"
15 #include "xe_platform_types.h"
16 #include "xe_sriov.h"
17 #include "xe_step_types.h"
18 
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define mocs_dbg xe_gt_dbg
#else
/*
 * Compiled-out stub: keeps mocs_dbg() callers free of #ifdefs when
 * CONFIG_DRM_XE_DEBUG is disabled.  __printf() preserves format checking.
 */
__printf(2, 3)
static inline void mocs_dbg(const struct xe_gt *gt,
			    const char *format, ...)
{ /* noop */ }
#endif
27 
/* Flags returned by get_mocs_settings(): which register banks to program */
enum {
	HAS_GLOBAL_MOCS = BIT(0),	/* program the GLOB_MOCS registers */
	HAS_LNCF_MOCS = BIT(1),		/* program the LNCFCMOCS (L3) registers */
};
32 
/* One row of a MOCS table */
struct xe_mocs_entry {
	u32 control_value;	/* written to the GLOB_MOCS register for this index */
	u16 l3cc_value;		/* packed two-per-register into LNCFCMOCS */
	u16 used;		/* non-zero when this index is defined by the table */
};
38 
/* Per-platform MOCS table description, filled in by get_mocs_settings() */
struct xe_mocs_info {
	unsigned int size;		/* number of rows in @table */
	unsigned int n_entries;		/* number of HW entries to program */
	const struct xe_mocs_entry *table;
	u8 uc_index;			/* index of the uncached entry */
	u8 wb_index;			/* index of the write-back entry (if any) */
	u8 unused_entries_index;	/* fallback for undefined table rows */
};
47 
/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
/* Field shifts used to build xe_mocs_entry.control_value */
#define _LE_CACHEABILITY(value)	((value) << 0)
#define _LE_TGT_CACHE(value)	((value) << 2)
#define LE_LRUM(value)		((value) << 4)
#define LE_AOM(value)		((value) << 6)
#define LE_RSC(value)		((value) << 7)
#define LE_SCC(value)		((value) << 8)
#define LE_PFM(value)		((value) << 11)
#define LE_SCF(value)		((value) << 14)
#define LE_COS(value)		((value) << 15)
#define LE_SSE(value)		((value) << 17)

/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
/* Field shifts used to build xe_mocs_entry.l3cc_value */
#define L3_ESC(value)		((value) << 0)
#define L3_SCC(value)		((value) << 1)
#define _L3_CACHEABILITY(value)	((value) << 4)
#define L3_GLBGO(value)		((value) << 6)
#define L3_LKUP(value)		((value) << 7)

/* Defines for the tables (GLOB_MOCS_0 - GLOB_MOCS_16) */
#define IG_PAT				REG_BIT(8)
#define L3_CACHE_POLICY_MASK		REG_GENMASK(5, 4)
#define L4_CACHE_POLICY_MASK		REG_GENMASK(3, 2)

/* Helper defines */
#define XELP_NUM_MOCS_ENTRIES	64  /* 63-64 are reserved, but configured. */
#define PVC_NUM_MOCS_ENTRIES	3
#define MTL_NUM_MOCS_ENTRIES	16
#define XE2_NUM_MOCS_ENTRIES	16
77 
/* (e)LLC caching options */
/*
 * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
 * the same as LE_UC
 */
#define LE_0_PAGETABLE		_LE_CACHEABILITY(0)
#define LE_1_UC			_LE_CACHEABILITY(1)
#define LE_2_WT			_LE_CACHEABILITY(2)
#define LE_3_WB			_LE_CACHEABILITY(3)

/* Target cache */
#define LE_TC_0_PAGETABLE	_LE_TGT_CACHE(0)
#define LE_TC_1_LLC		_LE_TGT_CACHE(1)
#define LE_TC_2_LLC_ELLC	_LE_TGT_CACHE(2)
#define LE_TC_3_LLC_ELLC_ALT	_LE_TGT_CACHE(3)

/* L3 caching options */
#define L3_0_DIRECT		_L3_CACHEABILITY(0)
#define L3_1_UC			_L3_CACHEABILITY(1)
#define L3_2_RESERVED		_L3_CACHEABILITY(2)
#define L3_3_WB			_L3_CACHEABILITY(3)

/* L4 caching options (program the GLOB_MOCS L4_CACHE_POLICY field) */
#define L4_0_WB                 REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 0)
#define L4_1_WT                 REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 1)
#define L4_3_UC                 REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 3)

/* Xe2 L3 options (program the GLOB_MOCS L3_CACHE_POLICY field) */
#define XE2_L3_0_WB		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 0)
/* XD: WB Transient Display */
#define XE2_L3_1_XD		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 1)
#define XE2_L3_3_UC		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 3)
109 
/*
 * Designated-initializer helper: defines table slot @__idx and marks it
 * used so get_entry_control()/get_entry_l3cc() won't substitute the
 * fallback (unused_entries_index) settings for it.
 */
#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
	[__idx] = { \
		.control_value = __control_value, \
		.l3cc_value = __l3cc_value, \
		.used = 1, \
	}
116 
117 /*
118  * MOCS tables
119  *
120  * These are the MOCS tables that are programmed across all the rings.
121  * The control value is programmed to all the rings that support the
122  * MOCS registers. While the l3cc_values are only programmed to the
123  * LNCFCMOCS0 - LNCFCMOCS32 registers.
124  *
125  * These tables are intended to be kept reasonably consistent across
126  * HW platforms, and for ICL+, be identical across OSes. To achieve
127  * that, the list of entries is published as part of bspec.
128  *
129  * Entries not part of the following tables are undefined as far as userspace is
130  * concerned and shouldn't be relied upon. The last few entries are reserved by
131  * the hardware. They should be initialized according to bspec and never used.
132  *
133  * NOTE1: These tables are part of bspec and defined as part of the hardware
134  * interface. It is expected that, for specific hardware platform, existing
135  * entries will remain constant and the table will only be updated by adding new
136  * entries, filling unused positions.
137  *
138  * NOTE2: Reserved and unspecified MOCS indices have been set to L3 WB. These
139  * reserved entries should never be used. They may be changed to low performant
140  * variants with better coherency in the future if more entries are needed.
141  */
142 
/*
 * Xe_LP MOCS table, used by TGL/RKL/ADL-S/P/N (see get_mocs_settings()).
 * control_value programs the LLC behavior; l3cc_value programs L3.
 */
static const struct xe_mocs_entry gen12_mocs_desc[] = {
	/* Base - L3 + LLC */
	MOCS_ENTRY(2,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_3_WB),
	/* Base - Uncached */
	MOCS_ENTRY(3,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_1_UC),
	/* Base - L3 */
	MOCS_ENTRY(4,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* Base - LLC */
	MOCS_ENTRY(5,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* Age 0 - LLC */
	MOCS_ENTRY(6,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1),
		   L3_1_UC),
	/* Age 0 - L3 + LLC */
	MOCS_ENTRY(7,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1),
		   L3_3_WB),
	/* Age: Don't Chg. - LLC */
	MOCS_ENTRY(8,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2),
		   L3_1_UC),
	/* Age: Don't Chg. - L3 + LLC */
	MOCS_ENTRY(9,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2),
		   L3_3_WB),
	/* No AOM - LLC */
	MOCS_ENTRY(10,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM - L3 + LLC */
	MOCS_ENTRY(11,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1),
		   L3_3_WB),
	/* No AOM; Age 0 - LLC */
	MOCS_ENTRY(12,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM; Age 0 - L3 + LLC */
	MOCS_ENTRY(13,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1),
		   L3_3_WB),
	/* No AOM; Age:DC - LLC */
	MOCS_ENTRY(14,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM; Age:DC - L3 + LLC */
	MOCS_ENTRY(15,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1),
		   L3_3_WB),
	/* Self-Snoop - L3 + LLC */
	MOCS_ENTRY(18,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(12.5%) */
	MOCS_ENTRY(19,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(25%) */
	MOCS_ENTRY(20,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(50%) */
	MOCS_ENTRY(21,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(75%) */
	MOCS_ENTRY(22,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(87.5%) */
	MOCS_ENTRY(23,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7),
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
	MOCS_ENTRY(48,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + L3 */
	MOCS_ENTRY(49,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + LLC */
	MOCS_ENTRY(50,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* Implicitly enable L1 - HDC:L1 */
	MOCS_ENTRY(51,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_1_UC),
	/* HW Special Case (CCS) */
	MOCS_ENTRY(60,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* HW Special Case (Displayable) */
	MOCS_ENTRY(61,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* HW Reserved - SW program but never use */
	MOCS_ENTRY(62,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* HW Reserved - SW program but never use */
	MOCS_ENTRY(63,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC)
};
257 
/* DG1 MOCS table: no LLC on this platform, only L3 settings are meaningful */
static const struct xe_mocs_entry dg1_mocs_desc[] = {
	/* UC */
	MOCS_ENTRY(1, 0, L3_1_UC),
	/* WB - L3 */
	MOCS_ENTRY(5, 0, L3_3_WB),
	/* WB - L3 50% */
	MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
	/* WB - L3 25% */
	MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
	/* WB - L3 12.5% */
	MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),

	/* HDC:L1 + L3 */
	MOCS_ENTRY(48, 0, L3_3_WB),
	/* HDC:L1 */
	MOCS_ENTRY(49, 0, L3_1_UC),

	/* HW Reserved */
	MOCS_ENTRY(60, 0, L3_1_UC),
	MOCS_ENTRY(61, 0, L3_1_UC),
	MOCS_ENTRY(62, 0, L3_1_UC),
	MOCS_ENTRY(63, 0, L3_1_UC),
};
281 
/* DG2 MOCS table: L3 settings only; GO = "global observation" point */
static const struct xe_mocs_entry dg2_mocs_desc[] = {
	/* UC - Coherent; GO:L3 */
	MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
	/* UC - Coherent; GO:Memory */
	MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
	/* UC - Non-Coherent; GO:Memory */
	MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),

	/* WB - LC */
	MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
293 
/* PVC MOCS table: only three entries (error / UC / WB) are defined */
static const struct xe_mocs_entry pvc_mocs_desc[] = {
	/* Error */
	MOCS_ENTRY(0, 0, L3_3_WB),

	/* UC */
	MOCS_ENTRY(1, 0, L3_1_UC),

	/* WB */
	MOCS_ENTRY(2, 0, L3_3_WB),
};
304 
/*
 * MTL MOCS table: control_value carries the IG_PAT/L4 policy bits for the
 * GLOB_MOCS registers, l3cc_value carries the L3 settings.
 */
static const struct xe_mocs_entry mtl_mocs_desc[] = {
	/* Error - Reserved for Non-Use */
	MOCS_ENTRY(0,
		   0,
		   L3_LKUP(1) | L3_3_WB),
	/* Cached - L3 + L4 */
	MOCS_ENTRY(1,
		   IG_PAT,
		   L3_LKUP(1) | L3_3_WB),
	/* L4 - GO:L3 */
	MOCS_ENTRY(2,
		   IG_PAT,
		   L3_LKUP(1) | L3_1_UC),
	/* Uncached - GO:L3 */
	MOCS_ENTRY(3,
		   IG_PAT | L4_3_UC,
		   L3_LKUP(1) | L3_1_UC),
	/* L4 - GO:Mem */
	MOCS_ENTRY(4,
		   IG_PAT,
		   L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
	/* Uncached - GO:Mem */
	MOCS_ENTRY(5,
		   IG_PAT | L4_3_UC,
		   L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
	/* L4 - L3:NoLKUP; GO:L3 */
	MOCS_ENTRY(6,
		   IG_PAT,
		   L3_1_UC),
	/* Uncached - L3:NoLKUP; GO:L3 */
	MOCS_ENTRY(7,
		   IG_PAT | L4_3_UC,
		   L3_1_UC),
	/* L4 - L3:NoLKUP; GO:Mem */
	MOCS_ENTRY(8,
		   IG_PAT,
		   L3_GLBGO(1) | L3_1_UC),
	/* Uncached - L3:NoLKUP; GO:Mem */
	MOCS_ENTRY(9,
		   IG_PAT | L4_3_UC,
		   L3_GLBGO(1) | L3_1_UC),
	/* Display - L3; L4:WT */
	MOCS_ENTRY(14,
		   IG_PAT | L4_1_WT,
		   L3_LKUP(1) | L3_3_WB),
	/* CCS - Non-Displayable */
	MOCS_ENTRY(15,
		   IG_PAT,
		   L3_GLBGO(1) | L3_1_UC),
};
355 
/* Xe2 MOCS table: all settings live in control_value (GLOB_MOCS only) */
static const struct xe_mocs_entry xe2_mocs_table[] = {
	/* Defer to PAT */
	MOCS_ENTRY(0, XE2_L3_0_WB | L4_3_UC, 0),
	/* Cached L3, Uncached L4 */
	MOCS_ENTRY(1, IG_PAT | XE2_L3_0_WB | L4_3_UC, 0),
	/* Uncached L3, Cached L4 */
	MOCS_ENTRY(2, IG_PAT | XE2_L3_3_UC | L4_0_WB, 0),
	/* Uncached L3 + L4 */
	MOCS_ENTRY(3, IG_PAT | XE2_L3_3_UC | L4_3_UC, 0),
	/* Cached L3 + L4 */
	MOCS_ENTRY(4, IG_PAT | XE2_L3_0_WB | L4_0_WB, 0),
};
368 
369 static unsigned int get_mocs_settings(struct xe_device *xe,
370 				      struct xe_mocs_info *info)
371 {
372 	unsigned int flags = 0;
373 
374 	memset(info, 0, sizeof(struct xe_mocs_info));
375 
376 	switch (xe->info.platform) {
377 	case XE_LUNARLAKE:
378 	case XE_BATTLEMAGE:
379 		info->size = ARRAY_SIZE(xe2_mocs_table);
380 		info->table = xe2_mocs_table;
381 		info->n_entries = XE2_NUM_MOCS_ENTRIES;
382 		info->uc_index = 3;
383 		info->wb_index = 4;
384 		info->unused_entries_index = 4;
385 		break;
386 	case XE_PVC:
387 		info->size = ARRAY_SIZE(pvc_mocs_desc);
388 		info->table = pvc_mocs_desc;
389 		info->n_entries = PVC_NUM_MOCS_ENTRIES;
390 		info->uc_index = 1;
391 		info->wb_index = 2;
392 		info->unused_entries_index = 2;
393 		break;
394 	case XE_METEORLAKE:
395 		info->size = ARRAY_SIZE(mtl_mocs_desc);
396 		info->table = mtl_mocs_desc;
397 		info->n_entries = MTL_NUM_MOCS_ENTRIES;
398 		info->uc_index = 9;
399 		info->unused_entries_index = 1;
400 		break;
401 	case XE_DG2:
402 		info->size = ARRAY_SIZE(dg2_mocs_desc);
403 		info->table = dg2_mocs_desc;
404 		info->uc_index = 1;
405 		/*
406 		 * Last entry is RO on hardware, don't bother with what was
407 		 * written when checking later
408 		 */
409 		info->n_entries = XELP_NUM_MOCS_ENTRIES - 1;
410 		info->unused_entries_index = 3;
411 		break;
412 	case XE_DG1:
413 		info->size = ARRAY_SIZE(dg1_mocs_desc);
414 		info->table = dg1_mocs_desc;
415 		info->uc_index = 1;
416 		info->n_entries = XELP_NUM_MOCS_ENTRIES;
417 		info->unused_entries_index = 5;
418 		break;
419 	case XE_TIGERLAKE:
420 	case XE_ROCKETLAKE:
421 	case XE_ALDERLAKE_S:
422 	case XE_ALDERLAKE_P:
423 	case XE_ALDERLAKE_N:
424 		info->size  = ARRAY_SIZE(gen12_mocs_desc);
425 		info->table = gen12_mocs_desc;
426 		info->n_entries = XELP_NUM_MOCS_ENTRIES;
427 		info->uc_index = 3;
428 		info->unused_entries_index = 2;
429 		break;
430 	default:
431 		drm_err(&xe->drm, "Platform that should have a MOCS table does not.\n");
432 		return 0;
433 	}
434 
435 	/*
436 	 * Index 0 is a reserved/unused table entry on most platforms, but
437 	 * even on those where it does represent a legitimate MOCS entry, it
438 	 * never represents the "most cached, least coherent" behavior we want
439 	 * to populate undefined table rows with.  So if unused_entries_index
440 	 * is still 0 at this point, we'll assume that it was omitted by
441 	 * mistake in the switch statement above.
442 	 */
443 	xe_assert(xe, info->unused_entries_index != 0);
444 
445 	if (XE_WARN_ON(info->size > info->n_entries)) {
446 		info->table = NULL;
447 		return 0;
448 	}
449 
450 	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) >= 20)
451 		flags |= HAS_GLOBAL_MOCS;
452 	if (GRAPHICS_VER(xe) < 20)
453 		flags |= HAS_LNCF_MOCS;
454 
455 	return flags;
456 }
457 
458 /*
459  * Get control_value from MOCS entry.  If the table entry is not defined, the
460  * settings from unused_entries_index will be returned.
461  */
462 static u32 get_entry_control(const struct xe_mocs_info *info,
463 			     unsigned int index)
464 {
465 	if (index < info->size && info->table[index].used)
466 		return info->table[index].control_value;
467 	return info->table[info->unused_entries_index].control_value;
468 }
469 
470 static bool regs_are_mcr(struct xe_gt *gt)
471 {
472 	struct xe_device *xe = gt_to_xe(gt);
473 
474 	if (xe_gt_is_media_type(gt))
475 		return MEDIA_VER(xe) >= 20;
476 	else
477 		return GRAPHICS_VERx100(xe) >= 1250;
478 }
479 
480 static void __init_mocs_table(struct xe_gt *gt,
481 			      const struct xe_mocs_info *info)
482 {
483 	unsigned int i;
484 	u32 mocs;
485 
486 	xe_gt_WARN_ONCE(gt, !info->unused_entries_index,
487 			"Unused entries index should have been defined\n");
488 
489 	mocs_dbg(gt, "mocs entries: %d\n", info->n_entries);
490 
491 	for (i = 0; i < info->n_entries; i++) {
492 		mocs = get_entry_control(info, i);
493 
494 		mocs_dbg(gt, "GLOB_MOCS[%d] 0x%x 0x%x\n", i,
495 			 XELP_GLOBAL_MOCS(i).addr, mocs);
496 
497 		if (regs_are_mcr(gt))
498 			xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs);
499 		else
500 			xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs);
501 	}
502 }
503 
504 /*
505  * Get l3cc_value from MOCS entry taking into account when it's not used
506  * then if unused_entries_index is not zero then its value will be returned
507  * otherwise I915_MOCS_PTE's value is returned in this case.
508  */
509 static u16 get_entry_l3cc(const struct xe_mocs_info *info,
510 			  unsigned int index)
511 {
512 	if (index < info->size && info->table[index].used)
513 		return info->table[index].l3cc_value;
514 	return info->table[info->unused_entries_index].l3cc_value;
515 }
516 
517 static u32 l3cc_combine(u16 low, u16 high)
518 {
519 	return low | (u32)high << 16;
520 }
521 
522 static void init_l3cc_table(struct xe_gt *gt,
523 			    const struct xe_mocs_info *info)
524 {
525 	unsigned int i;
526 	u32 l3cc;
527 
528 	mocs_dbg(gt, "l3cc entries: %d\n", info->n_entries);
529 
530 	for (i = 0; i < (info->n_entries + 1) / 2; i++) {
531 		l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
532 				    get_entry_l3cc(info, 2 * i + 1));
533 
534 		mocs_dbg(gt, "LNCFCMOCS[%d] 0x%x 0x%x\n", i,
535 			 XELP_LNCFCMOCS(i).addr, l3cc);
536 
537 		if (regs_are_mcr(gt))
538 			xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc);
539 		else
540 			xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc);
541 	}
542 }
543 
544 void xe_mocs_init_early(struct xe_gt *gt)
545 {
546 	struct xe_mocs_info table;
547 
548 	get_mocs_settings(gt_to_xe(gt), &table);
549 	gt->mocs.uc_index = table.uc_index;
550 	gt->mocs.wb_index = table.wb_index;
551 }
552 
553 void xe_mocs_init(struct xe_gt *gt)
554 {
555 	struct xe_mocs_info table;
556 	unsigned int flags;
557 
558 	if (IS_SRIOV_VF(gt_to_xe(gt)))
559 		return;
560 
561 	/*
562 	 * MOCS settings are split between "GLOB_MOCS" and/or "LNCFCMOCS"
563 	 * registers depending on platform.
564 	 *
565 	 * These registers should be programmed before GuC initialization
566 	 * since their values will affect some of the memory transactions
567 	 * performed by the GuC.
568 	 */
569 	flags = get_mocs_settings(gt_to_xe(gt), &table);
570 	mocs_dbg(gt, "flag:0x%x\n", flags);
571 
572 	if (IS_SRIOV_VF(gt_to_xe(gt)))
573 		return;
574 
575 	if (flags & HAS_GLOBAL_MOCS)
576 		__init_mocs_table(gt, &table);
577 	if (flags & HAS_LNCF_MOCS)
578 		init_l3cc_table(gt, &table);
579 }
580 
581 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
582 #include "tests/xe_mocs.c"
583 #endif
584