// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_mocs.h"

#include "regs/xe_gt_regs.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_mmio.h"
#include "xe_platform_types.h"
#include "xe_step_types.h"

#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define mocs_dbg drm_dbg
#else
__printf(2, 3)
static inline void mocs_dbg(const struct drm_device *dev,
			    const char *format, ...)
{ /* noop */ }
#endif

enum {
	HAS_GLOBAL_MOCS = BIT(0),
	HAS_LNCF_MOCS = BIT(1),
};

struct xe_mocs_entry {
	u32 control_value;
	u16 l3cc_value;
	u16 used;
};
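
/*
 * Per-platform MOCS bookkeeping: @size is the number of rows defined in
 * @table, while @n_entries is the number of hardware MOCS entries that get
 * programmed; rows the table leaves undefined are filled from
 * @unused_entries_index. @uc_index (and @wb_index, where the platform
 * defines one) name well-known entries that xe_mocs_init_early() exports
 * through gt->mocs.
 */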
struct xe_mocs_info {
	unsigned int size;
	unsigned int n_entries;
	const struct xe_mocs_entry *table;
	u8 uc_index;
	u8 wb_index;
	u8 unused_entries_index;
};

/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
#define _LE_CACHEABILITY(value)	((value) << 0)
#define _LE_TGT_CACHE(value)	((value) << 2)
#define LE_LRUM(value)		((value) << 4)
#define LE_AOM(value)		((value) << 6)
#define LE_RSC(value)		((value) << 7)
#define LE_SCC(value)		((value) << 8)
#define LE_PFM(value)		((value) << 11)
#define LE_SCF(value)		((value) << 14)
#define LE_COS(value)		((value) << 15)
#define LE_SSE(value)		((value) << 17)

/* Defines for the tables (LNCFMOCS0 - LNCFMOCS31) - two entries per word */
#define L3_ESC(value)		((value) << 0)
#define L3_SCC(value)		((value) << 1)
#define _L3_CACHEABILITY(value)	((value) << 4)
#define L3_GLBGO(value)		((value) << 6)
#define L3_LKUP(value)		((value) << 7)

/* Defines for the tables (GLOB_MOCS_0 - GLOB_MOCS_16) */
#define IG_PAT			REG_BIT(8)
#define L3_CACHE_POLICY_MASK	REG_GENMASK(5, 4)
#define L4_CACHE_POLICY_MASK	REG_GENMASK(3, 2)

/* Helper defines */
#define XELP_NUM_MOCS_ENTRIES	64  /* 63-64 are reserved, but configured. */
#define PVC_NUM_MOCS_ENTRIES	3
#define MTL_NUM_MOCS_ENTRIES	16
#define XE2_NUM_MOCS_ENTRIES	16

/* (e)LLC caching options */
/*
 * Note: LE_0_PAGETABLE works only up to Gen11; for newer gens it means
 * the same as LE_UC
 */
#define LE_0_PAGETABLE		_LE_CACHEABILITY(0)
#define LE_1_UC			_LE_CACHEABILITY(1)
#define LE_2_WT			_LE_CACHEABILITY(2)
#define LE_3_WB			_LE_CACHEABILITY(3)

/* Target cache */
#define LE_TC_0_PAGETABLE	_LE_TGT_CACHE(0)
#define LE_TC_1_LLC		_LE_TGT_CACHE(1)
#define LE_TC_2_LLC_ELLC	_LE_TGT_CACHE(2)
#define LE_TC_3_LLC_ELLC_ALT	_LE_TGT_CACHE(3)

/* L3 caching options */
#define L3_0_DIRECT		_L3_CACHEABILITY(0)
#define L3_1_UC			_L3_CACHEABILITY(1)
#define L3_2_RESERVED		_L3_CACHEABILITY(2)
#define L3_3_WB			_L3_CACHEABILITY(3)

/* L4 caching options */
#define L4_0_WB			REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 0)
#define L4_1_WT			REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 1)
#define L4_3_UC			REG_FIELD_PREP(L4_CACHE_POLICY_MASK, 3)

#define XE2_L3_0_WB		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 0)
/* XD: WB Transient Display */
#define XE2_L3_1_XD		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 1)
#define XE2_L3_3_UC		REG_FIELD_PREP(L3_CACHE_POLICY_MASK, 3)

#define MOCS_ENTRY(__idx, __control_value, __l3cc_value) \
	[__idx] = { \
		.control_value = __control_value, \
		.l3cc_value = __l3cc_value, \
		.used = 1, \
	}
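
/*
 * MOCS_ENTRY() expands to a C99 designated initializer; for example
 * (illustrative only):
 *
 *   MOCS_ENTRY(3, LE_1_UC | LE_TC_1_LLC, L3_1_UC)
 *     => [3] = { .control_value = LE_1_UC | LE_TC_1_LLC,
 *                .l3cc_value = L3_1_UC, .used = 1 }
 *
 * Indices a table never names stay zero-initialized (.used == 0), so the
 * lookup helpers below fall back to the entry at unused_entries_index.
 */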

/*
 * MOCS tables
 *
 * These are the MOCS tables that are programmed across all the rings.
 * The control value is programmed to all the rings that support the
 * MOCS registers, while the l3cc_values are only programmed to the
 * LNCFCMOCS0 - LNCFCMOCS31 registers.
 *
 * These tables are intended to be kept reasonably consistent across
 * HW platforms, and for ICL+, be identical across OSes. To achieve
 * that, the list of entries is published as part of bspec.
 *
 * Entries not part of the following tables are undefined as far as userspace
 * is concerned and shouldn't be relied upon. The last few entries are
 * reserved by the hardware. They should be initialized according to bspec
 * and never used.
 *
 * NOTE1: These tables are part of bspec and defined as part of the hardware
 * interface. It is expected that, for a specific hardware platform, existing
 * entries will remain constant and the table will only be updated by adding
 * new entries, filling unused positions.
 *
 * NOTE2: Reserved and unspecified MOCS indices have been set to L3 WB. These
 * reserved entries should never be used. They may be changed to
 * lower-performing variants with better coherency in the future if more
 * entries are needed.
 */

static const struct xe_mocs_entry gen12_mocs_desc[] = {
	/* Base - L3 + LLC */
	MOCS_ENTRY(2,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_3_WB),
	/* Base - Uncached */
	MOCS_ENTRY(3,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_1_UC),
	/* Base - L3 */
	MOCS_ENTRY(4,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* Base - LLC */
	MOCS_ENTRY(5,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* Age 0 - LLC */
	MOCS_ENTRY(6,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1),
		   L3_1_UC),
	/* Age 0 - L3 + LLC */
	MOCS_ENTRY(7,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1),
		   L3_3_WB),
	/* Age: Don't Chg. - LLC */
	MOCS_ENTRY(8,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2),
		   L3_1_UC),
	/* Age: Don't Chg. - L3 + LLC */
	MOCS_ENTRY(9,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2),
		   L3_3_WB),
	/* No AOM - LLC */
	MOCS_ENTRY(10,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM - L3 + LLC */
	MOCS_ENTRY(11,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_AOM(1),
		   L3_3_WB),
	/* No AOM; Age 0 - LLC */
	MOCS_ENTRY(12,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM; Age 0 - L3 + LLC */
	MOCS_ENTRY(13,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(1) | LE_AOM(1),
		   L3_3_WB),
	/* No AOM; Age:DC - LLC */
	MOCS_ENTRY(14,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1),
		   L3_1_UC),
	/* No AOM; Age:DC - L3 + LLC */
	MOCS_ENTRY(15,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(2) | LE_AOM(1),
		   L3_3_WB),
	/* Self-Snoop - L3 + LLC */
	MOCS_ENTRY(18,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SSE(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(12.5%) */
	MOCS_ENTRY(19,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(7),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(25%) */
	MOCS_ENTRY(20,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(50%) */
	MOCS_ENTRY(21,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_SCC(1),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(75%) */
	MOCS_ENTRY(22,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(3),
		   L3_3_WB),
	/* Skip Caching - L3 + LLC(87.5%) */
	MOCS_ENTRY(23,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3) | LE_RSC(1) | LE_SCC(7),
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + L3 + LLC */
	MOCS_ENTRY(48,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + L3 */
	MOCS_ENTRY(49,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* Implicitly enable L1 - HDC:L1 + LLC */
	MOCS_ENTRY(50,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* Implicitly enable L1 - HDC:L1 */
	MOCS_ENTRY(51,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_1_UC),
	/* HW Special Case (CCS) */
	MOCS_ENTRY(60,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* HW Special Case (Displayable) */
	MOCS_ENTRY(61,
		   LE_1_UC | LE_TC_1_LLC,
		   L3_3_WB),
	/* HW Reserved - SW program but never use */
	MOCS_ENTRY(62,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC),
	/* HW Reserved - SW program but never use */
	MOCS_ENTRY(63,
		   LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
		   L3_1_UC)
};

static const struct xe_mocs_entry dg1_mocs_desc[] = {
	/* UC */
	MOCS_ENTRY(1, 0, L3_1_UC),
	/* WB - L3 */
	MOCS_ENTRY(5, 0, L3_3_WB),
	/* WB - L3 50% */
	MOCS_ENTRY(6, 0, L3_ESC(1) | L3_SCC(1) | L3_3_WB),
	/* WB - L3 25% */
	MOCS_ENTRY(7, 0, L3_ESC(1) | L3_SCC(3) | L3_3_WB),
	/* WB - L3 12.5% */
	MOCS_ENTRY(8, 0, L3_ESC(1) | L3_SCC(7) | L3_3_WB),

	/* HDC:L1 + L3 */
	MOCS_ENTRY(48, 0, L3_3_WB),
	/* HDC:L1 */
	MOCS_ENTRY(49, 0, L3_1_UC),

	/* HW Reserved */
	MOCS_ENTRY(60, 0, L3_1_UC),
	MOCS_ENTRY(61, 0, L3_1_UC),
	MOCS_ENTRY(62, 0, L3_1_UC),
	MOCS_ENTRY(63, 0, L3_1_UC),
};

static const struct xe_mocs_entry dg2_mocs_desc[] = {
	/* UC - Coherent; GO:L3 */
	MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
	/* UC - Coherent; GO:Memory */
	MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
	/* UC - Non-Coherent; GO:Memory */
	MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),

	/* WB - LC */
	MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};
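
/*
 * Wa_14011441408 variant of the DG2 table: identical to dg2_mocs_desc above
 * except that entry 0 is forced to GO:Memory. get_mocs_settings() selects it
 * for DG2-G10 graphics steppings A0..B0.
 */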
static const struct xe_mocs_entry dg2_mocs_desc_g10_ax[] = {
	/* Wa_14011441408: Set Go to Memory for MOCS#0 */
	MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
	/* UC - Coherent; GO:Memory */
	MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
	/* UC - Non-Coherent; GO:Memory */
	MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),

	/* WB - LC */
	MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
};

static const struct xe_mocs_entry pvc_mocs_desc[] = {
	/* Error */
	MOCS_ENTRY(0, 0, L3_3_WB),

	/* UC */
	MOCS_ENTRY(1, 0, L3_1_UC),

	/* WB */
	MOCS_ENTRY(2, 0, L3_3_WB),
};

static const struct xe_mocs_entry mtl_mocs_desc[] = {
	/* Error - Reserved for Non-Use */
	MOCS_ENTRY(0,
		   0,
		   L3_LKUP(1) | L3_3_WB),
	/* Cached - L3 + L4 */
	MOCS_ENTRY(1,
		   IG_PAT,
		   L3_LKUP(1) | L3_3_WB),
	/* L4 - GO:L3 */
	MOCS_ENTRY(2,
		   IG_PAT,
		   L3_LKUP(1) | L3_1_UC),
	/* Uncached - GO:L3 */
	MOCS_ENTRY(3,
		   IG_PAT | L4_3_UC,
		   L3_LKUP(1) | L3_1_UC),
	/* L4 - GO:Mem */
	MOCS_ENTRY(4,
		   IG_PAT,
		   L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
	/* Uncached - GO:Mem */
	MOCS_ENTRY(5,
		   IG_PAT | L4_3_UC,
		   L3_LKUP(1) | L3_GLBGO(1) | L3_1_UC),
	/* L4 - L3:NoLKUP; GO:L3 */
	MOCS_ENTRY(6,
		   IG_PAT,
		   L3_1_UC),
	/* Uncached - L3:NoLKUP; GO:L3 */
	MOCS_ENTRY(7,
		   IG_PAT | L4_3_UC,
		   L3_1_UC),
	/* L4 - L3:NoLKUP; GO:Mem */
	MOCS_ENTRY(8,
		   IG_PAT,
		   L3_GLBGO(1) | L3_1_UC),
	/* Uncached - L3:NoLKUP; GO:Mem */
	MOCS_ENTRY(9,
		   IG_PAT | L4_3_UC,
		   L3_GLBGO(1) | L3_1_UC),
	/* Display - L3; L4:WT */
	MOCS_ENTRY(14,
		   IG_PAT | L4_1_WT,
		   L3_LKUP(1) | L3_3_WB),
	/* CCS - Non-Displayable */
	MOCS_ENTRY(15,
		   IG_PAT,
		   L3_GLBGO(1) | L3_1_UC),
};

static const struct xe_mocs_entry xe2_mocs_table[] = {
	/* Defer to PAT */
	MOCS_ENTRY(0, XE2_L3_0_WB | L4_3_UC, 0),
	/* Cached L3, Uncached L4 */
	MOCS_ENTRY(1, IG_PAT | XE2_L3_0_WB | L4_3_UC, 0),
	/* Uncached L3, Cached L4 */
	MOCS_ENTRY(2, IG_PAT | XE2_L3_3_UC | L4_0_WB, 0),
	/* Uncached L3 + L4 */
	MOCS_ENTRY(3, IG_PAT | XE2_L3_3_UC | L4_3_UC, 0),
	/* Cached L3 + L4 */
	MOCS_ENTRY(4, IG_PAT | XE2_L3_0_WB | L4_0_WB, 0),
};
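
/*
 * Select the MOCS table and index bookkeeping for the current platform and
 * report, as a mask of HAS_GLOBAL_MOCS / HAS_LNCF_MOCS, which register sets
 * need to be programmed. Returns 0 when there is nothing to program.
 */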
static unsigned int get_mocs_settings(struct xe_device *xe,
				      struct xe_mocs_info *info)
{
	unsigned int flags = 0;

	memset(info, 0, sizeof(struct xe_mocs_info));

	switch (xe->info.platform) {
	case XE_LUNARLAKE:
		info->size = ARRAY_SIZE(xe2_mocs_table);
		info->table = xe2_mocs_table;
		info->n_entries = XE2_NUM_MOCS_ENTRIES;
		info->uc_index = 3;
		info->wb_index = 4;
		info->unused_entries_index = 4;
		break;
	case XE_PVC:
		info->size = ARRAY_SIZE(pvc_mocs_desc);
		info->table = pvc_mocs_desc;
		info->n_entries = PVC_NUM_MOCS_ENTRIES;
		info->uc_index = 1;
		info->wb_index = 2;
		info->unused_entries_index = 2;
		break;
	case XE_METEORLAKE:
		info->size = ARRAY_SIZE(mtl_mocs_desc);
		info->table = mtl_mocs_desc;
		info->n_entries = MTL_NUM_MOCS_ENTRIES;
		info->uc_index = 9;
		info->unused_entries_index = 1;
		break;
	case XE_DG2:
		if (xe->info.subplatform == XE_SUBPLATFORM_DG2_G10 &&
		    xe->info.step.graphics >= STEP_A0 &&
		    xe->info.step.graphics <= STEP_B0) {
			info->size = ARRAY_SIZE(dg2_mocs_desc_g10_ax);
			info->table = dg2_mocs_desc_g10_ax;
		} else {
			info->size = ARRAY_SIZE(dg2_mocs_desc);
			info->table = dg2_mocs_desc;
		}
		info->uc_index = 1;
		info->n_entries = XELP_NUM_MOCS_ENTRIES;
		info->unused_entries_index = 3;
		break;
	case XE_DG1:
		info->size = ARRAY_SIZE(dg1_mocs_desc);
		info->table = dg1_mocs_desc;
		info->uc_index = 1;
		info->n_entries = XELP_NUM_MOCS_ENTRIES;
		info->unused_entries_index = 5;
		break;
	case XE_TIGERLAKE:
	case XE_ROCKETLAKE:
	case XE_ALDERLAKE_S:
	case XE_ALDERLAKE_P:
	case XE_ALDERLAKE_N:
		info->size = ARRAY_SIZE(gen12_mocs_desc);
		info->table = gen12_mocs_desc;
		info->n_entries = XELP_NUM_MOCS_ENTRIES;
		info->uc_index = 3;
		info->unused_entries_index = 2;
		break;
	default:
		drm_err(&xe->drm, "Platform that should have a MOCS table does not.\n");
		return 0;
	}

	/*
	 * Index 0 is a reserved/unused table entry on most platforms, but
	 * even on those where it does represent a legitimate MOCS entry, it
	 * never represents the "most cached, least coherent" behavior we want
	 * to populate undefined table rows with. So if unused_entries_index
	 * is still 0 at this point, we'll assume that it was omitted by
	 * mistake in the switch statement above.
	 */
	xe_assert(xe, info->unused_entries_index != 0);

	if (XE_WARN_ON(info->size > info->n_entries)) {
		info->table = NULL;
		return 0;
	}

	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) >= 20)
		flags |= HAS_GLOBAL_MOCS;
	if (GRAPHICS_VER(xe) < 20)
		flags |= HAS_LNCF_MOCS;

	return flags;
}

/*
 * Get control_value from MOCS entry. If the table entry is not defined, the
 * settings from unused_entries_index will be returned.
 */
static u32 get_entry_control(const struct xe_mocs_info *info,
			     unsigned int index)
{
	if (index < info->size && info->table[index].used)
		return info->table[index].control_value;
	return info->table[info->unused_entries_index].control_value;
}

static void __init_mocs_table(struct xe_gt *gt,
			      const struct xe_mocs_info *info)
{
	struct xe_device *xe = gt_to_xe(gt);

	unsigned int i;
	u32 mocs;

	mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
	drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
		      "Unused entries index should have been defined\n");
	for (i = 0;
	     i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0;
	     i++) {
		mocs_dbg(&gt_to_xe(gt)->drm, "GLOB_MOCS[%d] 0x%x 0x%x\n", i,
			 XELP_GLOBAL_MOCS(i).addr, mocs);

		if (GRAPHICS_VERx100(gt_to_xe(gt)) > 1250)
			xe_gt_mcr_multicast_write(gt, XEHP_GLOBAL_MOCS(i), mocs);
		else
			xe_mmio_write32(gt, XELP_GLOBAL_MOCS(i), mocs);
	}
}

/*
 * Get l3cc_value from MOCS entry. If the table entry is not defined, the
 * settings from unused_entries_index will be returned.
 */
static u16 get_entry_l3cc(const struct xe_mocs_info *info,
			  unsigned int index)
{
	if (index < info->size && info->table[index].used)
		return info->table[index].l3cc_value;
	return info->table[info->unused_entries_index].l3cc_value;
}

static u32 l3cc_combine(u16 low, u16 high)
{
	return low | (u32)high << 16;
}
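
/*
 * Each 32-bit LNCFCMOCS register holds two 16-bit l3cc entries: the even
 * MOCS index in bits 15:0 and the following odd index in bits 31:16. That is
 * why the loop below walks (n_entries + 1) / 2 registers and combines entry
 * pairs with l3cc_combine().
 */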
static void init_l3cc_table(struct xe_gt *gt,
			    const struct xe_mocs_info *info)
{
	unsigned int i;
	u32 l3cc;

	mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
	for (i = 0;
	     i < (info->n_entries + 1) / 2 ?
	     (l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
				  get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
	     i++) {
		mocs_dbg(&gt_to_xe(gt)->drm, "LNCFCMOCS[%d] 0x%x 0x%x\n", i,
			 XELP_LNCFCMOCS(i).addr, l3cc);

		if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
			xe_gt_mcr_multicast_write(gt, XEHP_LNCFCMOCS(i), l3cc);
		else
			xe_mmio_write32(gt, XELP_LNCFCMOCS(i), l3cc);
	}
}

void xe_mocs_init_early(struct xe_gt *gt)
{
	struct xe_mocs_info table;

	get_mocs_settings(gt_to_xe(gt), &table);
	gt->mocs.uc_index = table.uc_index;
	gt->mocs.wb_index = table.wb_index;
}

void xe_mocs_init(struct xe_gt *gt)
{
	struct xe_mocs_info table;
	unsigned int flags;

	/*
	 * MOCS settings are split between "GLOB_MOCS" and/or "LNCFCMOCS"
	 * registers depending on platform.
	 *
	 * These registers should be programmed before GuC initialization
	 * since their values will affect some of the memory transactions
	 * performed by the GuC.
	 */
	flags = get_mocs_settings(gt_to_xe(gt), &table);
	mocs_dbg(&gt_to_xe(gt)->drm, "flag:0x%x\n", flags);

	if (flags & HAS_GLOBAL_MOCS)
		__init_mocs_table(gt, &table);
	if (flags & HAS_LNCF_MOCS)
		init_l3cc_table(gt, &table);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_mocs.c"
#endif