// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_mcr.h"

#include "regs/xe_gt_regs.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

/**
 * DOC: GT Multicast/Replicated (MCR) Register Support
 *
 * Some GT registers are designed as "multicast" or "replicated" registers:
 * multiple instances of the same register share a single MMIO offset. MCR
 * registers are generally used when the hardware needs to potentially track
 * independent values of a register per hardware unit (e.g., per-subslice,
 * per-L3bank, etc.). The specific types of replication that exist vary
 * per-platform.
 *
 * MMIO accesses to MCR registers are controlled according to the settings
 * programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
 * registers can be done in either multicast (a single write updates all
 * instances of the register to the same value) or unicast (a write updates
 * only one specific instance) form. Reads of MCR registers always operate in
 * a unicast manner regardless of how the multicast/unicast bit is set in
 * MCR_SELECTOR. Selection of a specific MCR instance for unicast operations
 * is referred to as "steering."
 *
 * If MCR register operations are steered toward a hardware unit that is
 * fused off or currently powered down due to power gating, the MMIO operation
 * is "terminated" by the hardware. Terminated read operations will return a
 * value of zero and terminated unicast write operations will be silently
 * ignored. During device initialization, the goal of the various
 * ``init_steering_*()`` functions is to apply the platform-specific rules for
 * each MCR register type to identify a steering target that will select a
 * non-terminated instance.
 *
 * MCR registers are not available on Virtual Functions (VFs).
 */
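/*
 * Usage sketch (illustrative only; not called by the driver): a caller that
 * needs a replicated register's value from any live instance, or that needs
 * to update every instance at once, would combine this file's helpers
 * roughly as follows. The register and the modified bit are caller-supplied
 * assumptions; real callers must also hold the relevant forcewake domains.
 */
static u32 __maybe_unused example_mcr_usage(struct xe_gt *gt,
					    struct xe_reg_mcr reg_mcr)
{
	u32 val;

	/* Read from whichever non-terminated instance init-time steering chose */
	val = xe_gt_mcr_unicast_read_any(gt, reg_mcr);

	/* Update all instances of the register with a single multicast write */
	xe_gt_mcr_multicast_write(gt, reg_mcr, val | BIT(0));

	return val;
}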
#define STEER_SEMAPHORE		XE_REG(0xFD0)

static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
{
	return reg_mcr.__reg;
}

enum {
	MCR_OP_READ,
	MCR_OP_WRITE
};

static const struct xe_mmio_range xelp_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

static const struct xe_mmio_range xehp_l3bank_steering_table[] = {
	{ 0x008C80, 0x008CFF },
	{ 0x00B100, 0x00B3FF },
	{},
};

/*
 * Although the bspec lists more "MSLICE" ranges than shown here, some of those
 * are of a "GAM" subclass that has special rules and doesn't need to be
 * included here.
 */
static const struct xe_mmio_range xehp_mslice_steering_table[] = {
	{ 0x00DD00, 0x00DDFF },
	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
	{},
};

static const struct xe_mmio_range xehp_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D880, 0x00D8FF },
	{},
};

/*
 * We have several types of MCR registers where steering to (0,0) will always
 * provide us with a non-terminated value. We'll stick them all in the same
 * table for simplicity.
 */
static const struct xe_mmio_range xehpc_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF }, /* HALF-BSLICE */
	{ 0x008800, 0x00887F }, /* CC */
	{ 0x008A80, 0x008AFF }, /* TILEPSMI */
	{ 0x00B000, 0x00B0FF }, /* HALF-BSLICE */
	{ 0x00B100, 0x00B3FF }, /* L3BANK */
	{ 0x00C800, 0x00CFFF }, /* HALF-BSLICE */
	{ 0x00D800, 0x00D8FF }, /* HALF-BSLICE */
	{ 0x00DD00, 0x00DDFF }, /* BSLICE */
	{ 0x00E900, 0x00E9FF }, /* HALF-BSLICE */
	{ 0x00EC00, 0x00EEFF }, /* HALF-BSLICE */
	{ 0x00F000, 0x00FFFF }, /* HALF-BSLICE */
	{ 0x024180, 0x0241FF }, /* HALF-BSLICE */
	{},
};

static const struct xe_mmio_range xelpg_instance0_steering_table[] = {
	{ 0x000B00, 0x000BFF }, /* SQIDI */
	{ 0x001000, 0x001FFF }, /* SQIDI */
	{ 0x004000, 0x0048FF }, /* GAM */
	{ 0x008700, 0x0087FF }, /* SQIDI */
	{ 0x00B000, 0x00B0FF }, /* NODE */
	{ 0x00C800, 0x00CFFF }, /* GAM */
	{ 0x00D880, 0x00D8FF }, /* NODE */
	{ 0x00DD00, 0x00DDFF }, /* OAAL2 */
	{},
};

static const struct xe_mmio_range xelpg_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

static const struct xe_mmio_range xelp_dss_steering_table[] = {
	{ 0x008150, 0x00815F },
	{ 0x009520, 0x00955F },
	{ 0x00DE80, 0x00E8FF },
	{ 0x024A00, 0x024A7F },
	{},
};

/* DSS steering is used for GSLICE ranges as well */
static const struct xe_mmio_range xehp_dss_steering_table[] = {
	{ 0x005200, 0x0052FF }, /* GSLICE */
	{ 0x005400, 0x007FFF }, /* GSLICE */
	{ 0x008140, 0x00815F }, /* GSLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x008D00, 0x008DFF }, /* DSS */
	{ 0x0094D0, 0x00955F }, /* GSLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF }, /* DSS */
	{ 0x00D800, 0x00D87F }, /* GSLICE */
	{ 0x00DC00, 0x00DCFF }, /* GSLICE */
	{ 0x00DE80, 0x00E8FF }, /* DSS (0xE000-0xE0FF reserved) */
	{ 0x017000, 0x017FFF }, /* GSLICE */
	{ 0x024A00, 0x024A7F }, /* DSS */
	{},
};

/* DSS steering is used for COMPUTE ranges as well */
static const struct xe_mmio_range xehpc_dss_steering_table[] = {
	{ 0x008140, 0x00817F }, /* COMPUTE (0x8140-0x814F & 0x8160-0x817F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F }, /* COMPUTE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF }, /* DSS */
	{ 0x00DC00, 0x00DCFF }, /* COMPUTE */
	{ 0x00DE80, 0x00E7FF }, /* DSS (0xDF00-0xE1FF reserved) */
	{},
};

/* DSS steering is used for SLICE ranges as well */
static const struct xe_mmio_range xelpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF }, /* SLICE */
	{ 0x005500, 0x007FFF }, /* SLICE */
	{ 0x008140, 0x00815F }, /* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F }, /* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF }, /* DSS */
	{ 0x00D800, 0x00D87F }, /* SLICE */
	{ 0x00DC00, 0x00DCFF }, /* SLICE */
	{ 0x00DE80, 0x00E8FF }, /* DSS (0xE000-0xE0FF reserved) */
	{},
};

static const struct xe_mmio_range xelpmp_oaddrm_steering_table[] = {
	{ 0x393200, 0x39323F },
	{ 0x393400, 0x3934FF },
	{},
};

static const struct xe_mmio_range dg2_implicit_steering_table[] = {
	{ 0x000B00, 0x000BFF }, /* SF (SQIDI replication) */
	{ 0x001000, 0x001FFF }, /* SF (SQIDI replication) */
	{ 0x004000, 0x004AFF }, /* GAM (MSLICE replication) */
	{ 0x008700, 0x0087FF }, /* MCFG (SQIDI replication) */
	{ 0x00C800, 0x00CFFF }, /* GAM (MSLICE replication) */
	{ 0x00F000, 0x00FFFF }, /* GAM (MSLICE replication) */
	{},
};

static const struct xe_mmio_range xe2lpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF }, /* SLICE */
	{ 0x005500, 0x007FFF }, /* SLICE */
	{ 0x008140, 0x00815F }, /* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F }, /* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF }, /* DSS */
	{ 0x00D800, 0x00D87F }, /* SLICE */
	{ 0x00DC00, 0x00DCFF }, /* SLICE */
	{ 0x00DE80, 0x00E8FF }, /* DSS (0xE000-0xE0FF reserved) */
	{ 0x00E980, 0x00E9FF }, /* SLICE */
	{ 0x013000, 0x0133FF }, /* DSS (0x13000-0x131FF), SLICE (0x13200-0x133FF) */
	{},
};

static const struct xe_mmio_range xe2lpg_sqidi_psmi_steering_table[] = {
	{ 0x000B00, 0x000BFF },
	{ 0x001000, 0x001FFF },
	{},
};

static const struct xe_mmio_range xe2lpg_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF }, /* GAM, rsvd, GAMWKR */
	{ 0x008700, 0x00887F }, /* SQIDI, MEMPIPE */
	{ 0x00B000, 0x00B3FF }, /* NODE, L3BANK */
	{ 0x00C800, 0x00CFFF }, /* GAM */
	{ 0x00D880, 0x00D8FF }, /* NODE */
	{ 0x00DD00, 0x00DDFF }, /* MEMPIPE */
	{ 0x00E900, 0x00E97F }, /* MEMPIPE */
	{ 0x00F000, 0x00FFFF }, /* GAM, GAMWKR */
	{ 0x013400, 0x0135FF }, /* MEMPIPE */
	{},
};

static const struct xe_mmio_range xe2lpm_gpmxmt_steering_table[] = {
	{ 0x388160, 0x38817F },
	{ 0x389480, 0x3894CF },
	{},
};

static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = {
	{ 0x384000, 0x3847DF }, /* GAM, rsvd, GAM */
	{ 0x384900, 0x384AFF }, /* GAM */
	{ 0x389560, 0x3895FF }, /* MEDIAINF */
	{ 0x38B600, 0x38B8FF }, /* L3BANK */
	{ 0x38C800, 0x38D07F }, /* GAM, MEDIAINF */
	{ 0x38F000, 0x38F0FF }, /* GAM */
	{ 0x393C00, 0x393C7F }, /* MEDIAINF */
	{},
};

static void init_steering_l3bank(struct xe_gt *gt)
{
	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
						xe_mmio_read32(gt, MIRROR_FUSE3));
		u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
					      xe_mmio_read32(gt, XEHP_FUSE4));

		/*
		 * Group selects mslice, instance selects bank within mslice.
		 * Bank 0 is always valid _except_ when the bank mask is 010b.
		 */
		gt->steering[L3BANK].group_target = __ffs(mslice_mask);
		gt->steering[L3BANK].instance_target =
			bank_mask & BIT(0) ? 0 : 2;
	} else if (gt_to_xe(gt)->info.platform == XE_DG2) {
		u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
						xe_mmio_read32(gt, MIRROR_FUSE3));
		u32 bank = __ffs(mslice_mask) * 8;

		/*
		 * Like mslice registers, look for a valid mslice and steer to
		 * the first L3BANK of that quad. Access to the Nth L3 bank is
		 * split between the first bits of group and instance; see the
		 * worked example below.
		 */
		gt->steering[L3BANK].group_target = (bank >> 2) & 0x7;
		gt->steering[L3BANK].instance_target = bank & 0x3;
	} else {
		u32 fuse = REG_FIELD_GET(L3BANK_MASK,
					 ~xe_mmio_read32(gt, MIRROR_FUSE3));

		gt->steering[L3BANK].group_target = 0; /* unused */
		gt->steering[L3BANK].instance_target = __ffs(fuse);
	}
}
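/*
 * Worked example for the DG2 decomposition above (editorial illustration,
 * using a hypothetical fuse value): with mslice_mask = 0b0100, __ffs()
 * selects mslice 2, so bank = 2 * 8 = 16; the steering target is then
 * group = (16 >> 2) & 0x7 = 4 and instance = 16 & 0x3 = 0, i.e. the first
 * L3 bank of that mslice's quad.
 */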
static void init_steering_mslice(struct xe_gt *gt)
{
	u32 mask = REG_FIELD_GET(MEML3_EN_MASK,
				 xe_mmio_read32(gt, MIRROR_FUSE3));

	/*
	 * mslice registers are valid (not terminated) if either the meml3
	 * associated with the mslice is present, or at least one DSS associated
	 * with the mslice is present. There will always be at least one meml3
	 * so we can just use that to find a non-terminated mslice and ignore
	 * the DSS fusing.
	 */
	gt->steering[MSLICE].group_target = __ffs(mask);
	gt->steering[MSLICE].instance_target = 0; /* unused */

	/*
	 * LNCF termination is also based on mslice presence, so we'll set
	 * it up here. Either LNCF within a non-terminated mslice will work,
	 * so we just always pick LNCF 0 of that mslice here.
	 */
	gt->steering[LNCF].group_target = __ffs(mask) << 1;
	gt->steering[LNCF].instance_target = 0; /* unused */
}

static void init_steering_dss(struct xe_gt *gt)
{
	unsigned int dss = min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
			       xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0));
	unsigned int dss_per_grp = gt_to_xe(gt)->info.platform == XE_PVC ? 8 : 4;

	gt->steering[DSS].group_target = dss / dss_per_grp;
	gt->steering[DSS].instance_target = dss % dss_per_grp;
}
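/*
 * Worked example for the split above (editorial illustration, assuming the
 * first enabled DSS is DSS 10): on PVC, dss_per_grp = 8, giving group
 * 10 / 8 = 1 and instance 10 % 8 = 2; on the 4-DSS-per-group platforms the
 * same DSS maps to group 2, instance 2.
 */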
static void init_steering_oaddrm(struct xe_gt *gt)
{
	/*
	 * First instance is only terminated if the entire first media slice
	 * is absent (i.e., no VCS0 or VECS0).
	 */
	if (gt->info.engine_mask & (XE_HW_ENGINE_VCS0 | XE_HW_ENGINE_VECS0))
		gt->steering[OADDRM].group_target = 0;
	else
		gt->steering[OADDRM].group_target = 1;

	gt->steering[OADDRM].instance_target = 0; /* unused */
}

static void init_steering_sqidi_psmi(struct xe_gt *gt)
{
	u32 mask = REG_FIELD_GET(XE2_NODE_ENABLE_MASK,
				 xe_mmio_read32(gt, MIRROR_FUSE3));
	u32 select = __ffs(mask);

	gt->steering[SQIDI_PSMI].group_target = select >> 1;
	gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}

static void init_steering_inst0(struct xe_gt *gt)
{
	gt->steering[INSTANCE0].group_target = 0; /* unused */
	gt->steering[INSTANCE0].instance_target = 0; /* unused */
}

static const struct {
	const char *name;
	void (*init)(struct xe_gt *gt);
} xe_steering_types[] = {
	[L3BANK] = { "L3BANK", init_steering_l3bank },
	[MSLICE] = { "MSLICE", init_steering_mslice },
	[LNCF] = { "LNCF", NULL }, /* initialized by mslice init */
	[DSS] = { "DSS", init_steering_dss },
	[OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm },
	[SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi },
	[INSTANCE0] = { "INSTANCE 0", init_steering_inst0 },
	[IMPLICIT_STEERING] = { "IMPLICIT", NULL },
};

void xe_gt_mcr_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
	BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);

	if (IS_SRIOV_VF(xe))
		return;

	spin_lock_init(&gt->mcr_lock);

	if (gt->info.type == XE_GT_TYPE_MEDIA) {
		drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);

		if (MEDIA_VER(xe) >= 20) {
			gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
			gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
		} else {
			gt->steering[OADDRM].ranges = xelpmp_oaddrm_steering_table;
		}
	} else {
		if (GRAPHICS_VER(xe) >= 20) {
			gt->steering[DSS].ranges = xe2lpg_dss_steering_table;
			gt->steering[SQIDI_PSMI].ranges = xe2lpg_sqidi_psmi_steering_table;
			gt->steering[INSTANCE0].ranges = xe2lpg_instance0_steering_table;
		} else if (GRAPHICS_VERx100(xe) >= 1270) {
			gt->steering[INSTANCE0].ranges = xelpg_instance0_steering_table;
			gt->steering[L3BANK].ranges = xelpg_l3bank_steering_table;
			gt->steering[DSS].ranges = xelpg_dss_steering_table;
		} else if (xe->info.platform == XE_PVC) {
			gt->steering[INSTANCE0].ranges = xehpc_instance0_steering_table;
			gt->steering[DSS].ranges = xehpc_dss_steering_table;
		} else if (xe->info.platform == XE_DG2) {
			gt->steering[L3BANK].ranges = xehp_l3bank_steering_table;
			gt->steering[MSLICE].ranges = xehp_mslice_steering_table;
			gt->steering[LNCF].ranges = xehp_lncf_steering_table;
			gt->steering[DSS].ranges = xehp_dss_steering_table;
			gt->steering[IMPLICIT_STEERING].ranges = dg2_implicit_steering_table;
		} else {
			gt->steering[L3BANK].ranges = xelp_l3bank_steering_table;
			gt->steering[DSS].ranges = xelp_dss_steering_table;
		}
	}

	/* Select non-terminated steering target for each type */
	for (int i = 0; i < NUM_STEERING_TYPES; i++)
		if (gt->steering[i].ranges && xe_steering_types[i].init)
			xe_steering_types[i].init(gt);
}

/**
 * xe_gt_mcr_set_implicit_defaults - Initialize steer control registers
 * @gt: GT structure
 *
 * Some register ranges don't need to have their steering control registers
 * changed on each access - it's sufficient to set them once on
 * initialization. This function sets those registers for each platform.
 */
void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (IS_SRIOV_VF(xe))
		return;

	if (xe->info.platform == XE_DG2) {
		u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
			REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);

		xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val);
		xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val);
		/*
		 * For GAM registers, all reads should be directed to instance 1
		 * (unicast reads against other instances are not allowed),
		 * and instance 1 is already the hardware's default steering
		 * target, which we never change.
		 */
	}
}

/*
 * xe_gt_mcr_get_nonterminated_steering - find group/instance values that
 *    will steer a register to a non-terminated instance
 * @gt: GT structure
 * @reg_mcr: register for which the steering is required
 * @group: return variable for group steering
 * @instance: return variable for instance steering
 *
 * This function returns a group/instance pair that is guaranteed to work for
 * read steering of the given register. Note that a value will be returned
 * even if the register is not replicated and therefore does not actually
 * require steering.
 *
 * Returns true if the caller should steer to the @group/@instance values
 * returned. Returns false if the caller need not perform any steering.
 */
static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
						 struct xe_reg_mcr reg_mcr,
						 u8 *group, u8 *instance)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	const struct xe_mmio_range *implicit_ranges;

	for (int type = 0; type < IMPLICIT_STEERING; type++) {
		if (!gt->steering[type].ranges)
			continue;

		for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
			if (xe_mmio_in_range(gt, &gt->steering[type].ranges[i], reg)) {
				*group = gt->steering[type].group_target;
				*instance = gt->steering[type].instance_target;
				return true;
			}
		}
	}

	implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges;
	if (implicit_ranges)
		for (int i = 0; implicit_ranges[i].end > 0; i++)
			if (xe_mmio_in_range(gt, &implicit_ranges[i], reg))
				return false;

	/*
	 * Not found in a steering table and not a register with implicit
	 * steering. Just steer to 0/0 as a guess and raise a warning.
	 */
	drm_WARN(&gt_to_xe(gt)->drm, true,
		 "Did not find MCR register %#x in any MCR steering table\n",
		 reg.addr);
	*group = 0;
	*instance = 0;

	return true;
}

/*
 * Obtain exclusive access to MCR steering. On MTL and beyond we also need
 * to synchronize with external clients (e.g., firmware), so a semaphore
 * register will also need to be taken.
 */
static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
{
	struct xe_device *xe = gt_to_xe(gt);
	int ret = 0;

	spin_lock(&gt->mcr_lock);

	/*
	 * Starting with MTL we also need to grab a semaphore register
	 * to synchronize with external agents (e.g., firmware) that now
	 * share the same steering control register. The semaphore is
	 * obtained when a read of the relevant register returns 1.
	 */
	if (GRAPHICS_VERx100(xe) >= 1270)
		ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL,
				     true);

	drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
}

static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
{
	/* Release hardware semaphore - this is done by writing 1 to the register */
	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
		xe_mmio_write32(gt, STEER_SEMAPHORE, 0x1);

	spin_unlock(&gt->mcr_lock);
}

/*
 * Access a register with specific MCR steering
 *
 * Caller needs to make sure the relevant forcewake wells are up.
 */
static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
				u8 rw_flag, int group, int instance, u32 value)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	struct xe_reg steer_reg;
	u32 steer_val, val = 0;

	lockdep_assert_held(&gt->mcr_lock);

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		steer_reg = MTL_MCR_SELECTOR;
		steer_val = REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
			REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance);
	} else {
		steer_reg = MCR_SELECTOR;
		steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, group) |
			REG_FIELD_PREP(MCR_SUBSLICE_MASK, instance);
	}

	/*
	 * Always leave the hardware in multicast mode when doing reads and
	 * only change it to unicast mode when doing writes of a specific
	 * instance.
	 *
	 * The setting of the multicast/unicast bit usually wouldn't matter for
	 * read operations (which always return the value from a single
	 * register instance regardless of how that bit is set), but some
	 * platforms may have workarounds requiring us to remain in multicast
	 * mode for reads, e.g. Wa_22013088509 on PVC. There's no real downside
	 * to this, so we'll just go ahead and do so on all platforms; we'll
	 * only clear the multicast bit from the mask when explicitly doing a
	 * write operation.
	 *
	 * No need to save old steering reg value.
	 */
	if (rw_flag == MCR_OP_READ)
		steer_val |= MCR_MULTICAST;

	xe_mmio_write32(gt, steer_reg, steer_val);

	if (rw_flag == MCR_OP_READ)
		val = xe_mmio_read32(gt, reg);
	else
		xe_mmio_write32(gt, reg, value);

	/*
	 * If we turned off the multicast bit (during a write) we're required
	 * to turn it back on before finishing. The group and instance values
	 * don't matter since they'll be re-programmed on the next MCR
	 * operation.
	 */
	if (rw_flag == MCR_OP_WRITE)
		xe_mmio_write32(gt, steer_reg, MCR_MULTICAST);

	return val;
}

/**
 * xe_gt_mcr_unicast_read_any - reads a non-terminated instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: register to read
 *
 * Reads a GT MCR register. The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 * This function assumes the caller is already holding any necessary forcewake
 * domains.
 *
 * Returns the value from a non-terminated instance of @reg_mcr.
 */
u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	u8 group, instance;
	u32 val;
	bool steer;

	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));

	steer = xe_gt_mcr_get_nonterminated_steering(gt, reg_mcr,
						     &group, &instance);

	if (steer) {
		mcr_lock(gt);
		val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ,
					   group, instance, 0);
		mcr_unlock(gt);
	} else {
		val = xe_mmio_read32(gt, reg);
	}

	return val;
}

/**
 * xe_gt_mcr_unicast_read - read a specific instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to read
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Returns the value read from an MCR register after steering toward a
 * specific group/instance.
 */
u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
			   struct xe_reg_mcr reg_mcr,
			   int group, int instance)
{
	u32 val;

	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));

	mcr_lock(gt);
	val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0);
	mcr_unlock(gt);

	return val;
}

/**
 * xe_gt_mcr_unicast_write - write a specific instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to write
 * @value: value to write
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Write an MCR register in unicast mode after steering toward a specific
 * group/instance.
 */
void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
			     u32 value, int group, int instance)
{
	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));

	mcr_lock(gt);
	rw_with_mcr_steering(gt, reg_mcr, MCR_OP_WRITE, group, instance, value);
	mcr_unlock(gt);
}
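/*
 * Usage sketch (illustrative only; not called by the driver): when the
 * per-instance values of a replicated register are of interest, rather than
 * a single representative value, a caller can walk the group/instance space
 * with xe_gt_mcr_unicast_read(). The bounds here are caller-supplied
 * assumptions; instances that are fused off or power-gated will read back
 * as zero (terminated).
 */
static void __maybe_unused example_read_all_instances(struct xe_gt *gt,
						      struct xe_reg_mcr reg_mcr,
						      int num_groups,
						      int instances_per_group)
{
	for (int group = 0; group < num_groups; group++)
		for (int instance = 0; instance < instances_per_group; instance++)
			drm_dbg(&gt_to_xe(gt)->drm, "MCR[%d][%d] = %#x\n",
				group, instance,
				xe_gt_mcr_unicast_read(gt, reg_mcr, group,
						       instance));
}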
/**
 * xe_gt_mcr_multicast_write - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances.
 */
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
			       u32 value)
{
	struct xe_reg reg = to_xe_reg(reg_mcr);

	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));

	/*
	 * Synchronize with any unicast operations. Once we have exclusive
	 * access, the MULTICAST bit should already be set, so there's no need
	 * to touch the steering register.
	 */
	mcr_lock(gt);
	xe_mmio_write32(gt, reg, value);
	mcr_unlock(gt);
}

void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p)
{
	for (int i = 0; i < NUM_STEERING_TYPES; i++) {
		if (gt->steering[i].ranges) {
			drm_printf(p, "%s steering: group=%#x, instance=%#x\n",
				   xe_steering_types[i].name,
				   gt->steering[i].group_target,
				   gt->steering[i].instance_target);
			for (int j = 0; gt->steering[i].ranges[j].end; j++)
				drm_printf(p, "\t0x%06x - 0x%06x\n",
					   gt->steering[i].ranges[j].start,
					   gt->steering[i].ranges[j].end);
		}
	}
}