// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_gt_mcr.h"

#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
#include "xe_gt_topology.h"
#include "xe_gt_types.h"
#include "xe_mmio.h"
#include "xe_sriov.h"

/**
 * DOC: GT Multicast/Replicated (MCR) Register Support
 *
 * Some GT registers are designed as "multicast" or "replicated" registers:
 * multiple instances of the same register share a single MMIO offset. MCR
 * registers are generally used when the hardware needs to potentially track
 * independent values of a register per hardware unit (e.g., per-subslice,
 * per-L3bank, etc.). The specific types of replication that exist vary
 * per-platform.
 *
 * MMIO accesses to MCR registers are controlled according to the settings
 * programmed in the platform's MCR_SELECTOR register(s). MMIO writes to MCR
 * registers can be done in either multicast (a single write updates all
 * instances of the register to the same value) or unicast (a write updates
 * only one specific instance) form. Reads of MCR registers always operate in
 * a unicast manner regardless of how the multicast/unicast bit is set in
 * MCR_SELECTOR. Selection of a specific MCR instance for unicast operations
 * is referred to as "steering."
 *
 * If MCR register operations are steered toward a hardware unit that is
 * fused off or currently powered down due to power gating, the MMIO operation
 * is "terminated" by the hardware. Terminated read operations will return a
 * value of zero and terminated unicast write operations will be silently
 * ignored. During device initialization, the goal of the various
 * ``init_steering_*()`` functions is to apply the platform-specific rules for
 * each MCR register type to identify a steering target that will select a
 * non-terminated instance.
 *
 * MCR registers are not available on Virtual Functions (VFs).
 */

#define STEER_SEMAPHORE		XE_REG(0xFD0)

static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
{
	return reg_mcr.__reg;
}

enum {
	MCR_OP_READ,
	MCR_OP_WRITE
};

static const struct xe_mmio_range xelp_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

static const struct xe_mmio_range xehp_l3bank_steering_table[] = {
	{ 0x008C80, 0x008CFF },
	{ 0x00B100, 0x00B3FF },
	{},
};

/*
 * Although the bspec lists more "MSLICE" ranges than shown here, some of those
 * are of a "GAM" subclass that has special rules and doesn't need to be
 * included here.
 */
static const struct xe_mmio_range xehp_mslice_steering_table[] = {
	{ 0x00DD00, 0x00DDFF },
	{ 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
	{},
};

static const struct xe_mmio_range xehp_lncf_steering_table[] = {
	{ 0x00B000, 0x00B0FF },
	{ 0x00D880, 0x00D8FF },
	{},
};
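/*
 * Illustrative sketch (not part of the driver): each steering table above and
 * below is a sentinel-terminated list of { start, end } MMIO ranges. A
 * hypothetical standalone matcher mirroring the lookup loop used later in
 * xe_gt_mcr_get_nonterminated_steering() might look like:
 *
 *	static bool offset_in_table(const struct xe_mmio_range *table, u32 addr)
 *	{
 *		for (int i = 0; table[i].end > 0; i++)
 *			if (addr >= table[i].start && addr <= table[i].end)
 *				return true;
 *		return false;
 *	}
 *
 * e.g., offset_in_table(xehp_lncf_steering_table, 0xB040) would return true,
 * so a register at 0xB040 would be steered with LNCF rules on XeHP.
 */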
/*
 * We have several types of MCR registers where steering to (0,0) will always
 * provide us with a non-terminated value. We'll stick them all in the same
 * table for simplicity.
 */
static const struct xe_mmio_range xehpc_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF },	/* HALF-BSLICE */
	{ 0x008800, 0x00887F },	/* CC */
	{ 0x008A80, 0x008AFF },	/* TILEPSMI */
	{ 0x00B000, 0x00B0FF },	/* HALF-BSLICE */
	{ 0x00B100, 0x00B3FF },	/* L3BANK */
	{ 0x00C800, 0x00CFFF },	/* HALF-BSLICE */
	{ 0x00D800, 0x00D8FF },	/* HALF-BSLICE */
	{ 0x00DD00, 0x00DDFF },	/* BSLICE */
	{ 0x00E900, 0x00E9FF },	/* HALF-BSLICE */
	{ 0x00EC00, 0x00EEFF },	/* HALF-BSLICE */
	{ 0x00F000, 0x00FFFF },	/* HALF-BSLICE */
	{ 0x024180, 0x0241FF },	/* HALF-BSLICE */
	{},
};

static const struct xe_mmio_range xelpg_instance0_steering_table[] = {
	{ 0x000B00, 0x000BFF },	/* SQIDI */
	{ 0x001000, 0x001FFF },	/* SQIDI */
	{ 0x004000, 0x0048FF },	/* GAM */
	{ 0x008700, 0x0087FF },	/* SQIDI */
	{ 0x00B000, 0x00B0FF },	/* NODE */
	{ 0x00C800, 0x00CFFF },	/* GAM */
	{ 0x00D880, 0x00D8FF },	/* NODE */
	{ 0x00DD00, 0x00DDFF },	/* OAAL2 */
	{},
};

static const struct xe_mmio_range xelpg_l3bank_steering_table[] = {
	{ 0x00B100, 0x00B3FF },
	{},
};

static const struct xe_mmio_range xelp_dss_steering_table[] = {
	{ 0x008150, 0x00815F },
	{ 0x009520, 0x00955F },
	{ 0x00DE80, 0x00E8FF },
	{ 0x024A00, 0x024A7F },
	{},
};

/* DSS steering is used for GSLICE ranges as well */
static const struct xe_mmio_range xehp_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },	/* GSLICE */
	{ 0x005400, 0x007FFF },	/* GSLICE */
	{ 0x008140, 0x00815F },	/* GSLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x008D00, 0x008DFF },	/* DSS */
	{ 0x0094D0, 0x00955F },	/* GSLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },	/* DSS */
	{ 0x00D800, 0x00D87F },	/* GSLICE */
	{ 0x00DC00, 0x00DCFF },	/* GSLICE */
	{ 0x00DE80, 0x00E8FF },	/* DSS (0xE000-0xE0FF reserved) */
	{ 0x017000, 0x017FFF },	/* GSLICE */
	{ 0x024A00, 0x024A7F },	/* DSS */
	{},
};

/* DSS steering is used for COMPUTE ranges as well */
static const struct xe_mmio_range xehpc_dss_steering_table[] = {
	{ 0x008140, 0x00817F },	/* COMPUTE (0x8140-0x814F & 0x8160-0x817F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },	/* COMPUTE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },	/* DSS */
	{ 0x00DC00, 0x00DCFF },	/* COMPUTE */
	{ 0x00DE80, 0x00E7FF },	/* DSS (0xDF00-0xE1FF reserved) */
	{},
};

/* DSS steering is used for SLICE ranges as well */
static const struct xe_mmio_range xelpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },	/* SLICE */
	{ 0x005500, 0x007FFF },	/* SLICE */
	{ 0x008140, 0x00815F },	/* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },	/* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },	/* DSS */
	{ 0x00D800, 0x00D87F },	/* SLICE */
	{ 0x00DC00, 0x00DCFF },	/* SLICE */
	{ 0x00DE80, 0x00E8FF },	/* DSS (0xE000-0xE0FF reserved) */
	{},
};

static const struct xe_mmio_range xelpmp_oaddrm_steering_table[] = {
	{ 0x393200, 0x39323F },
	{ 0x393400, 0x3934FF },
	{},
};
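/*
 * Reader's note (illustrative): a single table entry may cover registers of
 * more than one replication class when they share steering rules. For
 * example, 0x9520 falls within the 0x94D0-0x955F entry of
 * xehp_dss_steering_table above; per the annotation, 0x94D0-0x951F are
 * GSLICE registers and 0x9520-0x955F are DSS registers, but both are steered
 * with the same DSS rules, so one range suffices.
 */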
static const struct xe_mmio_range dg2_implicit_steering_table[] = {
	{ 0x000B00, 0x000BFF },	/* SF (SQIDI replication) */
	{ 0x001000, 0x001FFF },	/* SF (SQIDI replication) */
	{ 0x004000, 0x004AFF },	/* GAM (MSLICE replication) */
	{ 0x008700, 0x0087FF },	/* MCFG (SQIDI replication) */
	{ 0x00C800, 0x00CFFF },	/* GAM (MSLICE replication) */
	{ 0x00F000, 0x00FFFF },	/* GAM (MSLICE replication) */
	{},
};

static const struct xe_mmio_range xe2lpg_dss_steering_table[] = {
	{ 0x005200, 0x0052FF },	/* SLICE */
	{ 0x005500, 0x007FFF },	/* SLICE */
	{ 0x008140, 0x00815F },	/* SLICE (0x8140-0x814F), DSS (0x8150-0x815F) */
	{ 0x0094D0, 0x00955F },	/* SLICE (0x94D0-0x951F), DSS (0x9520-0x955F) */
	{ 0x009680, 0x0096FF },	/* DSS */
	{ 0x00D800, 0x00D87F },	/* SLICE */
	{ 0x00DC00, 0x00DCFF },	/* SLICE */
	{ 0x00DE80, 0x00E8FF },	/* DSS (0xE000-0xE0FF reserved) */
	{ 0x00E980, 0x00E9FF },	/* SLICE */
	{ 0x013000, 0x0133FF },	/* DSS (0x13000-0x131FF), SLICE (0x13200-0x133FF) */
	{},
};

static const struct xe_mmio_range xe2lpg_sqidi_psmi_steering_table[] = {
	{ 0x000B00, 0x000BFF },
	{ 0x001000, 0x001FFF },
	{},
};

static const struct xe_mmio_range xe2lpg_instance0_steering_table[] = {
	{ 0x004000, 0x004AFF },	/* GAM, rsvd, GAMWKR */
	{ 0x008700, 0x00887F },	/* SQIDI, MEMPIPE */
	{ 0x00B000, 0x00B3FF },	/* NODE, L3BANK */
	{ 0x00C800, 0x00CFFF },	/* GAM */
	{ 0x00D880, 0x00D8FF },	/* NODE */
	{ 0x00DD00, 0x00DDFF },	/* MEMPIPE */
	{ 0x00E900, 0x00E97F },	/* MEMPIPE */
	{ 0x00F000, 0x00FFFF },	/* GAM, GAMWKR */
	{ 0x013400, 0x0135FF },	/* MEMPIPE */
	{},
};

static const struct xe_mmio_range xe2lpm_gpmxmt_steering_table[] = {
	{ 0x388160, 0x38817F },
	{ 0x389480, 0x3894CF },
	{},
};

static const struct xe_mmio_range xe2lpm_instance0_steering_table[] = {
	{ 0x384000, 0x3847DF },	/* GAM, rsvd, GAM */
	{ 0x384900, 0x384AFF },	/* GAM */
	{ 0x389560, 0x3895FF },	/* MEDIAINF */
	{ 0x38B600, 0x38B8FF },	/* L3BANK */
	{ 0x38C800, 0x38D07F },	/* GAM, MEDIAINF */
	{ 0x38F000, 0x38F0FF },	/* GAM */
	{ 0x393C00, 0x393C7F },	/* MEDIAINF */
	{},
};

static void init_steering_l3bank(struct xe_gt *gt)
{
	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
						xe_mmio_read32(gt, MIRROR_FUSE3));
		u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
					      xe_mmio_read32(gt, XEHP_FUSE4));

		/*
		 * Group selects mslice, instance selects bank within mslice.
		 * Bank 0 is always valid _except_ when the bank mask is 010b.
		 */
		gt->steering[L3BANK].group_target = __ffs(mslice_mask);
		gt->steering[L3BANK].instance_target =
			bank_mask & BIT(0) ? 0 : 2;
	} else if (gt_to_xe(gt)->info.platform == XE_DG2) {
		u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
						xe_mmio_read32(gt, MIRROR_FUSE3));
		u32 bank = __ffs(mslice_mask) * 8;

		/*
		 * Like mslice registers, look for a valid mslice and steer to
		 * the first L3BANK of that quad. Access to the Nth L3 bank is
		 * split between the first bits of group and instance.
		 */
		gt->steering[L3BANK].group_target = (bank >> 2) & 0x7;
		gt->steering[L3BANK].instance_target = bank & 0x3;
	} else {
		u32 fuse = REG_FIELD_GET(L3BANK_MASK,
					 ~xe_mmio_read32(gt, MIRROR_FUSE3));

		gt->steering[L3BANK].group_target = 0;	/* unused */
		gt->steering[L3BANK].instance_target = __ffs(fuse);
	}
}
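/*
 * Worked example for the DG2 branch above (illustrative): if mslice_mask is
 * 0b0100, __ffs() picks mslice 2, so bank = 2 * 8 = 16. The bank index is
 * then split across the steering fields: group = (16 >> 2) & 0x7 = 4 and
 * instance = 16 & 0x3 = 0, i.e. the first L3 bank of that mslice's quad.
 */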
static void init_steering_mslice(struct xe_gt *gt)
{
	u32 mask = REG_FIELD_GET(MEML3_EN_MASK,
				 xe_mmio_read32(gt, MIRROR_FUSE3));

	/*
	 * mslice registers are valid (not terminated) if either the meml3
	 * associated with the mslice is present, or at least one DSS associated
	 * with the mslice is present. There will always be at least one meml3
	 * so we can just use that to find a non-terminated mslice and ignore
	 * the DSS fusing.
	 */
	gt->steering[MSLICE].group_target = __ffs(mask);
	gt->steering[MSLICE].instance_target = 0;	/* unused */

	/*
	 * LNCF termination is also based on mslice presence, so we'll set
	 * it up here. Either LNCF within a non-terminated mslice will work,
	 * so we just always pick LNCF 0 here.
	 */
	gt->steering[LNCF].group_target = __ffs(mask) << 1;
	gt->steering[LNCF].instance_target = 0;		/* unused */
}

static unsigned int dss_per_group(struct xe_gt *gt)
{
	if (gt_to_xe(gt)->info.platform == XE_PVC)
		return 8;
	else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250)
		return 4;
	else
		return 6;
}

/**
 * xe_gt_mcr_get_dss_steering - Get the group/instance steering for a DSS
 * @gt: GT structure
 * @dss: DSS ID to obtain steering for
 * @group: pointer to storage for steering group ID
 * @instance: pointer to storage for steering instance ID
 */
void xe_gt_mcr_get_dss_steering(struct xe_gt *gt, unsigned int dss, u16 *group, u16 *instance)
{
	int dss_per_grp = dss_per_group(gt);

	xe_gt_assert(gt, dss < XE_MAX_DSS_FUSE_BITS);

	*group = dss / dss_per_grp;
	*instance = dss % dss_per_grp;
}

static void init_steering_dss(struct xe_gt *gt)
{
	xe_gt_mcr_get_dss_steering(gt,
				   min(xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0),
				       xe_dss_mask_group_ffs(gt->fuse_topo.c_dss_mask, 0, 0)),
				   &gt->steering[DSS].group_target,
				   &gt->steering[DSS].instance_target);
}

static void init_steering_oaddrm(struct xe_gt *gt)
{
	/*
	 * First instance is only terminated if the entire first media slice
	 * is absent (i.e., no VCS0 or VECS0).
	 */
	if (gt->info.engine_mask & (XE_HW_ENGINE_VCS0 | XE_HW_ENGINE_VECS0))
		gt->steering[OADDRM].group_target = 0;
	else
		gt->steering[OADDRM].group_target = 1;

	gt->steering[OADDRM].instance_target = 0;	/* unused */
}

static void init_steering_sqidi_psmi(struct xe_gt *gt)
{
	u32 mask = REG_FIELD_GET(XE2_NODE_ENABLE_MASK,
				 xe_mmio_read32(gt, MIRROR_FUSE3));
	u32 select = __ffs(mask);

	gt->steering[SQIDI_PSMI].group_target = select >> 1;
	gt->steering[SQIDI_PSMI].instance_target = select & 0x1;
}

static void init_steering_inst0(struct xe_gt *gt)
{
	gt->steering[INSTANCE0].group_target = 0;	/* unused */
	gt->steering[INSTANCE0].instance_target = 0;	/* unused */
}

static const struct {
	const char *name;
	void (*init)(struct xe_gt *gt);
} xe_steering_types[] = {
	[L3BANK]	= { "L3BANK",		init_steering_l3bank },
	[MSLICE]	= { "MSLICE",		init_steering_mslice },
	[LNCF]		= { "LNCF",		NULL }, /* initialized by mslice init */
	[DSS]		= { "DSS",		init_steering_dss },
	[OADDRM]	= { "OADDRM / GPMXMT",	init_steering_oaddrm },
	[SQIDI_PSMI]	= { "SQIDI_PSMI",	init_steering_sqidi_psmi },
	[INSTANCE0]	= { "INSTANCE 0",	init_steering_inst0 },
	[IMPLICIT_STEERING] = { "IMPLICIT",	NULL },
};
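/*
 * Worked example for xe_gt_mcr_get_dss_steering() above (illustrative): on
 * PVC, dss_per_group() returns 8, so DSS 10 maps to group 10 / 8 = 1 and
 * instance 10 % 8 = 2; on a 4-DSS-per-group platform the same DSS would map
 * to group 2, instance 2.
 */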
/**
 * xe_gt_mcr_init_early - Early initialization of the MCR support
 * @gt: GT structure
 *
 * Perform early software-only initialization of the MCR lock, allowing
 * synchronization on accesses to the STEER_SEMAPHORE register and use of
 * the xe_gt_mcr_multicast_write() function.
 */
void xe_gt_mcr_init_early(struct xe_gt *gt)
{
	BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES);
	BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES);

	spin_lock_init(&gt->mcr_lock);
}

/**
 * xe_gt_mcr_init - Normal initialization of the MCR support
 * @gt: GT structure
 *
 * Perform normal initialization of the MCR for all usages.
 */
void xe_gt_mcr_init(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (IS_SRIOV_VF(xe))
		return;

	if (gt->info.type == XE_GT_TYPE_MEDIA) {
		drm_WARN_ON(&xe->drm, MEDIA_VER(xe) < 13);

		if (MEDIA_VER(xe) >= 20) {
			gt->steering[OADDRM].ranges = xe2lpm_gpmxmt_steering_table;
			gt->steering[INSTANCE0].ranges = xe2lpm_instance0_steering_table;
		} else {
			gt->steering[OADDRM].ranges = xelpmp_oaddrm_steering_table;
		}
	} else {
		if (GRAPHICS_VER(xe) >= 20) {
			gt->steering[DSS].ranges = xe2lpg_dss_steering_table;
			gt->steering[SQIDI_PSMI].ranges = xe2lpg_sqidi_psmi_steering_table;
			gt->steering[INSTANCE0].ranges = xe2lpg_instance0_steering_table;
		} else if (GRAPHICS_VERx100(xe) >= 1270) {
			gt->steering[INSTANCE0].ranges = xelpg_instance0_steering_table;
			gt->steering[L3BANK].ranges = xelpg_l3bank_steering_table;
			gt->steering[DSS].ranges = xelpg_dss_steering_table;
		} else if (xe->info.platform == XE_PVC) {
			gt->steering[INSTANCE0].ranges = xehpc_instance0_steering_table;
			gt->steering[DSS].ranges = xehpc_dss_steering_table;
		} else if (xe->info.platform == XE_DG2) {
			gt->steering[L3BANK].ranges = xehp_l3bank_steering_table;
			gt->steering[MSLICE].ranges = xehp_mslice_steering_table;
			gt->steering[LNCF].ranges = xehp_lncf_steering_table;
			gt->steering[DSS].ranges = xehp_dss_steering_table;
			gt->steering[IMPLICIT_STEERING].ranges = dg2_implicit_steering_table;
		} else {
			gt->steering[L3BANK].ranges = xelp_l3bank_steering_table;
			gt->steering[DSS].ranges = xelp_dss_steering_table;
		}
	}

	/* Select non-terminated steering target for each type */
	for (int i = 0; i < NUM_STEERING_TYPES; i++)
		if (gt->steering[i].ranges && xe_steering_types[i].init)
			xe_steering_types[i].init(gt);
}
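/*
 * Illustrative sketch of the expected call order during GT setup (the actual
 * probe flow lives elsewhere in the driver):
 *
 *	xe_gt_mcr_init_early(gt);	// software-only: set up mcr_lock
 *	// ... MMIO becomes usable ...
 *	xe_gt_mcr_init(gt);		// pick per-platform tables and targets
 *	xe_gt_mcr_set_implicit_defaults(gt); // one-time steering defaults
 *
 * xe_gt_mcr_init_early() must come first, since xe_gt_mcr_multicast_write()
 * and the other accessors take mcr_lock.
 */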
/**
 * xe_gt_mcr_set_implicit_defaults - Initialize steer control registers
 * @gt: GT structure
 *
 * Some register ranges don't need to have their steering control registers
 * changed on each access - it's sufficient to set them once on
 * initialization. This function sets those registers for each platform.
 */
void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
{
	struct xe_device *xe = gt_to_xe(gt);

	if (IS_SRIOV_VF(xe))
		return;

	if (xe->info.platform == XE_DG2) {
		u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
			REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);

		xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val);
		xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val);
		/*
		 * For GAM registers, all reads should be directed to instance 1
		 * (unicast reads against other instances are not allowed),
		 * and instance 1 is already the hardware's default steering
		 * target, which we never change.
		 */
	}
}

/*
 * xe_gt_mcr_get_nonterminated_steering - find group/instance values that
 *    will steer a register to a non-terminated instance
 * @gt: GT structure
 * @reg_mcr: register for which the steering is required
 * @group: return variable for group steering
 * @instance: return variable for instance steering
 *
 * This function returns a group/instance pair that is guaranteed to work for
 * read steering of the given register. Note that a value will be returned
 * even if the register is not replicated and therefore does not actually
 * require steering.
 *
 * Returns true if the caller should steer to the @group/@instance values
 * returned. Returns false if the caller need not perform any steering.
 */
static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
						 struct xe_reg_mcr reg_mcr,
						 u8 *group, u8 *instance)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	const struct xe_mmio_range *implicit_ranges;

	for (int type = 0; type < IMPLICIT_STEERING; type++) {
		if (!gt->steering[type].ranges)
			continue;

		for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
			if (xe_mmio_in_range(gt, &gt->steering[type].ranges[i], reg)) {
				*group = gt->steering[type].group_target;
				*instance = gt->steering[type].instance_target;
				return true;
			}
		}
	}

	implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges;
	if (implicit_ranges)
		for (int i = 0; implicit_ranges[i].end > 0; i++)
			if (xe_mmio_in_range(gt, &implicit_ranges[i], reg))
				return false;

	/*
	 * Not found in a steering table and not a register with implicit
	 * steering. Just steer to 0/0 as a guess and raise a warning.
	 */
	drm_WARN(&gt_to_xe(gt)->drm, true,
		 "Did not find MCR register %#x in any MCR steering table\n",
		 reg.addr);
	*group = 0;
	*instance = 0;

	return true;
}
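/*
 * Illustrative examples of the two outcomes above: on DG2, a register at
 * 0xB100 hits the L3BANK steering table, so the function returns true with
 * the group/instance computed by init_steering_l3bank(); a register at
 * 0x1000 instead hits dg2_implicit_steering_table, so it returns false and
 * the caller may access the register directly, relying on the defaults
 * programmed by xe_gt_mcr_set_implicit_defaults().
 */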
/*
 * Obtain exclusive access to MCR steering. On MTL and beyond we also need
 * to synchronize with external clients (e.g., firmware), so a semaphore
 * register will also need to be taken.
 */
static void mcr_lock(struct xe_gt *gt) __acquires(&gt->mcr_lock)
{
	struct xe_device *xe = gt_to_xe(gt);
	int ret = 0;

	spin_lock(&gt->mcr_lock);

	/*
	 * Starting with MTL we also need to grab a semaphore register
	 * to synchronize with external agents (e.g., firmware) that now
	 * share the same steering control register. The semaphore is obtained
	 * when a read of the relevant register returns 1.
	 */
	if (GRAPHICS_VERx100(xe) >= 1270)
		ret = xe_mmio_wait32(gt, STEER_SEMAPHORE, 0x1, 0x1, 10, NULL,
				     true);

	drm_WARN_ON_ONCE(&xe->drm, ret == -ETIMEDOUT);
}

static void mcr_unlock(struct xe_gt *gt) __releases(&gt->mcr_lock)
{
	/* Release hardware semaphore - this is done by writing 1 to the register */
	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270)
		xe_mmio_write32(gt, STEER_SEMAPHORE, 0x1);

	spin_unlock(&gt->mcr_lock);
}

/*
 * Access a register with specific MCR steering
 *
 * Caller needs to make sure the relevant forcewake wells are up.
 */
static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
				u8 rw_flag, int group, int instance, u32 value)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	struct xe_reg steer_reg;
	u32 steer_val, val = 0;

	lockdep_assert_held(&gt->mcr_lock);

	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
		steer_reg = MTL_MCR_SELECTOR;
		steer_val = REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
			REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance);
	} else {
		steer_reg = MCR_SELECTOR;
		steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, group) |
			REG_FIELD_PREP(MCR_SUBSLICE_MASK, instance);
	}

	/*
	 * Always leave the hardware in multicast mode when doing reads and
	 * only change it to unicast mode when doing writes of a specific
	 * instance.
	 *
	 * The setting of the multicast/unicast bit usually wouldn't matter for
	 * read operations (which always return the value from a single
	 * register instance regardless of how that bit is set), but some
	 * platforms may have workarounds requiring us to remain in multicast
	 * mode for reads, e.g. Wa_22013088509 on PVC. There's no real
	 * downside to this, so we'll just go ahead and do so on all platforms;
	 * we'll only clear the multicast bit from the mask when explicitly
	 * doing a write operation.
	 *
	 * No need to save old steering reg value.
	 */
	if (rw_flag == MCR_OP_READ)
		steer_val |= MCR_MULTICAST;

	xe_mmio_write32(gt, steer_reg, steer_val);

	if (rw_flag == MCR_OP_READ)
		val = xe_mmio_read32(gt, reg);
	else
		xe_mmio_write32(gt, reg, value);

	/*
	 * If we turned off the multicast bit (during a write) we're required
	 * to turn it back on before finishing. The group and instance values
	 * don't matter since they'll be re-programmed on the next MCR
	 * operation.
	 */
	if (rw_flag == MCR_OP_WRITE)
		xe_mmio_write32(gt, steer_reg, MCR_MULTICAST);

	return val;
}
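/*
 * Illustrative register-level trace of a steered read on a >= 1270 platform
 * (exact field encodings come from MTL_MCR_GROUPID/MTL_MCR_INSTANCEID):
 *
 *	mcr_lock(gt);				// spinlock + STEER_SEMAPHORE
 *	// MTL_MCR_SELECTOR <- MCR_MULTICAST | group=1 | instance=2
 *	// val <- read of the target register, steered to (1,2)
 *	val = rw_with_mcr_steering(gt, reg, MCR_OP_READ, 1, 2, 0);
 *	mcr_unlock(gt);				// write 1 to STEER_SEMAPHORE
 */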
/**
 * xe_gt_mcr_unicast_read_any - reads a non-terminated instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: register to read
 *
 * Reads a GT MCR register. The read will be steered to a non-terminated
 * instance (i.e., one that isn't fused off or powered down by power gating).
 * This function assumes the caller is already holding any necessary forcewake
 * domains.
 *
 * Returns the value from a non-terminated instance of @reg_mcr.
 */
u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
{
	const struct xe_reg reg = to_xe_reg(reg_mcr);
	u8 group, instance;
	u32 val;
	bool steer;

	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));

	steer = xe_gt_mcr_get_nonterminated_steering(gt, reg_mcr,
						     &group, &instance);

	if (steer) {
		mcr_lock(gt);
		val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ,
					   group, instance, 0);
		mcr_unlock(gt);
	} else {
		val = xe_mmio_read32(gt, reg);
	}

	return val;
}

/**
 * xe_gt_mcr_unicast_read - read a specific instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to read
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Returns the value read from an MCR register after steering toward a specific
 * group/instance.
 */
u32 xe_gt_mcr_unicast_read(struct xe_gt *gt,
			   struct xe_reg_mcr reg_mcr,
			   int group, int instance)
{
	u32 val;

	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));

	mcr_lock(gt);
	val = rw_with_mcr_steering(gt, reg_mcr, MCR_OP_READ, group, instance, 0);
	mcr_unlock(gt);

	return val;
}

/**
 * xe_gt_mcr_unicast_write - write a specific instance of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to write
 * @value: value to write
 * @group: the MCR group
 * @instance: the MCR instance
 *
 * Write an MCR register in unicast mode after steering toward a specific
 * group/instance.
 */
void xe_gt_mcr_unicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
			     u32 value, int group, int instance)
{
	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));

	mcr_lock(gt);
	rw_with_mcr_steering(gt, reg_mcr, MCR_OP_WRITE, group, instance, value);
	mcr_unlock(gt);
}

/**
 * xe_gt_mcr_multicast_write - write a value to all instances of an MCR register
 * @gt: GT structure
 * @reg_mcr: the MCR register to write
 * @value: value to write
 *
 * Write an MCR register in multicast mode to update all instances.
 */
void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
			       u32 value)
{
	struct xe_reg reg = to_xe_reg(reg_mcr);

	xe_gt_assert(gt, !IS_SRIOV_VF(gt_to_xe(gt)));

	/*
	 * Synchronize with any unicast operations. Once we have exclusive
	 * access, the MULTICAST bit should already be set, so there's no need
	 * to touch the steering register.
	 */
	mcr_lock(gt);
	xe_mmio_write32(gt, reg, value);
	mcr_unlock(gt);
}

void xe_gt_mcr_steering_dump(struct xe_gt *gt, struct drm_printer *p)
{
	for (int i = 0; i < NUM_STEERING_TYPES; i++) {
		if (gt->steering[i].ranges) {
			drm_printf(p, "%s steering: group=%#x, instance=%#x\n",
				   xe_steering_types[i].name,
				   gt->steering[i].group_target,
				   gt->steering[i].instance_target);
			for (int j = 0; gt->steering[i].ranges[j].end; j++)
				drm_printf(p, "\t0x%06x - 0x%06x\n",
					   gt->steering[i].ranges[j].start,
					   gt->steering[i].ranges[j].end);
		}
	}
}
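/*
 * Illustrative usage of the exported accessors (the register name below is
 * hypothetical; real callers use registers defined in regs/xe_gt_regs.h):
 *
 *	u32 val;
 *
 *	// Update every instance of a replicated register at once:
 *	xe_gt_mcr_multicast_write(gt, SOME_MCR_REG, val);
 *
 *	// Read back from any instance guaranteed not to be terminated:
 *	val = xe_gt_mcr_unicast_read_any(gt, SOME_MCR_REG);
 *
 *	// Or target one specific unit, e.g. group 0, instance 1:
 *	val = xe_gt_mcr_unicast_read(gt, SOME_MCR_REG, 0, 1);
 *
 * Callers must hold any required forcewake domains; the MCR lock and the
 * STEER_SEMAPHORE handshake are handled internally by these functions.
 */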