//===-- AMDGPUSubtarget.cpp - AMDGPU Subtarget Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Implements the AMDGPU specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUSubtarget.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "R600Subtarget.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenSubtargetInfo.inc"
#undef AMDGPUSubtarget

static cl::opt<bool> EnablePowerSched(
    "amdgpu-enable-power-sched",
    cl::desc("Enable scheduling to minimize mAI power bursts"),
    cl::init(false));

static cl::opt<bool> EnableVGPRIndexMode(
    "amdgpu-vgpr-index-mode",
    cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
    cl::init(false));

static cl::opt<bool> UseAA("amdgpu-use-aa-in-codegen",
                           cl::desc("Enable the use of AA during codegen."),
                           cl::init(true));

GCNSubtarget::~GCNSubtarget() = default;

GCNSubtarget &
GCNSubtarget::initializeSubtargetDependencies(const Triple &TT,
                                              StringRef GPU, StringRef FS) {
  // Determine default and user-specified characteristics.
  //
  // We want to be able to turn these off, but making this a subtarget feature
  // for SI has the unhelpful behavior that it unsets everything else if you
  // disable it.
  //
  // Similarly we want enable-prt-strict-null to be on by default and not to
  // unset everything else if it is disabled.

  SmallString<256> FullFS("+promote-alloca,+load-store-opt,+enable-ds128,");

  // Turn on features that the HSA ABI requires. Also turn on FlatForGlobal by
  // default.
  if (isAmdHsaOS())
    FullFS += "+flat-for-global,+unaligned-access-mode,+trap-handler,";

  FullFS += "+enable-prt-strict-null,"; // This is overridden by a disable in FS

  // Disable mutually exclusive bits.
  if (FS.contains_insensitive("+wavefrontsize")) {
    if (!FS.contains_insensitive("wavefrontsize16"))
      FullFS += "-wavefrontsize16,";
    if (!FS.contains_insensitive("wavefrontsize32"))
      FullFS += "-wavefrontsize32,";
    if (!FS.contains_insensitive("wavefrontsize64"))
      FullFS += "-wavefrontsize64,";
  }

  FullFS += FS;

  ParseSubtargetFeatures(GPU, /*TuneCPU*/ GPU, FullFS);

  // Implement the "generic" processors, which act as the default when no
  // generation features are enabled (e.g. for -mcpu=''). HSA OS defaults to
  // the first amdgcn target that supports flat addressing. Other OSes default
  // to the first amdgcn target.
  if (Gen == AMDGPUSubtarget::INVALID) {
    Gen = TT.getOS() == Triple::AMDHSA ? AMDGPUSubtarget::SEA_ISLANDS
                                       : AMDGPUSubtarget::SOUTHERN_ISLANDS;
  }

  // We don't support FP64 for EG/NI at the moment.
  assert(!hasFP64() || (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS));

  // Targets must support 64-bit offsets for MUBUF instructions and/or flat
  // operations; otherwise they cannot access a 64-bit global address space.
  assert(hasAddr64() || hasFlat());

  // Unless +-flat-for-global is specified, turn on FlatForGlobal for targets
  // that do not support ADDR64 variants of MUBUF instructions. Such targets
  // cannot use a 64-bit offset with a MUBUF instruction to access the global
  // address space.
  if (!hasAddr64() && !FS.contains("flat-for-global") && !FlatForGlobal) {
    ToggleFeature(AMDGPU::FeatureFlatForGlobal);
    FlatForGlobal = true;
  }
  // Unless +-flat-for-global is specified, use MUBUF instructions for global
  // address space access if flat operations are not available.
  if (!hasFlat() && !FS.contains("flat-for-global") && FlatForGlobal) {
    ToggleFeature(AMDGPU::FeatureFlatForGlobal);
    FlatForGlobal = false;
  }

  // Set defaults if needed.
  if (MaxPrivateElementSize == 0)
    MaxPrivateElementSize = 4;

  if (LDSBankCount == 0)
    LDSBankCount = 32;

  if (TT.getArch() == Triple::amdgcn) {
    if (LocalMemorySize == 0)
      LocalMemorySize = 32768;

    // Do something sensible for an unspecified target.
    if (!HasMovrel && !HasVGPRIndexMode)
      HasMovrel = true;
  }

  // Don't crash on invalid devices.
  if (WavefrontSizeLog2 == 0)
    WavefrontSizeLog2 = 5;

  HasFminFmaxLegacy = getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS;
  HasSMulHi = getGeneration() >= AMDGPUSubtarget::GFX9;

  TargetID.setTargetIDFromFeaturesString(FS);

  LLVM_DEBUG(dbgs() << "xnack setting for subtarget: "
                    << TargetID.getXnackSetting() << '\n');
  LLVM_DEBUG(dbgs() << "sramecc setting for subtarget: "
                    << TargetID.getSramEccSetting() << '\n');

  return *this;
}

AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT) : TargetTriple(TT) {}

GCNSubtarget::GCNSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                           const GCNTargetMachine &TM)
    : // clang-format off
    AMDGPUGenSubtargetInfo(TT, GPU, /*TuneCPU*/ GPU, FS),
    AMDGPUSubtarget(TT),
    TargetTriple(TT),
    TargetID(*this),
    InstrItins(getInstrItineraryForCPU(GPU)),
    InstrInfo(initializeSubtargetDependencies(TT, GPU, FS)),
    TLInfo(TM, *this),
    FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0) {
  // clang-format on
  MaxWavesPerEU = AMDGPU::IsaInfo::getMaxWavesPerEU(this);
  CallLoweringInfo.reset(new AMDGPUCallLowering(*getTargetLowering()));
  InlineAsmLoweringInfo.reset(new InlineAsmLowering(getTargetLowering()));
  Legalizer.reset(new AMDGPULegalizerInfo(*this, TM));
  RegBankInfo.reset(new AMDGPURegisterBankInfo(*this));
  InstSelector.reset(new AMDGPUInstructionSelector(
      *this, *static_cast<AMDGPURegisterBankInfo *>(RegBankInfo.get()), TM));
}

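// On GFX10+, most VALU instructions may use the constant bus (SGPRs or a
// literal) for up to two operands; the 64-bit shifts listed below are an
// exception and keep the single-use limit that applies to all pre-GFX10
// VALU instructions.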
unsigned GCNSubtarget::getConstantBusLimit(unsigned Opcode) const {
  if (getGeneration() < GFX10)
    return 1;

  switch (Opcode) {
  case AMDGPU::V_LSHLREV_B64_e64:
  case AMDGPU::V_LSHLREV_B64_gfx10:
  case AMDGPU::V_LSHLREV_B64_e64_gfx11:
  case AMDGPU::V_LSHL_B64_e64:
  case AMDGPU::V_LSHRREV_B64_e64:
  case AMDGPU::V_LSHRREV_B64_gfx10:
  case AMDGPU::V_LSHRREV_B64_e64_gfx11:
  case AMDGPU::V_LSHR_B64_e64:
  case AMDGPU::V_ASHRREV_I64_e64:
  case AMDGPU::V_ASHRREV_I64_gfx10:
  case AMDGPU::V_ASHRREV_I64_e64_gfx11:
  case AMDGPU::V_ASHR_I64_e64:
    return 1;
  }

  return 2;
}

/// This list was mostly derived from experimentation.
bool GCNSubtarget::zeroesHigh16BitsOfDest(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::V_CVT_F16_F32_e32:
  case AMDGPU::V_CVT_F16_F32_e64:
  case AMDGPU::V_CVT_F16_U16_e32:
  case AMDGPU::V_CVT_F16_U16_e64:
  case AMDGPU::V_CVT_F16_I16_e32:
  case AMDGPU::V_CVT_F16_I16_e64:
  case AMDGPU::V_RCP_F16_e64:
  case AMDGPU::V_RCP_F16_e32:
  case AMDGPU::V_RSQ_F16_e64:
  case AMDGPU::V_RSQ_F16_e32:
  case AMDGPU::V_SQRT_F16_e64:
  case AMDGPU::V_SQRT_F16_e32:
  case AMDGPU::V_LOG_F16_e64:
  case AMDGPU::V_LOG_F16_e32:
  case AMDGPU::V_EXP_F16_e64:
  case AMDGPU::V_EXP_F16_e32:
  case AMDGPU::V_SIN_F16_e64:
  case AMDGPU::V_SIN_F16_e32:
  case AMDGPU::V_COS_F16_e64:
  case AMDGPU::V_COS_F16_e32:
  case AMDGPU::V_FLOOR_F16_e64:
  case AMDGPU::V_FLOOR_F16_e32:
  case AMDGPU::V_CEIL_F16_e64:
  case AMDGPU::V_CEIL_F16_e32:
  case AMDGPU::V_TRUNC_F16_e64:
  case AMDGPU::V_TRUNC_F16_e32:
  case AMDGPU::V_RNDNE_F16_e64:
  case AMDGPU::V_RNDNE_F16_e32:
  case AMDGPU::V_FRACT_F16_e64:
  case AMDGPU::V_FRACT_F16_e32:
  case AMDGPU::V_FREXP_MANT_F16_e64:
  case AMDGPU::V_FREXP_MANT_F16_e32:
  case AMDGPU::V_FREXP_EXP_I16_F16_e64:
  case AMDGPU::V_FREXP_EXP_I16_F16_e32:
  case AMDGPU::V_LDEXP_F16_e64:
  case AMDGPU::V_LDEXP_F16_e32:
  case AMDGPU::V_LSHLREV_B16_e64:
  case AMDGPU::V_LSHLREV_B16_e32:
  case AMDGPU::V_LSHRREV_B16_e64:
  case AMDGPU::V_LSHRREV_B16_e32:
  case AMDGPU::V_ASHRREV_I16_e64:
  case AMDGPU::V_ASHRREV_I16_e32:
  case AMDGPU::V_ADD_U16_e64:
  case AMDGPU::V_ADD_U16_e32:
  case AMDGPU::V_SUB_U16_e64:
  case AMDGPU::V_SUB_U16_e32:
  case AMDGPU::V_SUBREV_U16_e64:
  case AMDGPU::V_SUBREV_U16_e32:
  case AMDGPU::V_MUL_LO_U16_e64:
  case AMDGPU::V_MUL_LO_U16_e32:
  case AMDGPU::V_ADD_F16_e64:
  case AMDGPU::V_ADD_F16_e32:
  case AMDGPU::V_SUB_F16_e64:
  case AMDGPU::V_SUB_F16_e32:
  case AMDGPU::V_SUBREV_F16_e64:
  case AMDGPU::V_SUBREV_F16_e32:
  case AMDGPU::V_MUL_F16_e64:
  case AMDGPU::V_MUL_F16_e32:
  case AMDGPU::V_MAX_F16_e64:
  case AMDGPU::V_MAX_F16_e32:
  case AMDGPU::V_MIN_F16_e64:
  case AMDGPU::V_MIN_F16_e32:
  case AMDGPU::V_MAX_U16_e64:
  case AMDGPU::V_MAX_U16_e32:
  case AMDGPU::V_MIN_U16_e64:
  case AMDGPU::V_MIN_U16_e32:
  case AMDGPU::V_MAX_I16_e64:
  case AMDGPU::V_MAX_I16_e32:
  case AMDGPU::V_MIN_I16_e64:
  case AMDGPU::V_MIN_I16_e32:
  case AMDGPU::V_MAD_F16_e64:
  case AMDGPU::V_MAD_U16_e64:
  case AMDGPU::V_MAD_I16_e64:
  case AMDGPU::V_FMA_F16_e64:
  case AMDGPU::V_DIV_FIXUP_F16_e64:
    // On gfx10, all 16-bit instructions preserve the high bits.
    return getGeneration() <= AMDGPUSubtarget::GFX9;
  case AMDGPU::V_MADAK_F16:
  case AMDGPU::V_MADMK_F16:
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_FMAMK_F16:
  case AMDGPU::V_FMAAK_F16:
  case AMDGPU::V_FMAC_F16_e64:
  case AMDGPU::V_FMAC_F16_e32:
    // In gfx9, the preferred handling of the unused high 16 bits changed. Most
    // instructions maintain the legacy behavior of zeroing. Some instructions
    // changed to preserving the high bits.
    return getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS;
  case AMDGPU::V_MAD_MIXLO_F16:
  case AMDGPU::V_MAD_MIXHI_F16:
  default:
    return false;
  }
}

unsigned AMDGPUSubtarget::getMaxLocalMemSizeWithWaveCount(unsigned NWaves,
  const Function &F) const {
  if (NWaves == 1)
    return getLocalMemorySize();
  unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
  unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
  if (!WorkGroupsPerCu)
    return 0;
  unsigned MaxWaves = getMaxWavesPerEU();
  return getLocalMemorySize() * MaxWaves / WorkGroupsPerCu / NWaves;
}

// FIXME: Should return min,max range.
unsigned AMDGPUSubtarget::getOccupancyWithLocalMemSize(uint32_t Bytes,
  const Function &F) const {
  const unsigned MaxWorkGroupSize = getFlatWorkGroupSizes(F).second;
  const unsigned MaxWorkGroupsPerCu = getMaxWorkGroupsPerCU(MaxWorkGroupSize);
  if (!MaxWorkGroupsPerCu)
    return 0;

  const unsigned WaveSize = getWavefrontSize();

  // FIXME: Do we need to account for the alignment requirement of LDS rounding
  // the size up?
  // Compute the restriction based on LDS usage.
  unsigned NumGroups = getLocalMemorySize() / (Bytes ? Bytes : 1u);

  // This can be queried with more LDS than is possible, so just assume the
  // worst.
  if (NumGroups == 0)
    return 1;

  NumGroups = std::min(MaxWorkGroupsPerCu, NumGroups);

  // Round to the number of waves.
  const unsigned MaxGroupNumWaves = (MaxWorkGroupSize + WaveSize - 1) / WaveSize;
  unsigned MaxWaves = NumGroups * MaxGroupNumWaves;

  // Clamp to the maximum possible number of waves.
  MaxWaves = std::min(MaxWaves, getMaxWavesPerEU());

  // FIXME: Needs to be a multiple of the group size?
  //MaxWaves = MaxGroupNumWaves * (MaxWaves / MaxGroupNumWaves);

  assert(MaxWaves > 0 && MaxWaves <= getMaxWavesPerEU() &&
         "computed invalid occupancy");
  return MaxWaves;
}

unsigned
AMDGPUSubtarget::getOccupancyWithLocalMemSize(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return getOccupancyWithLocalMemSize(MFI->getLDSSize(), MF.getFunction());
}

std::pair<unsigned, unsigned>
AMDGPUSubtarget::getDefaultFlatWorkGroupSize(CallingConv::ID CC) const {
  switch (CC) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
    return std::make_pair(1, getWavefrontSize());
  default:
    return std::make_pair(1u, getMaxFlatWorkGroupSize());
  }
}

std::pair<unsigned, unsigned> AMDGPUSubtarget::getFlatWorkGroupSizes(
    const Function &F) const {
  // Default minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> Default =
      getDefaultFlatWorkGroupSize(F.getCallingConv());

  // Requested minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
      F, "amdgpu-flat-work-group-size", Default);

  // Make sure the requested minimum is not larger than the requested maximum.
  if (Requested.first > Requested.second)
    return Default;

  // Make sure the requested values do not violate the subtarget's
  // specifications.
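  // For example, a kernel carrying
  // __attribute__((amdgpu_flat_work_group_size(64, 256))) requests the range
  // {64, 256}; if either bound falls outside what this subtarget supports,
  // the whole request is ignored and Default is used instead.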
  if (Requested.first < getMinFlatWorkGroupSize())
    return Default;
  if (Requested.second > getMaxFlatWorkGroupSize())
    return Default;

  return Requested;
}

std::pair<unsigned, unsigned> AMDGPUSubtarget::getWavesPerEU(
    const Function &F, std::pair<unsigned, unsigned> FlatWorkGroupSizes) const {
  // Default minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Default(1, getMaxWavesPerEU());

  // If minimum/maximum flat work group sizes were explicitly requested using
  // the "amdgpu-flat-work-group-size" attribute, then set the default
  // minimum/maximum number of waves per execution unit to the values implied
  // by the requested minimum/maximum flat work group sizes.
  unsigned MinImpliedByFlatWorkGroupSize =
      getWavesPerEUForWorkGroup(FlatWorkGroupSizes.second);
  Default.first = MinImpliedByFlatWorkGroupSize;

  // Requested minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
      F, "amdgpu-waves-per-eu", Default, /*OnlyFirstRequired=*/true);

  // Make sure the requested minimum is not larger than the requested maximum.
  if (Requested.second && Requested.first > Requested.second)
    return Default;

  // Make sure the requested values do not violate the subtarget's
  // specifications.
  if (Requested.first < getMinWavesPerEU() ||
      Requested.second > getMaxWavesPerEU())
    return Default;

  // Make sure the requested values are compatible with the values implied by
  // the requested minimum/maximum flat work group sizes.
  if (Requested.first < MinImpliedByFlatWorkGroupSize)
    return Default;

  return Requested;
}

static unsigned getReqdWorkGroupSize(const Function &Kernel, unsigned Dim) {
  auto Node = Kernel.getMetadata("reqd_work_group_size");
  if (Node && Node->getNumOperands() == 3)
    return mdconst::extract<ConstantInt>(Node->getOperand(Dim))->getZExtValue();
  return std::numeric_limits<unsigned>::max();
}

bool AMDGPUSubtarget::isMesaKernel(const Function &F) const {
  return isMesa3DOS() && !AMDGPU::isShader(F.getCallingConv());
}

unsigned AMDGPUSubtarget::getMaxWorkitemID(const Function &Kernel,
                                           unsigned Dimension) const {
  unsigned ReqdSize = getReqdWorkGroupSize(Kernel, Dimension);
  if (ReqdSize != std::numeric_limits<unsigned>::max())
    return ReqdSize - 1;
  return getFlatWorkGroupSizes(Kernel).second - 1;
}

bool AMDGPUSubtarget::makeLIDRangeMetadata(Instruction *I) const {
  Function *Kernel = I->getParent()->getParent();
  unsigned MinSize = 0;
  unsigned MaxSize = getFlatWorkGroupSizes(*Kernel).second;
  bool IdQuery = false;

  // If reqd_work_group_size is present, it narrows the value down.
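  // For example, !reqd_work_group_size !{i32 256, i32 1, i32 1} fixes each
  // dimension exactly, so both the ID and the size queries handled below get
  // a tight range.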
  if (auto *CI = dyn_cast<CallInst>(I)) {
    const Function *F = CI->getCalledFunction();
    if (F) {
      unsigned Dim = UINT_MAX;
      switch (F->getIntrinsicID()) {
      case Intrinsic::amdgcn_workitem_id_x:
      case Intrinsic::r600_read_tidig_x:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_x:
        Dim = 0;
        break;
      case Intrinsic::amdgcn_workitem_id_y:
      case Intrinsic::r600_read_tidig_y:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_y:
        Dim = 1;
        break;
      case Intrinsic::amdgcn_workitem_id_z:
      case Intrinsic::r600_read_tidig_z:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_z:
        Dim = 2;
        break;
      default:
        break;
      }

      if (Dim <= 3) {
        unsigned ReqdSize = getReqdWorkGroupSize(*Kernel, Dim);
        if (ReqdSize != std::numeric_limits<unsigned>::max())
          MinSize = MaxSize = ReqdSize;
      }
    }
  }

  if (!MaxSize)
    return false;

  // Range metadata is [Lo, Hi). For an ID query we need to pass MaxSize as Hi.
  // For a size query we need to pass MaxSize + 1 as Hi.
  if (IdQuery)
    MinSize = 0;
  else
    ++MaxSize;

  MDBuilder MDB(I->getContext());
  MDNode *MaxWorkGroupSizeRange = MDB.createRange(APInt(32, MinSize),
                                                  APInt(32, MaxSize));
  I->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
  return true;
}

unsigned AMDGPUSubtarget::getImplicitArgNumBytes(const Function &F) const {
  assert(AMDGPU::isKernel(F.getCallingConv()));

  // We don't allocate the segment if we know the implicit arguments weren't
  // used, even if the ABI implies we need them.
  if (F.hasFnAttribute("amdgpu-no-implicitarg-ptr"))
    return 0;

  if (isMesaKernel(F))
    return 16;

  // Assume all implicit inputs are used by default.
  unsigned NBytes = (AMDGPU::getAmdhsaCodeObjectVersion() >= 5) ? 256 : 56;
  return AMDGPU::getIntegerAttribute(F, "amdgpu-implicitarg-num-bytes", NBytes);
}

uint64_t AMDGPUSubtarget::getExplicitKernArgSize(const Function &F,
                                                 Align &MaxAlign) const {
  assert(F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
         F.getCallingConv() == CallingConv::SPIR_KERNEL);

  const DataLayout &DL = F.getParent()->getDataLayout();
  uint64_t ExplicitArgBytes = 0;
  MaxAlign = Align(1);

  for (const Argument &Arg : F.args()) {
    const bool IsByRef = Arg.hasByRefAttr();
    Type *ArgTy = IsByRef ? Arg.getParamByRefType() : Arg.getType();
    Align Alignment = DL.getValueOrABITypeAlignment(
        IsByRef ? Arg.getParamAlign() : None, ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);
    ExplicitArgBytes = alignTo(ExplicitArgBytes, Alignment) + AllocSize;
    MaxAlign = std::max(MaxAlign, Alignment);
  }

  return ExplicitArgBytes;
}

unsigned AMDGPUSubtarget::getKernArgSegmentSize(const Function &F,
                                                Align &MaxAlign) const {
  uint64_t ExplicitArgBytes = getExplicitKernArgSize(F, MaxAlign);

  unsigned ExplicitOffset = getExplicitKernelArgOffset(F);

  uint64_t TotalSize = ExplicitOffset + ExplicitArgBytes;
  unsigned ImplicitBytes = getImplicitArgNumBytes(F);
  if (ImplicitBytes != 0) {
    const Align Alignment = getAlignmentForImplicitArgPtr();
    TotalSize = alignTo(ExplicitArgBytes, Alignment) + ImplicitBytes;
    MaxAlign = std::max(MaxAlign, Alignment);
  }

  // Being able to dereference past the end is useful for emitting scalar
  // loads.
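  // Scalar loads operate on dword (4-byte) granules, so round the segment
  // size up accordingly.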
  return alignTo(TotalSize, 4);
}

AMDGPUDwarfFlavour AMDGPUSubtarget::getAMDGPUDwarfFlavour() const {
  return getWavefrontSize() == 32 ? AMDGPUDwarfFlavour::Wave32
                                  : AMDGPUDwarfFlavour::Wave64;
}

void GCNSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
                                       unsigned NumRegionInstrs) const {
  // Track register pressure so the scheduler can try to decrease
  // pressure once register usage is above the threshold defined by
  // SIRegisterInfo::getRegPressureSetLimit().
  Policy.ShouldTrackPressure = true;

  // Enabling both top down and bottom up scheduling seems to give us fewer
  // register spills than just using one of these approaches on its own.
  Policy.OnlyTopDown = false;
  Policy.OnlyBottomUp = false;

  // Enabling ShouldTrackLaneMasks crashes the SI Machine Scheduler.
  if (!enableSIScheduler())
    Policy.ShouldTrackLaneMasks = true;
}

bool GCNSubtarget::hasMadF16() const {
  return InstrInfo.pseudoToMCOpcode(AMDGPU::V_MAD_F16_e64) != -1;
}

bool GCNSubtarget::useVGPRIndexMode() const {
  return !hasMovrel() || (EnableVGPRIndexMode && hasVGPRIndexMode());
}

bool GCNSubtarget::useAA() const { return UseAA; }

unsigned GCNSubtarget::getOccupancyWithNumSGPRs(unsigned SGPRs) const {
  if (getGeneration() >= AMDGPUSubtarget::GFX10)
    return getMaxWavesPerEU();

  if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    if (SGPRs <= 80)
      return 10;
    if (SGPRs <= 88)
      return 9;
    if (SGPRs <= 100)
      return 8;
    return 7;
  }
  if (SGPRs <= 48)
    return 10;
  if (SGPRs <= 56)
    return 9;
  if (SGPRs <= 64)
    return 8;
  if (SGPRs <= 72)
    return 7;
  if (SGPRs <= 80)
    return 6;
  return 5;
}

unsigned GCNSubtarget::getOccupancyWithNumVGPRs(unsigned VGPRs) const {
  unsigned MaxWaves = getMaxWavesPerEU();
  unsigned Granule = getVGPRAllocGranule();
  if (VGPRs < Granule)
    return MaxWaves;
  unsigned RoundedRegs = ((VGPRs + Granule - 1) / Granule) * Granule;
  return std::min(std::max(getTotalNumVGPRs() / RoundedRegs, 1u), MaxWaves);
}

unsigned
GCNSubtarget::getBaseReservedNumSGPRs(const bool HasFlatScratch) const {
  if (getGeneration() >= AMDGPUSubtarget::GFX10)
    return 2; // VCC. FLAT_SCRATCH and XNACK are no longer in SGPRs.

  if (HasFlatScratch || HasArchitectedFlatScratch) {
    if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return 6; // FLAT_SCRATCH, XNACK, VCC (in that order).
    if (getGeneration() == AMDGPUSubtarget::SEA_ISLANDS)
      return 4; // FLAT_SCRATCH, VCC (in that order).
  }

  if (isXNACKEnabled())
    return 4; // XNACK, VCC (in that order).
  return 2; // VCC.
}

unsigned GCNSubtarget::getReservedNumSGPRs(const MachineFunction &MF) const {
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
  return getBaseReservedNumSGPRs(MFI.hasFlatScratchInit());
}

unsigned GCNSubtarget::getReservedNumSGPRs(const Function &F) const {
  // In principle we do not need to reserve the SGPR pair used for flat_scratch
  // if we know flat instructions do not access the stack anywhere in the
  // program. For now, assume it's needed if we have flat instructions.
  const bool KernelUsesFlatScratch = hasFlatAddressSpace();
  return getBaseReservedNumSGPRs(KernelUsesFlatScratch);
}

unsigned GCNSubtarget::computeOccupancy(const Function &F, unsigned LDSSize,
                                        unsigned NumSGPRs,
                                        unsigned NumVGPRs) const {
  unsigned Occupancy =
      std::min(getMaxWavesPerEU(), getOccupancyWithLocalMemSize(LDSSize, F));
  if (NumSGPRs)
    Occupancy = std::min(Occupancy, getOccupancyWithNumSGPRs(NumSGPRs));
  if (NumVGPRs)
    Occupancy = std::min(Occupancy, getOccupancyWithNumVGPRs(NumVGPRs));
  return Occupancy;
}

unsigned GCNSubtarget::getBaseMaxNumSGPRs(
    const Function &F, std::pair<unsigned, unsigned> WavesPerEU,
    unsigned PreloadedSGPRs, unsigned ReservedNumSGPRs) const {
  // Compute the maximum number of SGPRs the function can use, based on the
  // default/requested minimum number of waves per execution unit.
  unsigned MaxNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, false);
  unsigned MaxAddressableNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, true);

  // Check if the maximum number of SGPRs was explicitly requested using the
  // "amdgpu-num-sgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-sgpr")) {
    unsigned Requested =
        AMDGPU::getIntegerAttribute(F, "amdgpu-num-sgpr", MaxNumSGPRs);

    // Make sure the requested value does not violate the subtarget's
    // specifications.
    if (Requested && (Requested <= ReservedNumSGPRs))
      Requested = 0;

    // If more SGPRs are required to support the input user/system SGPRs,
    // increase to accommodate them.
    //
    // FIXME: This really ends up using the requested number of SGPRs + number
    // of reserved special registers in total. Theoretically you could re-use
    // the last input registers for these special registers, but this would
    // require a lot of complexity to deal with the weird aliasing.
    unsigned InputNumSGPRs = PreloadedSGPRs;
    if (Requested && Requested < InputNumSGPRs)
      Requested = InputNumSGPRs;

    // Make sure the requested value is compatible with the values implied by
    // the default/requested minimum/maximum number of waves per execution
    // unit.
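    // A request larger than the SGPR budget at the minimum wave count, or
    // smaller than the number implied by the maximum wave count, cannot be
    // honored without violating those bounds, so it is dropped.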
    if (Requested && Requested > getMaxNumSGPRs(WavesPerEU.first, false))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumSGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumSGPRs = Requested;
  }

  if (hasSGPRInitBug())
    MaxNumSGPRs = AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;

  return std::min(MaxNumSGPRs - ReservedNumSGPRs, MaxAddressableNumSGPRs);
}

unsigned GCNSubtarget::getMaxNumSGPRs(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
  return getBaseMaxNumSGPRs(F, MFI.getWavesPerEU(), MFI.getNumPreloadedSGPRs(),
                            getReservedNumSGPRs(MF));
}

static unsigned getMaxNumPreloadedSGPRs() {
  // Max number of user SGPRs
  unsigned MaxUserSGPRs = 4 + // private segment buffer
                          2 + // dispatch ptr
                          2 + // queue ptr
                          2 + // kernel segment ptr
                          2 + // dispatch ID
                          2 + // flat scratch init
                          2;  // implicit buffer ptr

  // Max number of system SGPRs
  unsigned MaxSystemSGPRs = 1 + // WorkGroupIDX
                            1 + // WorkGroupIDY
                            1 + // WorkGroupIDZ
                            1 + // WorkGroupInfo
                            1;  // private segment wave byte offset

  // Max number of synthetic SGPRs
  unsigned SyntheticSGPRs = 1; // LDSKernelId

  return MaxUserSGPRs + MaxSystemSGPRs + SyntheticSGPRs;
}

unsigned GCNSubtarget::getMaxNumSGPRs(const Function &F) const {
  return getBaseMaxNumSGPRs(F, getWavesPerEU(F), getMaxNumPreloadedSGPRs(),
                            getReservedNumSGPRs(F));
}

unsigned GCNSubtarget::getBaseMaxNumVGPRs(
    const Function &F, std::pair<unsigned, unsigned> WavesPerEU) const {
  // Compute the maximum number of VGPRs the function can use, based on the
  // default/requested minimum number of waves per execution unit.
  unsigned MaxNumVGPRs = getMaxNumVGPRs(WavesPerEU.first);

  // Check if the maximum number of VGPRs was explicitly requested using the
  // "amdgpu-num-vgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-vgpr")) {
    unsigned Requested =
        AMDGPU::getIntegerAttribute(F, "amdgpu-num-vgpr", MaxNumVGPRs);

    if (hasGFX90AInsts())
      Requested *= 2;

    // Make sure the requested value is compatible with the values implied by
    // the default/requested minimum/maximum number of waves per execution
    // unit.
    if (Requested && Requested > getMaxNumVGPRs(WavesPerEU.first))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumVGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumVGPRs = Requested;
  }

  return MaxNumVGPRs;
}

unsigned GCNSubtarget::getMaxNumVGPRs(const Function &F) const {
  return getBaseMaxNumVGPRs(F, getWavesPerEU(F));
}

unsigned GCNSubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
  return getBaseMaxNumVGPRs(F, MFI.getWavesPerEU());
}

void GCNSubtarget::adjustSchedDependency(SUnit *Def, int DefOpIdx, SUnit *Use,
                                         int UseOpIdx, SDep &Dep) const {
  if (Dep.getKind() != SDep::Kind::Data || !Dep.getReg() ||
      !Def->isInstr() || !Use->isInstr())
    return;

  MachineInstr *DefI = Def->getInstr();
  MachineInstr *UseI = Use->getInstr();

  if (DefI->isBundle()) {
    const SIRegisterInfo *TRI = getRegisterInfo();
    auto Reg = Dep.getReg();
    MachineBasicBlock::const_instr_iterator I(DefI->getIterator());
    MachineBasicBlock::const_instr_iterator E(DefI->getParent()->instr_end());
    unsigned Lat = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {
      if (I->modifiesRegister(Reg, TRI))
        Lat = InstrInfo.getInstrLatency(getInstrItineraryData(), *I);
      else if (Lat)
        --Lat;
    }
    Dep.setLatency(Lat);
  } else if (UseI->isBundle()) {
    const SIRegisterInfo *TRI = getRegisterInfo();
    auto Reg = Dep.getReg();
    MachineBasicBlock::const_instr_iterator I(UseI->getIterator());
    MachineBasicBlock::const_instr_iterator E(UseI->getParent()->instr_end());
    unsigned Lat = InstrInfo.getInstrLatency(getInstrItineraryData(), *DefI);
    for (++I; I != E && I->isBundledWithPred() && Lat; ++I) {
      if (I->readsRegister(Reg, TRI))
        break;
      --Lat;
    }
    Dep.setLatency(Lat);
  } else if (Dep.getLatency() == 0 && Dep.getReg() == AMDGPU::VCC_LO) {
    // Work around the fact that SIInstrInfo::fixImplicitOperands modifies
    // implicit operands which come from the MCInstrDesc, which can fool
    // ScheduleDAGInstrs::addPhysRegDataDeps into treating them as implicit
    // pseudo operands.
    Dep.setLatency(InstrInfo.getSchedModel().computeOperandLatency(
        DefI, DefOpIdx, UseI, UseOpIdx));
  }
}

namespace {
struct FillMFMAShadowMutation : ScheduleDAGMutation {
  const SIInstrInfo *TII;

  ScheduleDAGMI *DAG;

  FillMFMAShadowMutation(const SIInstrInfo *tii) : TII(tii) {}

  bool isSALU(const SUnit *SU) const {
    const MachineInstr *MI = SU->getInstr();
    return MI && TII->isSALU(*MI) && !MI->isTerminator();
  }

  bool isVALU(const SUnit *SU) const {
    const MachineInstr *MI = SU->getInstr();
    return MI && TII->isVALU(*MI);
  }

  // Link as many SALU instructions in a chain as possible. Return the size
  // of the chain. Links up to MaxChain instructions.
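  // Starting from To, artificial edges are added from From to every SALU
  // instruction reachable through successor chains, forcing them to be
  // scheduled after From (i.e. into the MFMA's shadow); Visited prevents
  // linking the same SUnit twice.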
  unsigned linkSALUChain(SUnit *From, SUnit *To, unsigned MaxChain,
                         SmallPtrSetImpl<SUnit *> &Visited) const {
    SmallVector<SUnit *, 8> Worklist({To});
    unsigned Linked = 0;

    while (!Worklist.empty() && MaxChain-- > 0) {
      SUnit *SU = Worklist.pop_back_val();
      if (!Visited.insert(SU).second)
        continue;

      LLVM_DEBUG(dbgs() << "Inserting edge from\n"; DAG->dumpNode(*From);
                 dbgs() << "to\n"; DAG->dumpNode(*SU); dbgs() << '\n');

      if (SU != From && From != &DAG->ExitSU && DAG->canAddEdge(SU, From))
        if (DAG->addEdge(SU, SDep(From, SDep::Artificial)))
          ++Linked;

      for (SDep &SI : From->Succs) {
        SUnit *SUv = SI.getSUnit();
        if (SUv != From && SU != &DAG->ExitSU && isVALU(SUv) &&
            DAG->canAddEdge(SUv, SU))
          DAG->addEdge(SUv, SDep(SU, SDep::Artificial));
      }

      for (SDep &SI : SU->Succs) {
        SUnit *Succ = SI.getSUnit();
        if (Succ != SU && isSALU(Succ))
          Worklist.push_back(Succ);
      }
    }

    return Linked;
  }

  void apply(ScheduleDAGInstrs *DAGInstrs) override {
    const GCNSubtarget &ST = DAGInstrs->MF.getSubtarget<GCNSubtarget>();
    if (!ST.hasMAIInsts())
      return;
    DAG = static_cast<ScheduleDAGMI *>(DAGInstrs);
    const TargetSchedModel *TSchedModel = DAGInstrs->getSchedModel();
    if (!TSchedModel || DAG->SUnits.empty())
      return;

    // Scan for MFMA long-latency instructions and try to add a dependency
    // of available SALU instructions to give them a chance to fill the MFMA
    // shadow. Filling the shadow with SALU rather than VALU instructions is
    // desirable to prevent power-consumption bursts and throttling.
    auto LastSALU = DAG->SUnits.begin();
    auto E = DAG->SUnits.end();
    SmallPtrSet<SUnit *, 32> Visited;
    for (SUnit &SU : DAG->SUnits) {
      MachineInstr &MAI = *SU.getInstr();
      if (!TII->isMAI(MAI) ||
          MAI.getOpcode() == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
          MAI.getOpcode() == AMDGPU::V_ACCVGPR_READ_B32_e64)
        continue;

      unsigned Lat = TSchedModel->computeInstrLatency(&MAI) - 1;

      LLVM_DEBUG(dbgs() << "Found MFMA: "; DAG->dumpNode(SU);
                 dbgs() << "Need " << Lat
                        << " instructions to cover latency.\n");

      // Find up to Lat independent scalar instructions as early as
      // possible such that they can be scheduled after this MFMA.
      for (; Lat && LastSALU != E; ++LastSALU) {
        if (Visited.count(&*LastSALU))
          continue;

        if (&SU == &DAG->ExitSU || &SU == &*LastSALU || !isSALU(&*LastSALU) ||
            !DAG->canAddEdge(&*LastSALU, &SU))
          continue;

        Lat -= linkSALUChain(&SU, &*LastSALU, Lat, Visited);
      }
    }
  }
};
} // namespace

void GCNSubtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(std::make_unique<FillMFMAShadowMutation>(&InstrInfo));
}

std::unique_ptr<ScheduleDAGMutation>
GCNSubtarget::createFillMFMAShadowMutation(const TargetInstrInfo *TII) const {
  return EnablePowerSched ? std::make_unique<FillMFMAShadowMutation>(&InstrInfo)
                          : nullptr;
}

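// Both overloads below return the common AMDGPUSubtarget view of the
// subtarget, selecting the GCN (amdgcn) or R600 implementation based on the
// target triple.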
const AMDGPUSubtarget &AMDGPUSubtarget::get(const MachineFunction &MF) {
  if (MF.getTarget().getTargetTriple().getArch() == Triple::amdgcn)
    return static_cast<const AMDGPUSubtarget &>(
        MF.getSubtarget<GCNSubtarget>());
  return static_cast<const AMDGPUSubtarget &>(
      MF.getSubtarget<R600Subtarget>());
}

const AMDGPUSubtarget &AMDGPUSubtarget::get(const TargetMachine &TM,
                                            const Function &F) {
  if (TM.getTargetTriple().getArch() == Triple::amdgcn)
    return static_cast<const AMDGPUSubtarget &>(
        TM.getSubtarget<GCNSubtarget>(F));
  return static_cast<const AMDGPUSubtarget &>(
      TM.getSubtarget<R600Subtarget>(F));
}