//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to the local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
    "disable-promote-alloca-to-vector",
    cl::desc("Disable promote alloca to vector"),
    cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
    "disable-promote-alloca-to-lds",
    cl::desc("Disable promote alloca to LDS"),
    cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
    "amdgpu-promote-alloca-to-vector-limit",
    cl::desc("Maximum byte size to consider promoting an alloca to a vector"),
    cl::init(0));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

class AMDGPUPromoteAllocaImpl {
private:
  const TargetMachine &TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value*> &WorkList) const;

  /// Val is a pointer derived from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
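  /// As a side effect, this initializes LocalMemLimit and
  /// CurrentLocalMemUsage from the LDS globals already used by \p F.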
  bool hasSufficientLocalMem(const Function &F);

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

public:
  AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
  bool run(Function &F);
};

class AMDGPUPromoteAllocaToVector : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
                      "AMDGPU promote alloca to vector or LDS", false, false)
// Move LDS uses from functions to kernels before promoting allocas, so that
// the estimate of available LDS is accurate.
INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
                    "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
  }
  return false;
}

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const Triple &TT = TM.getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (IsAMDGCN) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca which would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  // typedef struct hsa_kernel_dispatch_packet_s {
  //   uint16_t header;
  //   uint16_t setup;
  //   uint16_t workgroup_size_x;
  //   uint16_t workgroup_size_y;
  //   uint16_t workgroup_size_z;
  //   uint16_t reserved0;
  //   uint32_t grid_size_x;
  //   uint32_t grid_size_y;
  //   uint32_t grid_size_z;
  //
  //   uint32_t private_segment_size;
  //   uint32_t group_segment_size;
  //   uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //   void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //   void *kernarg_address;
  //   uint32_t reserved1;
  // #else
  //   uint32_t reserved1;
  //   void *kernarg_address;
  // #endif
  //   uint64_t reserved2;
  //   hsa_signal_t completion_signal; // uint64_t wrapper
  // } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addRetAttr(Attribute::NoAlias);
  DispatchPtr->addRetAttr(Attribute::NonNull);
  F.removeFnAttr("amdgpu-no-dispatch-ptr");

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableRetAttr(64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
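  //
  // The header and setup fields occupy the first four bytes, so i32 index 1
  // covers workgroup_size_x and workgroup_size_y, and i32 index 2 covers
  // workgroup_size_z together with the reserved0 field.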
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::pair(Y, LoadZU);
}

Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  Function *F = Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
  StringRef AttrName;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    AttrName = "amdgpu-no-workitem-id-x";
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    AttrName = "amdgpu-no-workitem-id-y";
    break;
  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    AttrName = "amdgpu-no-workitem-id-z";
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);
  F->removeFnAttr(AttrName);

  return CI;
}

static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return FixedVectorType::get(ArrayTy->getElementType(),
                              ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
  if (!GEP)
    return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  auto I = GEPIdx.find(GEP);
  assert(I != GEPIdx.end() && "Must have entry for GEP!");
  return I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
                               Type *VecElemTy, const DataLayout &DL) {
  // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
  // helper.
  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  MapVector<Value *, APInt> VarOffsets;
  APInt ConstOffset(BW, 0);
  if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
      !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
    return nullptr;

  unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
  if (VarOffsets.size() > 1)
    return nullptr;

  if (VarOffsets.size() == 1) {
    // Only handle cases where we don't need to insert extra arithmetic
    // instructions.
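    // A single variable offset is only directly usable when it is already
    // scaled by the element size and there is no constant remainder, i.e.
    // the GEP computes exactly &Alloca[Var].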
    const auto &VarOffset = VarOffsets.front();
    if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
      return nullptr;
    return VarOffset.first;
  }

  APInt Quot;
  uint64_t Rem;
  APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
  if (Rem != 0)
    return nullptr;

  return ConstantInt::get(GEP->getContext(), Quot);
}

struct MemTransferInfo {
  ConstantInt *SrcIndex = nullptr;
  ConstantInt *DestIndex = nullptr;
};

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
                                     unsigned MaxVGPRs) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of allocas to vectors is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca->getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = arrayTypeToVecType(ArrayTy);
  }

  // Use up to 1/4 of the available register budget for vectorization.
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);

  if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with "
                      << MaxVGPRs << " registers available\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!VectorTy || VectorTy->getNumElements() > 16 ||
      VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  SmallVector<Instruction *> WorkList;
  SmallVector<Instruction *> DeferredInsts;
  SmallVector<Use *, 8> Uses;
  DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;

  for (Use &U : Alloca->uses())
    Uses.push_back(&U);

  Type *VecEltTy = VectorTy->getElementType();
  unsigned ElementSize = DL.getTypeSizeInBits(VecEltTy) / 8;
  while (!Uses.empty()) {
    Use *U = Uses.pop_back_val();
    Instruction *Inst = cast<Instruction>(U->getUser());

    if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
      // This is a store of the pointer, not a store to the pointer.
      if (isa<StoreInst>(Inst) &&
          U->getOperandNo() != StoreInst::getPointerOperandIndex())
        return false;

      Type *AccessTy = getLoadStoreType(Inst);
      Ptr = Ptr->stripPointerCasts();

      // Alloca already accessed as vector, leave alone.
      if (Ptr == Alloca && DL.getTypeStoreSize(Alloca->getAllocatedType()) ==
                               DL.getTypeStoreSize(AccessTy))
        continue;

      // Check that this is a simple access of a vector element.
      bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
                                          : cast<StoreInst>(Inst)->isSimple();
      if (!IsSimple ||
          !CastInst::isBitOrNoopPointerCastable(VecEltTy, AccessTy, DL))
        return false;

      WorkList.push_back(Inst);
      continue;
    }

    if (isa<BitCastInst>(Inst)) {
      // Look through bitcasts.
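      // Queue the bitcast's own uses so they are analyzed exactly like
      // direct uses of the alloca.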
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      continue;
    }

    if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
      // If we can't compute a vector index from this GEP, then we can't
      // promote this alloca to vector.
      Value *Index = GEPToVectorIndex(GEP, Alloca, VecEltTy, DL);
      if (!Index) {
        LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                          << '\n');
        return false;
      }

      GEPVectorIdx[GEP] = Index;
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      continue;
    }

    if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
      if (TransferInst->isVolatile())
        return false;

      ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
      if (!Len || !!(Len->getZExtValue() % ElementSize))
        return false;

      if (!TransferInfo.count(TransferInst)) {
        DeferredInsts.push_back(Inst);
        WorkList.push_back(Inst);
        TransferInfo[TransferInst] = MemTransferInfo();
      }

      auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
        GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
        if (Ptr != Alloca && !GEPVectorIdx.count(GEP))
          return nullptr;

        return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
      };

      unsigned OpNum = U->getOperandNo();
      MemTransferInfo *TI = &TransferInfo[TransferInst];
      if (OpNum == 0) {
        Value *Dest = TransferInst->getDest();
        ConstantInt *Index = getPointerIndexOfAlloca(Dest);
        if (!Index)
          return false;
        TI->DestIndex = Index;
      } else {
        assert(OpNum == 1);
        Value *Src = TransferInst->getSource();
        ConstantInt *Index = getPointerIndexOfAlloca(Src);
        if (!Index)
          return false;
        TI->SrcIndex = Index;
      }
      continue;
    }

    // Ignore assume-like intrinsics and comparisons used in assumes.
    if (isAssumeLikeIntrinsic(Inst))
      continue;

    if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
          return isAssumeLikeIntrinsic(cast<Instruction>(U));
        }))
      continue;

    // Unknown user.
    return false;
  }

  while (!DeferredInsts.empty()) {
    Instruction *Inst = DeferredInsts.pop_back_val();
    MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
    // TODO: Support the case where the pointers are from different allocas or
    // from different address spaces.
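    //
    // Rewriting a memtransfer as a shufflevector requires that both of its
    // pointer operands were resolved to constant element indices into this
    // same alloca.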
    MemTransferInfo &Info = TransferInfo[TransferInst];
    if (!Info.SrcIndex || !Info.DestIndex)
      return false;
  }

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Instruction *Inst : WorkList) {
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue =
          Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca->getAlign());
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      if (Inst->getType() != VecEltTy)
        ExtractElement =
            Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue =
          Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca->getAlign());
      Value *Elt = SI->getValueOperand();
      if (Elt->getType() != VecEltTy)
        Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
      Builder.CreateAlignedStore(NewVecValue, BitCast, Alloca->getAlign());
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Call: {
      if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst)) {
        ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
        unsigned NumCopied = Length->getZExtValue() / ElementSize;
        MemTransferInfo *TI = &TransferInfo[cast<MemTransferInst>(Inst)];
        unsigned SrcBegin = TI->SrcIndex->getZExtValue();
        unsigned DestBegin = TI->DestIndex->getZExtValue();

        SmallVector<int> Mask;
        for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
          if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
            Mask.push_back(SrcBegin++);
          } else {
            Mask.push_back(Idx);
          }
        }
        Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
        Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
        Value *VecValue =
            Builder.CreateAlignedLoad(VectorTy, BitCast, Alloca->getAlign());
        Value *NewVecValue = Builder.CreateShuffleVector(VecValue, Mask);
        Builder.CreateAlignedStore(NewVecValue, BitCast, Alloca->getAlign());

        Inst->eraseFromParent();
      } else {
        llvm_unreachable("Unsupported call when promoting alloca to vector");
      }
      break;
    }

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
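    // None of these capture or escape the underlying pointer; they either
    // access memory in a way the promotion can reproduce or only convey
    // metadata, and handleAlloca knows how to rewrite each of them.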
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject stores of the pointer itself, i.e. when Val is the value
      // operand rather than the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other operand is derived from
    // a pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    // Do not promote vector/aggregate type instructions. It is hard to track
    // their users.
    if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
      return false;

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getAddressableLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallVector<const Constant *, 16> Stack;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());

    // HIP uses an extern unsized array in local address space for dynamically
    // allocated shared memory. In that case, we have to disable the promotion.
    if (GV->hasExternalLinkage() && AllocSize == 0) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
                           "local memory. Promoting to local memory "
                           "disabled.\n");
      return false;
    }

    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding.
  //
  // FIXME: We should really do something to fix the addresses to a more
  // optimal value instead.
  llvm::sort(AllocatedSizes, llvm::less_second());

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions, as the
  // work item ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory,
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << "  Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlign());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  // Compute the linearized work-item ID within the workgroup:
  //   TID = TIdX * TCntY * TCntZ + TIdY * TCntZ + TIdZ.
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  SmallVector<IntrinsicInst *> DeferredIntrs;

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        PointerType *NewTy = PointerType::getWithSamePointeeType(
            cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      PointerType *NewTy = PointerType::getWithSamePointeeType(
          cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // These have 2 pointer operands. If the second pointer also needs to be
      // replaced, we defer processing of these intrinsics until all other
      // values have been processed.
      DeferredIntrs.push_back(Intr);
      continue;
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlign(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(),
           PointerType::getWithSamePointeeType(
               cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  for (IntrinsicInst *Intr : DeferredIntrs) {
    Builder.SetInsertPoint(Intr);
    Intrinsic::ID ID = Intr->getIntrinsicID();
    assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);

    MemTransferInst *MI = cast<MemTransferInst>(Intr);
    auto *B =
        Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
                                      MI->getRawSource(), MI->getSourceAlign(),
                                      MI->getLength(), MI->isVolatile());

    for (unsigned I = 0; I != 2; ++I) {
      if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
        B->addDereferenceableParamAttr(I, Bytes);
      }
    }

    Intr->eraseFromParent();
  }

  return true;
}

bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  Module *Mod = I.getParent()->getParent()->getParent();
  return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
}

bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
  if (DisablePromoteAllocaToVector)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  unsigned MaxVGPRs;
  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca which would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
  }
  return false;
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = promoteAllocasToVector(F, TM);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}