//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
    "disable-promote-alloca-to-vector",
    cl::desc("Disable promote alloca to vector"), cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
    "disable-promote-alloca-to-lds",
    cl::desc("Disable promote alloca to LDS"), cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
    "amdgpu-promote-alloca-to-vector-limit",
    cl::desc("Maximum byte size to consider promote alloca to vector"),
    cl::init(0));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

class AMDGPUPromoteAllocaImpl {
private:
  const TargetMachine &TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca, Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
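  /// As a side effect, this initializes LocalMemLimit and
  /// CurrentLocalMemUsage from the LDS already referenced by \p F.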
  bool hasSufficientLocalMem(const Function &F);

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

public:
  AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
  bool run(Function &F);
};

class AMDGPUPromoteAllocaToVector : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
                      "AMDGPU promote alloca to vector or LDS", false, false)
// Move LDS uses from functions to kernels before promote alloca for an
// accurate estimation of the LDS available.
INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
                    "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
  }
  return false;
}

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const Triple &TT = TM.getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (IsAMDGCN) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca that would force spilling.
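    // (Going past that budget would require save/restore code around calls,
    // defeating the point of the promotion.)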
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn =
      Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addRetAttr(Attribute::NoAlias);
  DispatchPtr->addRetAttr(Attribute::NonNull);
  F.removeFnAttr("amdgpu-no-dispatch-ptr");

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableRetAttr(64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
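  // The i32 at index 1 packs workgroup_size_x in its low half and
  // workgroup_size_y in its high half; the i32 at index 2 holds
  // workgroup_size_z in its low half, with reserved0 (zero) above it.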
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  Function *F = Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
  StringRef AttrName;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    AttrName = "amdgpu-no-workitem-id-x";
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    AttrName = "amdgpu-no-workitem-id-y";
    break;
  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    AttrName = "amdgpu-no-workitem-id-z";
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);
  F->removeFnAttr(AttrName);

  return CI;
}

static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return FixedVectorType::get(ArrayTy->getElementType(),
                              ArrayTy->getNumElements());
}

static Value *stripBitcasts(Value *V) {
  while (Instruction *I = dyn_cast<Instruction>(V)) {
    if (I->getOpcode() != Instruction::BitCast)
      break;
    V = I->getOperand(0);
  }
  return V;
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
  if (!GEP)
    return nullptr;

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Returns true if Inst is one of the instructions handled below when turning
// the alloca into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User,
                             const DataLayout &DL) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently we only handle the case where the pointer operand is a GEP.
    // Also, we cannot vectorize volatile or atomic loads.
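    // A load of the alloca's full vector type through the alloca pointer
    // itself needs no index computation and is accepted directly.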
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;

    Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
    if (!PtrInst)
      return false;

    return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
            PtrInst->getOpcode() == Instruction::BitCast) &&
           LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, plus
    // since it should be canonical form, the User should be a GEP.
    // Also, we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;

    Instruction *UserInst = dyn_cast<Instruction>(User);
    if (!UserInst)
      return false;

    return (SI->getPointerOperand() == User) &&
           (UserInst->getOpcode() == Instruction::GetElementPtr ||
            UserInst->getOpcode() == Instruction::BitCast) &&
           SI->isSimple();
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
                                     unsigned MaxVGPRs) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << " Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca->getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = arrayTypeToVecType(ArrayTy);
  }

  // Use up to 1/4 of available register budget for vectorization.
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);

  if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << " Alloca too big for vectorization with "
                      << MaxVGPRs << " registers available\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays; we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
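  // For example, [8 x float] is promotable as <8 x float>, while [32 x i32]
  // or a single-element array fails the element-count check below.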
  if (!VectorTy || VectorTy->getNumElements() > 16 ||
      VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << " Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  std::vector<Value *> WorkList;
  SmallVector<User *, 8> Users(Alloca->users());
  SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
  Type *VecEltTy = VectorTy->getElementType();
  while (!Users.empty()) {
    User *AllocaUser = Users.pop_back_val();
    User *UseUser = UseUsers.pop_back_val();
    Instruction *Inst = dyn_cast<Instruction>(AllocaUser);

    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(Inst, UseUser, DL))
        return false;

      if (Inst->getOpcode() == Instruction::BitCast) {
        Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
        Type *ToTy = Inst->getType()->getPointerElementType();
        if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
            DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
          continue;

        for (User *CastUser : Inst->users()) {
          if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
            continue;
          Users.push_back(CastUser);
          UseUsers.push_back(Inst);
        }

        continue;
      }

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << " Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    Users.append(GEP->user_begin(), GEP->user_end());
    UseUsers.append(GEP->getNumUses(), GEP);
  }

  LLVM_DEBUG(dbgs() << " Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
        break;

      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      if (!Index)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      if (Inst->getType() != VecEltTy)
        ExtractElement =
            Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AllocaTy ||
          SI->getValueOperand()->getType()->isVectorTy())
        break;

      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      if (!Index)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *Elt = SI->getValueOperand();
      if (Elt->getType() != VecEltTy)
        Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
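      // Rewrite the scalar store as a read-modify-write of the whole vector:
      // load the vector, insert the new element, and store it back.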
      Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the alloca-derived pointer is the stored value rather than
      // the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other operand is null or
    // derived from the same alloca that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
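      // (ReturnCaptures and StoreCaptures are both set, so returning or
      // storing the pointer counts as a capture.)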
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    // Do not promote vector/aggregate type instructions. It is hard to track
    // their users.
    if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
      return false;

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallVector<const Constant *, 16> Stack;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
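    // visitUsers pushed any unvisited constant users onto Stack above; drain
    // the stack here.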
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());

    // HIP uses an extern unsized array in local address space for dynamically
    // allocated shared memory. In that case, we have to disable the promotion.
    if (GV->hasExternalLinkage() && AllocSize == 0) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
                           "local memory. Promoting to local memory "
                           "disabled.\n");
      return false;
    }

    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding.
  //
  // FIXME: We should really do something to fix the addresses to a more
  // optimal value instead.
  llvm::sort(AllocatedSizes, [](std::pair<uint64_t, Align> LHS,
                                std::pair<uint64_t, Align> RHS) {
    return LHS.second < RHS.second;
  });

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }

  unsigned MaxOccupancy =
      ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount =
      ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << " Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
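// Returns true if the alloca was promoted, either to a vector or to LDS.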
bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory
  // we could end up using more than the maximum due to alignment padding.

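  // Every workitem gets its own copy of the alloca, so the LDS cost below is
  // the alloca's allocation size scaled by the maximum flat workgroup size.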
  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << " " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage, UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlign());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
      Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
      TID};

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  SmallVector<IntrinsicInst *> DeferredIntrs;

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        PointerType *NewTy = PointerType::getWithSamePointeeType(
            cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      PointerType *NewTy = PointerType::getWithSamePointeeType(
          cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
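      // (mutateType above only changed V's own type; null-pointer operands
      // still carry the old address space and must be rebuilt in
      // AMDGPUAS::LOCAL_ADDRESS.)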
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // These have 2 pointer operands. In case the second pointer also needs
      // to be replaced, we defer processing of these intrinsics until all
      // other values are processed.
      DeferredIntrs.push_back(Intr);
      continue;
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(),
                           MaybeAlign(MemSet->getDestAlignment()),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(),
           PointerType::getWithSamePointeeType(
               cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  for (IntrinsicInst *Intr : DeferredIntrs) {
    Builder.SetInsertPoint(Intr);
    Intrinsic::ID ID = Intr->getIntrinsicID();
    assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);

    MemTransferInst *MI = cast<MemTransferInst>(Intr);
    auto *B =
        Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
                                      MI->getRawSource(), MI->getSourceAlign(),
                                      MI->getLength(), MI->isVolatile());

    for (unsigned I = 0; I != 2; ++I) {
      if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
        B->addDereferenceableParamAttr(I, Bytes);
      }
    }

    Intr->eraseFromParent();
  }

  return true;
}

bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
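  // (isArrayAllocation() is true for an alloca with an explicit element count
  // operand, e.g. "alloca i32, i32 4".)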
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  Module *Mod = I.getParent()->getParent()->getParent();
  return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
}

bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
  if (DisablePromoteAllocaToVector)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  unsigned MaxVGPRs;
  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca that would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
  }
  return false;
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = promoteAllocasToVector(F, TM);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}