//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
  "disable-promote-alloca-to-lds",
  cl::desc("Disable promote alloca to LDS"),
  cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
  "amdgpu-promote-alloca-to-vector-limit",
  cl::desc("Maximum byte size to consider promote alloca to vector"),
  cl::init(0));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

class AMDGPUPromoteAllocaImpl {
private:
  const TargetMachine &TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val
  /// should be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
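  /// Sets LocalMemLimit and CurrentLocalMemUsage as a side effect.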
  bool hasSufficientLocalMem(const Function &F);

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

public:
  AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
  bool run(Function &F);
};

class AMDGPUPromoteAllocaToVector : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
                      "AMDGPU promote alloca to vector or LDS", false, false)
// Move LDS uses from functions to kernels before promoting allocas, for an
// accurate estimate of the LDS available.
INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
                    "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
  }
  return false;
}

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const Triple &TT = TM.getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (IsAMDGCN) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-saved registers.
    // Do not promote an alloca that would force spilling.
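    // (Entry points have no callers, so they keep the full budget computed
    // above.)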
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  //   #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  //   #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  //   #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  //   #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t;
  //
  Function *DispatchPtrFn =
      Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addRetAttr(Attribute::NoAlias);
  DispatchPtr->addRetAttr(Attribute::NonNull);
  F.removeFnAttr("amdgpu-no-dispatch-ptr");

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableRetAttr(64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
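  // Per the struct layout above, the i32 at index 1 packs
  // {workgroup_size_x, workgroup_size_y} and the i32 at index 2 packs
  // {workgroup_size_z, reserved0}.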
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  Function *F = Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
  StringRef AttrName;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    AttrName = "amdgpu-no-workitem-id-x";
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    AttrName = "amdgpu-no-workitem-id-y";
    break;
  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    AttrName = "amdgpu-no-workitem-id-z";
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);
  F->removeFnAttr(AttrName);

  return CI;
}

static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return FixedVectorType::get(ArrayTy->getElementType(),
                              ArrayTy->getNumElements());
}

static Value *stripBitcasts(Value *V) {
  while (Instruction *I = dyn_cast<Instruction>(V)) {
    if (I->getOpcode() != Instruction::BitCast)
      break;
    V = I->getOperand(0);
  }
  return V;
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
  if (!GEP)
    return nullptr;

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}

// Checks whether \p Inst (a user reached through \p User) is an instruction
// we know how to rewrite when turning the alloca into a vector.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User,
                             const DataLayout &DL) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently we only handle the case where the pointer operand is a GEP.
    // Also, we cannot vectorize volatile or atomic loads.
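    // A load of the entire alloca with vector type is already in the form we
    // want and needs no index computation.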
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;

    Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
    if (!PtrInst)
      return false;

    return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
            PtrInst->getOpcode() == Instruction::BitCast) &&
           LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value; moreover, since
    // this should be canonical form, the User should be a GEP.
    // Also, we cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;

    Instruction *UserInst = dyn_cast<Instruction>(User);
    if (!UserInst)
      return false;

    return (SI->getPointerOperand() == User) &&
           (UserInst->getOpcode() == Instruction::GetElementPtr ||
            UserInst->getOpcode() == Instruction::BitCast) &&
           SI->isSimple();
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
                                     unsigned MaxVGPRs) {
  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca->getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = arrayTypeToVecType(ArrayTy);
  }

  // Use up to 1/4 of available register budget for vectorization.
  // (The comparison below is in bits: the option is in bytes, hence the * 8,
  // and each VGPR is 32 bits wide.)
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);

  if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with " << MaxVGPRs
                      << " registers available\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
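  // (For nested arrays such as [ 2 x [ 2 x i32 ]], the element type
  // [ 2 x i32 ] is not a valid vector element type, so VectorTy stays null
  // above and the alloca is rejected here.)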
  if (!VectorTy || VectorTy->getNumElements() > 16 ||
      VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  std::vector<Value *> WorkList;
  // Users and UseUsers are parallel worklists: UseUsers[i] is the pointer
  // value through which Users[i] reaches the alloca.
  SmallVector<User *, 8> Users(Alloca->users());
  SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
  Type *VecEltTy = VectorTy->getElementType();
  while (!Users.empty()) {
    User *AllocaUser = Users.pop_back_val();
    User *UseUser = UseUsers.pop_back_val();
    Instruction *Inst = dyn_cast<Instruction>(AllocaUser);

    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(Inst, UseUser, DL))
        return false;

      if (Inst->getOpcode() == Instruction::BitCast) {
        Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
        Type *ToTy = Inst->getType()->getPointerElementType();
        if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
            DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
          continue;

        for (User *CastUser : Inst->users()) {
          if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
            continue;
          Users.push_back(CastUser);
          UseUsers.push_back(Inst);
        }

        continue;
      }

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    Users.append(GEP->user_begin(), GEP->user_end());
    UseUsers.append(GEP->getNumUses(), GEP);
  }

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
        break;

      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      if (!Index)
        break;

      // Rewrite the scalar load as a load of the whole vector followed by an
      // extractelement of the requested lane.
      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      if (Inst->getType() != VecEltTy)
        ExtractElement =
            Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AllocaTy ||
          SI->getValueOperand()->getType()->isVectorTy())
        break;

      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      if (!Index)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *Elt = SI->getValueOperand();
      if (Elt->getType() != VecEltTy)
        Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
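      // Rewrite the scalar store as a read-modify-write of the whole vector:
      // load it, insertelement the new lane, and store it back.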
      Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {
  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other operand is derived from
    // a pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
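      // (ReturnCaptures and StoreCaptures are both true, so returning or
      // storing the pointer counts as a capture.)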
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    // Do not promote vector/aggregate type instructions. It is hard to track
    // their users.
    if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
      return false;

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallVector<const Constant *, 16> Stack;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
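    // (Each constant is pushed at most once, guarded by VisitedConstants, so
    // this worklist search terminates.)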
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding.
  //
  // FIXME: We should really do something to fix the addresses to a more
  // optimal value instead.
  llvm::sort(AllocatedSizes, [](std::pair<uint64_t, Align> LHS,
                                std::pair<uint64_t, Align> RHS) {
    return LHS.second < RHS.second;
  });

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }

  unsigned MaxOccupancy =
      ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount =
      ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // The program may already be broken by using more local memory than is
  // available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
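  // (That is, we handle 'alloca [4 x i32]' but not 'alloca i32, i32 4'.)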
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory,
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  // Each work item gets its own copy: the LDS global is an array with one
  // element of the alloca's type per work item in the (maximum) work group.
  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(MaybeAlign(I.getAlignment()));

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  // Linearize the work item ID: TID = (TIdX * TCntY + TIdY) * TCntZ + TIdZ.
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
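  // The alloca's type was mutated above to match the LDS GEP's pointer type,
  // so replaceAllUsesWith type-checks despite the address space change.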
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  SmallVector<IntrinsicInst *> DeferredIntrs;

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        PointerType *NewTy = PointerType::getWithSamePointeeType(
            cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      PointerType *NewTy = PointerType::getWithSamePointeeType(
          cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // These have two pointer operands. If the second pointer also needs to
      // be replaced, we defer processing of these intrinsics until all other
      // values have been processed.
      DeferredIntrs.push_back(Intr);
      continue;
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(),
                           MaybeAlign(MemSet->getDestAlignment()),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(),
           PointerType::getWithSamePointeeType(
               cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  for (IntrinsicInst *Intr : DeferredIntrs) {
    Builder.SetInsertPoint(Intr);
    Intrinsic::ID ID = Intr->getIntrinsicID();
    assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);

    MemTransferInst *MI = cast<MemTransferInst>(Intr);
    auto *B =
        Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
                                      MI->getRawSource(), MI->getSourceAlign(),
                                      MI->getLength(), MI->isVolatile());

    for (unsigned I = 0; I != 2; ++I) {
      if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
        B->addDereferenceableParamAttr(I, Bytes);
      }
    }

    Intr->eraseFromParent();
  }

  return true;
}

static bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  Module *Mod = I.getParent()->getParent()->getParent();
  return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
}

static bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
  if (DisablePromoteAllocaToVector)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  unsigned MaxVGPRs;
  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-saved registers.
    // Do not promote an alloca that would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
  }
  return false;
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = promoteAllocasToVector(F, TM);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}