//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
    "disable-promote-alloca-to-vector",
    cl::desc("Disable promote alloca to vector"),
    cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
    "disable-promote-alloca-to-lds",
    cl::desc("Disable promote alloca to LDS"),
    cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
    "amdgpu-promote-alloca-to-vector-limit",
    cl::desc("Maximum byte size to consider promote alloca to vector"),
    cl::init(0));

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

class AMDGPUPromoteAllocaImpl {
private:
  const TargetMachine &TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca,
                               Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst,
                                       int OpIdx0, int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
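  /// Also computes LocalMemLimit and CurrentLocalMemUsage from the LDS
  /// globals already used by \p F and the occupancy we are willing to accept.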
  bool hasSufficientLocalMem(const Function &F);

  bool handleAlloca(AllocaInst &I, bool SufficientLDS);

public:
  AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
  bool run(Function &F);
};

class AMDGPUPromoteAllocaToVector : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS(AMDGPUPromoteAlloca, DEBUG_TYPE,
                "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
  }
  return false;
}

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const Triple &TT = TM.getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (IsAMDGCN) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
  } else {
    MaxVGPRs = 128;
  }

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  const Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
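  // The workgroup_size_{x,y,z} fields are 16-bit values at byte offsets 4, 6,
  // and 8 of the packet (see the struct layout below): one aligned i32 load
  // at element offset 1 yields x | (y << 16), and another at element offset 2
  // yields z in its low half.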
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn =
      Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
  DispatchPtr->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableAttr(AttributeList::ReturnIndex, 64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}
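
// Emit a call to the workitem id intrinsic for dimension N (0 = x, 1 = y,
// 2 = z) and tag the result with range metadata for the subtarget.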
Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(TM, *Builder.GetInsertBlock()->getParent());
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    break;
  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);

  return CI;
}

static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return FixedVectorType::get(ArrayTy->getElementType(),
                              ArrayTy->getNumElements());
}

static Value *stripBitcasts(Value *V) {
  while (Instruction *I = dyn_cast<Instruction>(V)) {
    if (I->getOpcode() != Instruction::BitCast)
      break;
    V = I->getOperand(0);
  }
  return V;
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(stripBitcasts(Ptr));
  if (!GEP)
    return nullptr;

  auto I = GEPIdx.find(GEP);
  return I == GEPIdx.end() ? nullptr : I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP) {
  // FIXME: We only support simple cases.
  if (GEP->getNumOperands() != 3)
    return nullptr;

  ConstantInt *I0 = dyn_cast<ConstantInt>(GEP->getOperand(1));
  if (!I0 || !I0->isZero())
    return nullptr;

  return GEP->getOperand(2);
}
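
// For example, "getelementptr inbounds [4 x i32], [4 x i32]* %alloca,
// i32 0, i32 %n" maps to vector lane %n; GEPs with more indices or a nonzero
// first index are rejected.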

// Checks whether Inst is an instruction we know how to rewrite when the
// alloca it uses is turned into a vector; any other user blocks the
// promotion.
//
// TODO: Check isTriviallyVectorizable for calls and handle other
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User,
                             const DataLayout &DL) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Currently only handle the case where the Pointer Operand is a GEP.
    // We also cannot vectorize volatile or atomic loads.
    LoadInst *LI = cast<LoadInst>(Inst);
    if (isa<AllocaInst>(User) &&
        LI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(LI->getType()))
      return true;

    Instruction *PtrInst = dyn_cast<Instruction>(LI->getPointerOperand());
    if (!PtrInst)
      return false;

    return (PtrInst->getOpcode() == Instruction::GetElementPtr ||
            PtrInst->getOpcode() == Instruction::BitCast) &&
           LI->isSimple();
  }
  case Instruction::BitCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value; plus, since the
    // IR should be in canonical form, the User should be a GEP.
    // We also cannot vectorize volatile or atomic stores.
    StoreInst *SI = cast<StoreInst>(Inst);
    if (isa<AllocaInst>(User) &&
        SI->getPointerOperandType() == User->getType() &&
        isa<VectorType>(SI->getValueOperand()->getType()))
      return true;

    Instruction *UserInst = dyn_cast<Instruction>(User);
    if (!UserInst)
      return false;

    return (SI->getPointerOperand() == User) &&
           (UserInst->getOpcode() == Instruction::GetElementPtr ||
            UserInst->getOpcode() == Instruction::BitCast) &&
           SI->isSimple();
  }
  default:
    return false;
  }
}

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
                                     unsigned MaxVGPRs) {

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca->getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = arrayTypeToVecType(ArrayTy);
  }

  // Use up to 1/4 of available register budget for vectorization.
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);

  if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with "
                      << MaxVGPRs << " registers available\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays; we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [2 x [2 x i32]] or equivalent.
  // Potentially these could also be promoted, but we don't currently handle
  // this case.
  if (!VectorTy || VectorTy->getNumElements() > 16 ||
      VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  std::vector<Value *> WorkList;
  SmallVector<User *, 8> Users(Alloca->users());
  SmallVector<User *, 8> UseUsers(Users.size(), Alloca);
  Type *VecEltTy = VectorTy->getElementType();
  while (!Users.empty()) {
    User *AllocaUser = Users.pop_back_val();
    User *UseUser = UseUsers.pop_back_val();
    Instruction *Inst = dyn_cast<Instruction>(AllocaUser);

    GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(AllocaUser);
    if (!GEP) {
      if (!canVectorizeInst(Inst, UseUser, DL))
        return false;

      if (Inst->getOpcode() == Instruction::BitCast) {
        Type *FromTy = Inst->getOperand(0)->getType()->getPointerElementType();
        Type *ToTy = Inst->getType()->getPointerElementType();
        if (FromTy->isAggregateType() || ToTy->isAggregateType() ||
            DL.getTypeSizeInBits(FromTy) != DL.getTypeSizeInBits(ToTy))
          continue;

        for (User *CastUser : Inst->users()) {
          if (isAssumeLikeIntrinsic(cast<Instruction>(CastUser)))
            continue;
          Users.push_back(CastUser);
          UseUsers.push_back(Inst);
        }

        continue;
      }

      WorkList.push_back(AllocaUser);
      continue;
    }

    Value *Index = GEPToVectorIndex(GEP);

    // If we can't compute a vector index from this GEP, then we can't
    // promote this alloca to vector.
    if (!Index) {
      LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                        << '\n');
      return false;
    }

    GEPVectorIdx[GEP] = Index;
    Users.append(GEP->user_begin(), GEP->user_end());
    UseUsers.append(GEP->getNumUses(), GEP);
  }
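
  // All remaining uses are rewritten below. For example, a store through
  //   %gep = getelementptr inbounds [4 x i32], [4 x i32]* %alloca, i32 0, i32 %n
  // becomes a load of the whole <4 x i32>, an insertelement at lane %n, and a
  // store of the updated vector back through a bitcast of the alloca.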
  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Value *V : WorkList) {
    Instruction *Inst = cast<Instruction>(V);
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      if (Inst->getType() == AllocaTy || Inst->getType()->isVectorTy())
        break;

      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      if (!Index)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      if (Inst->getType() != VecEltTy)
        ExtractElement =
            Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      if (SI->getValueOperand()->getType() == AllocaTy ||
          SI->getValueOperand()->getType()->isVectorTy())
        break;

      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      if (!Index)
        break;

      Type *VecPtrTy = VectorTy->getPointerTo(AMDGPUAS::PRIVATE_ADDRESS);
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *Elt = SI->getValueOperand();
      if (Elt->getType() != VecEltTy)
        Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}
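
// Walk the users of Val transitively, rejecting anything (potential captures,
// volatile accesses, pointers escaping through stores or ptrtoint,
// out-of-bounds GEPs) that would make rewriting the alloca's address space
// unsound; every accepted pointer-typed user is queued in WorkList so its
// type can be mutated later.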
bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote an icmp if we know that the other compared operand is
    // derived from a pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}
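
// The LDS budget is global to the function: sum up the LDS globals it already
// uses, pad for worst-case alignment, then cap promotions at the largest size
// that still sustains the occupancy chosen below.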
bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallVector<const Constant *, 16> Stack;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());
    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding.
  //
  // FIXME: We should really do something to fix the addresses to a more
  // optimal value instead.
  llvm::sort(AllocatedSizes, [](std::pair<uint64_t, Align> LHS,
                                std::pair<uint64_t, Align> RHS) {
    return LHS.second < RHS.second;
  });

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }

  unsigned MaxOccupancy =
      ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);
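
  // Worked example (hypothetical numbers): with 64 KiB of LDS, 20 KiB already
  // used by globals, and the clamp settling on 8 waves,
  // getMaxLocalMemSizeWithWaveCount picks the largest LDS footprint that
  // still sustains 8 waves; only the part of it above the 20 KiB already in
  // use remains available for promoted allocas.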

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount =
      ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // The program is possibly broken if it uses more local memory than is
  // available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore, not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory,
  // we could end up using more than the maximum due to alignment padding.
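
  // Each workitem gets a private copy, so the LDS cost is
  // WorkGroupSize * sizeof(AllocaTy); e.g. a 64-byte [16 x i32] alloca with a
  // maximum flat workgroup size of 256 costs 256 * 64 = 16384 bytes.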
  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage, UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(MaybeAlign(I.getAlignment()));

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);
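
  // Linearize the workitem id: TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ +
  // TIdZ, giving each workitem its own element of the per-workgroup array.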
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want
      // to touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlign(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlign(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlign(),
                            MemMove->getRawSource(), MemMove->getSourceAlign(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(
          MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
          MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
  return true;
}
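
// The helpers below implement the vector-only variant used by the
// AMDGPUPromoteAllocaToVector pass, which never falls back to LDS.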
static bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  Module *Mod = I.getParent()->getParent()->getParent();
  return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
}

static bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
  if (DisablePromoteAllocaToVector)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  unsigned MaxVGPRs;
  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
  } else {
    MaxVGPRs = 128;
  }

  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
  }
  return false;
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = promoteAllocasToVector(F, TM);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}