//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Eliminates allocas by either converting them into vectors or by migrating
// them to local address space.
//
// Two passes are exposed by this file:
//    - "promote-alloca-to-vector", which runs early in the pipeline and only
//      promotes to vector. Promotion to vector is almost always profitable
//      except when the alloca is too big and the promotion would result in
//      very high register pressure.
//    - "promote-alloca", which does both promotion to vector and LDS and runs
//      much later in the pipeline. This runs after SROA because promoting to
//      LDS is of course less profitable than getting rid of the alloca or
//      vectorizing it, thus we only want to do it when the only alternative is
//      lowering the alloca to stack.
//
// Note that both of them exist for the old and new PMs. The new PM passes are
// declared in AMDGPU.h and the legacy PM ones are declared here.
//
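// A minimal usage sketch with the new PM (pass-name spelling assumed from the
// list above; check the AMDGPU pass registry for the authoritative names):
//
//   opt -passes=amdgpu-promote-alloca-to-vector in.ll -S -o out.ll
//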
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool>
    DisablePromoteAllocaToVector("disable-promote-alloca-to-vector",
                                 cl::desc("Disable promote alloca to vector"),
                                 cl::init(false));

static cl::opt<bool>
    DisablePromoteAllocaToLDS("disable-promote-alloca-to-lds",
                              cl::desc("Disable promote alloca to LDS"),
                              cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
    "amdgpu-promote-alloca-to-vector-limit",
    cl::desc("Maximum byte size to consider promote alloca to vector"),
    cl::init(0));

// Shared implementation which can do both promotion to vector and to LDS.
class AMDGPUPromoteAllocaImpl {
private:
  const TargetMachine &TM;
  Module *Mod = nullptr;
  const DataLayout *DL = nullptr;

  // FIXME: This should be per-kernel.
  uint32_t LocalMemLimit = 0;
  uint32_t CurrentLocalMemUsage = 0;
  unsigned MaxVGPRs;

  bool IsAMDGCN = false;
  bool IsAMDHSA = false;

  std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
  Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);

  /// BaseAlloca is the alloca root the search started from.
  /// Val may be that alloca or a recursive user of it.
  bool collectUsesWithPtrTypes(Value *BaseAlloca, Value *Val,
                               std::vector<Value *> &WorkList) const;

  /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
  /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
  /// Returns true if both operands are derived from the same alloca. Val should
  /// be the same value as one of the input operands of UseInst.
  bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
                                       Instruction *UseInst, int OpIdx0,
                                       int OpIdx1) const;

  /// Check whether we have enough local memory for promotion.
  bool hasSufficientLocalMem(const Function &F);

  bool tryPromoteAllocaToVector(AllocaInst &I);
  bool tryPromoteAllocaToLDS(AllocaInst &I, bool SufficientLDS);

public:
  AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {
    const Triple &TT = TM.getTargetTriple();
    IsAMDGCN = TT.getArch() == Triple::amdgcn;
    IsAMDHSA = TT.getOS() == Triple::AMDHSA;
  }

  bool run(Function &F, bool PromoteToLDS);
};

// FIXME: This can create globals so should be a module pass.
class AMDGPUPromoteAlloca : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAlloca() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
      return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
          .run(F, /*PromoteToLDS*/ true);
    return false;
  }

  StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

class AMDGPUPromoteAllocaToVector : public FunctionPass {
public:
  static char ID;

  AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>())
      return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>())
          .run(F, /*PromoteToLDS*/ false);
    return false;
  }

  StringRef getPassName() const override {
    return "AMDGPU Promote Alloca to vector";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

unsigned getMaxVGPRs(const TargetMachine &TM, const Function &F) {
  if (!TM.getTargetTriple().isAMDGCN())
    return 128;

  const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
  unsigned MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);

  // A non-entry function has only 32 caller preserved registers.
  // Do not promote alloca which will force spilling unless we know the function
  // will be inlined.
  if (!F.hasFnAttribute(Attribute::AlwaysInline) &&
      !AMDGPU::isEntryFunctionCC(F.getCallingConv()))
    MaxVGPRs = std::min(MaxVGPRs, 32u);
  return MaxVGPRs;
}

} // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
                      "AMDGPU promote alloca to vector or LDS", false, false)
// Move LDS uses from functions to kernels before promote alloca so that the
// amount of available LDS can be estimated accurately.
INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDSLegacy)
INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
                    "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ true);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F, /*PromoteToLDS*/ false);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F, bool PromoteToLDS) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  MaxVGPRs = getMaxVGPRs(TM, F);

  bool SufficientLDS = PromoteToLDS ? hasSufficientLocalMem(F) : false;

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : F.getEntryBlock()) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
      // Array allocations are probably not worth handling, since an allocation
      // of the array type is the canonical form.
      if (!AI->isStaticAlloca() || AI->isArrayAllocation())
        continue;
      Allocas.push_back(AI);
    }
  }

  bool Changed = false;
  for (AllocaInst *AI : Allocas) {
    if (tryPromoteAllocaToVector(*AI))
      Changed = true;
    else if (PromoteToLDS && tryPromoteAllocaToLDS(*AI, SufficientLDS))
      Changed = true;
  }

  // NOTE: tryPromoteAllocaToVector removes the alloca, so Allocas contains
  // dangling pointers. If we want to reuse it past this point, the loop above
  // would need to be updated to remove successfully promoted allocas.

  return Changed;
}

struct MemTransferInfo {
  ConstantInt *SrcIndex = nullptr;
  ConstantInt *DestIndex = nullptr;
};

// Checks if the instruction I is a memset user of the alloca AI that we can
// deal with. Currently, only non-volatile memsets that affect the whole alloca
// are handled.
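// For example, for `%a = alloca [4 x i32]` (16-byte store size), a supported
// user is a non-volatile `memset(%a, <byte>, 16)` whose destination operand is
// the alloca itself (illustrative; exact intrinsic signature elided).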
static bool isSupportedMemset(MemSetInst *I, AllocaInst *AI,
                              const DataLayout &DL) {
  using namespace PatternMatch;
  // For now we only care about non-volatile memsets that affect the whole type
  // (start at index 0 and fill the whole alloca).
  //
  // TODO: Now that we moved to PromoteAlloca we could handle any memsets
  // (except maybe volatile ones?) - we just need to use shufflevector if it
  // only affects a subset of the vector.
  const unsigned Size = DL.getTypeStoreSize(AI->getAllocatedType());
  return I->getOperand(0) == AI &&
         match(I->getOperand(2), m_SpecificInt(Size)) && !I->isVolatile();
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
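  // A pointer that is not a GEP (after stripping casts) can only be the
  // alloca itself, i.e. an access to element 0.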
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
  if (!GEP)
    return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  auto I = GEPIdx.find(GEP);
  assert(I != GEPIdx.end() && "Must have entry for GEP!");
  return I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
                               Type *VecElemTy, const DataLayout &DL) {
  // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
  // helper.
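  // For example, with an i32 element type (4 bytes): a GEP with constant byte
  // offset 8 maps to vector index 2, and a GEP with a single variable offset
  // scaled by 4 (e.g. `getelementptr i32, ptr %a, i64 %i`) maps to index %i.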
  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  MapVector<Value *, APInt> VarOffsets;
  APInt ConstOffset(BW, 0);
  if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
      !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
    return nullptr;

  unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
  if (VarOffsets.size() > 1)
    return nullptr;

  if (VarOffsets.size() == 1) {
    // Only handle cases where we don't need to insert extra arithmetic
    // instructions.
    const auto &VarOffset = VarOffsets.front();
    if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
      return nullptr;
    return VarOffset.first;
  }

  APInt Quot;
  uint64_t Rem;
  APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
  if (Rem != 0)
    return nullptr;

  return ConstantInt::get(GEP->getContext(), Quot);
}

/// Promotes a single user of the alloca to a vector form.
///
/// \param Inst           Instruction to be promoted.
/// \param DL             Module Data Layout.
/// \param VectorTy       Vectorized Type.
/// \param VecStoreSize   Size of \p VectorTy in bytes.
/// \param ElementSize    Size of \p VectorTy element type in bytes.
/// \param TransferInfo   MemTransferInst info map.
/// \param GEPVectorIdx   GEP -> VectorIdx cache.
/// \param CurVal         Current value of the vector (e.g. last stored value)
/// \param[out]  DeferredLoads \p Inst is added to this vector if it can't
///              be promoted now. This happens when promoting requires \p
///              CurVal, but \p CurVal is nullptr.
/// \return the stored value if \p Inst would have written to the alloca, or
///         nullptr otherwise.
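///
/// Illustrative example (assuming the alloca was promoted to <4 x float>): a
/// `store float %v` through a GEP with vector index 2 is rewritten to an
/// insertelement of %v at index 2 into the tracked vector value, and that new
/// vector is returned as the result of the store.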
static Value *promoteAllocaUserToVector(
    Instruction *Inst, const DataLayout &DL, FixedVectorType *VectorTy,
    unsigned VecStoreSize, unsigned ElementSize,
    DenseMap<MemTransferInst *, MemTransferInfo> &TransferInfo,
    std::map<GetElementPtrInst *, Value *> &GEPVectorIdx, Value *CurVal,
    SmallVectorImpl<LoadInst *> &DeferredLoads) {
  // Note: we use InstSimplifyFolder because it can leverage the DataLayout
  // to do more folding, especially in the case of vector splats.
  IRBuilder<InstSimplifyFolder> Builder(Inst->getContext(),
                                        InstSimplifyFolder(DL));
  Builder.SetInsertPoint(Inst);

  const auto GetOrLoadCurrentVectorValue = [&]() -> Value * {
    if (CurVal)
      return CurVal;

    // If the current value is not known, insert a dummy load and lower it on
    // the second pass.
    LoadInst *Dummy =
        Builder.CreateLoad(VectorTy, PoisonValue::get(Builder.getPtrTy()),
                           "promotealloca.dummyload");
    DeferredLoads.push_back(Dummy);
    return Dummy;
  };

  const auto CreateTempPtrIntCast = [&Builder, DL](Value *Val,
                                                   Type *PtrTy) -> Value * {
    assert(DL.getTypeStoreSize(Val->getType()) == DL.getTypeStoreSize(PtrTy));
    const unsigned Size = DL.getTypeStoreSizeInBits(PtrTy);
    if (!PtrTy->isVectorTy())
      return Builder.CreateBitOrPointerCast(Val, Builder.getIntNTy(Size));
    const unsigned NumPtrElts = cast<FixedVectorType>(PtrTy)->getNumElements();
    // If we want to cast, e.g. a <2 x ptr> into a <4 x i32>, we need to first
    // cast the ptr vector to <2 x i64>.
    assert((Size % NumPtrElts == 0) && "Vector size not divisible");
    Type *EltTy = Builder.getIntNTy(Size / NumPtrElts);
    return Builder.CreateBitOrPointerCast(
        Val, FixedVectorType::get(EltTy, NumPtrElts));
  };

  Type *VecEltTy = VectorTy->getElementType();

  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // Loads can only be lowered if the value is known.
    if (!CurVal) {
      DeferredLoads.push_back(cast<LoadInst>(Inst));
      return nullptr;
    }

    Value *Index = calculateVectorIndex(
        cast<LoadInst>(Inst)->getPointerOperand(), GEPVectorIdx);

    // We're loading the full vector.
    Type *AccessTy = Inst->getType();
    TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
    if (AccessSize == VecStoreSize && cast<Constant>(Index)->isZeroValue()) {
      if (AccessTy->isPtrOrPtrVectorTy())
        CurVal = CreateTempPtrIntCast(CurVal, AccessTy);
      else if (CurVal->getType()->isPtrOrPtrVectorTy())
        CurVal = CreateTempPtrIntCast(CurVal, CurVal->getType());
      Value *NewVal = Builder.CreateBitOrPointerCast(CurVal, AccessTy);
      Inst->replaceAllUsesWith(NewVal);
      return nullptr;
    }

    // Loading a subvector.
    if (isa<FixedVectorType>(AccessTy)) {
      assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
      const unsigned NumLoadedElts = AccessSize / DL.getTypeStoreSize(VecEltTy);
      auto *SubVecTy = FixedVectorType::get(VecEltTy, NumLoadedElts);
      assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));

      Value *SubVec = PoisonValue::get(SubVecTy);
      for (unsigned K = 0; K < NumLoadedElts; ++K) {
        Value *CurIdx =
            Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
        SubVec = Builder.CreateInsertElement(
            SubVec, Builder.CreateExtractElement(CurVal, CurIdx), K);
      }

      if (AccessTy->isPtrOrPtrVectorTy())
        SubVec = CreateTempPtrIntCast(SubVec, AccessTy);
      else if (SubVecTy->isPtrOrPtrVectorTy())
        SubVec = CreateTempPtrIntCast(SubVec, SubVecTy);

      SubVec = Builder.CreateBitOrPointerCast(SubVec, AccessTy);
      Inst->replaceAllUsesWith(SubVec);
      return nullptr;
    }

    // We're loading one element.
    Value *ExtractElement = Builder.CreateExtractElement(CurVal, Index);
    if (AccessTy != VecEltTy)
      ExtractElement = Builder.CreateBitOrPointerCast(ExtractElement, AccessTy);

    Inst->replaceAllUsesWith(ExtractElement);
    return nullptr;
  }
  case Instruction::Store: {
    // For stores, it's a bit trickier and it depends on whether we're storing
    // the full vector or not. If we're storing the full vector, we don't need
    // to know the current value. If this is a store of a single element, we
    // need to know the value.
    StoreInst *SI = cast<StoreInst>(Inst);
    Value *Index = calculateVectorIndex(SI->getPointerOperand(), GEPVectorIdx);
    Value *Val = SI->getValueOperand();

    // We're storing the full vector, we can handle this without knowing CurVal.
    Type *AccessTy = Val->getType();
    TypeSize AccessSize = DL.getTypeStoreSize(AccessTy);
    if (AccessSize == VecStoreSize && cast<Constant>(Index)->isZeroValue()) {
      if (AccessTy->isPtrOrPtrVectorTy())
        Val = CreateTempPtrIntCast(Val, AccessTy);
      else if (VectorTy->isPtrOrPtrVectorTy())
        Val = CreateTempPtrIntCast(Val, VectorTy);
      return Builder.CreateBitOrPointerCast(Val, VectorTy);
    }

    // Storing a subvector.
    if (isa<FixedVectorType>(AccessTy)) {
      assert(AccessSize.isKnownMultipleOf(DL.getTypeStoreSize(VecEltTy)));
      const unsigned NumWrittenElts =
          AccessSize / DL.getTypeStoreSize(VecEltTy);
      const unsigned NumVecElts = VectorTy->getNumElements();
      auto *SubVecTy = FixedVectorType::get(VecEltTy, NumWrittenElts);
      assert(DL.getTypeStoreSize(SubVecTy) == DL.getTypeStoreSize(AccessTy));

      if (SubVecTy->isPtrOrPtrVectorTy())
        Val = CreateTempPtrIntCast(Val, SubVecTy);
      else if (AccessTy->isPtrOrPtrVectorTy())
        Val = CreateTempPtrIntCast(Val, AccessTy);

      Val = Builder.CreateBitOrPointerCast(Val, SubVecTy);

      Value *CurVec = GetOrLoadCurrentVectorValue();
      for (unsigned K = 0, NumElts = std::min(NumWrittenElts, NumVecElts);
           K < NumElts; ++K) {
        Value *CurIdx =
            Builder.CreateAdd(Index, ConstantInt::get(Index->getType(), K));
        CurVec = Builder.CreateInsertElement(
            CurVec, Builder.CreateExtractElement(Val, K), CurIdx);
      }
      return CurVec;
    }

    if (Val->getType() != VecEltTy)
      Val = Builder.CreateBitOrPointerCast(Val, VecEltTy);
    return Builder.CreateInsertElement(GetOrLoadCurrentVectorValue(), Val,
                                       Index);
  }
  case Instruction::Call: {
    if (auto *MTI = dyn_cast<MemTransferInst>(Inst)) {
      // For memcpy, we need to know CurVal.
      ConstantInt *Length = cast<ConstantInt>(MTI->getLength());
      unsigned NumCopied = Length->getZExtValue() / ElementSize;
      MemTransferInfo *TI = &TransferInfo[MTI];
      unsigned SrcBegin = TI->SrcIndex->getZExtValue();
      unsigned DestBegin = TI->DestIndex->getZExtValue();

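      // Build a shuffle mask that copies [SrcBegin, SrcBegin + NumCopied) into
      // [DestBegin, DestBegin + NumCopied) and leaves every other lane alone.
      // E.g. for <4 x i32> with SrcBegin = 0, DestBegin = 2, NumCopied = 2 the
      // mask is <0, 1, 0, 1>.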
509       for (unsigned Idx = 0; Idx < VectorTy->getNumElements(); ++Idx) {
510         if (Idx >= DestBegin && Idx < DestBegin + NumCopied) {
511           Mask.push_back(SrcBegin++);
512         } else {
513           Mask.push_back(Idx);
514         }
515       }
516 
517       return Builder.CreateShuffleVector(GetOrLoadCurrentVectorValue(), Mask);
518     }
519 
520     if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
521       // For memset, we don't need to know the previous value because we
522       // currently only allow memsets that cover the whole alloca.
523       Value *Elt = MSI->getOperand(1);
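      // The memset value is a single byte. For wider elements, splat the byte
      // across the element's store size and bitcast, e.g. for i32 elements a
      // memset byte 0xAB becomes a <4 x i8> splat bitcast to i32 0xABABABAB.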
      if (DL.getTypeStoreSize(VecEltTy) > 1) {
        Value *EltBytes =
            Builder.CreateVectorSplat(DL.getTypeStoreSize(VecEltTy), Elt);
        Elt = Builder.CreateBitCast(EltBytes, VecEltTy);
      }

      return Builder.CreateVectorSplat(VectorTy->getElementCount(), Elt);
    }

    if (auto *Intr = dyn_cast<IntrinsicInst>(Inst)) {
      if (Intr->getIntrinsicID() == Intrinsic::objectsize) {
        Intr->replaceAllUsesWith(
            Builder.getIntN(Intr->getType()->getIntegerBitWidth(),
                            DL.getTypeAllocSize(VectorTy)));
        return nullptr;
      }
    }

    llvm_unreachable("Unsupported call when promoting alloca to vector");
  }

  default:
    llvm_unreachable("Inconsistency in instructions promotable to vector");
  }

  llvm_unreachable("Did not return after promoting instruction!");
}

static bool isSupportedAccessType(FixedVectorType *VecTy, Type *AccessTy,
                                  const DataLayout &DL) {
  // Access as a vector type can work if the size of the access vector is a
  // multiple of the size of the alloca's vector element type.
  //
  // Examples:
  //    - VecTy = <8 x float>, AccessTy = <4 x float> -> OK
  //    - VecTy = <4 x double>, AccessTy = <2 x float> -> OK
  //    - VecTy = <4 x double>, AccessTy = <3 x float> -> NOT OK
  //        - 3*32 is not a multiple of 64
  //
  // We could handle more complicated cases, but it'd make things a lot more
  // complicated.
  if (isa<FixedVectorType>(AccessTy)) {
    TypeSize AccTS = DL.getTypeStoreSize(AccessTy);
    TypeSize VecTS = DL.getTypeStoreSize(VecTy->getElementType());
    return AccTS.isKnownMultipleOf(VecTS);
  }

  return CastInst::isBitOrNoopPointerCastable(VecTy->getElementType(), AccessTy,
                                              DL);
}

/// Iterates over an instruction worklist that may contain multiple instructions
/// from the same basic block, but in a different order.
template <typename InstContainer>
static void forEachWorkListItem(const InstContainer &WorkList,
                                std::function<void(Instruction *)> Fn) {
  // Bucket up uses of the alloca by the block they occur in.
  // This is important because we have to handle multiple defs/uses in a block
  // ourselves: SSAUpdater is purely for cross-block references.
  DenseMap<BasicBlock *, SmallDenseSet<Instruction *>> UsesByBlock;
  for (Instruction *User : WorkList)
    UsesByBlock[User->getParent()].insert(User);

  for (Instruction *User : WorkList) {
    BasicBlock *BB = User->getParent();
    auto &BlockUses = UsesByBlock[BB];

    // Already processed, skip.
    if (BlockUses.empty())
      continue;

    // Only user in the block, directly process it.
    if (BlockUses.size() == 1) {
      Fn(User);
      continue;
    }

    // Multiple users in the block, do a linear scan to see users in order.
    for (Instruction &Inst : *BB) {
      if (!BlockUses.contains(&Inst))
        continue;

      Fn(&Inst);
    }

    // Clear the block so we know it's been processed.
    BlockUses.clear();
  }
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToVector(AllocaInst &Alloca) {
  LLVM_DEBUG(dbgs() << "Trying to promote to vector: " << Alloca << '\n');

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promote alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca.getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = FixedVectorType::get(ArrayTy->getElementType(),
                                      ArrayTy->getNumElements());
  }

  // Use up to 1/4 of available register budget for vectorization.
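  // The budget below is in bits: a VGPR holds 32 bits and the user-provided
  // limit is in bytes (hence the * 8); multiplying the alloca size by 4 in the
  // comparison below implements the 1/4 cap.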
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);

  if (DL->getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with " << MaxVGPRs
                      << " registers available\n");
    return false;
  }

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted, but we don't
  // currently handle this case.
  if (!VectorTy) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  if (VectorTy->getNumElements() > 16 || VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  " << *VectorTy
                      << " has an unsupported number of elements\n");
    return false;
  }

  std::map<GetElementPtrInst *, Value *> GEPVectorIdx;
  SmallVector<Instruction *> WorkList;
  SmallVector<Instruction *> UsersToRemove;
  SmallVector<Instruction *> DeferredInsts;
  SmallVector<Use *, 8> Uses;
  DenseMap<MemTransferInst *, MemTransferInfo> TransferInfo;

  const auto RejectUser = [&](Instruction *Inst, Twine Msg) {
    LLVM_DEBUG(dbgs() << "  Cannot promote alloca to vector: " << Msg << "\n"
                      << "    " << *Inst << "\n");
    return false;
  };

  for (Use &U : Alloca.uses())
    Uses.push_back(&U);

  LLVM_DEBUG(dbgs() << "  Attempting promotion to: " << *VectorTy << "\n");

  Type *VecEltTy = VectorTy->getElementType();
  unsigned ElementSize = DL->getTypeSizeInBits(VecEltTy) / 8;
  while (!Uses.empty()) {
    Use *U = Uses.pop_back_val();
    Instruction *Inst = cast<Instruction>(U->getUser());

    if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
      // This is a store of the pointer, not to the pointer.
      if (isa<StoreInst>(Inst) &&
          U->getOperandNo() != StoreInst::getPointerOperandIndex())
        return RejectUser(Inst, "pointer is being stored");

      Type *AccessTy = getLoadStoreType(Inst);
      if (AccessTy->isAggregateType())
        return RejectUser(Inst, "unsupported load/store as aggregate");
      assert(!AccessTy->isAggregateType() || AccessTy->isArrayTy());

      // Check that this is a simple access of a vector element.
      bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
                                          : cast<StoreInst>(Inst)->isSimple();
      if (!IsSimple)
        return RejectUser(Inst, "not a simple load or store");

      Ptr = Ptr->stripPointerCasts();

      // Alloca already accessed as vector.
      if (Ptr == &Alloca && DL->getTypeStoreSize(Alloca.getAllocatedType()) ==
                                DL->getTypeStoreSize(AccessTy)) {
        WorkList.push_back(Inst);
        continue;
      }

      if (!isSupportedAccessType(VectorTy, AccessTy, *DL))
        return RejectUser(Inst, "not a supported access type");

      WorkList.push_back(Inst);
      continue;
    }

    if (isa<BitCastInst>(Inst)) {
      // Look through bitcasts.
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      UsersToRemove.push_back(Inst);
      continue;
    }

    if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
      // If we can't compute a vector index from this GEP, then we can't
      // promote this alloca to vector.
      Value *Index = GEPToVectorIndex(GEP, &Alloca, VecEltTy, *DL);
      if (!Index)
        return RejectUser(Inst, "cannot compute vector index for GEP");

      GEPVectorIdx[GEP] = Index;
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      UsersToRemove.push_back(Inst);
      continue;
    }

    if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst);
        MSI && isSupportedMemset(MSI, &Alloca, *DL)) {
      WorkList.push_back(Inst);
      continue;
    }

    if (MemTransferInst *TransferInst = dyn_cast<MemTransferInst>(Inst)) {
      if (TransferInst->isVolatile())
        return RejectUser(Inst, "mem transfer inst is volatile");

      ConstantInt *Len = dyn_cast<ConstantInt>(TransferInst->getLength());
      if (!Len || (Len->getZExtValue() % ElementSize))
        return RejectUser(Inst, "mem transfer inst length is non-constant or "
                                "not a multiple of the vector element size");

      if (!TransferInfo.count(TransferInst)) {
        DeferredInsts.push_back(Inst);
        WorkList.push_back(Inst);
        TransferInfo[TransferInst] = MemTransferInfo();
      }

      auto getPointerIndexOfAlloca = [&](Value *Ptr) -> ConstantInt * {
        GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr);
        if (Ptr != &Alloca && !GEPVectorIdx.count(GEP))
          return nullptr;

        return dyn_cast<ConstantInt>(calculateVectorIndex(Ptr, GEPVectorIdx));
      };

      unsigned OpNum = U->getOperandNo();
      MemTransferInfo *TI = &TransferInfo[TransferInst];
      if (OpNum == 0) {
        Value *Dest = TransferInst->getDest();
        ConstantInt *Index = getPointerIndexOfAlloca(Dest);
        if (!Index)
          return RejectUser(Inst, "could not calculate constant dest index");
        TI->DestIndex = Index;
      } else {
        assert(OpNum == 1);
        Value *Src = TransferInst->getSource();
        ConstantInt *Index = getPointerIndexOfAlloca(Src);
        if (!Index)
          return RejectUser(Inst, "could not calculate constant src index");
        TI->SrcIndex = Index;
      }
      continue;
    }

    if (auto *Intr = dyn_cast<IntrinsicInst>(Inst)) {
      if (Intr->getIntrinsicID() == Intrinsic::objectsize) {
        WorkList.push_back(Inst);
        continue;
      }
    }

    // Ignore assume-like intrinsics and comparisons used in assumes.
    if (isAssumeLikeIntrinsic(Inst)) {
      if (!Inst->use_empty())
        return RejectUser(Inst, "assume-like intrinsic cannot have any users");
      UsersToRemove.push_back(Inst);
      continue;
    }

    if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
          return isAssumeLikeIntrinsic(cast<Instruction>(U));
        })) {
      UsersToRemove.push_back(Inst);
      continue;
    }

    return RejectUser(Inst, "unhandled alloca user");
  }

  while (!DeferredInsts.empty()) {
    Instruction *Inst = DeferredInsts.pop_back_val();
    MemTransferInst *TransferInst = cast<MemTransferInst>(Inst);
    // TODO: Support the case if the pointers are from different alloca or
    // from different address spaces.
    MemTransferInfo &Info = TransferInfo[TransferInst];
    if (!Info.SrcIndex || !Info.DestIndex)
      return RejectUser(
          Inst, "mem transfer inst is missing constant src and/or dst index");
  }

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');
  const unsigned VecStoreSize = DL->getTypeStoreSize(VectorTy);

  // Alloca is uninitialized memory. Imitate that by making the first value
  // undef.
  SSAUpdater Updater;
  Updater.Initialize(VectorTy, "promotealloca");
  Updater.AddAvailableValue(Alloca.getParent(), UndefValue::get(VectorTy));

  // First handle the initial worklist.
  SmallVector<LoadInst *, 4> DeferredLoads;
  forEachWorkListItem(WorkList, [&](Instruction *I) {
    BasicBlock *BB = I->getParent();
    // On the first pass, we only take values that are trivially known, i.e.
    // where AddAvailableValue was already called in this block.
    Value *Result = promoteAllocaUserToVector(
        I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
        Updater.FindValueForBlock(BB), DeferredLoads);
    if (Result)
      Updater.AddAvailableValue(BB, Result);
  });

  // Then handle deferred loads.
  forEachWorkListItem(DeferredLoads, [&](Instruction *I) {
    SmallVector<LoadInst *, 0> NewDLs;
    BasicBlock *BB = I->getParent();
    // On the second pass, we use GetValueInMiddleOfBlock to guarantee we always
    // get a value, inserting PHIs as needed.
    Value *Result = promoteAllocaUserToVector(
        I, *DL, VectorTy, VecStoreSize, ElementSize, TransferInfo, GEPVectorIdx,
        Updater.GetValueInMiddleOfBlock(I->getParent()), NewDLs);
    if (Result)
      Updater.AddAvailableValue(BB, Result);
    assert(NewDLs.empty() && "No more deferred loads should be queued!");
  });

  // Delete all instructions. On the first pass, new dummy loads may have been
  // added so we need to collect them too.
  DenseSet<Instruction *> InstsToDelete(WorkList.begin(), WorkList.end());
  InstsToDelete.insert(DeferredLoads.begin(), DeferredLoads.end());
  for (Instruction *I : InstsToDelete) {
    assert(I->use_empty());
    I->eraseFromParent();
  }
  // Delete all the users that are known to be removable.
  for (Instruction *I : reverse(UsersToRemove)) {
    I->dropDroppableUses();
    assert(I->use_empty());
    I->eraseFromParent();
  }

  // Alloca should now be dead too.
  assert(Alloca.use_empty());
  Alloca.eraseFromParent();
  return true;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn =
        Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t
  //
  Function *DispatchPtrFn =
      Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addRetAttr(Attribute::NoAlias);
  DispatchPtr->addRetAttr(Attribute::NonNull);
  F.removeFnAttr("amdgpu-no-dispatch-ptr");

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableRetAttr(64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
      DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));
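  // With the struct layout above, LoadXY covers bytes 4-7 (workgroup_size_x in
  // the low half, workgroup_size_y in the high half) and LoadZU covers bytes
  // 8-11 (workgroup_size_z in the low half, reserved0 in the high half).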

  MDNode *MD = MDNode::get(Mod->getContext(), std::nullopt);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::pair(Y, LoadZU);
}

Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  Function *F = Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
  StringRef AttrName;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    AttrName = "amdgpu-no-workitem-id-x";
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    AttrName = "amdgpu-no-workitem-id-y";
    break;

  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    AttrName = "amdgpu-no-workitem-id-z";
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);
  F->removeFnAttr(AttrName);

  return CI;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the stored value is not the pointer operand.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }

    // Only promote a select if we know that the other select operand
    // is from another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    // Do not promote vector/aggregate type instructions. It is hard to track
    // their users.
    if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
      return false;

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getAddressableLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallVector<const Constant *, 16> Stack;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());

    // HIP uses an extern unsized array in local address space for dynamically
    // allocated shared memory. In that case, we have to disable the promotion.
    if (GV->hasExternalLinkage() && AllocSize == 0) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
                           "local memory. Promoting to local memory "
                           "disabled.\n");
      return false;
    }

    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding
  //
  // FIXME: We should really do something to fix the addresses to a more optimal
  // value instead
  llvm::sort(AllocatedSizes, llvm::less_second());

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }

  unsigned MaxOccupancy =
      ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage, F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount =
      ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::tryPromoteAllocaToLDS(AllocaInst &I,
                                                    bool SufficientLDS) {
  LLVM_DEBUG(dbgs() << "Trying to promote to LDS: " << I << '\n');

  if (DisablePromoteAllocaToLDS) {
    LLVM_DEBUG(dbgs() << "  Promote alloca to LDS is disabled\n");
    return false;
  }

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory,
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize =
      WorkGroupSize * DL.getTypeAllocSize(I.getAllocatedType());
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value *> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage, PoisonValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(), nullptr,
      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlign());
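
  // For example (illustrative): promoting `%x = alloca float` in kernel `foo`
  // with a maximum flat workgroup size of 256 produces
  //   @foo.x = internal unnamed_addr addrspace(3) global [256 x float] poison
  // and each workitem indexes its own slot with its linearized workitem ID.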

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

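  // Linearize the workitem index: TID = TIdX * (TCntY * TCntZ) + TIdY * TCntZ
  // + TIdZ, so every workitem gets a distinct slot of the LDS array.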
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  LLVMContext &Context = Mod->getContext();
  Value *Indices[] = {Constant::getNullValue(Type::getInt32Ty(Context)), TID};

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  SmallVector<IntrinsicInst *> DeferredIntrs;

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want to
      // touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      PointerType *NewTy = PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // These have two pointer operands. If the second pointer also needs to
      // be replaced, we defer processing of these intrinsics until all other
      // values have been processed.
      DeferredIntrs.push_back(Intr);
      continue;
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlign(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(),
           PointerType::get(Context, AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  for (IntrinsicInst *Intr : DeferredIntrs) {
    Builder.SetInsertPoint(Intr);
    Intrinsic::ID ID = Intr->getIntrinsicID();
    assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);

    MemTransferInst *MI = cast<MemTransferInst>(Intr);
    auto *B = Builder.CreateMemTransferInst(
        ID, MI->getRawDest(), MI->getDestAlign(), MI->getRawSource(),
        MI->getSourceAlign(), MI->getLength(), MI->isVolatile());

    for (unsigned I = 0; I != 2; ++I) {
      if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
        B->addDereferenceableParamAttr(I, Bytes);
      }
    }

    Intr->eraseFromParent();
  }

  return true;
}