//===-- AMDGPUPromoteAlloca.cpp - Promote Allocas -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates allocas by either converting them into vectors or
// by migrating them to local address space.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "amdgpu-promote-alloca"

using namespace llvm;

namespace {

static cl::opt<bool> DisablePromoteAllocaToVector(
  "disable-promote-alloca-to-vector",
  cl::desc("Disable promote alloca to vector"),
  cl::init(false));

static cl::opt<bool> DisablePromoteAllocaToLDS(
  "disable-promote-alloca-to-lds",
  cl::desc("Disable promote alloca to LDS"),
  cl::init(false));

static cl::opt<unsigned> PromoteAllocaToVectorLimit(
  "amdgpu-promote-alloca-to-vector-limit",
  cl::desc("Maximum byte size to consider promote alloca to vector"),
  cl::init(0));
47 
48 // FIXME: This can create globals so should be a module pass.
49 class AMDGPUPromoteAlloca : public FunctionPass {
50 public:
51   static char ID;
52 
53   AMDGPUPromoteAlloca() : FunctionPass(ID) {}
54 
55   bool runOnFunction(Function &F) override;
56 
57   StringRef getPassName() const override { return "AMDGPU Promote Alloca"; }
58 
59   bool handleAlloca(AllocaInst &I, bool SufficientLDS);
60 
61   void getAnalysisUsage(AnalysisUsage &AU) const override {
62     AU.setPreservesCFG();
63     FunctionPass::getAnalysisUsage(AU);
64   }
65 };
66 
67 class AMDGPUPromoteAllocaImpl {
68 private:
69   const TargetMachine &TM;
70   Module *Mod = nullptr;
71   const DataLayout *DL = nullptr;
72 
73   // FIXME: This should be per-kernel.
74   uint32_t LocalMemLimit = 0;
75   uint32_t CurrentLocalMemUsage = 0;
76   unsigned MaxVGPRs;
77 
78   bool IsAMDGCN = false;
79   bool IsAMDHSA = false;
80 
81   std::pair<Value *, Value *> getLocalSizeYZ(IRBuilder<> &Builder);
82   Value *getWorkitemID(IRBuilder<> &Builder, unsigned N);
83 
84   /// BaseAlloca is the alloca root the search started from.
85   /// Val may be that alloca or a recursive user of it.
86   bool collectUsesWithPtrTypes(Value *BaseAlloca,
87                                Value *Val,
88                                std::vector<Value*> &WorkList) const;
89 
90   /// Val is a derived pointer from Alloca. OpIdx0/OpIdx1 are the operand
91   /// indices to an instruction with 2 pointer inputs (e.g. select, icmp).
92   /// Returns true if both operands are derived from the same alloca. Val should
93   /// be the same value as one of the input operands of UseInst.
94   bool binaryOpIsDerivedFromSameAlloca(Value *Alloca, Value *Val,
95                                        Instruction *UseInst,
96                                        int OpIdx0, int OpIdx1) const;
97 
98   /// Check whether we have enough local memory for promotion.
99   bool hasSufficientLocalMem(const Function &F);
100 
101   bool handleAlloca(AllocaInst &I, bool SufficientLDS);
102 
103 public:
104   AMDGPUPromoteAllocaImpl(TargetMachine &TM) : TM(TM) {}
105   bool run(Function &F);
106 };
107 
108 class AMDGPUPromoteAllocaToVector : public FunctionPass {
109 public:
110   static char ID;
111 
112   AMDGPUPromoteAllocaToVector() : FunctionPass(ID) {}
113 
114   bool runOnFunction(Function &F) override;
115 
116   StringRef getPassName() const override {
117     return "AMDGPU Promote Alloca to vector";
118   }
119 
120   void getAnalysisUsage(AnalysisUsage &AU) const override {
121     AU.setPreservesCFG();
122     FunctionPass::getAnalysisUsage(AU);
123   }
124 };
125 
126 } // end anonymous namespace

char AMDGPUPromoteAlloca::ID = 0;
char AMDGPUPromoteAllocaToVector::ID = 0;

INITIALIZE_PASS_BEGIN(AMDGPUPromoteAlloca, DEBUG_TYPE,
                      "AMDGPU promote alloca to vector or LDS", false, false)
// Move LDS uses from functions to kernels before promote alloca for an
// accurate estimate of the LDS available.
INITIALIZE_PASS_DEPENDENCY(AMDGPULowerModuleLDS)
INITIALIZE_PASS_END(AMDGPUPromoteAlloca, DEBUG_TYPE,
                    "AMDGPU promote alloca to vector or LDS", false, false)

INITIALIZE_PASS(AMDGPUPromoteAllocaToVector, DEBUG_TYPE "-to-vector",
                "AMDGPU promote alloca to vector", false, false)

char &llvm::AMDGPUPromoteAllocaID = AMDGPUPromoteAlloca::ID;
char &llvm::AMDGPUPromoteAllocaToVectorID = AMDGPUPromoteAllocaToVector::ID;

bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return AMDGPUPromoteAllocaImpl(TPC->getTM<TargetMachine>()).run(F);
  }
  return false;
}

PreservedAnalyses AMDGPUPromoteAllocaPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  bool Changed = AMDGPUPromoteAllocaImpl(TM).run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

bool AMDGPUPromoteAllocaImpl::run(Function &F) {
  Mod = F.getParent();
  DL = &Mod->getDataLayout();

  const Triple &TT = TM.getTargetTriple();
  IsAMDGCN = TT.getArch() == Triple::amdgcn;
  IsAMDHSA = TT.getOS() == Triple::AMDHSA;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  if (IsAMDGCN) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca which would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool SufficientLDS = hasSufficientLocalMem(F);
  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handleAlloca(*AI, SufficientLDS))
      Changed = true;
  }

  return Changed;
}

std::pair<Value *, Value *>
AMDGPUPromoteAllocaImpl::getLocalSizeYZ(IRBuilder<> &Builder) {
  Function &F = *Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  if (!IsAMDHSA) {
    Function *LocalSizeYFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_y);
    Function *LocalSizeZFn
      = Intrinsic::getDeclaration(Mod, Intrinsic::r600_read_local_size_z);

    CallInst *LocalSizeY = Builder.CreateCall(LocalSizeYFn, {});
    CallInst *LocalSizeZ = Builder.CreateCall(LocalSizeZFn, {});

    ST.makeLIDRangeMetadata(LocalSizeY);
    ST.makeLIDRangeMetadata(LocalSizeZ);

    return std::make_pair(LocalSizeY, LocalSizeZ);
  }

  // We must read the size out of the dispatch pointer.
  assert(IsAMDGCN);

  // We are indexing into this struct, and want to extract the workgroup_size_*
  // fields.
  //
  //   typedef struct hsa_kernel_dispatch_packet_s {
  //     uint16_t header;
  //     uint16_t setup;
  //     uint16_t workgroup_size_x;
  //     uint16_t workgroup_size_y;
  //     uint16_t workgroup_size_z;
  //     uint16_t reserved0;
  //     uint32_t grid_size_x;
  //     uint32_t grid_size_y;
  //     uint32_t grid_size_z;
  //
  //     uint32_t private_segment_size;
  //     uint32_t group_segment_size;
  //     uint64_t kernel_object;
  //
  // #ifdef HSA_LARGE_MODEL
  //     void *kernarg_address;
  // #elif defined HSA_LITTLE_ENDIAN
  //     void *kernarg_address;
  //     uint32_t reserved1;
  // #else
  //     uint32_t reserved1;
  //     void *kernarg_address;
  // #endif
  //     uint64_t reserved2;
  //     hsa_signal_t completion_signal; // uint64_t wrapper
  //   } hsa_kernel_dispatch_packet_t;
  //
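  // The workgroup_size_x and workgroup_size_y fields occupy bytes 4-7 of the
  // packet and workgroup_size_z bytes 8-9, so the 32-bit word at index 1
  // holds (workgroup_size_y << 16 | workgroup_size_x) and the word at index
  // 2 holds (reserved0 << 16 | workgroup_size_z); these are the two dword
  // loads performed below.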
  Function *DispatchPtrFn
    = Intrinsic::getDeclaration(Mod, Intrinsic::amdgcn_dispatch_ptr);

  CallInst *DispatchPtr = Builder.CreateCall(DispatchPtrFn, {});
  DispatchPtr->addRetAttr(Attribute::NoAlias);
  DispatchPtr->addRetAttr(Attribute::NonNull);
  F.removeFnAttr("amdgpu-no-dispatch-ptr");

  // Size of the dispatch packet struct.
  DispatchPtr->addDereferenceableRetAttr(64);

  Type *I32Ty = Type::getInt32Ty(Mod->getContext());
  Value *CastDispatchPtr = Builder.CreateBitCast(
    DispatchPtr, PointerType::get(I32Ty, AMDGPUAS::CONSTANT_ADDRESS));

  // We could do a single 64-bit load here, but it's likely that the basic
  // 32-bit and extract sequence is already present, and it is probably easier
  // to CSE this. The loads should be mergeable later anyway.
  Value *GEPXY = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 1);
  LoadInst *LoadXY = Builder.CreateAlignedLoad(I32Ty, GEPXY, Align(4));

  Value *GEPZU = Builder.CreateConstInBoundsGEP1_64(I32Ty, CastDispatchPtr, 2);
  LoadInst *LoadZU = Builder.CreateAlignedLoad(I32Ty, GEPZU, Align(4));

  MDNode *MD = MDNode::get(Mod->getContext(), None);
  LoadXY->setMetadata(LLVMContext::MD_invariant_load, MD);
  LoadZU->setMetadata(LLVMContext::MD_invariant_load, MD);
  ST.makeLIDRangeMetadata(LoadZU);

  // Extract y component. Upper half of LoadZU should be zero already.
  Value *Y = Builder.CreateLShr(LoadXY, 16);

  return std::make_pair(Y, LoadZU);
}

Value *AMDGPUPromoteAllocaImpl::getWorkitemID(IRBuilder<> &Builder,
                                              unsigned N) {
  Function *F = Builder.GetInsertBlock()->getParent();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, *F);
  Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
  StringRef AttrName;

  switch (N) {
  case 0:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_x
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_x;
    AttrName = "amdgpu-no-workitem-id-x";
    break;
  case 1:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_y
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_y;
    AttrName = "amdgpu-no-workitem-id-y";
    break;
  case 2:
    IntrID = IsAMDGCN ? (Intrinsic::ID)Intrinsic::amdgcn_workitem_id_z
                      : (Intrinsic::ID)Intrinsic::r600_read_tidig_z;
    AttrName = "amdgpu-no-workitem-id-z";
    break;
  default:
    llvm_unreachable("invalid dimension");
  }

  Function *WorkitemIdFn = Intrinsic::getDeclaration(Mod, IntrID);
  CallInst *CI = Builder.CreateCall(WorkitemIdFn);
  ST.makeLIDRangeMetadata(CI);
  F->removeFnAttr(AttrName);

  return CI;
}

static FixedVectorType *arrayTypeToVecType(ArrayType *ArrayTy) {
  return FixedVectorType::get(ArrayTy->getElementType(),
                              ArrayTy->getNumElements());
}

static Value *
calculateVectorIndex(Value *Ptr,
                     const std::map<GetElementPtrInst *, Value *> &GEPIdx) {
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr->stripPointerCasts());
  if (!GEP)
    return ConstantInt::getNullValue(Type::getInt32Ty(Ptr->getContext()));

  auto I = GEPIdx.find(GEP);
  assert(I != GEPIdx.end() && "Must have entry for GEP!");
  return I->second;
}

static Value *GEPToVectorIndex(GetElementPtrInst *GEP, AllocaInst *Alloca,
                               Type *VecElemTy, const DataLayout &DL) {
  // TODO: Extracting a "multiple of X" from a GEP might be a useful generic
  // helper.
  unsigned BW = DL.getIndexTypeSizeInBits(GEP->getType());
  MapVector<Value *, APInt> VarOffsets;
  APInt ConstOffset(BW, 0);
  if (GEP->getPointerOperand()->stripPointerCasts() != Alloca ||
      !GEP->collectOffset(DL, BW, VarOffsets, ConstOffset))
    return nullptr;

  unsigned VecElemSize = DL.getTypeAllocSize(VecElemTy);
  if (VarOffsets.size() > 1)
    return nullptr;

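  // E.g. for "getelementptr float, float* %alloca, i32 %i" with a float
  // element type, VarOffsets holds {%i -> 4} and ConstOffset is zero, so %i
  // can be used directly as the vector index.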
  if (VarOffsets.size() == 1) {
    // Only handle cases where we don't need to insert extra arithmetic
    // instructions.
    const auto &VarOffset = VarOffsets.front();
    if (!ConstOffset.isZero() || VarOffset.second != VecElemSize)
      return nullptr;
    return VarOffset.first;
  }

  APInt Quot;
  uint64_t Rem;
  APInt::udivrem(ConstOffset, VecElemSize, Quot, Rem);
  if (Rem != 0)
    return nullptr;

  return ConstantInt::get(GEP->getContext(), Quot);
}
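
// Sketch of the rewrite performed by tryPromoteAllocaToVector (illustrative
// IR, not taken from an actual test): an alloca of [4 x float] is treated as
// a <4 x float> value, and each element access becomes a full-vector load
// plus an extractelement/insertelement:
//
//   %alloca = alloca [4 x float]
//   %elt = getelementptr inbounds [4 x float], [4 x float]* %alloca,
//          i32 0, i32 %idx
//   store float %v, float* %elt
// ==>
//   %vec = load <4 x float>, <4 x float>* (bitcast of %alloca)
//   %new = insertelement <4 x float> %vec, float %v, i32 %idx
//   store <4 x float> %new, <4 x float>* (bitcast of %alloca)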

static bool tryPromoteAllocaToVector(AllocaInst *Alloca, const DataLayout &DL,
                                     unsigned MaxVGPRs) {

  if (DisablePromoteAllocaToVector) {
    LLVM_DEBUG(dbgs() << "  Promotion of alloca to vector is disabled\n");
    return false;
  }

  Type *AllocaTy = Alloca->getAllocatedType();
  auto *VectorTy = dyn_cast<FixedVectorType>(AllocaTy);
  if (auto *ArrayTy = dyn_cast<ArrayType>(AllocaTy)) {
    if (VectorType::isValidElementType(ArrayTy->getElementType()) &&
        ArrayTy->getNumElements() > 0)
      VectorTy = arrayTypeToVecType(ArrayTy);
  }

  // Use up to 1/4 of available register budget for vectorization.
  unsigned Limit = PromoteAllocaToVectorLimit ? PromoteAllocaToVectorLimit * 8
                                              : (MaxVGPRs * 32);
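  // Note on units: Limit is in bits. Each VGPR holds 32 bits, so
  // MaxVGPRs * 32 is the total VGPR budget in bits; the check below
  // multiplies the alloca's size in bits by 4, which enforces the 1/4
  // budget. The cl::opt override is given in bytes, hence the * 8.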

  if (DL.getTypeSizeInBits(AllocaTy) * 4 > Limit) {
    LLVM_DEBUG(dbgs() << "  Alloca too big for vectorization with "
                      << MaxVGPRs << " registers available\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Alloca candidate for vectorization\n");

  // FIXME: There is no reason why we can't support larger arrays, we
  // are just being conservative for now.
  // FIXME: We also reject allocas of the form [ 2 x [ 2 x i32 ]] or
  // equivalent. Potentially these could also be promoted but we don't
  // currently handle this case.
  if (!VectorTy || VectorTy->getNumElements() > 16 ||
      VectorTy->getNumElements() < 2) {
    LLVM_DEBUG(dbgs() << "  Cannot convert type to vector\n");
    return false;
  }

  std::map<GetElementPtrInst*, Value*> GEPVectorIdx;
  SmallVector<Instruction *> WorkList;
  SmallVector<Use *, 8> Uses;
  for (Use &U : Alloca->uses())
    Uses.push_back(&U);

  Type *VecEltTy = VectorTy->getElementType();
  while (!Uses.empty()) {
    Use *U = Uses.pop_back_val();
    Instruction *Inst = dyn_cast<Instruction>(U->getUser());

    if (Value *Ptr = getLoadStorePointerOperand(Inst)) {
      // This is a store of the pointer, not to the pointer.
      if (isa<StoreInst>(Inst) &&
          U->getOperandNo() != StoreInst::getPointerOperandIndex())
        return false;

      Type *AccessTy = getLoadStoreType(Inst);
      Ptr = Ptr->stripPointerCasts();

      // Alloca already accessed as vector, leave alone.
      if (Ptr == Alloca && DL.getTypeStoreSize(Alloca->getAllocatedType()) ==
                               DL.getTypeStoreSize(AccessTy))
        continue;

      // Check that this is a simple access of a vector element.
      bool IsSimple = isa<LoadInst>(Inst) ? cast<LoadInst>(Inst)->isSimple()
                                          : cast<StoreInst>(Inst)->isSimple();
      if (!IsSimple ||
          !CastInst::isBitOrNoopPointerCastable(VecEltTy, AccessTy, DL))
        return false;

      WorkList.push_back(Inst);
      continue;
    }

    if (isa<BitCastInst>(Inst)) {
      // Look through bitcasts.
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      continue;
    }

    if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
      // If we can't compute a vector index from this GEP, then we can't
      // promote this alloca to vector.
      Value *Index = GEPToVectorIndex(GEP, Alloca, VecEltTy, DL);
      if (!Index) {
        LLVM_DEBUG(dbgs() << "  Cannot compute vector index for GEP " << *GEP
                          << '\n');
        return false;
      }

      GEPVectorIdx[GEP] = Index;
      for (Use &U : Inst->uses())
        Uses.push_back(&U);
      continue;
    }

    // Ignore assume-like intrinsics and comparisons used in assumes.
    if (isAssumeLikeIntrinsic(Inst))
      continue;

    if (isa<ICmpInst>(Inst) && all_of(Inst->users(), [](User *U) {
          return isAssumeLikeIntrinsic(cast<Instruction>(U));
        }))
      continue;

    // Unknown user.
    return false;
  }

  LLVM_DEBUG(dbgs() << "  Converting alloca to vector " << *AllocaTy << " -> "
                    << *VectorTy << '\n');

  for (Instruction *Inst : WorkList) {
    IRBuilder<> Builder(Inst);
    switch (Inst->getOpcode()) {
    case Instruction::Load: {
      Value *Ptr = cast<LoadInst>(Inst)->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *ExtractElement = Builder.CreateExtractElement(VecValue, Index);
      if (Inst->getType() != VecEltTy)
        ExtractElement =
            Builder.CreateBitOrPointerCast(ExtractElement, Inst->getType());
      Inst->replaceAllUsesWith(ExtractElement);
      Inst->eraseFromParent();
      break;
    }
    case Instruction::Store: {
      StoreInst *SI = cast<StoreInst>(Inst);
      Value *Ptr = SI->getPointerOperand();
      Value *Index = calculateVectorIndex(Ptr, GEPVectorIdx);
      Type *VecPtrTy = VectorTy->getPointerTo(Alloca->getAddressSpace());
      Value *BitCast = Builder.CreateBitCast(Alloca, VecPtrTy);
      Value *VecValue = Builder.CreateLoad(VectorTy, BitCast);
      Value *Elt = SI->getValueOperand();
      if (Elt->getType() != VecEltTy)
        Elt = Builder.CreateBitOrPointerCast(Elt, VecEltTy);
      Value *NewVecValue = Builder.CreateInsertElement(VecValue, Elt, Index);
      Builder.CreateStore(NewVecValue, BitCast);
      Inst->eraseFromParent();
      break;
    }

    default:
      llvm_unreachable("Inconsistency in instructions promotable to vector");
    }
  }
  return true;
}

static bool isCallPromotable(CallInst *CI) {
  IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
  if (!II)
    return false;

  switch (II->getIntrinsicID()) {
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::objectsize:
    return true;
  default:
    return false;
  }
}

bool AMDGPUPromoteAllocaImpl::binaryOpIsDerivedFromSameAlloca(
    Value *BaseAlloca, Value *Val, Instruction *Inst, int OpIdx0,
    int OpIdx1) const {
  // Figure out which operand is the one we might not be promoting.
  Value *OtherOp = Inst->getOperand(OpIdx0);
  if (Val == OtherOp)
    OtherOp = Inst->getOperand(OpIdx1);

  if (isa<ConstantPointerNull>(OtherOp))
    return true;

  Value *OtherObj = getUnderlyingObject(OtherOp);
  if (!isa<AllocaInst>(OtherObj))
    return false;

  // TODO: We should be able to replace undefs with the right pointer type.

  // TODO: If we know the other base object is another promotable
  // alloca, not necessarily this alloca, we can do this. The
  // important part is both must have the same address space at
  // the end.
  if (OtherObj != BaseAlloca) {
    LLVM_DEBUG(
        dbgs() << "Found a binary instruction with another alloca object\n");
    return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::collectUsesWithPtrTypes(
    Value *BaseAlloca, Value *Val, std::vector<Value *> &WorkList) const {

  for (User *User : Val->users()) {
    if (is_contained(WorkList, User))
      continue;

    if (CallInst *CI = dyn_cast<CallInst>(User)) {
      if (!isCallPromotable(CI))
        return false;

      WorkList.push_back(User);
      continue;
    }

    Instruction *UseInst = cast<Instruction>(User);
    if (UseInst->getOpcode() == Instruction::PtrToInt)
      return false;

    if (LoadInst *LI = dyn_cast<LoadInst>(UseInst)) {
      if (LI->isVolatile())
        return false;

      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(UseInst)) {
      if (SI->isVolatile())
        return false;

      // Reject if the alloca-derived pointer is the value being stored
      // rather than the pointer operand; that would let it escape.
      if (SI->getPointerOperand() != Val)
        return false;
    } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(UseInst)) {
      if (RMW->isVolatile())
        return false;
    } else if (AtomicCmpXchgInst *CAS = dyn_cast<AtomicCmpXchgInst>(UseInst)) {
      if (CAS->isVolatile())
        return false;
    }
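
    // For example, "store i32* %val, i32** %p", where %val is derived from
    // the alloca, stores the pointer itself and is rejected by the
    // pointer-operand check above.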

    // Only promote an icmp if we know that the other icmp operand is from
    // another pointer that will also be promoted.
    if (ICmpInst *ICmp = dyn_cast<ICmpInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, ICmp, 0, 1))
        return false;

      // May need to rewrite constant operands.
      WorkList.push_back(ICmp);
    }

    if (UseInst->getOpcode() == Instruction::AddrSpaceCast) {
      // Give up if the pointer may be captured.
      if (PointerMayBeCaptured(UseInst, true, true))
        return false;
      // Don't collect the users of this.
      WorkList.push_back(User);
      continue;
    }

    // Do not promote vector/aggregate type instructions. It is hard to track
    // their users.
    if (isa<InsertValueInst>(User) || isa<InsertElementInst>(User))
      return false;

    if (!User->getType()->isPointerTy())
      continue;

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(UseInst)) {
      // Be conservative if an address could be computed outside the bounds of
      // the alloca.
      if (!GEP->isInBounds())
        return false;
    }

    // Only promote a select if we know that the other select operand is from
    // another pointer that will also be promoted.
    if (SelectInst *SI = dyn_cast<SelectInst>(UseInst)) {
      if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, SI, 1, 2))
        return false;
    }

    // Repeat for phis.
    if (PHINode *Phi = dyn_cast<PHINode>(UseInst)) {
      // TODO: Handle more complex cases. We should be able to replace loops
      // over arrays.
      switch (Phi->getNumIncomingValues()) {
      case 1:
        break;
      case 2:
        if (!binaryOpIsDerivedFromSameAlloca(BaseAlloca, Val, Phi, 0, 1))
          return false;
        break;
      default:
        return false;
      }
    }

    WorkList.push_back(User);
    if (!collectUsesWithPtrTypes(BaseAlloca, User, WorkList))
      return false;
  }

  return true;
}

bool AMDGPUPromoteAllocaImpl::hasSufficientLocalMem(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                           "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  SmallVector<const Constant *, 16> Stack;
  SmallPtrSet<const Constant *, 8> VisitedConstants;
  SmallPtrSet<const GlobalVariable *, 8> UsedLDS;

  auto visitUsers = [&](const GlobalVariable *GV, const Constant *Val) -> bool {
    for (const User *U : Val->users()) {
      if (const Instruction *Use = dyn_cast<Instruction>(U)) {
        if (Use->getParent()->getParent() == &F)
          return true;
      } else {
        const Constant *C = cast<Constant>(U);
        if (VisitedConstants.insert(C).second)
          Stack.push_back(C);
      }
    }

    return false;
  };

  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    if (visitUsers(&GV, &GV)) {
      UsedLDS.insert(&GV);
      Stack.clear();
      continue;
    }

    // For any ConstantExpr uses, we need to recursively search the users until
    // we see a function.
    while (!Stack.empty()) {
      const Constant *C = Stack.pop_back_val();
      if (visitUsers(&GV, C)) {
        UsedLDS.insert(&GV);
        Stack.clear();
        break;
      }
    }
  }

  const DataLayout &DL = Mod->getDataLayout();
  SmallVector<std::pair<uint64_t, Align>, 16> AllocatedSizes;
  AllocatedSizes.reserve(UsedLDS.size());

  for (const GlobalVariable *GV : UsedLDS) {
    Align Alignment =
        DL.getValueOrABITypeAlignment(GV->getAlign(), GV->getValueType());
    uint64_t AllocSize = DL.getTypeAllocSize(GV->getValueType());

    // HIP uses an extern unsized array in local address space for dynamically
    // allocated shared memory.  In that case, we have to disable the promotion.
    if (GV->hasExternalLinkage() && AllocSize == 0) {
      LocalMemLimit = 0;
      LLVM_DEBUG(dbgs() << "Function has a reference to externally allocated "
                           "local memory. Promoting to local memory "
                           "disabled.\n");
      return false;
    }

    AllocatedSizes.emplace_back(AllocSize, Alignment);
  }

  // Sort to try to estimate the worst case alignment padding.
  //
  // FIXME: We should really do something to fix the addresses to a more
  // optimal value instead.
  llvm::sort(AllocatedSizes, llvm::less_second());

  // Check how much local memory is being used by global objects.
  CurrentLocalMemUsage = 0;

  // FIXME: Try to account for padding here. The real padding and address is
  // currently determined from the inverse order of uses in the function when
  // legalizing, which could also potentially change. We try to estimate the
  // worst case here, but we probably should fix the addresses earlier.
  for (auto Alloc : AllocatedSizes) {
    CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Alloc.second);
    CurrentLocalMemUsage += Alloc.first;
  }
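
  // For example, with (size, align) pairs (1, 1), (4, 4) and (8, 8), the
  // ascending-alignment order yields 1, then alignTo(1, 4) + 4 = 8, then
  // alignTo(8, 8) + 8 = 16 bytes, whereas the reverse order would total only
  // 13; sorting by alignment biases the estimate towards the conservative
  // (larger) total.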

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors.
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);

  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  LLVM_DEBUG(dbgs() << F.getName() << " uses " << CurrentLocalMemUsage
                    << " bytes of LDS\n"
                    << "  Rounding size to " << MaxSizeWithWaveCount
                    << " with a maximum occupancy of " << MaxOccupancy << '\n'
                    << "  and " << (LocalMemLimit - CurrentLocalMemUsage)
                    << " available for promotion\n");

  return true;
}

// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAllocaImpl::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  const DataLayout &DL = Mod->getDataLayout();
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector.
  Type *AllocaTy = I.getAllocatedType();

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, DL, MaxVGPRs))
    return true; // Promoted to vector.

  if (DisablePromoteAllocaToLDS)
    return false;

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    LLVM_DEBUG(
        dbgs()
        << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  Align Alignment =
      DL.getValueOrABITypeAlignment(I.getAlign(), I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the
  // memory, we could end up using more than the maximum due to alignment
  // padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Alignment);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;
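
  // Note that every workitem in the workgroup gets its own copy, so e.g.
  // promoting a single i32 (4 bytes) with a maximum flat workgroup size of
  // 1024 charges 4096 bytes against the LDS budget.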

  if (NewSize > LocalMemLimit) {
    LLVM_DEBUG(dbgs() << "  " << AllocSize
                      << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    LLVM_DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  LLVM_DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlign());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

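  // Linearize the workitem id as
  //   TID = TIdX * TCntY * TCntZ + TIdY * TCntZ + TIdZ
  // so that each workitem indexes a disjoint element of the per-workgroup
  // array created above.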
  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  SmallVector<IntrinsicInst *> DeferredIntrs;

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        PointerType *NewTy = PointerType::getWithSamePointeeType(
            cast<PointerType>(Src0->getType()), AMDGPUAS::LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value will be corrected on its own; we don't want to
      // touch the users here.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      PointerType *NewTy = PointerType::getWithSamePointeeType(
          cast<PointerType>(V->getType()), AMDGPUAS::LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }
    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only.
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      // These have 2 pointer operands. In case the second pointer also needs
      // to be replaced, we defer processing of these intrinsics until all
      // other values are processed.
      DeferredIntrs.push_back(Intr);
      continue;
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(
          MemSet->getRawDest(), MemSet->getValue(), MemSet->getLength(),
          MaybeAlign(MemSet->getDestAlignment()), MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::strip_invariant_group:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Function *ObjectSize = Intrinsic::getDeclaration(
          Mod, Intrinsic::objectsize,
          {Intr->getType(),
           PointerType::getWithSamePointeeType(
               cast<PointerType>(Src->getType()), AMDGPUAS::LOCAL_ADDRESS)});

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize,
          {Src, Intr->getOperand(1), Intr->getOperand(2), Intr->getOperand(3)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }

  for (IntrinsicInst *Intr : DeferredIntrs) {
    Builder.SetInsertPoint(Intr);
    Intrinsic::ID ID = Intr->getIntrinsicID();
    assert(ID == Intrinsic::memcpy || ID == Intrinsic::memmove);

    MemTransferInst *MI = cast<MemTransferInst>(Intr);
    auto *B =
      Builder.CreateMemTransferInst(ID, MI->getRawDest(), MI->getDestAlign(),
                                    MI->getRawSource(), MI->getSourceAlign(),
                                    MI->getLength(), MI->isVolatile());

    for (unsigned I = 0; I != 2; ++I) {
      if (uint64_t Bytes = Intr->getParamDereferenceableBytes(I)) {
        B->addDereferenceableParamAttr(I, Bytes);
      }
    }

    Intr->eraseFromParent();
  }

  return true;
}

static bool handlePromoteAllocaToVector(AllocaInst &I, unsigned MaxVGPRs) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  LLVM_DEBUG(dbgs() << "Trying to promote " << I << '\n');

  Module *Mod = I.getParent()->getParent()->getParent();
  return tryPromoteAllocaToVector(&I, Mod->getDataLayout(), MaxVGPRs);
}

static bool promoteAllocasToVector(Function &F, TargetMachine &TM) {
  if (DisablePromoteAllocaToVector)
    return false;

  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(TM, F);
  if (!ST.isPromoteAllocaEnabled())
    return false;

  unsigned MaxVGPRs;
  if (TM.getTargetTriple().getArch() == Triple::amdgcn) {
    const GCNSubtarget &ST = TM.getSubtarget<GCNSubtarget>(F);
    MaxVGPRs = ST.getMaxNumVGPRs(ST.getWavesPerEU(F).first);
    // A non-entry function has only 32 caller-preserved registers.
    // Do not promote an alloca which would force spilling.
    if (!AMDGPU::isEntryFunctionCC(F.getCallingConv()))
      MaxVGPRs = std::min(MaxVGPRs, 32u);
  } else {
    MaxVGPRs = 128;
  }

  bool Changed = false;
  BasicBlock &EntryBB = *F.begin();

  SmallVector<AllocaInst *, 16> Allocas;
  for (Instruction &I : EntryBB) {
    if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
      Allocas.push_back(AI);
  }

  for (AllocaInst *AI : Allocas) {
    if (handlePromoteAllocaToVector(*AI, MaxVGPRs))
      Changed = true;
  }

  return Changed;
}

bool AMDGPUPromoteAllocaToVector::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  if (auto *TPC = getAnalysisIfAvailable<TargetPassConfig>()) {
    return promoteAllocasToVector(F, TPC->getTM<TargetMachine>());
  }
  return false;
}

PreservedAnalyses
AMDGPUPromoteAllocaToVectorPass::run(Function &F, FunctionAnalysisManager &AM) {
  bool Changed = promoteAllocasToVector(F, TM);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}

FunctionPass *llvm::createAMDGPUPromoteAlloca() {
  return new AMDGPUPromoteAlloca();
}

FunctionPass *llvm::createAMDGPUPromoteAllocaToVector() {
  return new AMDGPUPromoteAllocaToVector();
}