1 //===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visit functions for load, store and alloca.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/MapVector.h"
15 #include "llvm/ADT/SetOperations.h"
16 #include "llvm/ADT/SmallString.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Analysis/Loads.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/IntrinsicInst.h"
22 #include "llvm/IR/LLVMContext.h"
23 #include "llvm/IR/PatternMatch.h"
24 #include "llvm/Transforms/InstCombine/InstCombiner.h"
25 #include "llvm/Transforms/Utils/Local.h"
26 using namespace llvm;
27 using namespace PatternMatch;
28 
29 #define DEBUG_TYPE "instcombine"
30 
31 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
32 STATISTIC(NumGlobalCopies, "Number of allocas copied from constant global");
33 
34 static cl::opt<unsigned> MaxCopiedFromConstantUsers(
35     "instcombine-max-copied-from-constant-users", cl::init(300),
36     cl::desc("Maximum users to visit in copy from constant transform"),
37     cl::Hidden);
38 
39 /// isOnlyCopiedFromConstantMemory - Recursively walk the uses of a (derived)
40 /// pointer to an alloca.  Ignore any reads of the pointer; return false if we
41 /// see any stores or other unknown uses.  If we see pointer arithmetic, keep
42 /// track of whether it moves the pointer (with IsOffset) but otherwise traverse
43 /// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
44 /// the alloca, and if the source pointer is a pointer to a constant memory
45 /// location, we can optimize this.
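///
/// As an illustrative sketch (hypothetical IR, names invented here), the
/// pattern this looks for is:
///
///   %a = alloca [4 x i32]
///   call void @llvm.memcpy.p0.p0.i64(ptr %a, ptr @const.init, i64 16, i1 false)
///   %v = load i32, ptr %a
///
/// Every use of %a is either the destination of the one memcpy or a plain
/// read, so the walk succeeds and the caller may read @const.init directly.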
46 static bool
47 isOnlyCopiedFromConstantMemory(AAResults *AA, AllocaInst *V,
48                                MemTransferInst *&TheCopy,
49                                SmallVectorImpl<Instruction *> &ToDelete) {
50   // We track lifetime intrinsics as we encounter them.  If we decide to go
51   // ahead and replace the value with the memory location, this lets the caller
52   // quickly eliminate the markers.
53 
54   using ValueAndIsOffset = PointerIntPair<Value *, 1, bool>;
55   SmallVector<ValueAndIsOffset, 32> Worklist;
56   SmallPtrSet<ValueAndIsOffset, 32> Visited;
57   Worklist.emplace_back(V, false);
58   while (!Worklist.empty()) {
59     ValueAndIsOffset Elem = Worklist.pop_back_val();
60     if (!Visited.insert(Elem).second)
61       continue;
62     if (Visited.size() > MaxCopiedFromConstantUsers)
63       return false;
64 
65     const auto [Value, IsOffset] = Elem;
66     for (auto &U : Value->uses()) {
67       auto *I = cast<Instruction>(U.getUser());
68 
69       if (auto *LI = dyn_cast<LoadInst>(I)) {
70         // Ignore non-volatile loads, they are always ok.
71         if (!LI->isSimple()) return false;
72         continue;
73       }
74 
75       if (isa<PHINode, SelectInst>(I)) {
76         // We set IsOffset=true, to forbid the memcpy from occurring after the
77         // phi: If one of the phi operands is not based on the alloca, we
78         // would incorrectly omit a write.
79         Worklist.emplace_back(I, true);
80         continue;
81       }
82       if (isa<BitCastInst, AddrSpaceCastInst>(I)) {
83         // If uses of the bitcast are ok, we are ok.
84         Worklist.emplace_back(I, IsOffset);
85         continue;
86       }
87       if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
88         // If the GEP has all zero indices, it doesn't offset the pointer;
89         // otherwise, it does.
90         Worklist.emplace_back(I, IsOffset || !GEP->hasAllZeroIndices());
91         continue;
92       }
93 
94       if (auto *Call = dyn_cast<CallBase>(I)) {
95         // If this is the function being called then we treat it like a load and
96         // ignore it.
97         if (Call->isCallee(&U))
98           continue;
99 
100         unsigned DataOpNo = Call->getDataOperandNo(&U);
101         bool IsArgOperand = Call->isArgOperand(&U);
102 
103         // Inalloca arguments are clobbered by the call.
104         if (IsArgOperand && Call->isInAllocaArgument(DataOpNo))
105           return false;
106 
107         // If this call site doesn't modify the memory, then we know it is just
108         // a load (but one that potentially returns the value itself), so we can
109         // ignore it if we know that the value isn't captured.
110         bool NoCapture = Call->doesNotCapture(DataOpNo);
111         if ((Call->onlyReadsMemory() && (Call->use_empty() || NoCapture)) ||
112             (Call->onlyReadsMemory(DataOpNo) && NoCapture))
113           continue;
114       }
115 
116       // Lifetime intrinsics can be handled by the caller.
117       if (I->isLifetimeStartOrEnd()) {
118         assert(I->use_empty() && "Lifetime markers have no result to use!");
119         ToDelete.push_back(I);
120         continue;
121       }
122 
123       // If this isn't our memcpy/memmove, reject it as something we can't
124       // handle.
125       MemTransferInst *MI = dyn_cast<MemTransferInst>(I);
126       if (!MI)
127         return false;
128 
129       // If the transfer is volatile, reject it.
130       if (MI->isVolatile())
131         return false;
132 
133       // If the transfer is using the alloca as a source of the transfer, then
134       // ignore it since it is a load (unless the transfer is volatile).
135       if (U.getOperandNo() == 1)
136         continue;
137 
138       // If we already have seen a copy, reject the second one.
139       if (TheCopy) return false;
140 
141       // If the pointer has been offset from the start of the alloca, we can't
142       // safely handle this.
143       if (IsOffset) return false;
144 
145       // If the memintrinsic isn't using the alloca as the dest, reject it.
146       if (U.getOperandNo() != 0) return false;
147 
148       // If the source of the memcpy/move is not constant, reject it.
149       if (isModSet(AA->getModRefInfoMask(MI->getSource())))
150         return false;
151 
152       // Otherwise, the transform is safe.  Remember the copy instruction.
153       TheCopy = MI;
154     }
155   }
156   return true;
157 }
158 
159 /// isOnlyCopiedFromConstantMemory - Return the copy instruction if the
160 /// specified alloca is only modified by a copy from a constant memory
161 /// location, and null otherwise. If we can prove this, we can replace any
162 /// uses of the alloca with uses of the memory location directly.
163 static MemTransferInst *
164 isOnlyCopiedFromConstantMemory(AAResults *AA,
165                                AllocaInst *AI,
166                                SmallVectorImpl<Instruction *> &ToDelete) {
167   MemTransferInst *TheCopy = nullptr;
168   if (isOnlyCopiedFromConstantMemory(AA, AI, TheCopy, ToDelete))
169     return TheCopy;
170   return nullptr;
171 }
172 
173 /// Returns true if V is dereferenceable for size of alloca.
174 static bool isDereferenceableForAllocaSize(const Value *V, const AllocaInst *AI,
175                                            const DataLayout &DL) {
176   if (AI->isArrayAllocation())
177     return false;
178   uint64_t AllocaSize = DL.getTypeStoreSize(AI->getAllocatedType());
179   if (!AllocaSize)
180     return false;
181   return isDereferenceableAndAlignedPointer(V, AI->getAlign(),
182                                             APInt(64, AllocaSize), DL);
183 }
184 
185 static Instruction *simplifyAllocaArraySize(InstCombinerImpl &IC,
186                                             AllocaInst &AI, DominatorTree &DT) {
187   // Check for array size of 1 (scalar allocation).
188   if (!AI.isArrayAllocation()) {
189     // i32 1 is the canonical array size for scalar allocations.
190     if (AI.getArraySize()->getType()->isIntegerTy(32))
191       return nullptr;
192 
193     // Canonicalize it.
194     return IC.replaceOperand(AI, 0, IC.Builder.getInt32(1));
195   }
196 
197   // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
198   if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
199     if (C->getValue().getActiveBits() <= 64) {
200       Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
201       AllocaInst *New = IC.Builder.CreateAlloca(NewTy, AI.getAddressSpace(),
202                                                 nullptr, AI.getName());
203       New->setAlignment(AI.getAlign());
204       New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
205 
206       replaceAllDbgUsesWith(AI, *New, *New, DT);
207       return IC.replaceInstUsesWith(AI, New);
208     }
209   }
210 
211   if (isa<UndefValue>(AI.getArraySize()))
212     return IC.replaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
213 
214   // Ensure that the alloca array size argument has type equal to the offset
215   // size of the alloca() pointer, which, in the typical case, is intptr_t,
216   // so that any casting is exposed early.
217   Type *PtrIdxTy = IC.getDataLayout().getIndexType(AI.getType());
218   if (AI.getArraySize()->getType() != PtrIdxTy) {
219     Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), PtrIdxTy, false);
220     return IC.replaceOperand(AI, 0, V);
221   }
222 
223   return nullptr;
224 }
225 
226 namespace {
227 // If I and V are pointers in different address spaces, it is not allowed to
228 // use replaceAllUsesWith since I and V have different types. A
229 // non-target-specific transformation should not use addrspacecast on V since
230 // the two address spaces may be disjoint depending on the target.
231 //
232 // This class chases down uses of the old pointer until reaching the load
233 // instructions, then replaces the old pointer in the load instructions with
234 // the new pointer. If during the chasing it sees bitcast or GEP, it will
235 // create new bitcast or GEP with the new pointer and use them in the load
236 // instruction.
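//
// A sketch of the case this enables (hypothetical IR, address spaces chosen
// only for illustration):
//
//   @g = addrspace(4) constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
//   %a = alloca [4 x i32]
//   call void @llvm.memcpy.p0.p4.i64(ptr %a, ptr addrspace(4) @g, i64 16, i1 false)
//   %p = getelementptr [4 x i32], ptr %a, i64 0, i64 1
//   %v = load i32, ptr %p
//
// Replacing %a with @g directly would mix pointer types from different
// address spaces, so the GEP and the load are rebuilt on the
// ptr addrspace(4) value instead.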
237 class PointerReplacer {
238 public:
239   PointerReplacer(InstCombinerImpl &IC, Instruction &Root, unsigned SrcAS)
240       : IC(IC), Root(Root), FromAS(SrcAS) {}
241 
242   bool collectUsers();
243   void replacePointer(Value *V);
244 
245 private:
246   void replace(Instruction *I);
247   Value *getReplacement(Value *V) const { return WorkMap.lookup(V); }
248   bool isAvailable(Instruction *I) const {
249     return I == &Root || UsersToReplace.contains(I);
250   }
251 
252   bool isEqualOrValidAddrSpaceCast(const Instruction *I,
253                                    unsigned FromAS) const {
254     const auto *ASC = dyn_cast<AddrSpaceCastInst>(I);
255     if (!ASC)
256       return false;
257     unsigned ToAS = ASC->getDestAddressSpace();
258     return (FromAS == ToAS) || IC.isValidAddrSpaceCast(FromAS, ToAS);
259   }
260 
261   SmallSetVector<Instruction *, 32> UsersToReplace;
262   MapVector<Value *, Value *> WorkMap;
263   InstCombinerImpl &IC;
264   Instruction &Root;
265   unsigned FromAS;
266 };
267 } // end anonymous namespace
268 
269 bool PointerReplacer::collectUsers() {
270   SmallVector<Instruction *> Worklist;
271   SmallSetVector<Instruction *, 32> ValuesToRevisit;
272 
273   auto PushUsersToWorklist = [&](Instruction *Inst) {
274     for (auto *U : Inst->users())
275       if (auto *I = dyn_cast<Instruction>(U))
276         if (!isAvailable(I) && !ValuesToRevisit.contains(I))
277           Worklist.emplace_back(I);
278   };
279 
280   PushUsersToWorklist(&Root);
281   while (!Worklist.empty()) {
282     Instruction *Inst = Worklist.pop_back_val();
283     if (auto *Load = dyn_cast<LoadInst>(Inst)) {
284       if (Load->isVolatile())
285         return false;
286       UsersToReplace.insert(Load);
287     } else if (auto *PHI = dyn_cast<PHINode>(Inst)) {
288       /// TODO: Handle poison and null pointers for PHI and select.
289       // If all incoming values are available, mark this PHI as
290       // replaceable and push its users into the worklist.
291       bool IsReplaceable = true;
292       if (all_of(PHI->incoming_values(), [&](Value *V) {
293             if (!isa<Instruction>(V))
294               return IsReplaceable = false;
295             return isAvailable(cast<Instruction>(V));
296           })) {
297         UsersToReplace.insert(PHI);
298         PushUsersToWorklist(PHI);
299         continue;
300       }
301 
302       // Either an incoming value is not an instruction or not all
303       // incoming values are available. If this PHI was already
304       // visited prior to this iteration, return false.
305       if (!IsReplaceable || !ValuesToRevisit.insert(PHI))
306         return false;
307 
308       // Push PHI back into the stack, followed by unavailable
309       // incoming values.
310       Worklist.emplace_back(PHI);
311       for (unsigned Idx = 0; Idx < PHI->getNumIncomingValues(); ++Idx) {
312         auto *IncomingValue = cast<Instruction>(PHI->getIncomingValue(Idx));
313         if (UsersToReplace.contains(IncomingValue))
314           continue;
315         if (!ValuesToRevisit.insert(IncomingValue))
316           return false;
317         Worklist.emplace_back(IncomingValue);
318       }
319     } else if (auto *SI = dyn_cast<SelectInst>(Inst)) {
320       auto *TrueInst = dyn_cast<Instruction>(SI->getTrueValue());
321       auto *FalseInst = dyn_cast<Instruction>(SI->getFalseValue());
322       if (!TrueInst || !FalseInst)
323         return false;
324 
325       UsersToReplace.insert(SI);
326       PushUsersToWorklist(SI);
327     } else if (auto *GEP = dyn_cast<GetElementPtrInst>(Inst)) {
328       UsersToReplace.insert(GEP);
329       PushUsersToWorklist(GEP);
330     } else if (auto *MI = dyn_cast<MemTransferInst>(Inst)) {
331       if (MI->isVolatile())
332         return false;
333       UsersToReplace.insert(Inst);
334     } else if (isEqualOrValidAddrSpaceCast(Inst, FromAS)) {
335       UsersToReplace.insert(Inst);
336       PushUsersToWorklist(Inst);
337     } else if (Inst->isLifetimeStartOrEnd()) {
338       continue;
339     } else {
340       // TODO: For arbitrary uses with address space mismatches, should we check
341       // if we can introduce a valid addrspacecast?
342       LLVM_DEBUG(dbgs() << "Cannot handle pointer user: " << *Inst << '\n');
343       return false;
344     }
345   }
346 
347   return true;
348 }
349 
350 void PointerReplacer::replacePointer(Value *V) {
351   assert(cast<PointerType>(Root.getType()) != cast<PointerType>(V->getType()) &&
352          "Invalid usage");
353   WorkMap[&Root] = V;
354   SmallVector<Instruction *> Worklist;
355   SetVector<Instruction *> PostOrderWorklist;
356   SmallPtrSet<Instruction *, 32> Visited;
357 
358   // Perform a postorder traversal of the users of Root.
359   Worklist.push_back(&Root);
360   while (!Worklist.empty()) {
361     Instruction *I = Worklist.back();
362 
363     // If I has not been processed before, push each of its
364     // replaceable users into the worklist.
365     if (Visited.insert(I).second) {
366       for (auto *U : I->users()) {
367         auto *UserInst = cast<Instruction>(U);
368         if (UsersToReplace.contains(UserInst) && !Visited.contains(UserInst))
369           Worklist.push_back(UserInst);
370       }
371       // Otherwise, users of I have already been pushed into
372       // the PostOrderWorklist. Push I as well.
373     } else {
374       PostOrderWorklist.insert(I);
375       Worklist.pop_back();
376     }
377   }
378 
379   // Replace pointers in reverse-postorder.
380   for (Instruction *I : reverse(PostOrderWorklist))
381     replace(I);
382 }
383 
384 void PointerReplacer::replace(Instruction *I) {
385   if (getReplacement(I))
386     return;
387 
388   if (auto *LT = dyn_cast<LoadInst>(I)) {
389     auto *V = getReplacement(LT->getPointerOperand());
390     assert(V && "Operand not replaced");
391     auto *NewI = new LoadInst(LT->getType(), V, "", LT->isVolatile(),
392                               LT->getAlign(), LT->getOrdering(),
393                               LT->getSyncScopeID());
394     NewI->takeName(LT);
395     copyMetadataForLoad(*NewI, *LT);
396 
397     IC.InsertNewInstWith(NewI, LT->getIterator());
398     IC.replaceInstUsesWith(*LT, NewI);
399     // LT has actually been replaced by NewI. It is useless to insert LT into
400     // the map. Instead, we insert NewI into the map to indicate this is the
401     // replacement (new value).
402     WorkMap[NewI] = NewI;
403   } else if (auto *PHI = dyn_cast<PHINode>(I)) {
404     // Create a new PHI by replacing any incoming value that is a user of the
405     // root pointer and has a replacement.
406     Value *V = WorkMap.lookup(PHI->getIncomingValue(0));
407     PHI->mutateType(V ? V->getType() : PHI->getIncomingValue(0)->getType());
408     for (unsigned int I = 0; I < PHI->getNumIncomingValues(); ++I) {
409       Value *V = WorkMap.lookup(PHI->getIncomingValue(I));
410       PHI->setIncomingValue(I, V ? V : PHI->getIncomingValue(I));
411     }
412     WorkMap[PHI] = PHI;
413   } else if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
414     auto *V = getReplacement(GEP->getPointerOperand());
415     assert(V && "Operand not replaced");
416     SmallVector<Value *, 8> Indices(GEP->indices());
417     auto *NewI =
418         GetElementPtrInst::Create(GEP->getSourceElementType(), V, Indices);
419     IC.InsertNewInstWith(NewI, GEP->getIterator());
420     NewI->takeName(GEP);
421     NewI->setNoWrapFlags(GEP->getNoWrapFlags());
422     WorkMap[GEP] = NewI;
423   } else if (auto *SI = dyn_cast<SelectInst>(I)) {
424     Value *TrueValue = SI->getTrueValue();
425     Value *FalseValue = SI->getFalseValue();
426     if (Value *Replacement = getReplacement(TrueValue))
427       TrueValue = Replacement;
428     if (Value *Replacement = getReplacement(FalseValue))
429       FalseValue = Replacement;
430     auto *NewSI = SelectInst::Create(SI->getCondition(), TrueValue, FalseValue,
431                                      SI->getName(), nullptr, SI);
432     IC.InsertNewInstWith(NewSI, SI->getIterator());
433     NewSI->takeName(SI);
434     WorkMap[SI] = NewSI;
435   } else if (auto *MemCpy = dyn_cast<MemTransferInst>(I)) {
436     auto *DestV = MemCpy->getRawDest();
437     auto *SrcV = MemCpy->getRawSource();
438 
439     if (auto *DestReplace = getReplacement(DestV))
440       DestV = DestReplace;
441     if (auto *SrcReplace = getReplacement(SrcV))
442       SrcV = SrcReplace;
443 
444     IC.Builder.SetInsertPoint(MemCpy);
445     auto *NewI = IC.Builder.CreateMemTransferInst(
446         MemCpy->getIntrinsicID(), DestV, MemCpy->getDestAlign(), SrcV,
447         MemCpy->getSourceAlign(), MemCpy->getLength(), MemCpy->isVolatile());
448     AAMDNodes AAMD = MemCpy->getAAMetadata();
449     if (AAMD)
450       NewI->setAAMetadata(AAMD);
451 
452     IC.eraseInstFromFunction(*MemCpy);
453     WorkMap[MemCpy] = NewI;
454   } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(I)) {
455     auto *V = getReplacement(ASC->getPointerOperand());
456     assert(V && "Operand not replaced");
457     assert(isEqualOrValidAddrSpaceCast(
458                ASC, V->getType()->getPointerAddressSpace()) &&
459            "Invalid address space cast!");
460 
461     if (V->getType()->getPointerAddressSpace() !=
462         ASC->getType()->getPointerAddressSpace()) {
463       auto *NewI = new AddrSpaceCastInst(V, ASC->getType(), "");
464       NewI->takeName(ASC);
465       IC.InsertNewInstWith(NewI, ASC->getIterator());
466       WorkMap[ASC] = NewI;
467     } else {
468       WorkMap[ASC] = V;
469     }
470 
471   } else {
472     llvm_unreachable("should never reach here");
473   }
474 }
475 
476 Instruction *InstCombinerImpl::visitAllocaInst(AllocaInst &AI) {
477   if (auto *I = simplifyAllocaArraySize(*this, AI, DT))
478     return I;
479 
480   if (AI.getAllocatedType()->isSized()) {
481     // Move all allocas of zero byte objects to the entry block and merge them
482     // together.  Note that we only do this for allocas, because malloc should
483     // allocate and return a unique pointer, even for a zero byte allocation.
484     if (DL.getTypeAllocSize(AI.getAllocatedType()).getKnownMinValue() == 0) {
485       // For a zero sized alloca there is no point in doing an array allocation.
486       // This is helpful if the array size is a complicated expression not used
487       // elsewhere.
488       if (AI.isArrayAllocation())
489         return replaceOperand(AI, 0,
490             ConstantInt::get(AI.getArraySize()->getType(), 1));
491 
492       // Get the first instruction in the entry block.
493       BasicBlock &EntryBlock = AI.getParent()->getParent()->getEntryBlock();
494       BasicBlock::iterator FirstInst = EntryBlock.getFirstNonPHIOrDbg();
495       if (&*FirstInst != &AI) {
496         // If the entry block doesn't start with a zero-size alloca then move
497         // this one to the start of the entry block.  There is no problem with
498         // dominance as the array size was forced to a constant earlier already.
499         AllocaInst *EntryAI = dyn_cast<AllocaInst>(FirstInst);
500         if (!EntryAI || !EntryAI->getAllocatedType()->isSized() ||
501             DL.getTypeAllocSize(EntryAI->getAllocatedType())
502                     .getKnownMinValue() != 0) {
503           AI.moveBefore(FirstInst);
504           return &AI;
505         }
506 
507         // Replace this zero-sized alloca with the one at the start of the entry
508         // block after ensuring that the address will be aligned enough for both
509         // types.
510         const Align MaxAlign = std::max(EntryAI->getAlign(), AI.getAlign());
511         EntryAI->setAlignment(MaxAlign);
512         return replaceInstUsesWith(AI, EntryAI);
513       }
514     }
515   }
516 
517   // Check to see if this allocation is only modified by a memcpy/memmove from
518   // a memory location whose alignment is equal to or exceeds that of the
519   // allocation. If this is the case, we can change all users to use the
520   // constant memory location instead.  This is commonly produced by the CFE by
521   // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
522   // is only subsequently read.
523   SmallVector<Instruction *, 4> ToDelete;
524   if (MemTransferInst *Copy = isOnlyCopiedFromConstantMemory(AA, &AI, ToDelete)) {
525     Value *TheSrc = Copy->getSource();
526     Align AllocaAlign = AI.getAlign();
527     Align SourceAlign = getOrEnforceKnownAlignment(
528       TheSrc, AllocaAlign, DL, &AI, &AC, &DT);
529     if (AllocaAlign <= SourceAlign &&
530         isDereferenceableForAllocaSize(TheSrc, &AI, DL) &&
531         !isa<Instruction>(TheSrc)) {
532       // FIXME: Can we sink instructions without violating dominance when TheSrc
533       // is an instruction instead of a constant or argument?
534       LLVM_DEBUG(dbgs() << "Found alloca equal to global: " << AI << '\n');
535       LLVM_DEBUG(dbgs() << "  memcpy = " << *Copy << '\n');
536       unsigned SrcAddrSpace = TheSrc->getType()->getPointerAddressSpace();
537       if (AI.getAddressSpace() == SrcAddrSpace) {
538         for (Instruction *Delete : ToDelete)
539           eraseInstFromFunction(*Delete);
540 
541         Instruction *NewI = replaceInstUsesWith(AI, TheSrc);
542         eraseInstFromFunction(*Copy);
543         ++NumGlobalCopies;
544         return NewI;
545       }
546 
547       PointerReplacer PtrReplacer(*this, AI, SrcAddrSpace);
548       if (PtrReplacer.collectUsers()) {
549         for (Instruction *Delete : ToDelete)
550           eraseInstFromFunction(*Delete);
551 
552         PtrReplacer.replacePointer(TheSrc);
553         ++NumGlobalCopies;
554       }
555     }
556   }
557 
558   // Finally, use the generic allocation site handler to aggressively remove
559   // unused allocas.
560   return visitAllocSite(AI);
561 }
562 
563 // Are we allowed to form an atomic load or store of this type?
564 static bool isSupportedAtomicType(Type *Ty) {
565   return Ty->isIntOrPtrTy() || Ty->isFloatingPointTy();
566 }
567 
568 /// Helper to combine a load to a new type.
569 ///
570 /// This just does the work of combining a load to a new type. It handles
571 /// metadata, etc., and returns the new instruction. The \c NewTy should be the
572 /// loaded *value* type. This will convert it to a pointer, cast the operand to
573 /// that pointer type, load it, etc.
574 ///
575 /// Note that this will create all of the instructions with whatever insert
576 /// point the \c InstCombinerImpl currently is using.
577 LoadInst *InstCombinerImpl::combineLoadToNewType(LoadInst &LI, Type *NewTy,
578                                                  const Twine &Suffix) {
579   assert((!LI.isAtomic() || isSupportedAtomicType(NewTy)) &&
580          "can't fold an atomic load to requested type");
581 
582   LoadInst *NewLoad =
583       Builder.CreateAlignedLoad(NewTy, LI.getPointerOperand(), LI.getAlign(),
584                                 LI.isVolatile(), LI.getName() + Suffix);
585   NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
586   copyMetadataForLoad(*NewLoad, LI);
587   return NewLoad;
588 }
589 
590 /// Combine a store to a new type.
591 ///
592 /// Returns the newly created store instruction.
593 static StoreInst *combineStoreToNewValue(InstCombinerImpl &IC, StoreInst &SI,
594                                          Value *V) {
595   assert((!SI.isAtomic() || isSupportedAtomicType(V->getType())) &&
596          "can't fold an atomic store of requested type");
597 
598   Value *Ptr = SI.getPointerOperand();
599   SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
600   SI.getAllMetadata(MD);
601 
602   StoreInst *NewStore =
603       IC.Builder.CreateAlignedStore(V, Ptr, SI.getAlign(), SI.isVolatile());
604   NewStore->setAtomic(SI.getOrdering(), SI.getSyncScopeID());
605   for (const auto &MDPair : MD) {
606     unsigned ID = MDPair.first;
607     MDNode *N = MDPair.second;
608     // Note, essentially every kind of metadata should be preserved here! This
609     // routine is supposed to clone a store instruction changing *only its
610     // type*. The only metadata it makes sense to drop is metadata which is
611     // invalidated when the pointer type changes. This should essentially
612     // never be the case in LLVM, but we explicitly switch over only known
613     // metadata to be conservatively correct. If you are adding metadata to
614     // LLVM which pertains to stores, you almost certainly want to add it
615     // here.
616     switch (ID) {
617     case LLVMContext::MD_dbg:
618     case LLVMContext::MD_DIAssignID:
619     case LLVMContext::MD_tbaa:
620     case LLVMContext::MD_prof:
621     case LLVMContext::MD_fpmath:
622     case LLVMContext::MD_tbaa_struct:
623     case LLVMContext::MD_alias_scope:
624     case LLVMContext::MD_noalias:
625     case LLVMContext::MD_nontemporal:
626     case LLVMContext::MD_mem_parallel_loop_access:
627     case LLVMContext::MD_access_group:
628       // All of these directly apply.
629       NewStore->setMetadata(ID, N);
630       break;
631     case LLVMContext::MD_invariant_load:
632     case LLVMContext::MD_nonnull:
633     case LLVMContext::MD_noundef:
634     case LLVMContext::MD_range:
635     case LLVMContext::MD_align:
636     case LLVMContext::MD_dereferenceable:
637     case LLVMContext::MD_dereferenceable_or_null:
638       // These don't apply for stores.
639       break;
640     }
641   }
642 
643   return NewStore;
644 }
645 
646 /// Combine loads to match the type of their uses' value after looking
647 /// through intervening bitcasts.
648 ///
649 /// The core idea here is that if the result of a load is used in an operation,
650 /// we should load the type most conducive to that operation. For example, when
651 /// loading an integer and converting that immediately to a pointer, we should
652 /// instead directly load a pointer.
653 ///
654 /// However, this routine must never change the width of a load or the number of
655 /// loads as that would introduce a semantic change. This combine is expected to
656 /// be a semantic no-op which just allows loads to more closely model the types
657 /// of their consuming operations.
658 ///
659 /// Currently, we also refuse to change the precise type used for an atomic load
660 /// or a volatile load. This is debatable, and might be reasonable to change
661 /// later. However, it is risky in case some backend or other part of LLVM is
662 /// relying on the exact type loaded to select appropriate atomic operations.
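///
/// For example (illustrative only), a load whose sole use is a same-width
/// no-op cast:
///
///   %i = load i32, ptr %p
///   %f = bitcast i32 %i to float
///
/// is rewritten to load the used type directly:
///
///   %f = load float, ptr %p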
663 static Instruction *combineLoadToOperationType(InstCombinerImpl &IC,
664                                                LoadInst &Load) {
665   // FIXME: We could probably with some care handle both volatile and ordered
666   // atomic loads here but it isn't clear that this is important.
667   if (!Load.isUnordered())
668     return nullptr;
669 
670   if (Load.use_empty())
671     return nullptr;
672 
673   // swifterror values can't be bitcasted.
674   if (Load.getPointerOperand()->isSwiftError())
675     return nullptr;
676 
677   // Fold away bit casts of the loaded value by loading the desired type.
678   // Note that we should not do this for pointer<->integer casts,
679   // because that would result in type punning.
680   if (Load.hasOneUse()) {
681     // Don't transform when the type is x86_amx; this keeps the pass that
682     // lowers the x86_amx type happy.
683     Type *LoadTy = Load.getType();
684     if (auto *BC = dyn_cast<BitCastInst>(Load.user_back())) {
685       assert(!LoadTy->isX86_AMXTy() && "Load from x86_amx* should not happen!");
686       if (BC->getType()->isX86_AMXTy())
687         return nullptr;
688     }
689 
690     if (auto *CastUser = dyn_cast<CastInst>(Load.user_back())) {
691       Type *DestTy = CastUser->getDestTy();
692       if (CastUser->isNoopCast(IC.getDataLayout()) &&
693           LoadTy->isPtrOrPtrVectorTy() == DestTy->isPtrOrPtrVectorTy() &&
694           (!Load.isAtomic() || isSupportedAtomicType(DestTy))) {
695         LoadInst *NewLoad = IC.combineLoadToNewType(Load, DestTy);
696         CastUser->replaceAllUsesWith(NewLoad);
697         IC.eraseInstFromFunction(*CastUser);
698         return &Load;
699       }
700     }
701   }
702 
703   // FIXME: We should also canonicalize loads of vectors when their elements are
704   // cast to other types.
705   return nullptr;
706 }
707 
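/// Unpack a load of a small aggregate into loads of its elements. As a rough
/// sketch (illustrative IR), a load of a padding-free pair
///
///   %agg = load {i32, i32}, ptr %p
///
/// becomes two element loads whose results are stitched back together with
/// insertvalue into a value of the original aggregate type.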
708 static Instruction *unpackLoadToAggregate(InstCombinerImpl &IC, LoadInst &LI) {
709   // FIXME: We could probably with some care handle both volatile and atomic
710   // stores here but it isn't clear that this is important.
711   if (!LI.isSimple())
712     return nullptr;
713 
714   Type *T = LI.getType();
715   if (!T->isAggregateType())
716     return nullptr;
717 
718   StringRef Name = LI.getName();
719 
720   if (auto *ST = dyn_cast<StructType>(T)) {
721     // If the struct only has one element, we unpack.
722     auto NumElements = ST->getNumElements();
723     if (NumElements == 1) {
724       LoadInst *NewLoad = IC.combineLoadToNewType(LI, ST->getTypeAtIndex(0U),
725                                                   ".unpack");
726       NewLoad->setAAMetadata(LI.getAAMetadata());
727       return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
728         PoisonValue::get(T), NewLoad, 0, Name));
729     }
730 
731     // We don't want to break loads with padding here as we'd lose
732     // the knowledge that padding exists for the rest of the pipeline.
733     const DataLayout &DL = IC.getDataLayout();
734     auto *SL = DL.getStructLayout(ST);
735 
736     if (SL->hasPadding())
737       return nullptr;
738 
739     const auto Align = LI.getAlign();
740     auto *Addr = LI.getPointerOperand();
741     auto *IdxType = DL.getIndexType(Addr->getType());
742 
743     Value *V = PoisonValue::get(T);
744     for (unsigned i = 0; i < NumElements; i++) {
745       auto *Ptr = IC.Builder.CreateInBoundsPtrAdd(
746           Addr, IC.Builder.CreateTypeSize(IdxType, SL->getElementOffset(i)),
747           Name + ".elt");
748       auto *L = IC.Builder.CreateAlignedLoad(
749           ST->getElementType(i), Ptr,
750           commonAlignment(Align, SL->getElementOffset(i).getKnownMinValue()),
751           Name + ".unpack");
752       // Propagate AA metadata. It'll still be valid on the narrowed load.
753       L->setAAMetadata(LI.getAAMetadata());
754       V = IC.Builder.CreateInsertValue(V, L, i);
755     }
756 
757     V->setName(Name);
758     return IC.replaceInstUsesWith(LI, V);
759   }
760 
761   if (auto *AT = dyn_cast<ArrayType>(T)) {
762     auto *ET = AT->getElementType();
763     auto NumElements = AT->getNumElements();
764     if (NumElements == 1) {
765       LoadInst *NewLoad = IC.combineLoadToNewType(LI, ET, ".unpack");
766       NewLoad->setAAMetadata(LI.getAAMetadata());
767       return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
768         PoisonValue::get(T), NewLoad, 0, Name));
769     }
770 
771     // Bail out if the array is too large. Ideally we would like to optimize
772     // arrays of arbitrary size but this has a terrible impact on compile time.
773     // The threshold here is chosen arbitrarily, maybe needs a little bit of
774     // The threshold here is chosen arbitrarily and may need a little bit of
775     if (NumElements > IC.MaxArraySizeForCombine)
776       return nullptr;
777 
778     const DataLayout &DL = IC.getDataLayout();
779     TypeSize EltSize = DL.getTypeAllocSize(ET);
780     const auto Align = LI.getAlign();
781 
782     auto *Addr = LI.getPointerOperand();
783     auto *IdxType = Type::getInt64Ty(T->getContext());
784     auto *Zero = ConstantInt::get(IdxType, 0);
785 
786     Value *V = PoisonValue::get(T);
787     TypeSize Offset = TypeSize::getZero();
788     for (uint64_t i = 0; i < NumElements; i++) {
789       Value *Indices[2] = {
790         Zero,
791         ConstantInt::get(IdxType, i),
792       };
793       auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices),
794                                                Name + ".elt");
795       auto EltAlign = commonAlignment(Align, Offset.getKnownMinValue());
796       auto *L = IC.Builder.CreateAlignedLoad(AT->getElementType(), Ptr,
797                                              EltAlign, Name + ".unpack");
798       L->setAAMetadata(LI.getAAMetadata());
799       V = IC.Builder.CreateInsertValue(V, L, i);
800       Offset += EltSize;
801     }
802 
803     V->setName(Name);
804     return IC.replaceInstUsesWith(LI, V);
805   }
806 
807   return nullptr;
808 }
809 
810 // If we can determine that all possible objects pointed to by the provided
811 // pointer value are not only dereferenceable but also definitively less than
812 // or equal to the provided maximum size, then return true (constant global
813 // values and allocas fall into this category). Otherwise, return false.
814 //
815 // FIXME: This should probably live in ValueTracking (or similar).
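//
// For example (illustrative, with a hypothetical global @g): given
//   @g = internal constant [2 x i64] zeroinitializer
// a query of isObjectSizeLessThanOrEq(@g, 16, DL) would return true, whereas
// any pointer that may reach an unknown or interposable object returns false.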
816 static bool isObjectSizeLessThanOrEq(Value *V, uint64_t MaxSize,
817                                      const DataLayout &DL) {
818   SmallPtrSet<Value *, 4> Visited;
819   SmallVector<Value *, 4> Worklist(1, V);
820 
821   do {
822     Value *P = Worklist.pop_back_val();
823     P = P->stripPointerCasts();
824 
825     if (!Visited.insert(P).second)
826       continue;
827 
828     if (SelectInst *SI = dyn_cast<SelectInst>(P)) {
829       Worklist.push_back(SI->getTrueValue());
830       Worklist.push_back(SI->getFalseValue());
831       continue;
832     }
833 
834     if (PHINode *PN = dyn_cast<PHINode>(P)) {
835       append_range(Worklist, PN->incoming_values());
836       continue;
837     }
838 
839     if (GlobalAlias *GA = dyn_cast<GlobalAlias>(P)) {
840       if (GA->isInterposable())
841         return false;
842       Worklist.push_back(GA->getAliasee());
843       continue;
844     }
845 
846     // If we know how big this object is, and it is less than MaxSize, continue
847     // searching. Otherwise, return false.
848     if (AllocaInst *AI = dyn_cast<AllocaInst>(P)) {
849       if (!AI->getAllocatedType()->isSized())
850         return false;
851 
852       ConstantInt *CS = dyn_cast<ConstantInt>(AI->getArraySize());
853       if (!CS)
854         return false;
855 
856       TypeSize TS = DL.getTypeAllocSize(AI->getAllocatedType());
857       if (TS.isScalable())
858         return false;
859       // Make sure that, even if the multiplication below would wrap as an
860       // Make sure that, even if the multiplication below would wrap as a
861       if ((CS->getValue().zext(128) * APInt(128, TS.getFixedValue()))
862               .ugt(MaxSize))
863         return false;
864       continue;
865     }
866 
867     if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
868       if (!GV->hasDefinitiveInitializer() || !GV->isConstant())
869         return false;
870 
871       uint64_t InitSize = DL.getTypeAllocSize(GV->getValueType());
872       if (InitSize > MaxSize)
873         return false;
874       continue;
875     }
876 
877     return false;
878   } while (!Worklist.empty());
879 
880   return true;
881 }
882 
883 // If we're indexing into an object of a known size, and the outer index is
884 // not a constant, but having any value but zero would lead to undefined
885 // behavior, replace it with zero.
886 //
887 // For example, if we have:
888 // @f.a = private unnamed_addr constant [1 x i32] [i32 12], align 4
889 // ...
890 // %arrayidx = getelementptr inbounds [1 x i32]* @f.a, i64 0, i64 %x
891 // ... = load i32* %arrayidx, align 4
892 // Then we know that we can replace %x in the GEP with i64 0.
893 //
894 // FIXME: We could fold any GEP index to zero that would cause UB if it were
895 // not zero. Currently, we only handle the first such index. Also, we could
896 // search through non-zero constant indices if we kept track of the
897 // offsets those indices implied.
898 static bool canReplaceGEPIdxWithZero(InstCombinerImpl &IC,
899                                      GetElementPtrInst *GEPI, Instruction *MemI,
900                                      unsigned &Idx) {
901   if (GEPI->getNumOperands() < 2)
902     return false;
903 
904   // Find the first non-zero index of a GEP. If all indices are zero, return
905   // one past the last index.
906   auto FirstNZIdx = [](const GetElementPtrInst *GEPI) {
907     unsigned I = 1;
908     for (unsigned IE = GEPI->getNumOperands(); I != IE; ++I) {
909       Value *V = GEPI->getOperand(I);
910       if (const ConstantInt *CI = dyn_cast<ConstantInt>(V))
911         if (CI->isZero())
912           continue;
913 
914       break;
915     }
916 
917     return I;
918   };
919 
920   // Skip through initial 'zero' indices, and find the corresponding pointer
921   // type. See if the next index is not a constant.
922   Idx = FirstNZIdx(GEPI);
923   if (Idx == GEPI->getNumOperands())
924     return false;
925   if (isa<Constant>(GEPI->getOperand(Idx)))
926     return false;
927 
928   SmallVector<Value *, 4> Ops(GEPI->idx_begin(), GEPI->idx_begin() + Idx);
929   Type *SourceElementType = GEPI->getSourceElementType();
930   // Size information about scalable vectors is not available, so we cannot
931   // deduce whether indexing at n is undefined behaviour or not. Bail out.
932   if (SourceElementType->isScalableTy())
933     return false;
934 
935   Type *AllocTy = GetElementPtrInst::getIndexedType(SourceElementType, Ops);
936   if (!AllocTy || !AllocTy->isSized())
937     return false;
938   const DataLayout &DL = IC.getDataLayout();
939   uint64_t TyAllocSize = DL.getTypeAllocSize(AllocTy).getFixedValue();
940 
941   // If there are more indices after the one we might replace with a zero, make
942   // sure they're all non-negative. If any of them are negative, the overall
943   // address being computed might be before the base address determined by the
944   // first non-zero index.
945   auto IsAllNonNegative = [&]() {
946     for (unsigned i = Idx+1, e = GEPI->getNumOperands(); i != e; ++i) {
947       KnownBits Known = IC.computeKnownBits(GEPI->getOperand(i), MemI);
948       if (Known.isNonNegative())
949         continue;
950       return false;
951     }
952 
953     return true;
954   };
955 
956   // FIXME: If the GEP is not inbounds, and there are extra indices after the
957   // one we'll replace, those could cause the address computation to wrap
958   // (rendering the IsAllNonNegative() check below insufficient). We can do
959   // better, ignoring zero indices (and other indices we can prove small
960   // enough not to wrap).
961   if (Idx+1 != GEPI->getNumOperands() && !GEPI->isInBounds())
962     return false;
963 
964   // Note that isObjectSizeLessThanOrEq will return true only if the pointer is
965   // also known to be dereferenceable.
966   return isObjectSizeLessThanOrEq(GEPI->getOperand(0), TyAllocSize, DL) &&
967          IsAllNonNegative();
968 }
969 
970 // If we're indexing into an object with a variable index for the memory
971 // access, but the object has only one element, we can assume that the index
972 // will always be zero. If we replace the GEP, return it.
973 static Instruction *replaceGEPIdxWithZero(InstCombinerImpl &IC, Value *Ptr,
974                                           Instruction &MemI) {
975   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr)) {
976     unsigned Idx;
977     if (canReplaceGEPIdxWithZero(IC, GEPI, &MemI, Idx)) {
978       Instruction *NewGEPI = GEPI->clone();
979       NewGEPI->setOperand(Idx,
980         ConstantInt::get(GEPI->getOperand(Idx)->getType(), 0));
981       IC.InsertNewInstBefore(NewGEPI, GEPI->getIterator());
982       return NewGEPI;
983     }
984   }
985 
986   return nullptr;
987 }
988 
989 static bool canSimplifyNullStoreOrGEP(StoreInst &SI) {
990   if (NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
991     return false;
992 
993   auto *Ptr = SI.getPointerOperand();
994   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Ptr))
995     Ptr = GEPI->getOperand(0);
996   return (isa<ConstantPointerNull>(Ptr) &&
997           !NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()));
998 }
999 
1000 static bool canSimplifyNullLoadOrGEP(LoadInst &LI, Value *Op) {
1001   if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
1002     const Value *GEPI0 = GEPI->getOperand(0);
1003     if (isa<ConstantPointerNull>(GEPI0) &&
1004         !NullPointerIsDefined(LI.getFunction(), GEPI->getPointerAddressSpace()))
1005       return true;
1006   }
1007   if (isa<UndefValue>(Op) ||
1008       (isa<ConstantPointerNull>(Op) &&
1009        !NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace())))
1010     return true;
1011   return false;
1012 }
1013 
1014 Value *InstCombinerImpl::simplifyNonNullOperand(Value *V,
1015                                                 bool HasDereferenceable,
1016                                                 unsigned Depth) {
1017   if (auto *Sel = dyn_cast<SelectInst>(V)) {
1018     if (isa<ConstantPointerNull>(Sel->getOperand(1)))
1019       return Sel->getOperand(2);
1020 
1021     if (isa<ConstantPointerNull>(Sel->getOperand(2)))
1022       return Sel->getOperand(1);
1023   }
1024 
1025   if (!V->hasOneUse())
1026     return nullptr;
1027 
1028   constexpr unsigned RecursionLimit = 3;
1029   if (Depth == RecursionLimit)
1030     return nullptr;
1031 
1032   if (auto *GEP = dyn_cast<GetElementPtrInst>(V)) {
1033     if (HasDereferenceable || GEP->isInBounds()) {
1034       if (auto *Res = simplifyNonNullOperand(GEP->getPointerOperand(),
1035                                              HasDereferenceable, Depth + 1)) {
1036         replaceOperand(*GEP, 0, Res);
1037         addToWorklist(GEP);
1038         return nullptr;
1039       }
1040     }
1041   }
1042 
1043   if (auto *PHI = dyn_cast<PHINode>(V)) {
1044     bool Changed = false;
1045     for (Use &U : PHI->incoming_values()) {
1046       // We set Depth to RecursionLimit to avoid expensive recursion.
1047       if (auto *Res = simplifyNonNullOperand(U.get(), HasDereferenceable,
1048                                              RecursionLimit)) {
1049         replaceUse(U, Res);
1050         Changed = true;
1051       }
1052     }
1053     if (Changed)
1054       addToWorklist(PHI);
1055     return nullptr;
1056   }
1057 
1058   return nullptr;
1059 }
1060 
1061 Instruction *InstCombinerImpl::visitLoadInst(LoadInst &LI) {
1062   Value *Op = LI.getOperand(0);
1063   if (Value *Res = simplifyLoadInst(&LI, Op, SQ.getWithInstruction(&LI)))
1064     return replaceInstUsesWith(LI, Res);
1065 
1066   // Try to canonicalize the loaded type.
1067   if (Instruction *Res = combineLoadToOperationType(*this, LI))
1068     return Res;
1069 
1070   // Replace GEP indices if possible.
1071   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Op, LI))
1072     return replaceOperand(LI, 0, NewGEPI);
1073 
1074   if (Instruction *Res = unpackLoadToAggregate(*this, LI))
1075     return Res;
1076 
1077   // Do really simple store-to-load forwarding and load CSE, to catch cases
1078   // where there are several consecutive memory accesses to the same location,
1079   // separated by a few arithmetic operations.
1080   bool IsLoadCSE = false;
1081   BatchAAResults BatchAA(*AA);
1082   if (Value *AvailableVal = FindAvailableLoadedValue(&LI, BatchAA, &IsLoadCSE)) {
1083     if (IsLoadCSE)
1084       combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI, false);
1085 
1086     return replaceInstUsesWith(
1087         LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
1088                                            LI.getName() + ".cast"));
1089   }
1090 
1091   // None of the following transforms are legal for volatile/ordered atomic
1092   // loads.  Most of them do apply for unordered atomics.
1093   if (!LI.isUnordered()) return nullptr;
1094 
1095   // load(gep null, ...) -> unreachable
1096   // load null/undef -> unreachable
1097   // TODO: Consider a target hook for valid address spaces for these xforms.
1098   if (canSimplifyNullLoadOrGEP(LI, Op)) {
1099     CreateNonTerminatorUnreachable(&LI);
1100     return replaceInstUsesWith(LI, PoisonValue::get(LI.getType()));
1101   }
1102 
1103   if (Op->hasOneUse()) {
1104     // Change select and PHI nodes to select values instead of addresses: this
1105     // helps alias analysis out a lot, allows many other simplifications, and
1106     // exposes redundancy in the code.
1107     //
1108     // Note that we cannot do the transformation unless we know that the
1109     // introduced loads cannot trap!  Something like this is valid as long as
1110     // the condition is always false: load (select bool %C, int* null, int* %G),
1111     // but it would not be valid if we transformed it to load from null
1112     // unconditionally.
1113     //
1114     if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
1115       // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
1116       Align Alignment = LI.getAlign();
1117       if (isSafeToLoadUnconditionally(SI->getOperand(1), LI.getType(),
1118                                       Alignment, DL, SI) &&
1119           isSafeToLoadUnconditionally(SI->getOperand(2), LI.getType(),
1120                                       Alignment, DL, SI)) {
1121         LoadInst *V1 =
1122             Builder.CreateLoad(LI.getType(), SI->getOperand(1),
1123                                SI->getOperand(1)->getName() + ".val");
1124         LoadInst *V2 =
1125             Builder.CreateLoad(LI.getType(), SI->getOperand(2),
1126                                SI->getOperand(2)->getName() + ".val");
1127         assert(LI.isUnordered() && "implied by above");
1128         V1->setAlignment(Alignment);
1129         V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1130         V2->setAlignment(Alignment);
1131         V2->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
1132         // It is safe to copy any metadata that does not trigger UB. Copy any
1133         // poison-generating metadata.
1134         V1->copyMetadata(LI, Metadata::PoisonGeneratingIDs);
1135         V2->copyMetadata(LI, Metadata::PoisonGeneratingIDs);
1136         return SelectInst::Create(SI->getCondition(), V1, V2);
1137       }
1138     }
1139   }
1140 
1141   if (!NullPointerIsDefined(LI.getFunction(), LI.getPointerAddressSpace()))
1142     if (Value *V = simplifyNonNullOperand(Op, /*HasDereferenceable=*/true))
1143       return replaceOperand(LI, 0, V);
1144 
1145   return nullptr;
1146 }
1147 
1148 /// Look for extractelement/insertvalue sequence that acts like a bitcast.
1149 ///
1150 /// \returns underlying value that was "cast", or nullptr otherwise.
1151 ///
1152 /// For example, if we have:
1153 ///
1154 ///     %E0 = extractelement <2 x double> %U, i32 0
1155 ///     %V0 = insertvalue [2 x double] undef, double %E0, 0
1156 ///     %E1 = extractelement <2 x double> %U, i32 1
1157 ///     %V1 = insertvalue [2 x double] %V0, double %E1, 1
1158 ///
1159 /// and the layout of a <2 x double> is isomorphic to a [2 x double],
1160 /// then %V1 can be safely approximated by a conceptual "bitcast" of %U.
1161 /// Note that %U may contain non-undef values where %V1 has undef.
1162 static Value *likeBitCastFromVector(InstCombinerImpl &IC, Value *V) {
1163   Value *U = nullptr;
1164   while (auto *IV = dyn_cast<InsertValueInst>(V)) {
1165     auto *E = dyn_cast<ExtractElementInst>(IV->getInsertedValueOperand());
1166     if (!E)
1167       return nullptr;
1168     auto *W = E->getVectorOperand();
1169     if (!U)
1170       U = W;
1171     else if (U != W)
1172       return nullptr;
1173     auto *CI = dyn_cast<ConstantInt>(E->getIndexOperand());
1174     if (!CI || IV->getNumIndices() != 1 || CI->getZExtValue() != *IV->idx_begin())
1175       return nullptr;
1176     V = IV->getAggregateOperand();
1177   }
1178   if (!match(V, m_Undef()) || !U)
1179     return nullptr;
1180 
1181   auto *UT = cast<VectorType>(U->getType());
1182   auto *VT = V->getType();
1183   // Check that types UT and VT are bitwise isomorphic.
1184   const auto &DL = IC.getDataLayout();
1185   if (DL.getTypeStoreSizeInBits(UT) != DL.getTypeStoreSizeInBits(VT)) {
1186     return nullptr;
1187   }
1188   if (auto *AT = dyn_cast<ArrayType>(VT)) {
1189     if (AT->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1190       return nullptr;
1191   } else {
1192     auto *ST = cast<StructType>(VT);
1193     if (ST->getNumElements() != cast<FixedVectorType>(UT)->getNumElements())
1194       return nullptr;
1195     for (const auto *EltT : ST->elements()) {
1196       if (EltT != UT->getElementType())
1197         return nullptr;
1198     }
1199   }
1200   return U;
1201 }
1202 
1203 /// Combine stores to match the type of value being stored.
1204 ///
1205 /// The core idea here is that the memory does not have any intrinsic type and,
1206 /// where we can, we should match the type of a store to the type of value being
1207 /// stored.
1208 ///
1209 /// However, this routine must never change the width of a store or the number of
1210 /// stores as that would introduce a semantic change. This combine is expected to
1211 /// be a semantic no-op which just allows stores to more closely model the types
1212 /// of their incoming values.
1213 ///
1214 /// Currently, we also refuse to change the precise type used for an atomic or
1215 /// volatile store. This is debatable, and might be reasonable to change later.
1216 /// However, it is risky in case some backend or other part of LLVM is relying
1217 /// on the exact type stored to select appropriate atomic operations.
1218 ///
1219 /// \returns true if the store was successfully combined away. This indicates
1220 /// the caller must erase the store instruction. We have to let the caller erase
1221 /// the store instruction as otherwise there is no way to signal whether it was
1222 /// combined or not: IC.EraseInstFromFunction returns a null pointer.
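///
/// As an illustrative sketch (not taken from a test), a store of a bitcast
/// value:
///
///   %f = bitcast i32 %x to float
///   store float %f, ptr %p
///
/// is combined into a store of the original value type:
///
///   store i32 %x, ptr %p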
1223 static bool combineStoreToValueType(InstCombinerImpl &IC, StoreInst &SI) {
1224   // FIXME: We could probably with some care handle both volatile and ordered
1225   // atomic stores here but it isn't clear that this is important.
1226   if (!SI.isUnordered())
1227     return false;
1228 
1229   // swifterror values can't be bitcasted.
1230   if (SI.getPointerOperand()->isSwiftError())
1231     return false;
1232 
1233   Value *V = SI.getValueOperand();
1234 
1235   // Fold away bit casts of the stored value by storing the original type.
1236   if (auto *BC = dyn_cast<BitCastInst>(V)) {
1237     assert(!BC->getType()->isX86_AMXTy() &&
1238            "store to x86_amx* should not happen!");
1239     V = BC->getOperand(0);
1240     // Don't transform when the type is x86_amx; this keeps the pass that
1241     // lowers the x86_amx type happy.
1242     if (V->getType()->isX86_AMXTy())
1243       return false;
1244     if (!SI.isAtomic() || isSupportedAtomicType(V->getType())) {
1245       combineStoreToNewValue(IC, SI, V);
1246       return true;
1247     }
1248   }
1249 
1250   if (Value *U = likeBitCastFromVector(IC, V))
1251     if (!SI.isAtomic() || isSupportedAtomicType(U->getType())) {
1252       combineStoreToNewValue(IC, SI, U);
1253       return true;
1254     }
1255 
1256   // FIXME: We should also canonicalize stores of vectors when their elements
1257   // are cast to other types.
1258   return false;
1259 }
1260 
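/// Split a store of a small aggregate into stores of its elements, mirroring
/// unpackLoadToAggregate above. As a rough sketch, storing a padding-free
/// {i32, i32} becomes two extractvalue instructions feeding two i32 stores.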
1261 static bool unpackStoreToAggregate(InstCombinerImpl &IC, StoreInst &SI) {
1262   // FIXME: We could probably with some care handle both volatile and atomic
1263   // stores here but it isn't clear that this is important.
1264   if (!SI.isSimple())
1265     return false;
1266 
1267   Value *V = SI.getValueOperand();
1268   Type *T = V->getType();
1269 
1270   if (!T->isAggregateType())
1271     return false;
1272 
1273   if (auto *ST = dyn_cast<StructType>(T)) {
1274     // If the struct only has one element, we unpack.
1275     unsigned Count = ST->getNumElements();
1276     if (Count == 1) {
1277       V = IC.Builder.CreateExtractValue(V, 0);
1278       combineStoreToNewValue(IC, SI, V);
1279       return true;
1280     }
1281 
1282     // We don't want to break stores with padding here as we'd lose
1283     // the knowledge that padding exists for the rest of the pipeline.
1284     const DataLayout &DL = IC.getDataLayout();
1285     auto *SL = DL.getStructLayout(ST);
1286 
1287     if (SL->hasPadding())
1288       return false;
1289 
1290     const auto Align = SI.getAlign();
1291 
1292     SmallString<16> EltName = V->getName();
1293     EltName += ".elt";
1294     auto *Addr = SI.getPointerOperand();
1295     SmallString<16> AddrName = Addr->getName();
1296     AddrName += ".repack";
1297 
1298     auto *IdxType = DL.getIndexType(Addr->getType());
1299     for (unsigned i = 0; i < Count; i++) {
1300       auto *Ptr = IC.Builder.CreateInBoundsPtrAdd(
1301           Addr, IC.Builder.CreateTypeSize(IdxType, SL->getElementOffset(i)),
1302           AddrName);
1303       auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1304       auto EltAlign =
1305           commonAlignment(Align, SL->getElementOffset(i).getKnownMinValue());
1306     Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1307       NS->setAAMetadata(SI.getAAMetadata());
1308     }
1309 
1310     return true;
1311   }
1312 
1313   if (auto *AT = dyn_cast<ArrayType>(T)) {
1314     // If the array has only one element, we unpack it.
1315     auto NumElements = AT->getNumElements();
1316     if (NumElements == 1) {
1317       V = IC.Builder.CreateExtractValue(V, 0);
1318       combineStoreToNewValue(IC, SI, V);
1319       return true;
1320     }
1321 
1322     // Bail out if the array is too large. Ideally we would like to optimize
1323     // arrays of arbitrary size, but this has a terrible impact on compile time.
1324     // The threshold here is chosen arbitrarily and may need tuning.
1326     if (NumElements > IC.MaxArraySizeForCombine)
1327       return false;
1328 
1329     const DataLayout &DL = IC.getDataLayout();
1330     TypeSize EltSize = DL.getTypeAllocSize(AT->getElementType());
1331     const auto Align = SI.getAlign();
1332 
1333     SmallString<16> EltName = V->getName();
1334     EltName += ".elt";
1335     auto *Addr = SI.getPointerOperand();
1336     SmallString<16> AddrName = Addr->getName();
1337     AddrName += ".repack";
1338 
1339     auto *IdxType = Type::getInt64Ty(T->getContext());
1340     auto *Zero = ConstantInt::get(IdxType, 0);
1341 
1342     TypeSize Offset = TypeSize::getZero();
1343     for (uint64_t i = 0; i < NumElements; i++) {
1344       Value *Indices[2] = {
1345         Zero,
1346         ConstantInt::get(IdxType, i),
1347       };
1348       auto *Ptr =
1349           IC.Builder.CreateInBoundsGEP(AT, Addr, ArrayRef(Indices), AddrName);
1350       auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
1351       auto EltAlign = commonAlignment(Align, Offset.getKnownMinValue());
1352       Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
1353       NS->setAAMetadata(SI.getAAMetadata());
1354       Offset += EltSize;
1355     }
1356 
1357     return true;
1358   }
1359 
1360   return false;
1361 }
1362 
1363 /// equivalentAddressValues - Test if A and B will obviously have the same
1364 /// value. This includes recognizing that %t0 and %t1 will have the same
1365 /// value in code like this:
1366 ///   %t0 = getelementptr \@a, 0, 3
1367 ///   store i32 0, i32* %t0
1368 ///   %t1 = getelementptr \@a, 0, 3
1369 ///   %t2 = load i32* %t1
1370 ///
1371 static bool equivalentAddressValues(Value *A, Value *B) {
1372   // Test if the values are trivially equivalent.
1373   if (A == B) return true;
1374 
1375   // Test if the values come from identical arithmetic instructions.
1376   // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
1377   // it's only used to compare two uses within the same basic block, which
1378   // means that they'll always either have the same value or one of them
1379   // will have an undefined value.
1380   if (isa<BinaryOperator>(A) ||
1381       isa<CastInst>(A) ||
1382       isa<PHINode>(A) ||
1383       isa<GetElementPtrInst>(A))
1384     if (Instruction *BI = dyn_cast<Instruction>(B))
1385       if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
1386         return true;
1387 
1388   // Otherwise they may not be equivalent.
1389   return false;
1390 }
1391 
1392 Instruction *InstCombinerImpl::visitStoreInst(StoreInst &SI) {
1393   Value *Val = SI.getOperand(0);
1394   Value *Ptr = SI.getOperand(1);
1395 
1396   // Try to canonicalize the stored type.
1397   if (combineStoreToValueType(*this, SI))
1398     return eraseInstFromFunction(SI);
1399 
1400   // Try to break up stores of aggregates into per-element stores.
1401   if (unpackStoreToAggregate(*this, SI))
1402     return eraseInstFromFunction(SI);
1403 
1404   // Replace GEP indices if possible.
1405   if (Instruction *NewGEPI = replaceGEPIdxWithZero(*this, Ptr, SI))
1406     return replaceOperand(SI, 1, NewGEPI);
1407 
1408   // Don't hack volatile/ordered stores.
1409   // FIXME: Some bits are legal for ordered atomic stores; needs refactoring.
1410   if (!SI.isUnordered()) return nullptr;
1411 
1412   // If the RHS is an alloca with a single use, zapify the store, making the
1413   // alloca dead.
1414   if (Ptr->hasOneUse()) {
1415     if (isa<AllocaInst>(Ptr))
1416       return eraseInstFromFunction(SI);
1417     if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
1418       if (isa<AllocaInst>(GEP->getOperand(0))) {
1419         if (GEP->getOperand(0)->hasOneUse())
1420           return eraseInstFromFunction(SI);
1421       }
1422     }
1423   }
1424 
1425   // If we have a store to a location which is known constant, we can conclude
1426   // that the store must be storing the constant value (else the memory
1427   // wouldn't be constant), and this must be a noop.
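  // For instance (hypothetical IR), a store into a constant global:
  //   @g = constant i32 42
  //   store i32 42, ptr @g
  // cannot change observable memory, so it can be erased.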
1428   if (!isModSet(AA->getModRefInfoMask(Ptr)))
1429     return eraseInstFromFunction(SI);
1430 
1431   // Do really simple DSE, to catch cases where there are several consecutive
1432   // stores to the same location, separated by a few arithmetic operations. This
1433   // situation often occurs with bitfield accesses.
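  // For example (hypothetical IR), the first store below is dead once we reach
  // the second one a few instructions later:
  //   store i32 %a, ptr %p
  //   %b = or i32 %a, 16
  //   store i32 %b, ptr %p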
1434   BasicBlock::iterator BBI(SI);
1435   for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
1436        --ScanInsts) {
1437     --BBI;
1438     // Don't count debug info directives, lest they affect codegen,
1439     // and skip pointer-to-pointer bitcasts, which are NOPs.
1440     if (BBI->isDebugOrPseudoInst()) {
1441       ScanInsts++;
1442       continue;
1443     }
1444 
1445     if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
1446       // Is the previous store unordered, to the same location, and of the same type?
1447       if (PrevSI->isUnordered() &&
1448           equivalentAddressValues(PrevSI->getOperand(1), SI.getOperand(1)) &&
1449           PrevSI->getValueOperand()->getType() ==
1450               SI.getValueOperand()->getType()) {
1451         ++NumDeadStore;
1452         // Manually add back the original store to the worklist now, so it will
1453         // be processed after the operands of the removed store, as this may
1454         // expose additional DSE opportunities.
1455         Worklist.push(&SI);
1456         eraseInstFromFunction(*PrevSI);
1457         return nullptr;
1458       }
1459       break;
1460     }
1461 
1462     // If this is a load, we have to stop.  However, if the load reads from the
1463     // same pointer we're storing to and produces the value we're storing,
1464     // then *this* store is dead (X = load P; store X -> P).
1465     if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
1466       if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr)) {
1467         assert(SI.isUnordered() && "can't eliminate ordering operation");
1468         return eraseInstFromFunction(SI);
1469       }
1470 
1471       // Otherwise, this is a load from some other location.  Stores before it
1472       // may not be dead.
1473       break;
1474     }
1475 
1476     // Don't skip over loads, throws or things that can modify memory.
1477     if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory() || BBI->mayThrow())
1478       break;
1479   }
1480 
1481   // store X, null    -> turns into 'unreachable' in SimplifyCFG
1482   // store X, GEP(null, Y) -> turns into 'unreachable' in SimplifyCFG
1483   if (canSimplifyNullStoreOrGEP(SI)) {
1484     if (!isa<PoisonValue>(Val))
1485       return replaceOperand(SI, 0, PoisonValue::get(Val->getType()));
1486     return nullptr;  // Do not modify these!
1487   }
1488 
1489   // This is a non-terminator unreachable marker. Don't remove it.
1490   if (isa<UndefValue>(Ptr)) {
1491     // Remove guaranteed-to-transfer instructions before the marker.
1492     if (removeInstructionsBeforeUnreachable(SI))
1493       return &SI;
1494 
1495     // Remove all instructions after the marker and handle dead blocks this
1496     // implies.
1497     SmallVector<BasicBlock *> Worklist;
1498     handleUnreachableFrom(SI.getNextNode(), Worklist);
1499     handlePotentiallyDeadBlocks(Worklist);
1500     return nullptr;
1501   }
1502 
1503   // store undef, Ptr -> noop
1504   // FIXME: This is technically incorrect because it might overwrite a poison
1505   // value. Change to PoisonValue once #52930 is resolved.
1506   if (isa<UndefValue>(Val))
1507     return eraseInstFromFunction(SI);
1508 
1509   if (!NullPointerIsDefined(SI.getFunction(), SI.getPointerAddressSpace()))
1510     if (Value *V = simplifyNonNullOperand(Ptr, /*HasDereferenceable=*/true))
1511       return replaceOperand(SI, 1, V);
1512 
1513   return nullptr;
1514 }
1515 
1516 /// Try to transform:
1517 ///   if () { *P = v1; } else { *P = v2; }
1518 /// or:
1519 ///   *P = v1; if () { *P = v2; }
1520 /// into a phi node with a store in the successor.
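/// After the transform (illustrative IR), the two conditional stores collapse
/// into a single store of a merged value in the common successor:
///   %storemerge = phi i32 [ %v1, %then ], [ %v2, %else ]
///   store i32 %storemerge, ptr %P
/// The block labels %then and %else are hypothetical names for this sketch.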
1521 bool InstCombinerImpl::mergeStoreIntoSuccessor(StoreInst &SI) {
1522   if (!SI.isUnordered())
1523     return false; // This code has not been audited for the volatile/ordered case.
1524 
1525   // Check if the successor block has exactly 2 incoming edges.
1526   BasicBlock *StoreBB = SI.getParent();
1527   BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);
1528   if (!DestBB->hasNPredecessors(2))
1529     return false;
1530 
1531   // Capture the other block (the block that doesn't contain our store).
1532   pred_iterator PredIter = pred_begin(DestBB);
1533   if (*PredIter == StoreBB)
1534     ++PredIter;
1535   BasicBlock *OtherBB = *PredIter;
1536 
1537   // Bail out if the relevant blocks aren't all distinct. This can happen,
1538   // for example, if SI is in an infinite loop.
1539   if (StoreBB == DestBB || OtherBB == DestBB)
1540     return false;
1541 
1542   // Verify that the other block ends in a branch and is not otherwise empty.
1543   BasicBlock::iterator BBI(OtherBB->getTerminator());
1544   BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
1545   if (!OtherBr || BBI == OtherBB->begin())
1546     return false;
1547 
1548   auto OtherStoreIsMergeable = [&](StoreInst *OtherStore) -> bool {
1549     if (!OtherStore ||
1550         OtherStore->getPointerOperand() != SI.getPointerOperand())
1551       return false;
1552 
1553     auto *SIVTy = SI.getValueOperand()->getType();
1554     auto *OSVTy = OtherStore->getValueOperand()->getType();
1555     return CastInst::isBitOrNoopPointerCastable(OSVTy, SIVTy, DL) &&
1556            SI.hasSameSpecialState(OtherStore);
1557   };
1558 
1559   // If the other block ends in an unconditional branch, check for the 'if then
1560   // else' case. There is an instruction before the branch.
1561   StoreInst *OtherStore = nullptr;
1562   if (OtherBr->isUnconditional()) {
1563     --BBI;
1564     // Skip over debugging info and pseudo probes.
1565     while (BBI->isDebugOrPseudoInst()) {
1566       if (BBI == OtherBB->begin())
1567         return false;
1568       --BBI;
1569     }
1570     // If this isn't a store, isn't a store to the same location, or is not the
1571     // right kind of store, bail out.
1572     OtherStore = dyn_cast<StoreInst>(BBI);
1573     if (!OtherStoreIsMergeable(OtherStore))
1574       return false;
1575   } else {
1576     // Otherwise, the other block ended with a conditional branch. If one of the
1577     // destinations is StoreBB, then we have the if/then case.
1578     if (OtherBr->getSuccessor(0) != StoreBB &&
1579         OtherBr->getSuccessor(1) != StoreBB)
1580       return false;
1581 
1582     // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
1583     // if/then triangle. See if there is a store to the same ptr as SI that
1584     // lives in OtherBB.
1585     for (;; --BBI) {
1586       // Check to see if we find the matching store.
1587       OtherStore = dyn_cast<StoreInst>(BBI);
1588       if (OtherStoreIsMergeable(OtherStore))
1589         break;
1590 
1591       // If we find something that may be using or overwriting the stored
1592       // value, or if we run out of instructions, we can't do the transform.
1593       if (BBI->mayReadFromMemory() || BBI->mayThrow() ||
1594           BBI->mayWriteToMemory() || BBI == OtherBB->begin())
1595         return false;
1596     }
1597 
1598     // In order to eliminate the store in OtherBB, we have to make sure nothing
1599     // reads or overwrites the stored value in StoreBB.
1600     for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
1601       // FIXME: This should really be AA driven.
1602       if (I->mayReadFromMemory() || I->mayThrow() || I->mayWriteToMemory())
1603         return false;
1604     }
1605   }
1606 
1607   // Insert a PHI node now if we need it.
1608   Value *MergedVal = OtherStore->getValueOperand();
1609   // The debug locations of the original instructions might differ. Merge them.
1610   DebugLoc MergedLoc =
1611       DebugLoc::getMergedLocation(SI.getDebugLoc(), OtherStore->getDebugLoc());
1612   if (MergedVal != SI.getValueOperand()) {
1613     PHINode *PN =
1614         PHINode::Create(SI.getValueOperand()->getType(), 2, "storemerge");
1615     PN->addIncoming(SI.getValueOperand(), SI.getParent());
1616     Builder.SetInsertPoint(OtherStore);
1617     PN->addIncoming(Builder.CreateBitOrPointerCast(MergedVal, PN->getType()),
1618                     OtherBB);
1619     MergedVal = InsertNewInstBefore(PN, DestBB->begin());
1620     PN->setDebugLoc(MergedLoc);
1621   }
1622 
1623   // Advance to a place where it is safe to insert the new store and insert it.
1624   BBI = DestBB->getFirstInsertionPt();
1625   StoreInst *NewSI =
1626       new StoreInst(MergedVal, SI.getOperand(1), SI.isVolatile(), SI.getAlign(),
1627                     SI.getOrdering(), SI.getSyncScopeID());
1628   InsertNewInstBefore(NewSI, BBI);
1629   NewSI->setDebugLoc(MergedLoc);
1630   NewSI->mergeDIAssignID({&SI, OtherStore});
1631 
1632   // If the two stores had AA tags, merge them.
1633   AAMDNodes AATags = SI.getAAMetadata();
1634   if (AATags)
1635     NewSI->setAAMetadata(AATags.merge(OtherStore->getAAMetadata()));
1636 
1637   // Nuke the old stores.
1638   eraseInstFromFunction(SI);
1639   eraseInstFromFunction(*OtherStore);
1640   return true;
1641 }
1642