1 //===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs various transformations related to eliminating memcpy
10 // calls, or transforming sets of stores into memset's.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/None.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/ADT/iterator_range.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/GlobalsModRef.h"
24 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
25 #include "llvm/Analysis/MemoryLocation.h"
26 #include "llvm/Analysis/TargetLibraryInfo.h"
27 #include "llvm/Transforms/Utils/Local.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/BasicBlock.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/DerivedTypes.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GetElementPtrTypeIterator.h"
38 #include "llvm/IR/GlobalVariable.h"
39 #include "llvm/IR/IRBuilder.h"
40 #include "llvm/IR/InstrTypes.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/IntrinsicInst.h"
44 #include "llvm/IR/Intrinsics.h"
45 #include "llvm/IR/LLVMContext.h"
46 #include "llvm/IR/Module.h"
47 #include "llvm/IR/Operator.h"
48 #include "llvm/IR/PassManager.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/MathExtras.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Transforms/Scalar.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstdint>
61 #include <utility>
62 
63 using namespace llvm;
64 
65 #define DEBUG_TYPE "memcpyopt"
66 
67 STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
68 STATISTIC(NumMemSetInfer, "Number of memsets inferred");
69 STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
70 STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");
71 
72 static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
73                                   bool &VariableIdxFound,
74                                   const DataLayout &DL) {
75   // Skip over the first indices.
76   gep_type_iterator GTI = gep_type_begin(GEP);
77   for (unsigned i = 1; i != Idx; ++i, ++GTI)
78     /*skip along*/;
79 
80   // Compute the offset implied by the rest of the indices.
81   int64_t Offset = 0;
82   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
83     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
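    // A non-constant index means the offset cannot be computed statically;
    // record that fact for the caller (the returned value is then meaningless).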
84     if (!OpC)
85       return VariableIdxFound = true;
86     if (OpC->isZero()) continue;  // No offset.
87 
88     // Handle struct indices, which add their field offset to the pointer.
89     if (StructType *STy = GTI.getStructTypeOrNull()) {
90       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
91       continue;
92     }
93 
94     // Otherwise, we have a sequential type like an array or vector.  Multiply
95     // the index by the ElementSize.
96     uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
97     Offset += Size*OpC->getSExtValue();
98   }
99 
100   return Offset;
101 }
102 
103 /// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
104 /// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
105 /// might be &A[40]. In this case offset would be -8.
106 static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
107                             const DataLayout &DL) {
108   Ptr1 = Ptr1->stripPointerCasts();
109   Ptr2 = Ptr2->stripPointerCasts();
110 
111   // Handle the trivial case first.
112   if (Ptr1 == Ptr2) {
113     Offset = 0;
114     return true;
115   }
116 
117   GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
118   GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
119 
120   bool VariableIdxFound = false;
121 
122   // If one pointer is a GEP and the other isn't, then see if the GEP is a
123   // constant offset from the base, as in "P" and "gep P, 1".
124   if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
125     Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
126     return !VariableIdxFound;
127   }
128 
129   if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
130     Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
131     return !VariableIdxFound;
132   }
133 
134   // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
135   // base.  After that base, they may have some number of common (and
136   // potentially variable) indices.  After those, each may add some constant
137   // offset, which determines their offset from each other.  We handle no other
138   // cases.
139   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
140     return false;
141 
142   // Skip any common indices and track the GEP types.
143   unsigned Idx = 1;
144   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
145     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
146       break;
147 
148   int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
149   int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
150   if (VariableIdxFound) return false;
151 
152   Offset = Offset2-Offset1;
153   return true;
154 }
155 
156 namespace {
157 
158 /// Represents a range of memset'd bytes with the ByteVal value.
159 /// This allows us to analyze stores like:
160 ///   store 0 -> P+1
161 ///   store 0 -> P+0
162 ///   store 0 -> P+3
163 ///   store 0 -> P+2
164 /// which sometimes happens with stores to arrays of structs etc.  When we see
165 /// the first store, we make a range [1, 2).  The second store extends the range
166 /// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
167 /// two ranges into [0, 3) which is memset'able.
168 struct MemsetRange {
169   // Start/End - A half-open range that describes the span this range covers.
170   // The range is closed at the start and open at the end: [Start, End).
171   int64_t Start, End;
172 
173   /// StartPtr - The getelementptr instruction that points to the start of the
174   /// range.
175   Value *StartPtr;
176 
177   /// Alignment - The known alignment of the first store.
178   unsigned Alignment;
179 
180   /// TheStores - The actual stores that make up this range.
181   SmallVector<Instruction*, 16> TheStores;
182 
183   bool isProfitableToUseMemset(const DataLayout &DL) const;
184 };
185 
186 } // end anonymous namespace
187 
188 bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
189   // If we found at least 4 stores to merge, or at least 16 bytes, use memset.
190   if (TheStores.size() >= 4 || End-Start >= 16) return true;
191 
192   // If there is nothing to merge, don't do anything.
193   if (TheStores.size() < 2) return false;
194 
195   // If any of the stores are a memset, then it is always good to extend the
196   // memset.
197   for (Instruction *SI : TheStores)
198     if (!isa<StoreInst>(SI))
199       return true;
200 
201   // Assume that the code generator is capable of merging pairs of stores
202   // together if it wants to.
203   if (TheStores.size() == 2) return false;
204 
205   // If we have fewer than 8 stores, it can still be worthwhile to do this.
206   // For example, merging 4 i8 stores into an i32 store is useful almost always.
207   // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
208   // memset will be split into 2 32-bit stores anyway) and doing so can
209   // pessimize the llvm optimizer.
210   //
211   // Since we don't have perfect knowledge here, make some assumptions: assume
212   // the maximum GPR width is the same size as the largest legal integer
213   // size. If so, check to see whether we will end up actually reducing the
214   // number of stores used.
215   unsigned Bytes = unsigned(End-Start);
216   unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
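  // Guard against targets that report no legal integer types at all, which
  // would otherwise make the divisions below divide by zero.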
217   if (MaxIntSize == 0)
218     MaxIntSize = 1;
219   unsigned NumPointerStores = Bytes / MaxIntSize;
220 
221   // Assume the remaining bytes, if any, are done a byte at a time.
222   unsigned NumByteStores = Bytes % MaxIntSize;
223 
224   // If we will reduce the # stores (according to this heuristic), do the
225   // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
226   // etc.
227   return TheStores.size() > NumPointerStores+NumByteStores;
228 }
229 
230 namespace {
231 
232 class MemsetRanges {
233   using range_iterator = SmallVectorImpl<MemsetRange>::iterator;
234 
235   /// A sorted list of the memset ranges.
236   SmallVector<MemsetRange, 8> Ranges;
237 
238   const DataLayout &DL;
239 
240 public:
241   MemsetRanges(const DataLayout &DL) : DL(DL) {}
242 
243   using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;
244 
245   const_iterator begin() const { return Ranges.begin(); }
246   const_iterator end() const { return Ranges.end(); }
247   bool empty() const { return Ranges.empty(); }
248 
249   void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
250     if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
251       addStore(OffsetFromFirst, SI);
252     else
253       addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
254   }
255 
256   void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
257     int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
258 
259     addRange(OffsetFromFirst, StoreSize,
260              SI->getPointerOperand(), SI->getAlignment(), SI);
261   }
262 
263   void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
264     int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
265     addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
266   }
267 
268   void addRange(int64_t Start, int64_t Size, Value *Ptr,
269                 unsigned Alignment, Instruction *Inst);
270 };
271 
272 } // end anonymous namespace
273 
274 /// Add a new store to the MemsetRanges data structure.  This adds a
275 /// new range for the specified store at the specified offset, merging into
276 /// existing ranges as appropriate.
277 void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
278                             unsigned Alignment, Instruction *Inst) {
279   int64_t End = Start+Size;
280 
281   range_iterator I = partition_point(
282       Ranges, [=](const MemsetRange &O) { return O.End < Start; });
283 
284   // We now know that I == E, in which case we didn't find anything to merge
285   // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
286   // to insert a new range.  Handle this now.
287   if (I == Ranges.end() || End < I->Start) {
288     MemsetRange &R = *Ranges.insert(I, MemsetRange());
289     R.Start        = Start;
290     R.End          = End;
291     R.StartPtr     = Ptr;
292     R.Alignment    = Alignment;
293     R.TheStores.push_back(Inst);
294     return;
295   }
296 
297   // This store overlaps with I, add it.
298   I->TheStores.push_back(Inst);
299 
300   // At this point, we may have an interval that completely contains our store.
301   // If so, just add it to the interval and return.
302   if (I->Start <= Start && I->End >= End)
303     return;
304 
305   // Now we know that Start <= I->End and End >= I->Start so the range overlaps
306   // but is not entirely contained within the range.
307 
308   // See if this store extends the start of the range.  In this case, it couldn't
309   // possibly cause it to join the prior range, because otherwise we would have
310   // stopped on *it*.
311   if (Start < I->Start) {
312     I->Start = Start;
313     I->StartPtr = Ptr;
314     I->Alignment = Alignment;
315   }
316 
317   // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
318   // is in or right at the end of I), and that End >= I->Start.  Extend I out to
319   // End.
320   if (End > I->End) {
321     I->End = End;
322     range_iterator NextI = I;
323     while (++NextI != Ranges.end() && End >= NextI->Start) {
324       // Merge the range in.
325       I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
326       if (NextI->End > I->End)
327         I->End = NextI->End;
328       Ranges.erase(NextI);
329       NextI = I;
330     }
331   }
332 }
333 
334 //===----------------------------------------------------------------------===//
335 //                         MemCpyOptLegacyPass Pass
336 //===----------------------------------------------------------------------===//
337 
338 namespace {
339 
340 class MemCpyOptLegacyPass : public FunctionPass {
341   MemCpyOptPass Impl;
342 
343 public:
344   static char ID; // Pass identification, replacement for typeid
345 
346   MemCpyOptLegacyPass() : FunctionPass(ID) {
347     initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
348   }
349 
350   bool runOnFunction(Function &F) override;
351 
352 private:
353   // This transformation requires dominator info.
354   void getAnalysisUsage(AnalysisUsage &AU) const override {
355     AU.setPreservesCFG();
356     AU.addRequired<AssumptionCacheTracker>();
357     AU.addRequired<DominatorTreeWrapperPass>();
358     AU.addRequired<MemoryDependenceWrapperPass>();
359     AU.addRequired<AAResultsWrapperPass>();
360     AU.addRequired<TargetLibraryInfoWrapperPass>();
361     AU.addPreserved<GlobalsAAWrapperPass>();
362     AU.addPreserved<MemoryDependenceWrapperPass>();
363   }
364 };
365 
366 } // end anonymous namespace
367 
368 char MemCpyOptLegacyPass::ID = 0;
369 
370 /// The public interface to this file...
371 FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }
372 
373 INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
374                       false, false)
375 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
376 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
377 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
378 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
379 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
380 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
381 INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
382                     false, false)
383 
384 /// When scanning forward over instructions, we look for some other patterns to
385 /// fold away. In particular, this looks for stores to neighboring locations of
386 /// memory. If it sees enough consecutive ones, it attempts to merge them
387 /// together into a memcpy/memset.
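///
/// For example (a sketch), a run of adjacent byte stores such as:
/// \code
///   A[0] = 0; A[1] = 0; A[2] = 0; A[3] = 0;
/// \endcode
/// can be collapsed into a single memset(A, 0, 4), provided the heuristic in
/// MemsetRange::isProfitableToUseMemset considers it profitable.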
388 Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
389                                                  Value *StartPtr,
390                                                  Value *ByteVal) {
391   const DataLayout &DL = StartInst->getModule()->getDataLayout();
392 
393   // Okay, so we now have a single store of a splattable byte value.  Scan to
394   // find all subsequent stores of the same value at constant offsets from the
395   // same pointer.  Join these together into ranges, so we can decide whether
396   // contiguous blocks are stored.
397   MemsetRanges Ranges(DL);
398 
399   BasicBlock::iterator BI(StartInst);
400   for (++BI; !BI->isTerminator(); ++BI) {
401     if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
402       // If the instruction is readnone, ignore it, otherwise bail out.  We
403       // don't even allow readonly here because we don't want something like:
404       // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
405       if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
406         break;
407       continue;
408     }
409 
410     if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
411       // If this is a store, see if we can merge it in.
412       if (!NextStore->isSimple()) break;
413 
414       // Check to see if this stored value is of the same byte-splattable value.
415       Value *StoredByte = isBytewiseValue(NextStore->getOperand(0), DL);
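      // An undef ByteVal (e.g. from a store of undef) can be refined to the
      // first concrete splat byte we encounter.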
416       if (isa<UndefValue>(ByteVal) && StoredByte)
417         ByteVal = StoredByte;
418       if (ByteVal != StoredByte)
419         break;
420 
421       // Check to see if this store is to a constant offset from the start ptr.
422       int64_t Offset;
423       if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
424                            DL))
425         break;
426 
427       Ranges.addStore(Offset, NextStore);
428     } else {
429       MemSetInst *MSI = cast<MemSetInst>(BI);
430 
431       if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
432           !isa<ConstantInt>(MSI->getLength()))
433         break;
434 
435       // Check to see if this store is to a constant offset from the start ptr.
436       int64_t Offset;
437       if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
438         break;
439 
440       Ranges.addMemSet(Offset, MSI);
441     }
442   }
443 
444   // If we have no ranges, then we just had a single store with nothing that
445   // could be merged in.  This is a very common case of course.
446   if (Ranges.empty())
447     return nullptr;
448 
449   // If we had at least one store that could be merged in, add the starting
450   // store as well.  As a small compile-time optimization, we avoid doing this
451   // unless there is something interesting to merge with.
452   Ranges.addInst(0, StartInst);
453 
454   // If we create any memsets, we put them right before the first instruction
455   // that isn't part of the memset block.  This ensures that the memset is
456   // dominated by any addressing instruction needed by the start of the block.
457   IRBuilder<> Builder(&*BI);
458 
459   // Now that we have full information about ranges, loop over the ranges and
460   // emit memset's for anything big enough to be worthwhile.
461   Instruction *AMemSet = nullptr;
462   for (const MemsetRange &Range : Ranges) {
463     if (Range.TheStores.size() == 1) continue;
464 
465     // If it is profitable to lower this range to memset, do so now.
466     if (!Range.isProfitableToUseMemset(DL))
467       continue;
468 
469     // Otherwise, we do want to transform this!  Create a new memset.
470     // Get the starting pointer of the block.
471     StartPtr = Range.StartPtr;
472 
473     // Determine alignment
474     unsigned Alignment = Range.Alignment;
475     if (Alignment == 0) {
476       Type *EltType =
477         cast<PointerType>(StartPtr->getType())->getElementType();
478       Alignment = DL.getABITypeAlignment(EltType);
479     }
480 
481     AMemSet =
482       Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);
483 
484     LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
485                                                    : Range.TheStores) dbgs()
486                                               << *SI << '\n';
487                dbgs() << "With: " << *AMemSet << '\n');
488 
489     if (!Range.TheStores.empty())
490       AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
491 
492     // Zap all the stores.
493     for (Instruction *SI : Range.TheStores) {
494       MD->removeInstruction(SI);
495       SI->eraseFromParent();
496     }
497     ++NumMemSetInfer;
498   }
499 
500   return AMemSet;
501 }
502 
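// Helpers that return the alignment of a store or load, falling back to the
// ABI alignment of the accessed type when the instruction specifies none.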
503 static unsigned findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
504   unsigned StoreAlign = SI->getAlignment();
505   if (!StoreAlign)
506     StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
507   return StoreAlign;
508 }
509 
510 static unsigned findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
511   unsigned LoadAlign = LI->getAlignment();
512   if (!LoadAlign)
513     LoadAlign = DL.getABITypeAlignment(LI->getType());
514   return LoadAlign;
515 }
516 
517 static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
518                                      const LoadInst *LI) {
519   unsigned StoreAlign = findStoreAlignment(DL, SI);
520   unsigned LoadAlign = findLoadAlignment(DL, LI);
521   return MinAlign(StoreAlign, LoadAlign);
522 }
523 
524 // This method tries to lift a store instruction to just before position P.
525 // It lifts the store, its operands, and anything else that may alias with
526 // them.
527 // The method returns true if it was successful.
528 static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
529                    const LoadInst *LI) {
530   // If the store aliases this position, bail out early.
531   MemoryLocation StoreLoc = MemoryLocation::get(SI);
532   if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
533     return false;
534 
535   // Keep track of the arguments of all instructions we plan to lift
536   // so we can make sure to lift them as well if appropriate.
537   DenseSet<Instruction*> Args;
538   if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
539     if (Ptr->getParent() == SI->getParent())
540       Args.insert(Ptr);
541 
542   // Instructions to lift before P.
543   SmallVector<Instruction*, 8> ToLift;
544 
545   // Memory locations of lifted instructions.
546   SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
547 
548   // Lifted calls.
549   SmallVector<const CallBase *, 8> Calls;
550 
551   const MemoryLocation LoadLoc = MemoryLocation::get(LI);
552 
553   for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
554     auto *C = &*I;
555 
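    // Query whether C may read or write any memory at all; passing None asks
    // AA about the instruction's generic mod/ref behaviour rather than a
    // specific location.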
556     bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));
557 
558     bool NeedLift = false;
559     if (Args.erase(C))
560       NeedLift = true;
561     else if (MayAlias) {
562       NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
563         return isModOrRefSet(AA.getModRefInfo(C, ML));
564       });
565 
566       if (!NeedLift)
567         NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
568           return isModOrRefSet(AA.getModRefInfo(C, Call));
569         });
570     }
571 
572     if (!NeedLift)
573       continue;
574 
575     if (MayAlias) {
576       // Since LI is implicitly moved downwards past the lifted instructions,
577       // none of them may modify its source.
578       if (isModSet(AA.getModRefInfo(C, LoadLoc)))
579         return false;
580       else if (const auto *Call = dyn_cast<CallBase>(C)) {
581         // If we can't lift this before P, it's game over.
582         if (isModOrRefSet(AA.getModRefInfo(P, Call)))
583           return false;
584 
585         Calls.push_back(Call);
586       } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
587         // If we can't lift this before P, it's game over.
588         auto ML = MemoryLocation::get(C);
589         if (isModOrRefSet(AA.getModRefInfo(P, ML)))
590           return false;
591 
592         MemLocs.push_back(ML);
593       } else
594         // We don't know how to lift this instruction.
595         return false;
596     }
597 
598     ToLift.push_back(C);
599     for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
600       if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
601         if (A->getParent() == SI->getParent()) {
602           // Cannot hoist user of P above P
603           if (A == P) return false;
604           Args.insert(A);
605         }
606       }
607   }
608 
609   // We made it; now perform the lifting.
610   for (auto *I : llvm::reverse(ToLift)) {
611     LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
612     I->moveBefore(P);
613   }
614 
615   return true;
616 }
617 
618 bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
619   if (!SI->isSimple()) return false;
620 
621   // Avoid merging nontemporal stores since the resulting
622   // memcpy/memset would not be able to preserve the nontemporal hint.
623   // In theory we could teach how to propagate the !nontemporal metadata to
624   // memset calls. However, that change would force the backend to
625   // conservatively expand !nontemporal memset calls back to sequences of
626   // store instructions (effectively undoing the merging).
627   if (SI->getMetadata(LLVMContext::MD_nontemporal))
628     return false;
629 
630   const DataLayout &DL = SI->getModule()->getDataLayout();
631 
632   // Load to store forwarding can be interpreted as memcpy.
633   if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
634     if (LI->isSimple() && LI->hasOneUse() &&
635         LI->getParent() == SI->getParent()) {
636 
637       auto *T = LI->getType();
638       if (T->isAggregateType()) {
639         AliasAnalysis &AA = LookupAliasAnalysis();
640         MemoryLocation LoadLoc = MemoryLocation::get(LI);
641 
642         // We use alias analysis to check if an instruction may store to
643         // the memory we load from in between the load and the store. If
644         // such an instruction is found, we try to promote there instead
645         // of at the store position.
646         Instruction *P = SI;
647         for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
648           if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
649             P = &I;
650             break;
651           }
652         }
653 
654         // We found an instruction that may write to the loaded memory.
655         // We can try to promote at this position instead of the store
656         // position if nothing aliases the store memory after this and the store
657         // destination is not in the range.
658         if (P && P != SI) {
659           if (!moveUp(AA, SI, P, LI))
660             P = nullptr;
661         }
662 
663         // If a valid insertion position is found, then we can promote
664         // the load/store pair to a memcpy.
665         if (P) {
666           // If we load from memory that may alias the memory we store to,
667           // memmove must be used to preserve semantics. If not, memcpy can
668           // be used.
669           bool UseMemMove = false;
670           if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
671             UseMemMove = true;
672 
673           uint64_t Size = DL.getTypeStoreSize(T);
674 
675           IRBuilder<> Builder(P);
676           Instruction *M;
677           if (UseMemMove)
678             M = Builder.CreateMemMove(
679                 SI->getPointerOperand(), findStoreAlignment(DL, SI),
680                 LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
681           else
682             M = Builder.CreateMemCpy(
683                 SI->getPointerOperand(), findStoreAlignment(DL, SI),
684                 LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
685 
686           LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
687                             << *M << "\n");
688 
689           MD->removeInstruction(SI);
690           SI->eraseFromParent();
691           MD->removeInstruction(LI);
692           LI->eraseFromParent();
693           ++NumMemCpyInstr;
694 
695           // Make sure we do not invalidate the iterator.
696           BBI = M->getIterator();
697           return true;
698         }
699       }
700 
701       // Detect cases where we're performing call slot forwarding, but
702       // happen to be using a load-store pair to implement it, rather than
703       // a memcpy.
704       MemDepResult ldep = MD->getDependency(LI);
705       CallInst *C = nullptr;
706       if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
707         C = dyn_cast<CallInst>(ldep.getInst());
708 
709       if (C) {
710         // Check that nothing touches the dest of the "copy" between
711         // the call and the store.
712         Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
713         bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
714         AliasAnalysis &AA = LookupAliasAnalysis();
715         MemoryLocation StoreLoc = MemoryLocation::get(SI);
716         for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
717              I != E; --I) {
718           if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
719             C = nullptr;
720             break;
721           }
722           // The store to dest may never happen if an exception can be thrown
723           // between the load and the store.
724           if (I->mayThrow() && !CpyDestIsLocal) {
725             C = nullptr;
726             break;
727           }
728         }
729       }
730 
731       if (C) {
732         bool changed = performCallSlotOptzn(
733             LI, SI->getPointerOperand()->stripPointerCasts(),
734             LI->getPointerOperand()->stripPointerCasts(),
735             DL.getTypeStoreSize(SI->getOperand(0)->getType()),
736             findCommonAlignment(DL, SI, LI), C);
737         if (changed) {
738           MD->removeInstruction(SI);
739           SI->eraseFromParent();
740           MD->removeInstruction(LI);
741           LI->eraseFromParent();
742           ++NumMemCpyInstr;
743           return true;
744         }
745       }
746     }
747   }
748 
749   // There are two cases that are interesting for this code to handle: memcpy
750   // and memset.  Right now we only handle memset.
751 
752   // Ensure that the value being stored is something that can be memset a byte
753   // at a time, like "0" or "-1" of any width, as well as things like
754   // 0xA0A0A0A0 and 0.0.
755   auto *V = SI->getOperand(0);
756   if (Value *ByteVal = isBytewiseValue(V, DL)) {
757     if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
758                                               ByteVal)) {
759       BBI = I->getIterator(); // Don't invalidate iterator.
760       return true;
761     }
762 
763     // If we have an aggregate, we try to promote it to memset regardless
764     // of opportunity for merging as it can expose optimization opportunities
765     // in subsequent passes.
766     auto *T = V->getType();
767     if (T->isAggregateType()) {
768       uint64_t Size = DL.getTypeStoreSize(T);
769       unsigned Align = SI->getAlignment();
770       if (!Align)
771         Align = DL.getABITypeAlignment(T);
772       IRBuilder<> Builder(SI);
773       auto *M =
774           Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, Align);
775 
776       LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");
777 
778       MD->removeInstruction(SI);
779       SI->eraseFromParent();
780       NumMemSetInfer++;
781 
782       // Make sure we do not invalidate the iterator.
783       BBI = M->getIterator();
784       return true;
785     }
786   }
787 
788   return false;
789 }
790 
791 bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
792   // See if there is another memset or store neighboring this memset which
793   // allows us to widen out the memset to do a single larger store.
794   if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
795     if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
796                                               MSI->getValue())) {
797       BBI = I->getIterator(); // Don't invalidate iterator.
798       return true;
799     }
800   return false;
801 }
802 
803 /// Takes a memcpy and a call that it depends on,
804 /// and checks for the possibility of a call slot optimization by having
805 /// the call write its result directly into the destination of the memcpy.
806 bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
807                                          Value *cpySrc, uint64_t cpyLen,
808                                          unsigned cpyAlign, CallInst *C) {
809   // The general transformation to keep in mind is
810   //
811   //   call @func(..., src, ...)
812   //   memcpy(dest, src, ...)
813   //
814   // ->
815   //
816   //   memcpy(dest, src, ...)
817   //   call @func(..., dest, ...)
818   //
819   // Since moving the memcpy is technically awkward, we additionally check that
820   // src only holds uninitialized values at the moment of the call, meaning that
821   // the memcpy can be discarded rather than moved.
822 
823   // Lifetime marks shouldn't be operated on.
824   if (Function *F = C->getCalledFunction())
825     if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
826       return false;
827 
828   // Deliberately get the source and destination with bitcasts stripped away,
829   // because we'll need to do type comparisons based on the underlying type.
830   CallSite CS(C);
831 
832   // Require that src be an alloca.  This simplifies the reasoning considerably.
833   AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
834   if (!srcAlloca)
835     return false;
836 
837   ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
838   if (!srcArraySize)
839     return false;
840 
841   const DataLayout &DL = cpy->getModule()->getDataLayout();
842   uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
843                      srcArraySize->getZExtValue();
844 
845   if (cpyLen < srcSize)
846     return false;
847 
848   // Check that accessing the first srcSize bytes of dest will not cause a
849   // trap.  Otherwise the transform is invalid since it might cause a trap
850   // to occur earlier than it otherwise would.
851   if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
852     // The destination is an alloca.  Check it is larger than srcSize.
853     ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
854     if (!destArraySize)
855       return false;
856 
857     uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
858                         destArraySize->getZExtValue();
859 
860     if (destSize < srcSize)
861       return false;
862   } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
863     // The store to dest may never happen if the call can throw.
864     if (C->mayThrow())
865       return false;
866 
867     if (A->getDereferenceableBytes() < srcSize) {
868       // If the destination is an sret parameter then only accesses that are
869       // outside of the returned struct type can trap.
870       if (!A->hasStructRetAttr())
871         return false;
872 
873       Type *StructTy = cast<PointerType>(A->getType())->getElementType();
874       if (!StructTy->isSized()) {
875         // The call may never return and hence the copy-instruction may never
876         // be executed, and therefore it's not safe to say "the destination
877         // has at least <cpyLen> bytes, as implied by the copy-instruction",
878         return false;
879       }
880 
881       uint64_t destSize = DL.getTypeAllocSize(StructTy);
882       if (destSize < srcSize)
883         return false;
884     }
885   } else {
886     return false;
887   }
888 
889   // Check that dest points to memory that is at least as aligned as src.
890   unsigned srcAlign = srcAlloca->getAlignment();
891   if (!srcAlign)
892     srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
893   bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
894   // If dest is not aligned enough and we can't increase its alignment then
895   // bail out.
896   if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
897     return false;
898 
899   // Check that src is not accessed except via the call and the memcpy.  This
900   // guarantees that it holds only undefined values when passed in (so the final
901   // memcpy can be dropped), that it is not read or written between the call and
902   // the memcpy, and that writing beyond the end of it is undefined.
903   SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
904                                    srcAlloca->user_end());
905   while (!srcUseList.empty()) {
906     User *U = srcUseList.pop_back_val();
907 
908     if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
909       for (User *UU : U->users())
910         srcUseList.push_back(UU);
911       continue;
912     }
913     if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
914       if (!G->hasAllZeroIndices())
915         return false;
916 
917       for (User *UU : U->users())
918         srcUseList.push_back(UU);
919       continue;
920     }
921     if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
922       if (IT->isLifetimeStartOrEnd())
923         continue;
924 
925     if (U != C && U != cpy)
926       return false;
927   }
928 
929   // Check that src isn't captured by the called function since the
930   // transformation can cause aliasing issues in that case.
931   for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
932     if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
933       return false;
934 
935   // Since we're changing the parameter to the callsite, we need to make sure
936   // that what would be the new parameter dominates the callsite.
937   DominatorTree &DT = LookupDomTree();
938   if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
939     if (!DT.dominates(cpyDestInst, C))
940       return false;
941 
942   // In addition to knowing that the call does not access src in some
943   // unexpected manner, for example via a global, which we deduce from
944   // the use analysis, we also need to know that it does not sneakily
945   // access dest.  We rely on AA to figure this out for us.
946   AliasAnalysis &AA = LookupAliasAnalysis();
947   ModRefInfo MR = AA.getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
948   // If necessary, perform additional analysis.
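  // callCapturesBefore can prove more cases than the query above because it
  // also reasons about whether cpyDest could have been captured before the call.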
949   if (isModOrRefSet(MR))
950     MR = AA.callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), &DT);
951   if (isModOrRefSet(MR))
952     return false;
953 
954   // We can't create address space casts here because we don't know if they're
955   // safe for the target.
956   if (cpySrc->getType()->getPointerAddressSpace() !=
957       cpyDest->getType()->getPointerAddressSpace())
958     return false;
959   for (unsigned i = 0; i < CS.arg_size(); ++i)
960     if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
961         cpySrc->getType()->getPointerAddressSpace() !=
962         CS.getArgument(i)->getType()->getPointerAddressSpace())
963       return false;
964 
965   // All the checks have passed, so do the transformation.
966   bool changedArgument = false;
967   for (unsigned i = 0; i < CS.arg_size(); ++i)
968     if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
969       Value *Dest = cpySrc->getType() == cpyDest->getType() ?  cpyDest
970         : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
971                                       cpyDest->getName(), C);
972       changedArgument = true;
973       if (CS.getArgument(i)->getType() == Dest->getType())
974         CS.setArgument(i, Dest);
975       else
976         CS.setArgument(i, CastInst::CreatePointerCast(Dest,
977                           CS.getArgument(i)->getType(), Dest->getName(), C));
978     }
979 
980   if (!changedArgument)
981     return false;
982 
983   // If the destination wasn't sufficiently aligned then increase its alignment.
984   if (!isDestSufficientlyAligned) {
985     assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
986     cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
987   }
988 
989   // Drop any cached information about the call, because we may have changed
990   // its dependence information by changing its parameter.
991   MD->removeInstruction(C);
992 
993   // Update AA metadata
994   // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
995   // handled here, but combineMetadata doesn't support them yet
996   unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
997                          LLVMContext::MD_noalias,
998                          LLVMContext::MD_invariant_group,
999                          LLVMContext::MD_access_group};
1000   combineMetadata(C, cpy, KnownIDs, true);
1001 
1002   // Remove the memcpy.
1003   MD->removeInstruction(cpy);
1004   ++NumMemCpyInstr;
1005 
1006   return true;
1007 }
1008 
1009 /// We've found that the (upward scanning) memory dependence of memcpy 'M' is
1010 /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
1011 bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
1012                                                   MemCpyInst *MDep) {
1013   // We can only transform memcpys where the dest of one is the source of the
1014   // other.
1015   if (M->getSource() != MDep->getDest() || MDep->isVolatile())
1016     return false;
1017 
1018   // If the dep instruction is reading from our current input, then it is a noop
1019   // transfer and substituting the input won't change this instruction.  Just
1020   // ignore the input and let someone else zap MDep.  This handles cases like:
1021   //    memcpy(a <- a)
1022   //    memcpy(b <- a)
1023   if (M->getSource() == MDep->getSource())
1024     return false;
1025 
1026   // Second, the length of the memcpy's must be the same, or the preceding one
1027   // must be larger than the following one.
1028   ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
1029   ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
1030   if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
1031     return false;
1032 
1033   AliasAnalysis &AA = LookupAliasAnalysis();
1034 
1035   // Verify that the copied-from memory doesn't change in between the two
1036   // transfers.  For example, in:
1037   //    memcpy(a <- b)
1038   //    *b = 42;
1039   //    memcpy(c <- a)
1040   // It would be invalid to transform the second memcpy into memcpy(c <- b).
1041   //
1042   // TODO: If the code between M and MDep is transparent to the destination "c",
1043   // then we could still perform the xform by moving M up to the first memcpy.
1044   //
1045   // NOTE: This is conservative, it will stop on any read from the source loc,
1046   // not just the defining memcpy.
1047   MemDepResult SourceDep =
1048       MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
1049                                    M->getIterator(), M->getParent());
1050   if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1051     return false;
1052 
1053   // If the dest of the second might alias the source of the first, then the
1054   // source and dest might overlap.  We still want to eliminate the intermediate
1055   // value, but we have to generate a memmove instead of memcpy.
1056   bool UseMemMove = false;
1057   if (!AA.isNoAlias(MemoryLocation::getForDest(M),
1058                     MemoryLocation::getForSource(MDep)))
1059     UseMemMove = true;
1060 
1061   // If all checks passed, then we can transform M.
1062   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
1063                     << *MDep << '\n' << *M << '\n');
1064 
1065   // TODO: Is this worth it if we're creating a less aligned memcpy? For
1066   // example we could be moving from movaps -> movq on x86.
1067   IRBuilder<> Builder(M);
1068   if (UseMemMove)
1069     Builder.CreateMemMove(M->getRawDest(), M->getDestAlignment(),
1070                           MDep->getRawSource(), MDep->getSourceAlignment(),
1071                           M->getLength(), M->isVolatile());
1072   else
1073     Builder.CreateMemCpy(M->getRawDest(), M->getDestAlignment(),
1074                          MDep->getRawSource(), MDep->getSourceAlignment(),
1075                          M->getLength(), M->isVolatile());
1076 
1077   // Remove the instruction we're replacing.
1078   MD->removeInstruction(M);
1079   M->eraseFromParent();
1080   ++NumMemCpyInstr;
1081   return true;
1082 }
1083 
1084 /// We've found that the (upward scanning) memory dependence of \p MemCpy is
1085 /// \p MemSet.  Try to simplify \p MemSet to only set the trailing bytes that
1086 /// weren't copied over by \p MemCpy.
1087 ///
1088 /// In other words, transform:
1089 /// \code
1090 ///   memset(dst, c, dst_size);
1091 ///   memcpy(dst, src, src_size);
1092 /// \endcode
1093 /// into:
1094 /// \code
1095 ///   memcpy(dst, src, src_size);
1096 ///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
1097 /// \endcode
1098 bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
1099                                                   MemSetInst *MemSet) {
1100   // We can only transform memset/memcpy with the same destination.
1101   if (MemSet->getDest() != MemCpy->getDest())
1102     return false;
1103 
1104   // Check that there are no other dependencies on the memset destination.
1105   MemDepResult DstDepInfo =
1106       MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
1107                                    MemCpy->getIterator(), MemCpy->getParent());
1108   if (DstDepInfo.getInst() != MemSet)
1109     return false;
1110 
1111   // Use the same i8* dest as the memcpy, killing the memset dest if different.
1112   Value *Dest = MemCpy->getRawDest();
1113   Value *DestSize = MemSet->getLength();
1114   Value *SrcSize = MemCpy->getLength();
1115 
1116   // By default, create an unaligned memset.
1117   unsigned Align = 1;
1118   // If Dest is aligned, and SrcSize is constant, the pointer Dest + SrcSize is
1119   // aligned to at least the minimum of the two, so use that for the memset.
1120   const unsigned DestAlign =
1121       std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
1122   if (DestAlign > 1)
1123     if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
1124       Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
1125 
1126   IRBuilder<> Builder(MemCpy);
1127 
1128   // If the sizes have different types, zext the smaller one.
1129   if (DestSize->getType() != SrcSize->getType()) {
1130     if (DestSize->getType()->getIntegerBitWidth() >
1131         SrcSize->getType()->getIntegerBitWidth())
1132       SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
1133     else
1134       DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
1135   }
1136 
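  // Compute the length of the trailing memset branchlessly:
  //   MemsetLen = DestSize <= SrcSize ? 0 : DestSize - SrcSize.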
1137   Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
1138   Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
1139   Value *MemsetLen = Builder.CreateSelect(
1140       Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
1141   Builder.CreateMemSet(
1142       Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
1143                         SrcSize),
1144       MemSet->getOperand(1), MemsetLen, Align);
1145 
1146   MD->removeInstruction(MemSet);
1147   MemSet->eraseFromParent();
1148   return true;
1149 }
1150 
1151 /// Determine whether the instruction has undefined content for the given Size,
1152 /// either because it was freshly alloca'd or started its lifetime.
1153 static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
1154   if (isa<AllocaInst>(I))
1155     return true;
1156 
1157   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1158     if (II->getIntrinsicID() == Intrinsic::lifetime_start)
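      // The first operand of llvm.lifetime.start is the size in bytes of the
      // region whose lifetime is starting.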
1159       if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1160         if (LTSize->getZExtValue() >= Size->getZExtValue())
1161           return true;
1162 
1163   return false;
1164 }
1165 
1166 /// Transform memcpy to memset when its source was just memset.
1167 /// In other words, turn:
1168 /// \code
1169 ///   memset(dst1, c, dst1_size);
1170 ///   memcpy(dst2, dst1, dst2_size);
1171 /// \endcode
1172 /// into:
1173 /// \code
1174 ///   memset(dst1, c, dst1_size);
1175 ///   memset(dst2, c, dst2_size);
1176 /// \endcode
1177 /// When dst2_size <= dst1_size.
1178 ///
1179 /// The \p MemCpy must have a Constant length.
1180 bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
1181                                                MemSetInst *MemSet) {
1182   AliasAnalysis &AA = LookupAliasAnalysis();
1183 
1184   // Make sure this is memcpy(..., memset(...), ...); that is, we are copying
1185   // from the address that was just memset. Otherwise it is hard to reason about.
1186   if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
1187     return false;
1188 
1189   // A known memset size is required.
1190   ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
1191   if (!MemSetSize)
1192     return false;
1193 
1194   // Make sure the memcpy doesn't read any more than what the memset wrote.
1195   // Don't worry about sizes larger than i64.
1196   ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
1197   if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
1198     // If the memcpy is larger than the memset, but the memory was undef prior
1199     // to the memset, we can just ignore the tail. Technically we're only
1200     // interested in the bytes from MemSetSize..CopySize here, but as we can't
1201     // easily represent this location, we use the full 0..CopySize range.
1202     MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
1203     MemDepResult DepInfo = MD->getPointerDependencyFrom(
1204         MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
1205     if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
1206       CopySize = MemSetSize;
1207     else
1208       return false;
1209   }
1210 
1211   IRBuilder<> Builder(MemCpy);
1212   Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
1213                        CopySize, MemCpy->getDestAlignment());
1214   return true;
1215 }
1216 
1217 /// Perform simplification of memcpy's.  If we have memcpy A
1218 /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
1219 /// B to be a memcpy from X to Z (or potentially a memmove, depending on
1220 /// circumstances). This allows later passes to remove the first memcpy
1221 /// altogether.
1222 bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
1223   // We can only optimize non-volatile memcpy's.
1224   if (M->isVolatile()) return false;
1225 
1226   // If the source and destination of the memcpy are the same, then zap it.
1227   if (M->getSource() == M->getDest()) {
1228     MD->removeInstruction(M);
1229     M->eraseFromParent();
1230     return false;
1231   }
1232 
1233   // If copying from a constant, try to turn the memcpy into a memset.
1234   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
1235     if (GV->isConstant() && GV->hasDefinitiveInitializer())
1236       if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
1237                                            M->getModule()->getDataLayout())) {
1238         IRBuilder<> Builder(M);
1239         Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
1240                              M->getDestAlignment(), false);
1241         MD->removeInstruction(M);
1242         M->eraseFromParent();
1243         ++NumCpyToSet;
1244         return true;
1245       }
1246 
1247   MemDepResult DepInfo = MD->getDependency(M);
1248 
1249   // Try to turn a partially redundant memset + memcpy into
1250   // memcpy + smaller memset.  We don't need the memcpy size for this.
1251   if (DepInfo.isClobber())
1252     if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
1253       if (processMemSetMemCpyDependence(M, MDep))
1254         return true;
1255 
1256   // The optimizations after this point require the memcpy size.
1257   ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
1258   if (!CopySize) return false;
1259 
1260   // There are four possible optimizations we can do for memcpy:
1261   //   a) memcpy-memcpy xform which exposes redundancy for DSE.
1262   //   b) call-memcpy xform for return slot optimization.
1263   //   c) memcpy from freshly alloca'd space or space that has just started its
1264   //      lifetime copies undefined data, and we can therefore eliminate the
1265   //      memcpy in favor of the data that was already at the destination.
1266   //   d) memcpy from a just-memset'd source can be turned into memset.
1267   if (DepInfo.isClobber()) {
1268     if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
1269       // FIXME: Can we pass in either of dest/src alignment here instead
1270       // of conservatively taking the minimum?
1271       unsigned Align = MinAlign(M->getDestAlignment(), M->getSourceAlignment());
1272       if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
1273                                CopySize->getZExtValue(), Align,
1274                                C)) {
1275         MD->removeInstruction(M);
1276         M->eraseFromParent();
1277         return true;
1278       }
1279     }
1280   }
1281 
1282   MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
1283   MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
1284       SrcLoc, true, M->getIterator(), M->getParent());
1285 
1286   if (SrcDepInfo.isClobber()) {
1287     if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
1288       return processMemCpyMemCpyDependence(M, MDep);
1289   } else if (SrcDepInfo.isDef()) {
1290     if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
1291       MD->removeInstruction(M);
1292       M->eraseFromParent();
1293       ++NumMemCpyInstr;
1294       return true;
1295     }
1296   }
1297 
1298   if (SrcDepInfo.isClobber())
1299     if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
1300       if (performMemCpyToMemSetOptzn(M, MDep)) {
1301         MD->removeInstruction(M);
1302         M->eraseFromParent();
1303         ++NumCpyToSet;
1304         return true;
1305       }
1306 
1307   return false;
1308 }
1309 
1310 /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1311 /// not to alias.
1312 bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
1313   AliasAnalysis &AA = LookupAliasAnalysis();
1314 
1315   if (!TLI->has(LibFunc_memmove))
1316     return false;
1317 
1318   // See if the pointers alias.
1319   if (!AA.isNoAlias(MemoryLocation::getForDest(M),
1320                     MemoryLocation::getForSource(M)))
1321     return false;
1322 
1323   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1324                     << "\n");
1325 
1326   // If not, then we know we can transform this.
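  // The memmove and memcpy intrinsics share the same operand layout, so we can
  // simply retarget the call at the equivalent llvm.memcpy declaration.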
1327   Type *ArgTys[3] = { M->getRawDest()->getType(),
1328                       M->getRawSource()->getType(),
1329                       M->getLength()->getType() };
1330   M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1331                                                  Intrinsic::memcpy, ArgTys));
1332 
1333   // MemDep may have overly conservative information about this instruction;
1334   // just conservatively flush it from the cache.
1335   MD->removeInstruction(M);
1336 
1337   ++NumMoveToCpy;
1338   return true;
1339 }
1340 
1341 /// This is called on every byval argument in call sites.
1342 bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
1343   const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
1344   // Find out what feeds this byval argument.
1345   Value *ByValArg = CS.getArgument(ArgNo);
1346   Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
1347   uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
1348   MemDepResult DepInfo = MD->getPointerDependencyFrom(
1349       MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
1350       CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
1351   if (!DepInfo.isClobber())
1352     return false;
1353 
1354   // If the byval argument isn't fed by a memcpy, ignore it.  If it is fed by
1355   // a memcpy, see if we can byval from the source of the memcpy instead of the
1356   // result.
1357   MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
1358   if (!MDep || MDep->isVolatile() ||
1359       ByValArg->stripPointerCasts() != MDep->getDest())
1360     return false;
1361 
1362   // The length of the memcpy must be at least the size of the byval.
1363   ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
1364   if (!C1 || C1->getValue().getZExtValue() < ByValSize)
1365     return false;
1366 
1367   // Get the alignment of the byval argument.  If the call doesn't specify the
1368   // alignment, then it is some target-specific value that we can't know.
1369   unsigned ByValAlign = CS.getParamAlignment(ArgNo);
1370   if (ByValAlign == 0) return false;
1371 
1372   // If the byval requires more alignment than the memcpy source provides, see
1373   // whether we can raise the source's alignment to match; if not, bail out.
1374   AssumptionCache &AC = LookupAssumptionCache();
1375   DominatorTree &DT = LookupDomTree();
1376   if (MDep->getSourceAlignment() < ByValAlign &&
1377       getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
1378                                  CS.getInstruction(), &AC, &DT) < ByValAlign)
1379     return false;
1380 
1381   // The address space of the memcpy source must match that of the byval argument.
1382   if (MDep->getSource()->getType()->getPointerAddressSpace() !=
1383       ByValArg->getType()->getPointerAddressSpace())
1384     return false;
1385 
1386   // Verify that the copied-from memory doesn't change in between the memcpy and
1387   // the byval call.
1388   //    memcpy(a <- b)
1389   //    *b = 42;
1390   //    foo(*a)
1391   // It would be invalid to rewrite the call as foo(*b): foo would observe 42.
1392   //
1393   // NOTE: This is conservative, it will stop on any read from the source loc,
1394   // not just the defining memcpy.
1395   MemDepResult SourceDep = MD->getPointerDependencyFrom(
1396       MemoryLocation::getForSource(MDep), false,
1397       CS.getInstruction()->getIterator(), MDep->getParent());
1398   if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1399     return false;
1400 
1401   Value *TmpCast = MDep->getSource();
1402   if (MDep->getSource()->getType() != ByValArg->getType())
1403     TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
1404                               "tmpcast", CS.getInstruction());
1405 
1406   LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
1407                     << "  " << *MDep << "\n"
1408                     << "  " << *CS.getInstruction() << "\n");
1409 
1410   // Otherwise we're good!  Update the byval argument.
1411   CS.setArgument(ArgNo, TmpCast);
1412   ++NumMemCpyInstr;
1413   return true;
1414 }
1415 
1416 /// Executes one iteration of MemCpyOptPass.
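/// Walks every reachable basic block and dispatches each instruction to the
/// matching handler (stores, memset, memcpy, memmove and byval call
/// arguments).  Handlers that rewrite an intrinsic in place can request that
/// the resulting instruction be processed again.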
1417 bool MemCpyOptPass::iterateOnFunction(Function &F) {
1418   bool MadeChange = false;
1419 
1420   DominatorTree &DT = LookupDomTree();
1421 
1422   // Walk all instructions in the function.
1423   for (BasicBlock &BB : F) {
1424     // Skip unreachable blocks. For example, processStore assumes that an
1425     // instruction in a BB can't be dominated by a later instruction in the
1426     // same BB (which is a scenario that can happen for an unreachable BB that
1427     // has itself as a predecessor).
1428     if (!DT.isReachableFromEntry(&BB))
1429       continue;
1430 
1431     for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
1432       // Advance the iterator first so that erasing I doesn't invalidate it.
1433       Instruction *I = &*BI++;
1434 
1435       bool RepeatInstruction = false;
1436 
1437       if (StoreInst *SI = dyn_cast<StoreInst>(I))
1438         MadeChange |= processStore(SI, BI);
1439       else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
1440         RepeatInstruction = processMemSet(M, BI);
1441       else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
1442         RepeatInstruction = processMemCpy(M);
1443       else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
1444         RepeatInstruction = processMemMove(M);
1445       else if (auto CS = CallSite(I)) {
1446         for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
1447           if (CS.isByValArgument(i))
1448             MadeChange |= processByValArgument(CS, i);
1449       }
1450 
1451       // Reprocess the instruction if desired.
1452       if (RepeatInstruction) {
1453         if (BI != BB.begin())
1454           --BI;
1455         MadeChange = true;
1456       }
1457     }
1458   }
1459 
1460   return MadeChange;
1461 }
1462 
1463 PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
1464   auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
1465   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1466 
1467   auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
1468     return AM.getResult<AAManager>(F);
1469   };
1470   auto LookupAssumptionCache = [&]() -> AssumptionCache & {
1471     return AM.getResult<AssumptionAnalysis>(F);
1472   };
1473   auto LookupDomTree = [&]() -> DominatorTree & {
1474     return AM.getResult<DominatorTreeAnalysis>(F);
1475   };
1476 
1477   bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
1478                             LookupAssumptionCache, LookupDomTree);
1479   if (!MadeChange)
1480     return PreservedAnalyses::all();
1481 
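  // The transformations above only rewrite or erase memory instructions and
  // never change the CFG, so CFG-based analyses remain valid; memory
  // dependence results are kept consistent via MD->removeInstruction().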
1482   PreservedAnalyses PA;
1483   PA.preserveSet<CFGAnalyses>();
1484   PA.preserve<GlobalsAA>();
1485   PA.preserve<MemoryDependenceAnalysis>();
1486   return PA;
1487 }
1488 
1489 bool MemCpyOptPass::runImpl(
1490     Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
1491     std::function<AliasAnalysis &()> LookupAliasAnalysis_,
1492     std::function<AssumptionCache &()> LookupAssumptionCache_,
1493     std::function<DominatorTree &()> LookupDomTree_) {
1494   bool MadeChange = false;
1495   MD = MD_;
1496   TLI = TLI_;
1497   LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
1498   LookupAssumptionCache = std::move(LookupAssumptionCache_);
1499   LookupDomTree = std::move(LookupDomTree_);
1500 
1501   // If we don't have at least memset and memcpy, there is little point in doing
1502   // anything here.  These are required by a freestanding implementation, so if
1503   // even they are disabled, there is no point in trying hard.
1504   if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
1505     return false;
1506 
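  // Iterate to a fixed point: one transformation can expose another (for
  // instance, a run of stores merged into a memset may in turn enable a
  // memcpy rewrite), so keep rescanning until a full pass over the function
  // makes no change.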
1507   while (true) {
1508     if (!iterateOnFunction(F))
1509       break;
1510     MadeChange = true;
1511   }
1512 
1513   MD = nullptr;
1514   return MadeChange;
1515 }
1516 
1517 /// This is the main transformation entry point for a function.
1518 bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1519   if (skipFunction(F))
1520     return false;
1521 
1522   auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
1523   auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1524 
1525   auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
1526     return getAnalysis<AAResultsWrapperPass>().getAAResults();
1527   };
1528   auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
1529     return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1530   };
1531   auto LookupDomTree = [this]() -> DominatorTree & {
1532     return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1533   };
1534 
1535   return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
1536                       LookupDomTree);
1537 }
1538