xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp (revision 5e801ac66d24704442eba426ed13c3effb8a34e7)
1 //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The code below implements dead store elimination using MemorySSA. It uses
10 // the following general approach: given a MemoryDef, walk upwards to find
11 // clobbering MemoryDefs that may be killed by the starting def. Then check
12 // that there are no uses that may read the location of the original MemoryDef
13 // in between both MemoryDefs. A bit more concretely:
14 //
15 // For all MemoryDefs StartDef:
16 // 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by walking
17 //    upwards.
18 // 2. Check that there are no reads between MaybeDeadAccess and the StartDef by
19 //    checking all uses starting at MaybeDeadAccess and walking until we see
20 //    StartDef.
21 // 3. For each candidate dead MemoryDef (MaybeDeadAccess) found this way, check:
22 //   1. There are no barrier instructions between MaybeDeadAccess and StartDef
23 //      (like throws or stores with ordering constraints).
24 //   2. StartDef is executed whenever MaybeDeadAccess is executed.
25 //   3. StartDef completely overwrites MaybeDeadAccess.
26 // 4. Erase MaybeDeadAccess from the function and MemorySSA.
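//
// As an illustrative sketch (hand-written IR, not taken from an actual test):
//
//   store i32 0, i32* %p        ; MaybeDeadAccess, killed and erased by DSE
//   store i32 1, i32* %p        ; StartDef, the killing store
//
// The first store is dead because the second one completely overwrites %p and
// no instruction in between may read %p.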
27 //
28 //===----------------------------------------------------------------------===//
29 
30 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/MapVector.h"
34 #include "llvm/ADT/PostOrderIterator.h"
35 #include "llvm/ADT/SetVector.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Statistic.h"
39 #include "llvm/ADT/StringRef.h"
40 #include "llvm/Analysis/AliasAnalysis.h"
41 #include "llvm/Analysis/CaptureTracking.h"
42 #include "llvm/Analysis/GlobalsModRef.h"
43 #include "llvm/Analysis/LoopInfo.h"
44 #include "llvm/Analysis/MemoryBuiltins.h"
45 #include "llvm/Analysis/MemoryLocation.h"
46 #include "llvm/Analysis/MemorySSA.h"
47 #include "llvm/Analysis/MemorySSAUpdater.h"
48 #include "llvm/Analysis/MustExecute.h"
49 #include "llvm/Analysis/PostDominators.h"
50 #include "llvm/Analysis/TargetLibraryInfo.h"
51 #include "llvm/Analysis/ValueTracking.h"
52 #include "llvm/IR/Argument.h"
53 #include "llvm/IR/BasicBlock.h"
54 #include "llvm/IR/Constant.h"
55 #include "llvm/IR/Constants.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/Dominators.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/IRBuilder.h"
60 #include "llvm/IR/InstIterator.h"
61 #include "llvm/IR/InstrTypes.h"
62 #include "llvm/IR/Instruction.h"
63 #include "llvm/IR/Instructions.h"
64 #include "llvm/IR/IntrinsicInst.h"
65 #include "llvm/IR/Intrinsics.h"
66 #include "llvm/IR/LLVMContext.h"
67 #include "llvm/IR/Module.h"
68 #include "llvm/IR/PassManager.h"
69 #include "llvm/IR/PatternMatch.h"
70 #include "llvm/IR/Value.h"
71 #include "llvm/InitializePasses.h"
72 #include "llvm/Pass.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/CommandLine.h"
75 #include "llvm/Support/Debug.h"
76 #include "llvm/Support/DebugCounter.h"
77 #include "llvm/Support/ErrorHandling.h"
78 #include "llvm/Support/MathExtras.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Scalar.h"
81 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
82 #include "llvm/Transforms/Utils/BuildLibCalls.h"
83 #include "llvm/Transforms/Utils/Local.h"
84 #include <algorithm>
85 #include <cassert>
86 #include <cstddef>
87 #include <cstdint>
88 #include <iterator>
89 #include <map>
90 #include <utility>
91 
92 using namespace llvm;
93 using namespace PatternMatch;
94 
95 #define DEBUG_TYPE "dse"
96 
97 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
98 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
99 STATISTIC(NumFastStores, "Number of stores deleted");
100 STATISTIC(NumFastOther, "Number of other instrs removed");
101 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
102 STATISTIC(NumModifiedStores, "Number of stores modified");
103 STATISTIC(NumCFGChecks, "Number of blocks checked in CFG path checks");
104 STATISTIC(NumCFGTries, "Number of CFG path checks attempted");
105 STATISTIC(NumCFGSuccess, "Number of CFG path checks that succeeded");
106 STATISTIC(NumGetDomMemoryDefPassed,
107           "Number of times a valid candidate is returned from getDomMemoryDef");
108 STATISTIC(NumDomMemDefChecks,
109           "Number of iterations checking for reads in getDomMemoryDef");
110 
111 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
112               "Controls which MemoryDefs are eliminated.");
113 
114 static cl::opt<bool>
115 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
116   cl::init(true), cl::Hidden,
117   cl::desc("Enable partial-overwrite tracking in DSE"));
118 
119 static cl::opt<bool>
120 EnablePartialStoreMerging("enable-dse-partial-store-merging",
121   cl::init(true), cl::Hidden,
122   cl::desc("Enable partial store merging in DSE"));
123 
124 static cl::opt<unsigned>
125     MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
126                        cl::desc("The number of memory instructions to scan for "
127                                 "dead store elimination (default = 150)"));
128 static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
129     "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
130     cl::desc("The maximum number of steps while walking upwards to find "
131              "MemoryDefs that may be killed (default = 90)"));
132 
133 static cl::opt<unsigned> MemorySSAPartialStoreLimit(
134     "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
135     cl::desc("The maximum number of candidates that only partially overwrite the "
136              "killing MemoryDef to consider"
137              " (default = 5)"));
138 
139 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
140     "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
141     cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
142              "other stores per basic block (default = 5000)"));
143 
144 static cl::opt<unsigned> MemorySSASameBBStepCost(
145     "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
146     cl::desc(
147         "The cost of a step in the same basic block as the killing MemoryDef "
148         "(default = 1)"));
149 
150 static cl::opt<unsigned>
151     MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
152                              cl::Hidden,
153                              cl::desc("The cost of a step in a different basic "
154                                       "block than the killing MemoryDef "
155                                       "(default = 5)"));
156 
157 static cl::opt<unsigned> MemorySSAPathCheckLimit(
158     "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
159     cl::desc("The maximum number of blocks to check when trying to prove that "
160              "all paths to an exit go through a killing block (default = 50)"));
161 
162 //===----------------------------------------------------------------------===//
163 // Helper functions
164 //===----------------------------------------------------------------------===//
165 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
166 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
167 
168 /// Does this instruction write some memory?  This only returns true for things
169 /// that we can analyze with other helpers below.
170 static bool hasAnalyzableMemoryWrite(Instruction *I,
171                                      const TargetLibraryInfo &TLI) {
172   if (isa<StoreInst>(I))
173     return true;
174   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
175     switch (II->getIntrinsicID()) {
176     default:
177       return false;
178     case Intrinsic::memset:
179     case Intrinsic::memmove:
180     case Intrinsic::memcpy:
181     case Intrinsic::memcpy_inline:
182     case Intrinsic::memcpy_element_unordered_atomic:
183     case Intrinsic::memmove_element_unordered_atomic:
184     case Intrinsic::memset_element_unordered_atomic:
185     case Intrinsic::init_trampoline:
186     case Intrinsic::lifetime_end:
187     case Intrinsic::masked_store:
188       return true;
189     }
190   }
191   if (auto *CB = dyn_cast<CallBase>(I)) {
192     LibFunc LF;
193     if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
194       switch (LF) {
195       case LibFunc_strcpy:
196       case LibFunc_strncpy:
197       case LibFunc_strcat:
198       case LibFunc_strncat:
199         return true;
200       default:
201         return false;
202       }
203     }
204   }
205   return false;
206 }
207 
208 /// If the value of this instruction and the memory it writes to are unused,
209 /// may we delete this instruction?
210 static bool isRemovable(Instruction *I) {
211   // Don't remove volatile/atomic stores.
212   if (StoreInst *SI = dyn_cast<StoreInst>(I))
213     return SI->isUnordered();
214 
215   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
216     switch (II->getIntrinsicID()) {
217     default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
218     case Intrinsic::lifetime_end:
219       // Never remove dead lifetime_end's, e.g. because it is followed by a
220       // free.
221       return false;
222     case Intrinsic::init_trampoline:
223       // Always safe to remove init_trampoline.
224       return true;
225     case Intrinsic::memset:
226     case Intrinsic::memmove:
227     case Intrinsic::memcpy:
228     case Intrinsic::memcpy_inline:
229       // Don't remove volatile memory intrinsics.
230       return !cast<MemIntrinsic>(II)->isVolatile();
231     case Intrinsic::memcpy_element_unordered_atomic:
232     case Intrinsic::memmove_element_unordered_atomic:
233     case Intrinsic::memset_element_unordered_atomic:
234     case Intrinsic::masked_store:
235       return true;
236     }
237   }
238 
239   // Note: we only get here for calls with analyzable writes, i.e. libcalls.
240   if (auto *CB = dyn_cast<CallBase>(I))
241     return CB->use_empty();
242 
243   return false;
244 }
245 
246 /// Returns true if the end of this instruction can be safely shortened in
247 /// length.
248 static bool isShortenableAtTheEnd(Instruction *I) {
249   // Don't shorten stores for now
250   if (isa<StoreInst>(I))
251     return false;
252 
253   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
254     switch (II->getIntrinsicID()) {
255       default: return false;
256       case Intrinsic::memset:
257       case Intrinsic::memcpy:
258       case Intrinsic::memcpy_element_unordered_atomic:
259       case Intrinsic::memset_element_unordered_atomic:
260         // Do shorten memory intrinsics.
261         // FIXME: Add memmove if it's also safe to transform.
262         return true;
263     }
264   }
265 
266   // Don't shorten libcalls for now.
267 
268   return false;
269 }
270 
271 /// Returns true if the beginning of this instruction can be safely shortened
272 /// in length.
273 static bool isShortenableAtTheBeginning(Instruction *I) {
274   // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
275   // easily done by offsetting the source address.
276   return isa<AnyMemSetInst>(I);
277 }
278 
279 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
280                                const TargetLibraryInfo &TLI,
281                                const Function *F) {
282   uint64_t Size;
283   ObjectSizeOpts Opts;
284   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
285 
286   if (getObjectSize(V, Size, DL, &TLI, Opts))
287     return Size;
288   return MemoryLocation::UnknownSize;
289 }
290 
291 namespace {
292 
293 enum OverwriteResult {
294   OW_Begin,
295   OW_Complete,
296   OW_End,
297   OW_PartialEarlierWithFullLater,
298   OW_MaybePartial,
299   OW_Unknown
300 };
301 
302 } // end anonymous namespace
303 
304 /// Check if two instructions are masked stores that completely
305 /// overwrite one another. More specifically, \p KillingI has to
306 /// overwrite \p DeadI.
307 static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI,
308                                               const Instruction *DeadI,
309                                               BatchAAResults &AA) {
310   const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI);
311   const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI);
312   if (KillingII == nullptr || DeadII == nullptr)
313     return OW_Unknown;
314   if (KillingII->getIntrinsicID() != Intrinsic::masked_store ||
315       DeadII->getIntrinsicID() != Intrinsic::masked_store)
316     return OW_Unknown;
317   // Pointers.
318   Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts();
319   Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts();
320   if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr))
321     return OW_Unknown;
322   // Masks.
323   // TODO: check that KillingII's mask is a superset of the DeadII's mask.
324   if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
325     return OW_Unknown;
326   return OW_Complete;
327 }
328 
329 /// Return 'OW_Complete' if a store to the 'KillingLoc' location completely
330 /// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the
331 /// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin'
332 /// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'.
333 /// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was
334 /// overwritten by a killing (smaller) store which doesn't write outside the big
335 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
336 /// NOTE: This function must only be called if both \p KillingLoc and \p
337 /// DeadLoc belong to the same underlying object with valid \p KillingOff and
338 /// \p DeadOff.
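/// As a hedged, illustrative example: with DeadOff = 0, DeadSize = 8,
/// KillingOff = 4 and KillingSize = 8, the killing store covers [4, 12), the
/// dead store covers [0, 8), and the end of the dead store is overwritten, so
/// OW_End is returned when partial-overwrite tracking is disabled (with
/// tracking enabled, the overlap is instead recorded in the interval map).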
339 static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc,
340                                           const MemoryLocation &DeadLoc,
341                                           int64_t KillingOff, int64_t DeadOff,
342                                           Instruction *DeadI,
343                                           InstOverlapIntervalsTy &IOL) {
344   const uint64_t KillingSize = KillingLoc.Size.getValue();
345   const uint64_t DeadSize = DeadLoc.Size.getValue();
346   // The killing store may now overlap the dead store, although the overlap is
347   // not complete. There might also be other incomplete overlaps, and together,
348   // they might cover the complete dead store.
349   // Note: The correctness of this logic depends on the fact that this function
350   // is not called for this pair of stores when there are any intervening reads.
351   if (EnablePartialOverwriteTracking &&
352       KillingOff < int64_t(DeadOff + DeadSize) &&
353       int64_t(KillingOff + KillingSize) >= DeadOff) {
354 
355     // Insert our part of the overlap into the map.
356     auto &IM = IOL[DeadI];
357     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", "
358                       << int64_t(DeadOff + DeadSize) << ") KillingLoc ["
359                       << KillingOff << ", " << int64_t(KillingOff + KillingSize)
360                       << ")\n");
361 
362     // Make sure that we only insert non-overlapping intervals and combine
363     // adjacent intervals. The intervals are stored in the map with the ending
364     // offset as the key (in the half-open sense) and the starting offset as
365     // the value.
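    // For example (an illustrative sketch): a killing store covering [4, 12) is
    // recorded as IM[12] = 4; if a later killing store covers [0, 6), the two
    // entries are merged below into a single interval IM[12] = 0.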
366     int64_t KillingIntStart = KillingOff;
367     int64_t KillingIntEnd = KillingOff + KillingSize;
368 
369     // Find any intervals ending at, or after, KillingIntStart which start
370     // before KillingIntEnd.
371     auto ILI = IM.lower_bound(KillingIntStart);
372     if (ILI != IM.end() && ILI->second <= KillingIntEnd) {
373       // This existing interval is overlapped with the current store somewhere
374       // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing
375       // intervals and adjusting our start and end.
376       KillingIntStart = std::min(KillingIntStart, ILI->second);
377       KillingIntEnd = std::max(KillingIntEnd, ILI->first);
378       ILI = IM.erase(ILI);
379 
380       // Continue erasing and adjusting our end in case other previous
381       // intervals are also overlapped with the current store.
382       //
383       // |--- dead 1 ---|  |--- dead 2 ---|
384       //     |------- killing---------|
385       //
386       while (ILI != IM.end() && ILI->second <= KillingIntEnd) {
387         assert(ILI->second > KillingIntStart && "Unexpected interval");
388         KillingIntEnd = std::max(KillingIntEnd, ILI->first);
389         ILI = IM.erase(ILI);
390       }
391     }
392 
393     IM[KillingIntEnd] = KillingIntStart;
394 
395     ILI = IM.begin();
396     if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) {
397       LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc ["
398                         << DeadOff << ", " << int64_t(DeadOff + DeadSize)
399                         << ") Composite KillingLoc [" << ILI->second << ", "
400                         << ILI->first << ")\n");
401       ++NumCompletePartials;
402       return OW_Complete;
403     }
404   }
405 
406   // Check for a dead store which writes to all the memory locations that
407   // the killing store writes to.
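  // For example (illustrative sizes): an 8-byte dead store at offset 0 that is
  // partially overwritten by a 1-byte killing store at offset 3 falls into this
  // case, since the killing range [3, 4) lies entirely within [0, 8).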
408   if (EnablePartialStoreMerging && KillingOff >= DeadOff &&
409       int64_t(DeadOff + DeadSize) > KillingOff &&
410       uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) {
411     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite of a dead store [" << DeadOff
412                       << ", " << int64_t(DeadOff + DeadSize)
413                       << ") by a killing store [" << KillingOff << ", "
414                       << int64_t(KillingOff + KillingSize) << ")\n");
415     // TODO: Maybe come up with a better name?
416     return OW_PartialEarlierWithFullLater;
417   }
418 
419   // Another interesting case is if the killing store overwrites the end of the
420   // dead store.
421   //
422   //      |--dead--|
423   //                |--   killing   --|
424   //
425   // In this case we may want to trim the size of the dead store to avoid
426   // generating stores to addresses which will definitely be overwritten by the
427   // killing store.
428   if (!EnablePartialOverwriteTracking &&
429       (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) &&
430        int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize)))
431     return OW_End;
432 
433   // Finally, we also need to check if the killing store overwrites the
434   // beginning of the dead store.
435   //
436   //                |--dead--|
437   //      |--  killing  --|
438   //
439   // In this case we may want to move the destination address and trim the size
440   // of the dead store to avoid generating stores to addresses which will
441   // definitely be overwritten by the killing store.
442   if (!EnablePartialOverwriteTracking &&
443       (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) {
444     assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
445            "Expect to be handled as OW_Complete");
446     return OW_Begin;
447   }
448   // Otherwise, they don't completely overlap.
449   return OW_Unknown;
450 }
451 
452 /// Returns true if the memory which is accessed by the second instruction is not
453 /// modified between the first and the second instruction.
454 /// Precondition: Second instruction must be dominated by the first
455 /// instruction.
456 static bool
457 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
458                            BatchAAResults &AA, const DataLayout &DL,
459                            DominatorTree *DT) {
460   // Do a backwards scan through the CFG from SecondI to FirstI. Look for
461   // instructions which can modify the memory location accessed by SecondI.
462   //
463   // While doing the walk keep track of the address to check. It might be
464   // different in different basic blocks due to PHI translation.
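  //
  // As a rough illustration: if the address in SecondI's block is
  // %p = phi i8* [ %a, %bb1 ], [ %b, %bb2 ], the scan continues into %bb1
  // checking writes against %a and into %bb2 checking writes against %b.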
465   using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
466   SmallVector<BlockAddressPair, 16> WorkList;
467   // Keep track of the address we visited each block with. Bail out if we
468   // visit a block with different addresses.
469   DenseMap<BasicBlock *, Value *> Visited;
470 
471   BasicBlock::iterator FirstBBI(FirstI);
472   ++FirstBBI;
473   BasicBlock::iterator SecondBBI(SecondI);
474   BasicBlock *FirstBB = FirstI->getParent();
475   BasicBlock *SecondBB = SecondI->getParent();
476   MemoryLocation MemLoc;
477   if (auto *MemSet = dyn_cast<MemSetInst>(SecondI))
478     MemLoc = MemoryLocation::getForDest(MemSet);
479   else
480     MemLoc = MemoryLocation::get(SecondI);
481 
482   auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
483 
484   // Start checking the SecondBB.
485   WorkList.push_back(
486       std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
487   bool isFirstBlock = true;
488 
489   // Check all blocks going backward until we reach the FirstBB.
490   while (!WorkList.empty()) {
491     BlockAddressPair Current = WorkList.pop_back_val();
492     BasicBlock *B = Current.first;
493     PHITransAddr &Addr = Current.second;
494     Value *Ptr = Addr.getAddr();
495 
496     // Ignore instructions before FirstI if this is the FirstBB.
497     BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
498 
499     BasicBlock::iterator EI;
500     if (isFirstBlock) {
501       // Ignore instructions after SecondI if this is the first visit of SecondBB.
502       assert(B == SecondBB && "first block is not the store block");
503       EI = SecondBBI;
504       isFirstBlock = false;
505     } else {
506       // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
507       // In this case we also have to look at instructions after SecondI.
508       EI = B->end();
509     }
510     for (; BI != EI; ++BI) {
511       Instruction *I = &*BI;
512       if (I->mayWriteToMemory() && I != SecondI)
513         if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
514           return false;
515     }
516     if (B != FirstBB) {
517       assert(B != &FirstBB->getParent()->getEntryBlock() &&
518           "Should not hit the entry block because SecondI must be dominated by FirstI");
519       for (BasicBlock *Pred : predecessors(B)) {
520         PHITransAddr PredAddr = Addr;
521         if (PredAddr.NeedsPHITranslationFromBlock(B)) {
522           if (!PredAddr.IsPotentiallyPHITranslatable())
523             return false;
524           if (PredAddr.PHITranslateValue(B, Pred, DT, false))
525             return false;
526         }
527         Value *TranslatedPtr = PredAddr.getAddr();
528         auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
529         if (!Inserted.second) {
530           // We already visited this block before. If it was with a different
531           // address - bail out!
532           if (TranslatedPtr != Inserted.first->second)
533             return false;
534           // ... otherwise just skip it.
535           continue;
536         }
537         WorkList.push_back(std::make_pair(Pred, PredAddr));
538       }
539     }
540   }
541   return true;
542 }
543 
544 static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
545                          uint64_t &DeadSize, int64_t KillingStart,
546                          uint64_t KillingSize, bool IsOverwriteEnd) {
547   auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
548   Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
549 
550   // We assume that memset/memcpy operates in chunks of the "largest" native
551   // type size, aligned on the same value. That means the optimal start and size
552   // of memset/memcpy should be a multiple of the preferred alignment of that
553   // type, i.e. there is no sense in trying to reduce the store size any further
554   // since any "extra" stores come for free anyway.
555   // On the other hand, maximum alignment we can achieve is limited by alignment
556   // of initial store.
557 
558   // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
559   // "largest" native type.
560   // Note: What is the proper way to get that value?
561   // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
562   // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
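  //
  // As a rough worked example (illustrative numbers only): when shortening the
  // end of a 32-byte memset starting at DeadStart = 0 with PrefAlign = 8, where
  // the killing store covers [20, 40), Off = offsetToAlignment(20, 8) = 4, so
  // ToRemoveStart = 24, ToRemoveSize = 8 and the memset is trimmed to 24 bytes,
  // keeping the remaining length a multiple of PrefAlign.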
563 
564   int64_t ToRemoveStart = 0;
565   uint64_t ToRemoveSize = 0;
566   // Compute start and size of the region to remove. Make sure 'PrefAlign' is
567   // maintained on the remaining store.
568   if (IsOverwriteEnd) {
569     // Calculate required adjustment for 'KillingStart' in order to keep
570     // remaining store size aligned on 'PrefAlign'.
571     uint64_t Off =
572         offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign);
573     ToRemoveStart = KillingStart + Off;
574     if (DeadSize <= uint64_t(ToRemoveStart - DeadStart))
575       return false;
576     ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart);
577   } else {
578     ToRemoveStart = DeadStart;
579     assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
580            "Not overlapping accesses?");
581     ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart);
582     // Calculate required adjustment for 'ToRemoveSize' in order to keep
583     // the start of the remaining store aligned on 'PrefAlign'.
584     uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
585     if (Off != 0) {
586       if (ToRemoveSize <= (PrefAlign.value() - Off))
587         return false;
588       ToRemoveSize -= PrefAlign.value() - Off;
589     }
590     assert(isAligned(PrefAlign, ToRemoveSize) &&
591            "Should preserve selected alignment");
592   }
593 
594   assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
595   assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
596 
597   uint64_t NewSize = DeadSize - ToRemoveSize;
598   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
599     // When shortening an atomic memory intrinsic, the newly shortened
600     // length must remain an integer multiple of the element size.
601     const uint32_t ElementSize = AMI->getElementSizeInBytes();
602     if (0 != NewSize % ElementSize)
603       return false;
604   }
605 
606   LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
607                     << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
608                     << "\n  KILLER [" << ToRemoveStart << ", "
609                     << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
610 
611   Value *DeadWriteLength = DeadIntrinsic->getLength();
612   Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize);
613   DeadIntrinsic->setLength(TrimmedLength);
614   DeadIntrinsic->setDestAlignment(PrefAlign);
615 
616   if (!IsOverwriteEnd) {
617     Value *OrigDest = DeadIntrinsic->getRawDest();
618     Type *Int8PtrTy =
619         Type::getInt8PtrTy(DeadIntrinsic->getContext(),
620                            OrigDest->getType()->getPointerAddressSpace());
621     Value *Dest = OrigDest;
622     if (OrigDest->getType() != Int8PtrTy)
623       Dest = CastInst::CreatePointerCast(OrigDest, Int8PtrTy, "", DeadI);
624     Value *Indices[1] = {
625         ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)};
626     Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
627         Type::getInt8Ty(DeadIntrinsic->getContext()), Dest, Indices, "", DeadI);
628     NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc());
629     if (NewDestGEP->getType() != OrigDest->getType())
630       NewDestGEP = CastInst::CreatePointerCast(NewDestGEP, OrigDest->getType(),
631                                                "", DeadI);
632     DeadIntrinsic->setDest(NewDestGEP);
633   }
634 
635   // Finally update start and size of dead access.
636   if (!IsOverwriteEnd)
637     DeadStart += ToRemoveSize;
638   DeadSize = NewSize;
639 
640   return true;
641 }
642 
643 static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap,
644                             int64_t &DeadStart, uint64_t &DeadSize) {
645   if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI))
646     return false;
647 
648   OverlapIntervalsTy::iterator OII = --IntervalMap.end();
649   int64_t KillingStart = OII->second;
650   uint64_t KillingSize = OII->first - KillingStart;
651 
652   assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
653 
654   if (KillingStart > DeadStart &&
655       // Note: "KillingStart - DeadStart" is known to be positive due to
656       // preceding check.
657       (uint64_t)(KillingStart - DeadStart) < DeadSize &&
658       // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to
659       // be non negative due to preceding checks.
660       KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) {
661     if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
662                      true)) {
663       IntervalMap.erase(OII);
664       return true;
665     }
666   }
667   return false;
668 }
669 
670 static bool tryToShortenBegin(Instruction *DeadI,
671                               OverlapIntervalsTy &IntervalMap,
672                               int64_t &DeadStart, uint64_t &DeadSize) {
673   if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI))
674     return false;
675 
676   OverlapIntervalsTy::iterator OII = IntervalMap.begin();
677   int64_t KillingStart = OII->second;
678   uint64_t KillingSize = OII->first - KillingStart;
679 
680   assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
681 
682   if (KillingStart <= DeadStart &&
683       // Note: "DeadStart - KillingStart" is known to be non negative due to
684       // preceding check.
685       KillingSize > (uint64_t)(DeadStart - KillingStart)) {
686     // Note: "KillingSize - (uint64_t)(DeadStart - KillingStart)" is known to
687     // be positive due to preceding checks.
688     assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
689            "Should have been handled as OW_Complete");
690     if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
691                      false)) {
692       IntervalMap.erase(OII);
693       return true;
694     }
695   }
696   return false;
697 }
698 
699 static Constant *
700 tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI,
701                                    int64_t KillingOffset, int64_t DeadOffset,
702                                    const DataLayout &DL, BatchAAResults &AA,
703                                    DominatorTree *DT) {
704 
705   if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) &&
706       DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) &&
707       KillingI && isa<ConstantInt>(KillingI->getValueOperand()) &&
708       DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) &&
709       memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) {
710     // If the store we find is:
711     //   a) partially overwritten by the killing store,
712     //   b) the killing store is fully contained in the dead one and
713     //   c) they both have a constant value
714     //   d) none of the two stores need padding
715     // Merge the two stores, replacing the dead store's value with a
716     // merge of both values.
717     // TODO: Deal with other constant types (vectors, etc), and probably
718     // some mem intrinsics (if needed)
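    //
    // As an illustrative sketch with made-up values: on a little-endian target,
    // merging a dead store of i32 0x11223344 at offset 0 with a killing store
    // of i16 0xAABB at offset 2 gives BitOffsetDiff = 16, LShiftAmount = 16,
    // Mask = bits [16, 32), and Merged = 0xAABB3344.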
719 
720     APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue();
721     APInt KillingValue =
722         cast<ConstantInt>(KillingI->getValueOperand())->getValue();
723     unsigned KillingBits = KillingValue.getBitWidth();
724     assert(DeadValue.getBitWidth() > KillingValue.getBitWidth());
725     KillingValue = KillingValue.zext(DeadValue.getBitWidth());
726 
727     // Offset of the smaller store inside the larger store
728     unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
729     unsigned LShiftAmount =
730         DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits
731                          : BitOffsetDiff;
732     APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount,
733                                    LShiftAmount + KillingBits);
734     // Clear the bits we'll be replacing, then OR with the smaller
735     // store, shifted appropriately.
736     APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount);
737     LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Dead: " << *DeadI
738                       << "\n  Killing: " << *KillingI
739                       << "\n  Merged Value: " << Merged << '\n');
740     return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged);
741   }
742   return nullptr;
743 }
744 
745 namespace {
746 // Returns true if \p I is an intrinsic that does not read or write memory.
747 bool isNoopIntrinsic(Instruction *I) {
748   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
749     switch (II->getIntrinsicID()) {
750     case Intrinsic::lifetime_start:
751     case Intrinsic::lifetime_end:
752     case Intrinsic::invariant_end:
753     case Intrinsic::launder_invariant_group:
754     case Intrinsic::assume:
755       return true;
756     case Intrinsic::dbg_addr:
757     case Intrinsic::dbg_declare:
758     case Intrinsic::dbg_label:
759     case Intrinsic::dbg_value:
760       llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
761     default:
762       return false;
763     }
764   }
765   return false;
766 }
767 
768 // Check if we can ignore \p D for DSE.
769 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller,
770                 const TargetLibraryInfo &TLI) {
771   Instruction *DI = D->getMemoryInst();
772   // Calls that only access inaccessible memory cannot read or write any memory
773   // locations we consider for elimination.
774   if (auto *CB = dyn_cast<CallBase>(DI))
775     if (CB->onlyAccessesInaccessibleMemory()) {
776       if (isAllocLikeFn(DI, &TLI))
777         return false;
778       return true;
779     }
780   // We can eliminate stores to locations not visible to the caller across
781   // throwing instructions.
782   if (DI->mayThrow() && !DefVisibleToCaller)
783     return true;
784 
785   // We can remove the dead stores, irrespective of the fence and its ordering
786   // (release/acquire/seq_cst). Fences only constrain the ordering of
787   // already visible stores; they do not make a store visible to other
788   // threads. So, skipping over a fence does not change a store from being
789   // dead.
790   if (isa<FenceInst>(DI))
791     return true;
792 
793   // Skip intrinsics that do not really read or modify memory.
794   if (isNoopIntrinsic(DI))
795     return true;
796 
797   return false;
798 }
799 
800 struct DSEState {
801   Function &F;
802   AliasAnalysis &AA;
803   EarliestEscapeInfo EI;
804 
805   /// The single BatchAA instance that is used to cache AA queries. It will
806   /// not be invalidated over the whole run. This is safe, because:
807   /// 1. Only memory writes are removed, so the alias cache for memory
808   ///    locations remains valid.
809   /// 2. No new instructions are added (only instructions removed), so cached
810   ///    information for a deleted value cannot be accessed by a re-used new
811   ///    value pointer.
812   BatchAAResults BatchAA;
813 
814   MemorySSA &MSSA;
815   DominatorTree &DT;
816   PostDominatorTree &PDT;
817   const TargetLibraryInfo &TLI;
818   const DataLayout &DL;
819   const LoopInfo &LI;
820 
821   // Whether the function contains any irreducible control flow, useful for
822   // accurately detecting loops.
823   bool ContainsIrreducibleLoops;
824 
825   // All MemoryDefs that potentially could kill other MemDefs.
826   SmallVector<MemoryDef *, 64> MemDefs;
827   // Any that should be skipped as they are already deleted
828   SmallPtrSet<MemoryAccess *, 4> SkipStores;
829   // Keep track of all of the objects that are invisible to the caller before
830   // the function returns.
831   // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
832   DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
833   // Keep track of all of the objects that are invisible to the caller after
834   // the function returns.
835   DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
836   // Keep track of blocks with throwing instructions not modeled in MemorySSA.
837   SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
838   // Post-order numbers for each basic block. Used to figure out if memory
839   // accesses are executed before another access.
840   DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
841 
842   /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
843   /// basic block.
844   DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
845 
846   // Class contains self-reference, make sure it's not copied/moved.
847   DSEState(const DSEState &) = delete;
848   DSEState &operator=(const DSEState &) = delete;
849 
850   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
851            PostDominatorTree &PDT, const TargetLibraryInfo &TLI,
852            const LoopInfo &LI)
853       : F(F), AA(AA), EI(DT, LI), BatchAA(AA, &EI), MSSA(MSSA), DT(DT),
854         PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) {
855     // Collect blocks with throwing instructions not modeled in MemorySSA and
856     // candidate MemoryDefs.
857     unsigned PO = 0;
858     for (BasicBlock *BB : post_order(&F)) {
859       PostOrderNumbers[BB] = PO++;
860       for (Instruction &I : *BB) {
861         MemoryAccess *MA = MSSA.getMemoryAccess(&I);
862         if (I.mayThrow() && !MA)
863           ThrowingBlocks.insert(I.getParent());
864 
865         auto *MD = dyn_cast_or_null<MemoryDef>(MA);
866         if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit &&
867             (getLocForWriteEx(&I) || isMemTerminatorInst(&I)))
868           MemDefs.push_back(MD);
869       }
870     }
871 
872     // Treat byval or inalloca arguments the same as Allocas: stores to them are
873     // dead at the end of the function.
874     for (Argument &AI : F.args())
875       if (AI.hasPassPointeeByValueCopyAttr()) {
876         // For byval, the caller doesn't know the address of the allocation.
877         if (AI.hasByValAttr())
878           InvisibleToCallerBeforeRet.insert({&AI, true});
879         InvisibleToCallerAfterRet.insert({&AI, true});
880       }
881 
882     // Collect whether there is any irreducible control flow in the function.
883     ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
884   }
885 
886   /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p
887   /// KillingI instruction) completely overwrites a store to the 'DeadLoc'
888   /// location (by \p DeadI instruction).
889   /// Return OW_MaybePartial if \p KillingI does not completely overwrite
890   /// \p DeadI, but they both write to the same underlying object. In that
891   /// case, use isPartialOverwrite to check if \p KillingI partially overwrites
892   /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined.
893   OverwriteResult isOverwrite(const Instruction *KillingI,
894                               const Instruction *DeadI,
895                               const MemoryLocation &KillingLoc,
896                               const MemoryLocation &DeadLoc,
897                               int64_t &KillingOff, int64_t &DeadOff) {
898     // AliasAnalysis does not always account for loops. Limit overwrite checks
899     // to dependencies for which we can guarantee they are independent of any
900     // loops they are in.
901     if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
902       return OW_Unknown;
903 
904     // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
905     // get imprecise values here, though (except for unknown sizes).
906     if (!KillingLoc.Size.isPrecise() || !DeadLoc.Size.isPrecise()) {
907       // In case no constant size is known, try to use the IR values for the number
908       // of bytes written and check if they match.
909       const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
910       const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
911       if (KillingMemI && DeadMemI) {
912         const Value *KillingV = KillingMemI->getLength();
913         const Value *DeadV = DeadMemI->getLength();
914         if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
915           return OW_Complete;
916       }
917 
918       // Masked stores have imprecise locations, but we can reason about them
919       // to some extent.
920       return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
921     }
922 
923     const uint64_t KillingSize = KillingLoc.Size.getValue();
924     const uint64_t DeadSize = DeadLoc.Size.getValue();
925 
926     // Query the alias information
927     AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
928 
929     // If the start pointers are the same, we just have to compare sizes to see if
930     // the killing store was larger than the dead store.
931     if (AAR == AliasResult::MustAlias) {
932       // Make sure that KillingSize >= DeadSize.
933       if (KillingSize >= DeadSize)
934         return OW_Complete;
935     }
936 
937     // If we hit a partial alias we may have a full overwrite
938     if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
939       int32_t Off = AAR.getOffset();
940       if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
941         return OW_Complete;
942     }
943 
944     // Check to see if the killing store is to the entire object (either a
945     // global, an alloca, or a byval/inalloca argument).  If so, then it clearly
946     // overwrites any other store to the same object.
947     const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
948     const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
949     const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
950     const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
951 
952     // If we can't resolve the same pointers to the same object, then we can't
953     // analyze them at all.
954     if (DeadUndObj != KillingUndObj)
955       return OW_Unknown;
956 
957     // If the KillingI store is to a recognizable object, get its size.
958     uint64_t KillingUndObjSize = getPointerSize(KillingUndObj, DL, TLI, &F);
959     if (KillingUndObjSize != MemoryLocation::UnknownSize)
960       if (KillingUndObjSize == KillingSize && KillingUndObjSize >= DeadSize)
961         return OW_Complete;
962 
963     // Okay, we have stores to two completely different pointers.  Try to
964     // decompose the pointer into a "base + constant_offset" form.  If the base
965     // pointers are equal, then we can reason about the two stores.
966     DeadOff = 0;
967     KillingOff = 0;
968     const Value *DeadBasePtr =
969         GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
970     const Value *KillingBasePtr =
971         GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);
972 
973     // If the base pointers still differ, we have two completely different
974     // stores.
975     if (DeadBasePtr != KillingBasePtr)
976       return OW_Unknown;
977 
978     // The killing access completely overlaps the dead store if and only if
979     // both start and end of the dead one is "inside" the killing one:
980     //    |<->|--dead--|<->|
981     //    |-----killing------|
982     // Accesses may overlap if and only if start of one of them is "inside"
983     // another one:
984     //    |<->|--dead--|<-------->|
985     //    |-------killing--------|
986     //           OR
987     //    |-------dead-------|
988     //    |<->|---killing---|<----->|
989     //
990     // We have to be careful here as *Off is signed while *.Size is unsigned.
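    //
    // For example (illustrative values): DeadOff = 4, DeadSize = 4,
    // KillingOff = 0, KillingSize = 16 gives (DeadOff - KillingOff) + DeadSize
    // = 8 <= KillingSize, so the dead access [4, 8) lies entirely inside the
    // killing access [0, 16) and OW_Complete is returned.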
991 
992     // Check if the dead access starts "not before" the killing one.
993     if (DeadOff >= KillingOff) {
994       // If the dead access ends "not after" the killing access then the
995       // dead one is completely overwritten by the killing one.
996       if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
997         return OW_Complete;
998       // If start of the dead access is "before" end of the killing access
999       // then accesses overlap.
1000       else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
1001         return OW_MaybePartial;
1002     }
1003     // If start of the killing access is "before" end of the dead access then
1004     // accesses overlap.
1005     else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
1006       return OW_MaybePartial;
1007     }
1008 
1009     // Can reach here only if accesses are known not to overlap. There is no
1010     // dedicated code to indicate no overlap so signal "unknown".
1011     return OW_Unknown;
1012   }
1013 
1014   bool isInvisibleToCallerAfterRet(const Value *V) {
1015     if (isa<AllocaInst>(V))
1016       return true;
1017     auto I = InvisibleToCallerAfterRet.insert({V, false});
1018     if (I.second) {
1019       if (!isInvisibleToCallerBeforeRet(V)) {
1020         I.first->second = false;
1021       } else {
1022         auto *Inst = dyn_cast<Instruction>(V);
1023         if (Inst && isAllocLikeFn(Inst, &TLI))
1024           I.first->second = !PointerMayBeCaptured(V, true, false);
1025       }
1026     }
1027     return I.first->second;
1028   }
1029 
1030   bool isInvisibleToCallerBeforeRet(const Value *V) {
1031     if (isa<AllocaInst>(V))
1032       return true;
1033     auto I = InvisibleToCallerBeforeRet.insert({V, false});
1034     if (I.second) {
1035       auto *Inst = dyn_cast<Instruction>(V);
1036       if (Inst && isAllocLikeFn(Inst, &TLI))
1037         // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1038         // with the killing MemoryDef. But we refrain from doing so for now to
1039         // limit compile-time and this does not cause any changes to the number
1040         // of stores removed on a large test set in practice.
1041         I.first->second = !PointerMayBeCaptured(V, false, true);
1042     }
1043     return I.first->second;
1044   }
1045 
1046   Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1047     if (!I->mayWriteToMemory())
1048       return None;
1049 
1050     if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1051       return {MemoryLocation::getForDest(MTI)};
1052 
1053     if (auto *CB = dyn_cast<CallBase>(I)) {
1054       // If the function may write to memory we do not know about, bail out.
1055       if (!CB->onlyAccessesArgMemory() &&
1056           !CB->onlyAccessesInaccessibleMemOrArgMem())
1057         return None;
1058 
1059       LibFunc LF;
1060       if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1061         switch (LF) {
1062         case LibFunc_strncpy:
1063           if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2)))
1064             return MemoryLocation(CB->getArgOperand(0),
1065                                   LocationSize::precise(Len->getZExtValue()),
1066                                   CB->getAAMetadata());
1067           LLVM_FALLTHROUGH;
1068         case LibFunc_strcpy:
1069         case LibFunc_strcat:
1070         case LibFunc_strncat:
1071           return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1072         default:
1073           break;
1074         }
1075       }
1076       switch (CB->getIntrinsicID()) {
1077       case Intrinsic::init_trampoline:
1078         return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1079       case Intrinsic::masked_store:
1080         return {MemoryLocation::getForArgument(CB, 1, TLI)};
1081       default:
1082         break;
1083       }
1084       return None;
1085     }
1086 
1087     return MemoryLocation::getOrNone(I);
1088   }
1089 
1090   /// Returns true if \p UseInst completely overwrites \p DefLoc
1091   /// (stored by \p DefInst).
1092   bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1093                            Instruction *UseInst) {
1094     // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1095     // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1096     // MemoryDef.
1097     if (!UseInst->mayWriteToMemory())
1098       return false;
1099 
1100     if (auto *CB = dyn_cast<CallBase>(UseInst))
1101       if (CB->onlyAccessesInaccessibleMemory())
1102         return false;
1103 
1104     int64_t InstWriteOffset, DepWriteOffset;
1105     if (auto CC = getLocForWriteEx(UseInst))
1106       return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
1107                          DepWriteOffset) == OW_Complete;
1108     return false;
1109   }
1110 
1111   /// Returns true if \p Def is not read before returning from the function.
1112   bool isWriteAtEndOfFunction(MemoryDef *Def) {
1113     LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
1114                       << *Def->getMemoryInst()
1115                       << ") is at the end of the function\n");
1116 
1117     auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1118     if (!MaybeLoc) {
1119       LLVM_DEBUG(dbgs() << "  ... could not get location for write.\n");
1120       return false;
1121     }
1122 
1123     SmallVector<MemoryAccess *, 4> WorkList;
1124     SmallPtrSet<MemoryAccess *, 8> Visited;
1125     auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1126       if (!Visited.insert(Acc).second)
1127         return;
1128       for (Use &U : Acc->uses())
1129         WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1130     };
1131     PushMemUses(Def);
1132     for (unsigned I = 0; I < WorkList.size(); I++) {
1133       if (WorkList.size() >= MemorySSAScanLimit) {
1134         LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
1135         return false;
1136       }
1137 
1138       MemoryAccess *UseAccess = WorkList[I];
1139       // Simply adding the users of MemoryPhi to the worklist is not enough,
1140       // because we might miss read clobbers in different iterations of a loop,
1141       // for example.
1142       // TODO: Add support for phi translation to handle the loop case.
1143       if (isa<MemoryPhi>(UseAccess))
1144         return false;
1145 
1146       // TODO: Checking for aliasing is expensive. Consider reducing the amount
1147       // of times this is called and/or caching it.
1148       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1149       if (isReadClobber(*MaybeLoc, UseInst)) {
1150         LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
1151         return false;
1152       }
1153 
1154       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1155         PushMemUses(UseDef);
1156     }
1157     return true;
1158   }
1159 
1160   /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1161   /// pair with the MemoryLocation terminated by \p I and a boolean flag
1162   /// indicating whether \p I is a free-like call.
1163   Optional<std::pair<MemoryLocation, bool>>
1164   getLocForTerminator(Instruction *I) const {
1165     uint64_t Len;
1166     Value *Ptr;
1167     if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1168                                                       m_Value(Ptr))))
1169       return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1170 
1171     if (auto *CB = dyn_cast<CallBase>(I)) {
1172       if (isFreeCall(I, &TLI))
1173         return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1174                                true)};
1175     }
1176 
1177     return None;
1178   }
1179 
1180   /// Returns true if \p I is a memory terminator instruction like
1181   /// llvm.lifetime.end or free.
1182   bool isMemTerminatorInst(Instruction *I) const {
1183     IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1184     return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1185            isFreeCall(I, &TLI);
1186   }
1187 
1188   /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1189   /// instruction \p AccessI.
1190   bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1191                        Instruction *MaybeTerm) {
1192     Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1193         getLocForTerminator(MaybeTerm);
1194 
1195     if (!MaybeTermLoc)
1196       return false;
1197 
1198     // If the terminator is a free-like call, all accesses to the underlying
1199     // object can be considered terminated.
1200     if (getUnderlyingObject(Loc.Ptr) !=
1201         getUnderlyingObject(MaybeTermLoc->first.Ptr))
1202       return false;
1203 
1204     auto TermLoc = MaybeTermLoc->first;
1205     if (MaybeTermLoc->second) {
1206       const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1207       return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1208     }
1209     int64_t InstWriteOffset = 0;
1210     int64_t DepWriteOffset = 0;
1211     return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
1212                        DepWriteOffset) == OW_Complete;
1213   }
1214 
1215   // Returns true if \p UseInst may read from \p DefLoc.
1216   bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1217     if (isNoopIntrinsic(UseInst))
1218       return false;
1219 
1220     // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1221     // treated as read clobbers.
1222     if (auto SI = dyn_cast<StoreInst>(UseInst))
1223       return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1224 
1225     if (!UseInst->mayReadFromMemory())
1226       return false;
1227 
1228     if (auto *CB = dyn_cast<CallBase>(UseInst))
1229       if (CB->onlyAccessesInaccessibleMemory())
1230         return false;
1231 
1232     return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1233   }
1234 
1235   /// Returns true if a dependency between \p Current and \p KillingDef is
1236   /// guaranteed to be loop invariant for the loops that they are in. Either
1237   /// because they are known to be in the same block, in the same loop level or
1238   /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation
1239   /// during execution of the containing function.
1240   bool isGuaranteedLoopIndependent(const Instruction *Current,
1241                                    const Instruction *KillingDef,
1242                                    const MemoryLocation &CurrentLoc) {
1243     // If the dependency is within the same block or loop level (being careful
1244     // of irreducible loops), we know that AA will return a valid result for the
1245     // memory dependency. (Both at the function level, outside of any loop,
1246     // would also be valid but we currently disable that to limit compile time).
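    // For example, two stores through the same GEP inside a loop may be
    // reported as MustAlias by AA even though the address takes a different
    // value in every iteration, so the store from an earlier iteration is not
    // actually dead.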
1247     if (Current->getParent() == KillingDef->getParent())
1248       return true;
1249     const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
1250     if (!ContainsIrreducibleLoops && CurrentLI &&
1251         CurrentLI == LI.getLoopFor(KillingDef->getParent()))
1252       return true;
1253     // Otherwise check the memory location is invariant to any loops.
1254     return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
1255   }
1256 
1257   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1258   /// loop. In particular, this guarantees that it only references a single
1259   /// MemoryLocation during execution of the containing function.
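  /// For example, an alloca (or a GEP with all-constant indices based on one)
  /// always names the same location, whereas a GEP whose index varies from one
  /// loop iteration to the next does not.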
1260   bool isGuaranteedLoopInvariant(const Value *Ptr) {
1261     auto IsGuaranteedLoopInvariantBase = [this](const Value *Ptr) {
1262       Ptr = Ptr->stripPointerCasts();
1263       if (auto *I = dyn_cast<Instruction>(Ptr)) {
1264         if (isa<AllocaInst>(Ptr))
1265           return true;
1266 
1267         if (isAllocLikeFn(I, &TLI))
1268           return true;
1269 
1270         return false;
1271       }
1272       return true;
1273     };
1274 
1275     Ptr = Ptr->stripPointerCasts();
1276     if (auto *I = dyn_cast<Instruction>(Ptr)) {
1277       if (I->getParent()->isEntryBlock())
1278         return true;
1279     }
1280     if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1281       return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1282              GEP->hasAllConstantIndices();
1283     }
1284     return IsGuaranteedLoopInvariantBase(Ptr);
1285   }
1286 
1287   // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess,
1288   // with no read access between them or on any other path to a function exit
1289   // block if \p KillingLoc is not accessible after the function returns. If
1290   // there is no such MemoryDef, return None. The returned value may not
1291   // (completely) overwrite \p KillingLoc. Currently we bail out when we
1292   // encounter an aliasing MemoryUse (read).
1293   Optional<MemoryAccess *>
1294   getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1295                   const MemoryLocation &KillingLoc, const Value *KillingUndObj,
1296                   unsigned &ScanLimit, unsigned &WalkerStepLimit,
1297                   bool IsMemTerm, unsigned &PartialLimit) {
1298     if (ScanLimit == 0 || WalkerStepLimit == 0) {
1299       LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1300       return None;
1301     }
1302 
1303     MemoryAccess *Current = StartAccess;
1304     Instruction *KillingI = KillingDef->getMemoryInst();
1305     LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
1306 
1307     // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1308     Optional<MemoryLocation> CurrentLoc;
1309     for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
1310       LLVM_DEBUG({
1311         dbgs() << "   visiting " << *Current;
1312         if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1313           dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1314                  << ")";
1315         dbgs() << "\n";
1316       });
1317 
1318       // Reached TOP.
1319       if (MSSA.isLiveOnEntryDef(Current)) {
1320         LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
1321         return None;
1322       }
1323 
1324       // Cost of a step. Accesses in the same block are more likely to be valid
1325       // candidates for elimination, hence consider them cheaper.
1326       unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1327                               ? MemorySSASameBBStepCost
1328                               : MemorySSAOtherBBStepCost;
1329       if (WalkerStepLimit <= StepCost) {
1330         LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
1331         return None;
1332       }
1333       WalkerStepLimit -= StepCost;
1334 
1335       // Return for MemoryPhis. They cannot be eliminated directly and the
1336       // caller is responsible for traversing them.
1337       if (isa<MemoryPhi>(Current)) {
1338         LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
1339         return Current;
1340       }
1341 
1342       // Below, check if CurrentDef is a valid candidate to be eliminated by
1343       // KillingDef. If it is not, check the next candidate.
1344       MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1345       Instruction *CurrentI = CurrentDef->getMemoryInst();
1346 
1347       if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(KillingUndObj),
1348                      TLI))
1349         continue;
1350 
1351       // Before we try to remove anything, check for any extra throwing
1352       // instructions that block us from DSEing
1353       if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
1354         LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
1355         return None;
1356       }
1357 
1358       // Check for anything that looks like it will be a barrier to further
1359       // removal
1360       if (isDSEBarrier(KillingUndObj, CurrentI)) {
1361         LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
1362         return None;
1363       }
1364 
1365       // If Current is known to be on a path that reads DefLoc or is a read
1366       // clobber, bail out, as the path is not profitable. We skip this check
1367       // for intrinsic calls, because the code knows how to handle memcpy
1368       // intrinsics.
1369       if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI))
1370         return None;
1371 
1372       // Quick check if there are direct uses that are read-clobbers.
1373       if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) {
1374             if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1375               return !MSSA.dominates(StartAccess, UseOrDef) &&
1376                      isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
1377             return false;
1378           })) {
1379         LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
1380         return None;
1381       }
1382 
1383       // If Current cannot be analyzed or is not removable, check the next
1384       // candidate.
1385       if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI))
1386         continue;
1387 
1388       // If Current does not have an analyzable write location, skip it
1389       CurrentLoc = getLocForWriteEx(CurrentI);
1390       if (!CurrentLoc)
1391         continue;
1392 
1393       // AliasAnalysis does not account for loops. Limit elimination to
1394       // candidates that are guaranteed to always store to the same memory
1395       // location and that are not located in different loops.
1396       if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
1397         LLVM_DEBUG(dbgs() << "  ... not guaranteed loop independent\n");
1398         WalkerStepLimit -= 1;
1399         continue;
1400       }
1401 
1402       if (IsMemTerm) {
1403         // If the killing def is a memory terminator (e.g. lifetime.end), check
1404         // the next candidate if Current does not write to the same underlying
1405         // object as the terminator.
1406         if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI))
1407           continue;
1408       } else {
1409         int64_t KillingOffset = 0;
1410         int64_t DeadOffset = 0;
1411         auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
1412                               KillingOffset, DeadOffset);
1413         // If Current does not write to the same object as KillingDef, check
1414         // the next candidate.
1415         if (OR == OW_Unknown)
1416           continue;
1417         else if (OR == OW_MaybePartial) {
1418           // If KillingDef only partially overwrites Current, check the next
1419           // candidate if the partial step limit is exceeded. This aggressively
1420           // limits the number of candidates for partial store elimination,
1421           // which are less likely to be removable in the end.
1422           if (PartialLimit <= 1) {
1423             WalkerStepLimit -= 1;
1424             continue;
1425           }
1426           PartialLimit -= 1;
1427         }
1428       }
1429       break;
1430     };
1431 
1432     // Accesses to objects accessible after the function returns can only be
1433     // eliminated if the access is dead along all paths to the exit. Collect
1434     // the blocks with killing (i.e. completely overwriting) MemoryDefs and check if
1435     // they cover all paths from MaybeDeadAccess to any function exit.
1436     SmallPtrSet<Instruction *, 16> KillingDefs;
1437     KillingDefs.insert(KillingDef->getMemoryInst());
1438     MemoryAccess *MaybeDeadAccess = Current;
1439     MemoryLocation MaybeDeadLoc = *CurrentLoc;
1440     Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
1441     LLVM_DEBUG(dbgs() << "  Checking for reads of " << *MaybeDeadAccess << " ("
1442                       << *MaybeDeadI << ")\n");
1443 
1444     SmallSetVector<MemoryAccess *, 32> WorkList;
1445     auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1446       for (Use &U : Acc->uses())
1447         WorkList.insert(cast<MemoryAccess>(U.getUser()));
1448     };
1449     PushMemUses(MaybeDeadAccess);
1450 
1451     // Check if MaybeDeadAccess may be read.
1452     for (unsigned I = 0; I < WorkList.size(); I++) {
1453       MemoryAccess *UseAccess = WorkList[I];
1454 
1455       LLVM_DEBUG(dbgs() << "   " << *UseAccess);
1456       // Bail out if the number of accesses to check exceeds the scan limit.
1457       if (ScanLimit < (WorkList.size() - I)) {
1458         LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1459         return None;
1460       }
1461       --ScanLimit;
1462       NumDomMemDefChecks++;
1463 
1464       if (isa<MemoryPhi>(UseAccess)) {
1465         if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1466               return DT.properlyDominates(KI->getParent(),
1467                                           UseAccess->getBlock());
1468             })) {
1469           LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1470           continue;
1471         }
1472         LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
1473         PushMemUses(UseAccess);
1474         continue;
1475       }
1476 
1477       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1478       LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1479 
1480       if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1481             return DT.dominates(KI, UseInst);
1482           })) {
1483         LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1484         continue;
1485       }
1486 
1487       // A memory terminator kills all preceding MemoryDefs and all succeeding
1488       // MemoryAccesses. We do not have to check its users.
1489       if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1490         LLVM_DEBUG(
1491             dbgs()
1492             << " ... skipping, memterminator invalidates following accesses\n");
1493         continue;
1494       }
1495 
1496       if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1497         LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
1498         PushMemUses(UseAccess);
1499         continue;
1500       }
1501 
1502       if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(KillingUndObj)) {
1503         LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
1504         return None;
1505       }
1506 
1507       // Uses which may read the original MemoryDef mean we cannot eliminate the
1508       // original MD. Stop the walk.
1509       if (isReadClobber(MaybeDeadLoc, UseInst)) {
1510         LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
1511         return None;
1512       }
1513 
1514       // If this worklist walks back to the original memory access (and the
1515       // pointer is not guaranteed loop invariant) then we cannot assume that a
1516       // store kills itself.
1517       if (MaybeDeadAccess == UseAccess &&
1518           !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
1519         LLVM_DEBUG(dbgs() << "    ... found not loop invariant self access\n");
1520         return None;
1521       }
1522       // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check
1523       // if it reads the memory location.
1524       // TODO: It would probably be better to check for self-reads before
1525       // calling the function.
1526       if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
1527         LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
1528         continue;
1529       }
1530 
1531       // Check all uses for MemoryDefs, except for defs completely overwriting
1532       // the original location. Otherwise we have to check uses of *all*
1533       // MemoryDefs we discover, including non-aliasing ones; otherwise we might
1534       // miss cases like the following:
1535       //   1 = Def(LoE) ; <----- DeadDef stores [0,1]
1536       //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
1537       //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
1538       //                  (The Use points to the *first* Def it may alias)
1539       //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
1540       //                  stores [0,1]
1541       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1542         if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1543           BasicBlock *MaybeKillingBlock = UseInst->getParent();
1544           if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1545               PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
1546             if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1547               LLVM_DEBUG(dbgs()
1548                          << "    ... found killing def " << *UseInst << "\n");
1549               KillingDefs.insert(UseInst);
1550             }
1551           } else {
1552             LLVM_DEBUG(dbgs()
1553                        << "    ... found preceding def " << *UseInst << "\n");
1554             return None;
1555           }
1556         } else
1557           PushMemUses(UseDef);
1558       }
1559     }
1560 
1561     // For accesses to locations visible after the function returns, make sure
1562     // that the location is dead (i.e. overwritten) along all paths from
1563     // MaybeDeadAccess to the exit.
1564     if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1565       SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1566       for (Instruction *KD : KillingDefs)
1567         KillingBlocks.insert(KD->getParent());
1568       assert(!KillingBlocks.empty() &&
1569              "Expected at least a single killing block");
1570 
1571       // Find the common post-dominator of all killing blocks.
1572       BasicBlock *CommonPred = *KillingBlocks.begin();
1573       for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
1574         if (!CommonPred)
1575           break;
1576         CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
1577       }
1578 
1579       // If CommonPred is in the set of killing blocks, just check if it
1580       // post-dominates MaybeDeadAccess.
1581       if (KillingBlocks.count(CommonPred)) {
1582         if (PDT.dominates(CommonPred, MaybeDeadAccess->getBlock()))
1583           return {MaybeDeadAccess};
1584         return None;
1585       }
1586 
1587       // If the common post-dominator does not post-dominate MaybeDeadAccess,
1588       // there is a path from MaybeDeadAccess to an exit not going through a
1589       // killing block.
1590       if (PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
1591         SetVector<BasicBlock *> WorkList;
1592 
1593         // If CommonPred is null, there are multiple exits from the function.
1594         // They all have to be added to the worklist.
1595         if (CommonPred)
1596           WorkList.insert(CommonPred);
1597         else
1598           for (BasicBlock *R : PDT.roots())
1599             WorkList.insert(R);
1600 
1601         NumCFGTries++;
1602         // Check if all paths starting from an exit node go through one of the
1603         // killing blocks before reaching MaybeDeadAccess.
1604         for (unsigned I = 0; I < WorkList.size(); I++) {
1605           NumCFGChecks++;
1606           BasicBlock *Current = WorkList[I];
1607           if (KillingBlocks.count(Current))
1608             continue;
1609           if (Current == MaybeDeadAccess->getBlock())
1610             return None;
1611 
1612           // MaybeDeadAccess is reachable from the entry, so we don't have to
1613           // explore unreachable blocks further.
1614           if (!DT.isReachableFromEntry(Current))
1615             continue;
1616 
1617           for (BasicBlock *Pred : predecessors(Current))
1618             WorkList.insert(Pred);
1619 
1620           if (WorkList.size() >= MemorySSAPathCheckLimit)
1621             return None;
1622         }
1623         NumCFGSuccess++;
1624         return {MaybeDeadAccess};
1625       }
1626       return None;
1627     }
1628 
1629     // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is
1630     // potentially dead.
1631     return {MaybeDeadAccess};
1632   }
1633 
1634   // Delete dead memory defs and recursively delete any operands that become trivially dead.
1635   void deleteDeadInstruction(Instruction *SI) {
1636     MemorySSAUpdater Updater(&MSSA);
1637     SmallVector<Instruction *, 32> NowDeadInsts;
1638     NowDeadInsts.push_back(SI);
1639     --NumFastOther;
1640 
1641     while (!NowDeadInsts.empty()) {
1642       Instruction *DeadInst = NowDeadInsts.pop_back_val();
1643       ++NumFastOther;
1644 
1645       // Try to preserve debug information attached to the dead instruction.
1646       salvageDebugInfo(*DeadInst);
1647       salvageKnowledge(DeadInst);
1648 
1649       // Remove the Instruction from MSSA.
1650       if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1651         if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1652           SkipStores.insert(MD);
1653         }
1654 
1655         Updater.removeMemoryAccess(MA);
1656       }
1657 
1658       auto I = IOLs.find(DeadInst->getParent());
1659       if (I != IOLs.end())
1660         I->second.erase(DeadInst);
1661       // Remove its operands
1662       for (Use &O : DeadInst->operands())
1663         if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1664           O = nullptr;
1665           if (isInstructionTriviallyDead(OpI, &TLI))
1666             NowDeadInsts.push_back(OpI);
1667         }
1668 
1669       EI.removeInstruction(DeadInst);
1670       DeadInst->eraseFromParent();
1671     }
1672   }
1673 
1674   // Check for any extra throws between \p KillingI and \p DeadI that block
1675   // DSE.  This only checks extra maythrows (those that aren't MemoryDef's).
1676   // MemoryDef that may throw are handled during the walk from one def to the
1677   // next.
1678   bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
1679                        const Value *KillingUndObj) {
1680     // First see if we can ignore it by using the fact that the underlying
1681     // object (KillingUndObj) is an alloca/alloca-like object that is not
1682     // visible to the caller during execution of the function.
1683     if (KillingUndObj && isInvisibleToCallerBeforeRet(KillingUndObj))
1684       return false;
1685 
1686     if (KillingI->getParent() == DeadI->getParent())
1687       return ThrowingBlocks.count(KillingI->getParent());
1688     return !ThrowingBlocks.empty();
1689   }
1690 
1691   // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following
1692   // instructions act as barriers:
1693   //  * A memory instruction that may throw and \p KillingI accesses a non-stack
1694   //  object.
1695   //  * Atomic stores stronger than monotonic.
1696   bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
1697     // If DeadI may throw it acts as a barrier, unless we are accessing an
1698     // alloca/alloca-like object that does not escape.
1699     if (DeadI->mayThrow() && !isInvisibleToCallerBeforeRet(KillingUndObj))
1700       return true;
1701 
1702     // If DeadI is an atomic load/store stronger than monotonic, do not try to
1703     // eliminate/reorder it.
1704     if (DeadI->isAtomic()) {
1705       if (auto *LI = dyn_cast<LoadInst>(DeadI))
1706         return isStrongerThanMonotonic(LI->getOrdering());
1707       if (auto *SI = dyn_cast<StoreInst>(DeadI))
1708         return isStrongerThanMonotonic(SI->getOrdering());
1709       if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI))
1710         return isStrongerThanMonotonic(ARMW->getOrdering());
1711       if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI))
1712         return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
1713                isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
1714       llvm_unreachable("other instructions should be skipped in MemorySSA");
1715     }
1716     return false;
1717   }
1718 
1719   /// Eliminate writes to objects that are not visible in the caller and are not
1720   /// accessed before returning from the function.
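       /// Illustrative sketch (not from the original source):
       ///   %a = alloca i32
       ///   store i32 1, i32* %a   ; removable: %a is not visible to the caller
       ///   ret void               ; and is not read before the return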
1721   bool eliminateDeadWritesAtEndOfFunction() {
1722     bool MadeChange = false;
1723     LLVM_DEBUG(
1724         dbgs()
1725         << "Trying to eliminate MemoryDefs at the end of the function\n");
1726     for (int I = MemDefs.size() - 1; I >= 0; I--) {
1727       MemoryDef *Def = MemDefs[I];
1728       if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
1729         continue;
1730 
1731       Instruction *DefI = Def->getMemoryInst();
1732       auto DefLoc = getLocForWriteEx(DefI);
1733       if (!DefLoc)
1734         continue;
1735 
1736       // NOTE: Currently eliminating writes at the end of a function is limited
1737       // to MemoryDefs with a single underlying object, to save compile-time. In
1738       // practice it appears the case with multiple underlying objects is very
1739       // uncommon. If it turns out to be important, we can use
1740       // getUnderlyingObjects here instead.
1741       const Value *UO = getUnderlyingObject(DefLoc->Ptr);
1742       if (!isInvisibleToCallerAfterRet(UO))
1743         continue;
1744 
1745       if (isWriteAtEndOfFunction(Def)) {
1746         // See through pointer-to-pointer bitcasts
1747         LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
1748                              "of the function\n");
1749         deleteDeadInstruction(DefI);
1750         ++NumFastStores;
1751         MadeChange = true;
1752       }
1753     }
1754     return MadeChange;
1755   }
1756 
1757   /// \returns true if \p Def is a no-op store, either because it
1758   /// directly stores back a loaded value or stores zero to a calloced object.
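       /// Illustrative sketch of the first case (not from the original source):
       ///   %v = load i32, i32* %p
       ///   store i32 %v, i32* %p   ; no-op if nothing clobbers %p in between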
1759   bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
1760     StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
1761     MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst());
1762     Constant *StoredConstant = nullptr;
1763     if (Store)
1764       StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
1765     if (MemSet)
1766       StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1767 
1768     if (StoredConstant && StoredConstant->isNullValue()) {
1769       auto *DefUOInst = dyn_cast<Instruction>(DefUO);
1770       if (DefUOInst) {
1771         if (isCallocLikeFn(DefUOInst, &TLI)) {
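               // Illustrative sketch (not from the original source): storing zero
               // to memory that a calloc-like call already zero-initialized, with
               // no intervening clobber, is a no-op:
               //   %m = call i8* @calloc(i64 1, i64 64)
               //   store i8 0, i8* %m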
1772           auto *UnderlyingDef =
1773               cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
1774           // If UnderlyingDef is the clobbering access of Def, no instructions
1775           // between them can modify the memory location.
1776           auto *ClobberDef =
1777               MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
1778           return UnderlyingDef == ClobberDef;
1779         }
1780 
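             // Overall rewrite handled below (illustrative sketch, not from the
             // original source):
             //   %m = call i8* @malloc(i64 %n)
             //   call void @llvm.memset.p0i8.i64(i8* %m, i8 0, i64 %n, i1 false)
             // may become
             //   %m = call i8* @calloc(i64 1, i64 %n)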
1781         if (MemSet) {
1782           if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
1783               F.hasFnAttribute(Attribute::SanitizeAddress) ||
1784               F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
1785               F.getName() == "calloc")
1786             return false;
1787           auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUOInst));
1788           if (!Malloc)
1789             return false;
1790           auto *InnerCallee = Malloc->getCalledFunction();
1791           if (!InnerCallee)
1792             return false;
1793           LibFunc Func;
1794           if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
1795               Func != LibFunc_malloc)
1796             return false;
1797 
1798           auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
1799             // Check for a br(icmp(ptr, null), truebb, falsebb) pattern at the
1800             // end of the malloc block.
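                 // Illustrative pattern (sketch, not from the original source);
                 // the memset must sit in the false successor of the null check:
                 //   %m = call i8* @malloc(i64 %n)
                 //   %cmp = icmp eq i8* %m, null
                 //   br i1 %cmp, label %fail.bb, label %memset.bb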
1801             auto *MallocBB = Malloc->getParent(),
1802                  *MemsetBB = Memset->getParent();
1803             if (MallocBB == MemsetBB)
1804               return true;
1805             auto *Ptr = Memset->getArgOperand(0);
1806             auto *TI = MallocBB->getTerminator();
1807             ICmpInst::Predicate Pred;
1808             BasicBlock *TrueBB, *FalseBB;
1809             if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB,
1810                                 FalseBB)))
1811               return false;
1812             if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB)
1813               return false;
1814             return true;
1815           };
1816 
1817           if (Malloc->getOperand(0) == MemSet->getLength()) {
1818             if (shouldCreateCalloc(Malloc, MemSet) &&
1819                 DT.dominates(Malloc, MemSet) &&
1820                 memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT)) {
1821               IRBuilder<> IRB(Malloc);
1822               const auto &DL = Malloc->getModule()->getDataLayout();
1823               if (auto *Calloc =
1824                       emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1),
1825                                  Malloc->getArgOperand(0), IRB, TLI)) {
1826                 MemorySSAUpdater Updater(&MSSA);
1827                 auto *LastDef = cast<MemoryDef>(
1828                     Updater.getMemorySSA()->getMemoryAccess(Malloc));
1829                 auto *NewAccess = Updater.createMemoryAccessAfter(
1830                     cast<Instruction>(Calloc), LastDef, LastDef);
1831                 auto *NewAccessMD = cast<MemoryDef>(NewAccess);
1832                 Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
1833                 Updater.removeMemoryAccess(Malloc);
1834                 Malloc->replaceAllUsesWith(Calloc);
1835                 Malloc->eraseFromParent();
1836                 return true;
1837               }
1838               return false;
1839             }
1840           }
1841         }
1842       }
1843     }
1844 
1845     if (!Store)
1846       return false;
1847 
1848     if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
1849       if (LoadI->getPointerOperand() == Store->getOperand(1)) {
1850         // Get the defining access for the load.
1851         auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
1852         // Fast path: the defining accesses are the same.
1853         if (LoadAccess == Def->getDefiningAccess())
1854           return true;
1855 
1856         // Look through phi accesses. Recursively scan all phi accesses by
1857         // adding them to a worklist. Bail when we run into a memory def that
1858         // does not match LoadAccess.
1859         SetVector<MemoryAccess *> ToCheck;
1860         MemoryAccess *Current =
1861             MSSA.getWalker()->getClobberingMemoryAccess(Def);
1862         // We don't want to bail when we run into the store's memory def, but
1863         // the phi access may point to it. So, pretend that we have already
1864         // checked it.
1865         ToCheck.insert(Def);
1866         ToCheck.insert(Current);
1867         // Start at current (1) to simulate already having checked Def.
1868         for (unsigned I = 1; I < ToCheck.size(); ++I) {
1869           Current = ToCheck[I];
1870           if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
1871             // Check all the operands.
1872             for (auto &Use : PhiAccess->incoming_values())
1873               ToCheck.insert(cast<MemoryAccess>(&Use));
1874             continue;
1875           }
1876 
1877           // If we found a memory def, bail. This happens when an unrelated
1878           // write lies between the load and the otherwise no-op store.
1879           assert(isa<MemoryDef>(Current) &&
1880                  "Only MemoryDefs should reach here.");
1881           // TODO: Skip no alias MemoryDefs that have no aliasing reads.
1882           // We are searching for the definition of the store's destination.
1883           // So, if that is the same definition as the load, then this is a
1884           // noop. Otherwise, fail.
1885           if (LoadAccess != Current)
1886             return false;
1887         }
1888         return true;
1889       }
1890     }
1891 
1892     return false;
1893   }
1894 
1895   bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) {
1896     bool Changed = false;
1897     for (auto OI : IOL) {
1898       Instruction *DeadI = OI.first;
1899       MemoryLocation Loc = *getLocForWriteEx(DeadI);
1900       assert(isRemovable(DeadI) && "Expect only removable instruction");
1901 
1902       const Value *Ptr = Loc.Ptr->stripPointerCasts();
1903       int64_t DeadStart = 0;
1904       uint64_t DeadSize = Loc.Size.getValue();
1905       GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL);
1906       OverlapIntervalsTy &IntervalMap = OI.second;
1907       Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize);
1908       if (IntervalMap.empty())
1909         continue;
1910       Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize);
1911     }
1912     return Changed;
1913   }
1914 
1915   /// Eliminates writes to locations that already contain the value being
1916   /// written.
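       /// Illustrative sketch (not from the original source): the second store
       /// rewrites the value the first one already stored and can be removed.
       ///   store i32 1, i32* %p
       ///   store i32 1, i32* %p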
1917   bool eliminateRedundantStoresOfExistingValues() {
1918     bool MadeChange = false;
1919     LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the "
1920                          "already existing value\n");
1921     for (auto *Def : MemDefs) {
1922       if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def) ||
1923           !isRemovable(Def->getMemoryInst()))
1924         continue;
1925       auto *UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess());
1926       if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef))
1927         continue;
1928 
1929       Instruction *DefInst = Def->getMemoryInst();
1930       Instruction *UpperInst = UpperDef->getMemoryInst();
1931       auto IsRedundantStore = [this, DefInst,
1932                                UpperInst](MemoryLocation UpperLoc) {
1933         if (DefInst->isIdenticalTo(UpperInst))
1934           return true;
1935         if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
1936           if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
1937             auto MaybeDefLoc = getLocForWriteEx(DefInst);
1938             if (!MaybeDefLoc)
1939               return false;
1940             int64_t InstWriteOffset = 0;
1941             int64_t DepWriteOffset = 0;
1942             auto OR = isOverwrite(UpperInst, DefInst, UpperLoc, *MaybeDefLoc,
1943                                   InstWriteOffset, DepWriteOffset);
1944             Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
1945             return StoredByte && StoredByte == MemSetI->getOperand(1) &&
1946                    OR == OW_Complete;
1947           }
1948         }
1949         return false;
1950       };
1951 
1952       auto MaybeUpperLoc = getLocForWriteEx(UpperInst);
1953       if (!MaybeUpperLoc || !IsRedundantStore(*MaybeUpperLoc) ||
1954           isReadClobber(*MaybeUpperLoc, DefInst))
1955         continue;
1956       LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *DefInst
1957                         << '\n');
1958       deleteDeadInstruction(DefInst);
1959       NumRedundantStores++;
1960       MadeChange = true;
1961     }
1962     return MadeChange;
1963   }
1964 };
1965 
1966 static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1967                                 DominatorTree &DT, PostDominatorTree &PDT,
1968                                 const TargetLibraryInfo &TLI,
1969                                 const LoopInfo &LI) {
1970   bool MadeChange = false;
1971 
1972   DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);
1973   // For each store:
1974   for (unsigned I = 0; I < State.MemDefs.size(); I++) {
1975     MemoryDef *KillingDef = State.MemDefs[I];
1976     if (State.SkipStores.count(KillingDef))
1977       continue;
1978     Instruction *KillingI = KillingDef->getMemoryInst();
1979 
1980     Optional<MemoryLocation> MaybeKillingLoc;
1981     if (State.isMemTerminatorInst(KillingI))
1982       MaybeKillingLoc = State.getLocForTerminator(KillingI).map(
1983           [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
1984     else
1985       MaybeKillingLoc = State.getLocForWriteEx(KillingI);
1986 
1987     if (!MaybeKillingLoc) {
1988       LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
1989                         << *KillingI << "\n");
1990       continue;
1991     }
1992     MemoryLocation KillingLoc = *MaybeKillingLoc;
1993     assert(KillingLoc.Ptr && "KillingLoc should not be null");
1994     const Value *KillingUndObj = getUnderlyingObject(KillingLoc.Ptr);
1995     LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
1996                       << *KillingDef << " (" << *KillingI << ")\n");
1997 
1998     unsigned ScanLimit = MemorySSAScanLimit;
1999     unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
2000     unsigned PartialLimit = MemorySSAPartialStoreLimit;
2001     // Worklist of MemoryAccesses that may be killed by KillingDef.
2002     SetVector<MemoryAccess *> ToCheck;
2003     ToCheck.insert(KillingDef->getDefiningAccess());
2004 
2005     bool Shortened = false;
2006     bool IsMemTerm = State.isMemTerminatorInst(KillingI);
2007     // Check if MemoryAccesses in the worklist are killed by KillingDef.
2008     for (unsigned I = 0; I < ToCheck.size(); I++) {
2009       MemoryAccess *Current = ToCheck[I];
2010       if (State.SkipStores.count(Current))
2011         continue;
2012 
2013       Optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef(
2014           KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit,
2015           WalkerStepLimit, IsMemTerm, PartialLimit);
2016 
2017       if (!MaybeDeadAccess) {
2018         LLVM_DEBUG(dbgs() << "  finished walk\n");
2019         continue;
2020       }
2021 
2022       MemoryAccess *DeadAccess = *MaybeDeadAccess;
2023       LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess);
2024       if (isa<MemoryPhi>(DeadAccess)) {
2025         LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
2026         for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) {
2027           MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
2028           BasicBlock *IncomingBlock = IncomingAccess->getBlock();
2029           BasicBlock *PhiBlock = DeadAccess->getBlock();
2030 
2031           // We only consider incoming MemoryAccesses that come before the
2032           // MemoryPhi. Otherwise we could discover candidates that do not
2033           // strictly dominate our starting def.
2034           if (State.PostOrderNumbers[IncomingBlock] >
2035               State.PostOrderNumbers[PhiBlock])
2036             ToCheck.insert(IncomingAccess);
2037         }
2038         continue;
2039       }
2040       auto *DeadDefAccess = cast<MemoryDef>(DeadAccess);
2041       Instruction *DeadI = DeadDefAccess->getMemoryInst();
2042       LLVM_DEBUG(dbgs() << " (" << *DeadI << ")\n");
2043       ToCheck.insert(DeadDefAccess->getDefiningAccess());
2044       NumGetDomMemoryDefPassed++;
2045 
2046       if (!DebugCounter::shouldExecute(MemorySSACounter))
2047         continue;
2048 
2049       MemoryLocation DeadLoc = *State.getLocForWriteEx(DeadI);
2050 
2051       if (IsMemTerm) {
2052         const Value *DeadUndObj = getUnderlyingObject(DeadLoc.Ptr);
2053         if (KillingUndObj != DeadUndObj)
2054           continue;
2055         LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DeadI
2056                           << "\n  KILLER: " << *KillingI << '\n');
2057         State.deleteDeadInstruction(DeadI);
2058         ++NumFastStores;
2059         MadeChange = true;
2060       } else {
2061         // Check if DeadI overwrites KillingI.
2062         int64_t KillingOffset = 0;
2063         int64_t DeadOffset = 0;
2064         OverwriteResult OR = State.isOverwrite(
2065             KillingI, DeadI, KillingLoc, DeadLoc, KillingOffset, DeadOffset);
2066         if (OR == OW_MaybePartial) {
2067           auto Iter = State.IOLs.insert(
2068               std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
2069                   DeadI->getParent(), InstOverlapIntervalsTy()));
2070           auto &IOL = Iter.first->second;
2071           OR = isPartialOverwrite(KillingLoc, DeadLoc, KillingOffset,
2072                                   DeadOffset, DeadI, IOL);
2073         }
2074 
2075         if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
2076           auto *DeadSI = dyn_cast<StoreInst>(DeadI);
2077           auto *KillingSI = dyn_cast<StoreInst>(KillingI);
2078           // We are re-using tryToMergePartialOverlappingStores, which requires
2079           // DeadSI to dominate KillingSI.
2080           // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
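               // Illustrative sketch (not from the original source), with constant
               // operands and DeadSI dominating KillingSI:
               //   store i32 0, i32* %p       ; DeadSI
               //   %p1 = bitcast i32* %p to i8*
               //   store i8 1, i8* %p1        ; KillingSI, rewrites one byte
               // The merged constant is written by DeadSI and KillingSI is removed.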
2081           if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) {
2082             if (Constant *Merged = tryToMergePartialOverlappingStores(
2083                     KillingSI, DeadSI, KillingOffset, DeadOffset, State.DL,
2084                     State.BatchAA, &DT)) {
2085 
2086               // Update stored value of earlier store to merged constant.
2087               DeadSI->setOperand(0, Merged);
2088               ++NumModifiedStores;
2089               MadeChange = true;
2090 
2091               Shortened = true;
2092               // Remove killing store and remove any outstanding overlap
2093               // intervals for the updated store.
2094               State.deleteDeadInstruction(KillingSI);
2095               auto I = State.IOLs.find(DeadSI->getParent());
2096               if (I != State.IOLs.end())
2097                 I->second.erase(DeadSI);
2098               break;
2099             }
2100           }
2101         }
2102 
2103         if (OR == OW_Complete) {
2104           LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *DeadI
2105                             << "\n  KILLER: " << *KillingI << '\n');
2106           State.deleteDeadInstruction(DeadI);
2107           ++NumFastStores;
2108           MadeChange = true;
2109         }
2110       }
2111     }
2112 
2113     // Check if the store is a no-op.
2114     if (!Shortened && isRemovable(KillingI) &&
2115         State.storeIsNoop(KillingDef, KillingUndObj)) {
2116       LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *KillingI
2117                         << '\n');
2118       State.deleteDeadInstruction(KillingI);
2119       NumRedundantStores++;
2120       MadeChange = true;
2121       continue;
2122     }
2123   }
2124 
2125   if (EnablePartialOverwriteTracking)
2126     for (auto &KV : State.IOLs)
2127       MadeChange |= State.removePartiallyOverlappedStores(KV.second);
2128 
2129   MadeChange |= State.eliminateRedundantStoresOfExistingValues();
2130   MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2131   return MadeChange;
2132 }
2133 } // end anonymous namespace
2134 
2135 //===----------------------------------------------------------------------===//
2136 // DSE Pass
2137 //===----------------------------------------------------------------------===//
2138 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2139   AliasAnalysis &AA = AM.getResult<AAManager>(F);
2140   const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2141   DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2142   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2143   PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2144   LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
2145 
2146   bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);
2147 
2148 #ifdef LLVM_ENABLE_STATS
2149   if (AreStatisticsEnabled())
2150     for (auto &I : instructions(F))
2151       NumRemainingStores += isa<StoreInst>(&I);
2152 #endif
2153 
2154   if (!Changed)
2155     return PreservedAnalyses::all();
2156 
2157   PreservedAnalyses PA;
2158   PA.preserveSet<CFGAnalyses>();
2159   PA.preserve<MemorySSAAnalysis>();
2160   PA.preserve<LoopAnalysis>();
2161   return PA;
2162 }
2163 
2164 namespace {
2165 
2166 /// A legacy pass for the legacy pass manager that wraps \c DSEPass.
2167 class DSELegacyPass : public FunctionPass {
2168 public:
2169   static char ID; // Pass identification, replacement for typeid
2170 
2171   DSELegacyPass() : FunctionPass(ID) {
2172     initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2173   }
2174 
2175   bool runOnFunction(Function &F) override {
2176     if (skipFunction(F))
2177       return false;
2178 
2179     AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2180     DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2181     const TargetLibraryInfo &TLI =
2182         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2183     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2184     PostDominatorTree &PDT =
2185         getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2186     LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2187 
2188     bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);
2189 
2190 #ifdef LLVM_ENABLE_STATS
2191     if (AreStatisticsEnabled())
2192       for (auto &I : instructions(F))
2193         NumRemainingStores += isa<StoreInst>(&I);
2194 #endif
2195 
2196     return Changed;
2197   }
2198 
2199   void getAnalysisUsage(AnalysisUsage &AU) const override {
2200     AU.setPreservesCFG();
2201     AU.addRequired<AAResultsWrapperPass>();
2202     AU.addRequired<TargetLibraryInfoWrapperPass>();
2203     AU.addPreserved<GlobalsAAWrapperPass>();
2204     AU.addRequired<DominatorTreeWrapperPass>();
2205     AU.addPreserved<DominatorTreeWrapperPass>();
2206     AU.addRequired<PostDominatorTreeWrapperPass>();
2207     AU.addRequired<MemorySSAWrapperPass>();
2208     AU.addPreserved<PostDominatorTreeWrapperPass>();
2209     AU.addPreserved<MemorySSAWrapperPass>();
2210     AU.addRequired<LoopInfoWrapperPass>();
2211     AU.addPreserved<LoopInfoWrapperPass>();
2212   }
2213 };
2214 
2215 } // end anonymous namespace
2216 
2217 char DSELegacyPass::ID = 0;
2218 
2219 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2220                       false)
2221 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2222 INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2223 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2224 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2225 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2226 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2227 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2228 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2229 INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2230                     false)
2231 
2232 FunctionPass *llvm::createDeadStoreEliminationPass() {
2233   return new DSELegacyPass();
2234 }
2235