//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on.  It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in Loc
/// with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
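    // A monotonic (relaxed) atomic load reads the location and may also
    // constrain reordering of surrounding accesses, so it is conservatively
    // reported as ModRef below.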
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return ModRefInfo::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // Calls to free() deallocate the entire structure.
    Loc = MemoryLocation::getAfter(CI->getArgOperand(0));
    return ModRefInfo::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::invariant_end:
      Loc = MemoryLocation::getForArgument(II, 2, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::masked_load:
      Loc = MemoryLocation::getForArgument(II, 0, TLI);
      return ModRefInfo::Ref;
    case Intrinsic::masked_store:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      return ModRefInfo::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return ModRefInfo::ModRef;
  if (Inst->mayReadFromMemory())
    return ModRefInfo::Ref;
  return ModRefInfo::NoModRef;
}
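
// Example (informal): for a simple "store i32 0, i32* %p" this returns
// ModRefInfo::Mod with Loc describing %p (4 bytes); for an unordered
// "load i32, i32* %p" it returns ModRefInfo::Ref.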

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = getDefaultBlockScanLimit();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;
    // Debug intrinsics don't cause dependences and should not affect Limit.
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
      // If these two calls do not interfere, look past it.
      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
        // If the two calls are the same, return Inst as a Def, so that
        // Call can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            Call->isIdenticalToWhenDefined(CallB))
          return MemDepResult::getDef(Inst);

        // Otherwise if the two calls don't interact (e.g. CallB is readnone)
        // keep scanning.
        continue;
      } else
        return MemDepResult::getClobber(Inst);
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory then assume that this is a dependency.
    if (isModOrRefSet(MR))
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

static bool isVolatile(Instruction *Inst) {
  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return LI->isVolatile();
  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return SI->isVolatile();
  if (auto *AI = dyn_cast<AtomicCmpXchgInst>(Inst))
    return AI->isVolatile();
  return false;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit);
  if (SimpleDep.isDef())
    return SimpleDep;
  // A non-local invariant.group dependency indicates a non-local Def (the
  // invariant.group walk only returns NonLocal when it finds a non-local
  // Def), which is better than a local clobber or anything else.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {
  if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
    return MemDepResult::getUnknown();

  // Take the pointer operand after all casts and GEPs with all-zero indices.
  // This way we only have to search the cast graph downwards.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside
  // of the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<const Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);

  Instruction *ClosestDependency = nullptr;
  // The order of instructions in the use list is unpredictable. In order to
  // always get the same result, we will look for the closest dominated
  // candidate.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must be called with a non-null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };

  // FIXME: This loop is O(N^2) because dominates() can be O(N), and in the
  // worst case we will visit all the instructions. This should be fixed in
  // MSSA.
  while (!LoadOperandsQueue.empty()) {
    const Value *Ptr = LoadOperandsQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      // A bitcast of Ptr is just another name for it. Add it to the queue so
      // that its users are checked as well, e.g. U = bitcast Ptr.
      if (isa<BitCastInst>(U)) {
        LoadOperandsQueue.push_back(U);
        continue;
      }
      // A GEP with all-zero indices is equivalent to a bitcast.
      // FIXME: it is unclear whether some bitcasts should be canonicalized to
      // gep 0 or gep 0 to bitcast because of SROA, so both forms exist. Once
      // typeless pointers are ready, both cases will go away (and this BFS
      // also won't be needed).
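      // E.g. both of the following are treated as other names for Ptr:
      //   %b = bitcast i8* %Ptr to i32*
      //   %g = getelementptr i8, i8* %Ptr, i64 0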
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (GEP->hasAllZeroIndices()) {
          LoadOperandsQueue.push_back(U);
          continue;
        }

      // If we hit a load/store with the same invariant.group metadata (and the
      // same pointer operand), we can assume that the value pointed to by the
      // pointer operand didn't change.
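      // E.g. in:
      //   store i32 42, i32* %p, !invariant.group !0
      //   ...
      //   %v = load i32, i32* %p, !invariant.group !0
      // the load can be assumed to observe 42.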
      if ((isa<LoadInst>(U) || isa<StoreInst>(U)) &&
          U->hasMetadata(LLVMContext::MD_invariant_group))
        ClosestDependency = GetClosestDependency(ClosestDependency, U);
    }
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // Def(U) can't be returned here because it is non-local. If no local
  // dependency is found, return NonLocal, counting on the user to call
  // getNonLocalPointerDependency, which will return the cached result.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
  return MemDepResult::getNonLocal();
}

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  // We can batch AA queries, because IR does not change during a MemDep query.
  BatchAAResults BatchAA(AA);
  bool isInvariantLoad = false;

  unsigned DefaultLimit = getDefaultBlockScanLimit();
  if (!Limit)
    Limit = &DefaultLimit;

  // We must be careful with atomic accesses, as they may allow another thread
  //   to touch this location, clobbering it. We are conservative: if the
  //   QueryInst is not a simple (non-atomic) memory access, we automatically
  //   return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  //   memory model" in PLDI 2013, that a non-atomic location can only be
  //   clobbered between a pair of a release and an acquire action, with no
  //   access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42. A key property of this program, however, is that if either
  // 1 or 4 were missing, there would be a race between the store of 42
  // and either the store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected
  // by every program that can detect any optimization of that kind: either
  // it is racy (undefined) or there is a release followed by an acquire
  // between the pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->hasMetadata(LLVMContext::MD_invariant_load))
      isInvariantLoad = true;
  }

  // Return "true" if and only if the instruction I is either a non-simple
  // load or a non-simple store.
  auto isNonSimpleLoadOrStore = [](Instruction *I) -> bool {
    if (auto *LI = dyn_cast<LoadInst>(I))
      return !LI->isSimple();
    if (auto *SI = dyn_cast<StoreInst>(I))
      return !SI->isSimple();
    return false;
  };

  // Return "true" if I is not a load and not a store, but it does access
  // memory.
  auto isOtherMemAccess = [](Instruction *I) -> bool {
    return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      Intrinsic::ID ID = II->getIntrinsicID();
      switch (ID) {
      case Intrinsic::lifetime_start: {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them.  It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        MemoryLocation ArgLoc = MemoryLocation::getAfter(II->getArgOperand(1));
        if (BatchAA.isMustAlias(ArgLoc, MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
      case Intrinsic::masked_load:
      case Intrinsic::masked_store: {
        MemoryLocation Loc;
        /*ModRefInfo MR =*/ GetLocation(II, Loc, TLI);
        AliasResult R = BatchAA.alias(Loc, MemLoc);
        if (R == NoAlias)
          continue;
        if (R == MustAlias)
          return MemDepResult::getDef(II);
        if (ID == Intrinsic::masked_load)
          continue;
        return MemDepResult::getClobber(II);
      }
      }
    }

    // Values depend on loads if the pointers are must aliased.  This means
    // that a load depends on another must aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load that
    // it does not alias with when this atomic load indicates that another
    // thread may be accessing the location.
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (isVolatile(QueryInst))
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == NoAlias)
          continue;

        // Must aliased loads are defs of each other.
        if (R == MustAlias)
          return MemDepResult::getDef(Inst);

#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
      // in terms of clobbering loads, but since it does this by looking
      // at the clobbering load directly, it doesn't know about any
      // phi translation that may have happened along the way.

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == PartialAlias)
          return MemDepResult::getClobber(Inst);
#endif

        // Other may-alias loads don't impose a dependence; keep scanning.
        continue;
      }

      // Stores don't depend on other non-aliasing accesses.
      if (R == NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (BatchAA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);
        if (SI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(SI);
      }

      // FIXME: this is overly conservative.
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
      if (SI->isVolatile())
        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
            isOtherMemAccess(QueryInst))
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it.  Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (!isModOrRefSet(BatchAA.getModRefInfo(SI, MemLoc)))
        continue;

      // Ok, this store might clobber the query pointer.  Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(StoreLoc, MemLoc);

      if (R == NoAlias)
        continue;
      if (R == MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def.  This means that there is no dependence and
    // the access can be optimized based on that.  For example, a load could
    // turn into undef.  Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, &TLI)) {
      const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
      if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence.  As a result, we look past it when finding a dependency for
    // loads.  DSE uses this to find preceding stores to delete, and thus we
    // can't bypass the fence if the query instruction is a store.
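    // E.g. when the query is a load:
    //   store i32 1, i32* %p
    //   fence release
    //   %v = load i32, i32* %p
    // the scan may look past the fence and report the store as the dependency.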
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    ModRefInfo MR = BatchAA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (isModAndRefSet(MR))
      // TODO: Support callCapturesBefore() on BatchAAResults.
      MR = AA.callCapturesBefore(Inst, MemLoc, &DT);
    switch (clearMust(MR)) {
    case ModRefInfo::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case ModRefInfo::Mod:
      return MemDepResult::getClobber(Inst);
    case ModRefInfo::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      LLVM_FALLTHROUGH;
    default:
      // Otherwise, there is a potential dependence.  Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found.  If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it.  Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !isModSet(MR);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache =
          getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
                                   QueryParent, QueryInst, nullptr);
    } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
      bool isReadOnly = AA.onlyReadsMemory(QueryCall);
      LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                         ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}
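
// Example (informal sketch): a client pass holding a MemoryDependenceResults
// &MD would typically query local dependencies like this:
//
//   MemDepResult Dep = MD.getDependency(&Inst);
//   if (Dep.isDef())
//     ... // Dep.getInst() is the defining memory operation in this block.
//   else if (Dep.isNonLocal())
//     ... // consult the non-local query interfaces instead.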

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
  assert(getDependency(QueryCall).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDeps[QueryCall];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed.  In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry.  If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    llvm::sort(Cache);

    ++NumCacheDirtyNonLocal;
    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //     << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryCall's block.
    BasicBlock *QueryBB = QueryCall->getParent();
    append_range(DirtyBlocks, PredCache.get(QueryBB));
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  LLVM_DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.pop_back_val();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set.  If so, find it.
    LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryCall's use of Inst.
        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
                                            QueryCall);
      }
    }

    // Find out if this block has a local dependency for QueryCall.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence was found in this block; since it is not the entry
      // block of the function, the result is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it.  Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the call), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCall);
    } else {
      // If the block *is* completely transparent to the call, we need to check
      // the predecessors of this block.  Add them to our worklist.
      append_range(DirtyBlocks, PredCache.get(DirtyBB));
    }
  }

  return Cache;
}

void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();
  {
    // Check if there is a cached Def with invariant.group.
    auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
    if (NonLocalDefIt != NonLocalDefsCache.end()) {
      Result.push_back(NonLocalDefIt->second);
      ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
          .erase(QueryInst);
      NonLocalDefsCache.erase(NonLocalDefIt);
      return;
    }
  }
  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
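  // E.g. "load atomic i32, i32* %p unordered, align 4" is handled here,
  // while an acquire load is rejected as ordered.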
  if (isVolatile(QueryInst) || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block.  Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers.  This can happen during PHI
  // translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::GetNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries) {
  bool isInvariantLoad = false;

  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Do a binary search to see if we already have an entry for this block in
  // the cache set.  If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // Use the cached result for an invariant load only if it says there is no
  // dependency anywhere in the function (non-func-local); in that case the
  // invariant load cannot have any dependency either.
  if (ExistingResult && isInvariantLoad &&
      !ExistingResult->getResult().isNonFuncLocal())
    ExistingResult = nullptr;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value.  If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep =
      getPointerDependencyFrom(Loc, isLoad, ScanPos, BB, QueryInst);

  // Don't cache results for invariant loads.
  if (isInvariantLoad)
    return Dep;

  // If we had a dirty entry for the block, update it.  Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
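    // Fall through to place the remaining new entry (now Cache.back()).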
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          llvm::upper_bound(Cache, Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full-scale sort.
    llvm::sort(Cache);
    break;
  }
}

/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true).  In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason.  This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock,
    bool IsIncomplete) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // AA tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  bool isInvariantLoad = false;
  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  // Invariant loads don't participate in caching. Thus no need to reconcile.
  if (!isInvariantLoad && !Pair.second) {
    if (CacheInfo->Size != Loc.Size) {
      bool ThrowOutEverything;
      if (CacheInfo->Size.hasValue() && Loc.Size.hasValue()) {
        // FIXME: We may be able to do better in the face of results with mixed
        // precision. We don't appear to get them in practice, though, so just
        // be conservative.
        ThrowOutEverything =
            CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
            CacheInfo->Size.getValue() < Loc.Size.getValue();
      } else {
        // For our purposes, unknown size > all others.
        ThrowOutEverything = !Loc.Size.hasValue();
      }

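      // E.g. a cached 4-byte result cannot answer an 8-byte query, so the
      // cached data is discarded; conversely, a 4-byte query against an
      // 8-byte cache entry is conservatively restarted at the larger size.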
      if (ThrowOutEverything) {
        // The query's Size is greater than the cached one. Throw out the
        // cached data and proceed with the query at the greater size.
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->Size = Loc.Size;
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      } else {
        // This query's Size is less than the cached one. Conservatively
        // restart the query using the greater size.
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
            StartBB, Result, Visited, SkipFirstBlock, IsIncomplete);
      }
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB, Result,
            Visited, SkipFirstBlock, IsIncomplete);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  // Don't use cached information for invariant loads since it is valid for
  // non-invariant loads only.
  if (!IsIncomplete && !isInvariantLoad &&
      CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, we can just return the
    // cached results and populate the visited set.  However, we have to verify
    // that we don't already have conflicting results for these blocks.  Check
    // to ensure that if a block in the results set is in the visited set that
    // it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block.  Just return false, saying
        // that something was clobbered in this result.  We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, either this is a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info.  If empty and not explicitly indicated as
  // incomplete, the result will be valid cache info, otherwise it isn't.
  //
  // Invariant loads don't affect the cache in any way, thus there is no need
  // to update CacheInfo either.
  if (!isInvariantLoad) {
    if (!IsIncomplete && Cache->empty())
      CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
    else
      CacheInfo->Pair = BBSkipFirstBlockPair();
  }

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted.  Previously cached
  // entries will all be sorted.  The entries we add we only sort on demand (we
  // don't insert every element into its sorted position).  We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  LLVM_DEBUG(AssertSorted(*Cache));

  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we do process a large number of blocks, it becomes very expensive
    // and likely it isn't worth worrying about.
    if (Result.size() > NumResultsLimit) {
      Worklist.clear();
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query.  This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set".  Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB.  See if we have already
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB.  If we have cached
      // information, we will use it, otherwise we compute it.
      LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = GetNonLocalInfoForBlock(QueryInst, Loc, isLoad, BB,
                                                 Cache, NumSortedEntries);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
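    // E.g. if Pointer is "%p = getelementptr i8, i8* %base, i64 4" and %p is
    // defined in BB, each predecessor must be queried with the translated
    // form of %p (rewritten in terms of the corresponding incoming values).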
1252     if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
1253       SkipFirstBlock = false;
1254       SmallVector<BasicBlock *, 16> NewBlocks;
1255       for (BasicBlock *Pred : PredCache.get(BB)) {
1256         // Verify that we haven't looked at this block yet.
1257         std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
1258             Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
1259         if (InsertRes.second) {
1260           // First time we've looked at *PI.
1261           NewBlocks.push_back(Pred);
1262           continue;
1263         }
1264 
1265         // If we have seen this block before, but it was with a different
1266         // pointer then we have a phi translation failure and we have to treat
1267         // this as a clobber.
1268         if (InsertRes.first->second != Pointer.getAddr()) {
1269           // Make sure to clean up the Visited map before continuing on to
1270           // PredTranslationFailure.
1271           for (unsigned i = 0; i < NewBlocks.size(); i++)
1272             Visited.erase(NewBlocks[i]);
1273           goto PredTranslationFailure;
1274         }
1275       }
1276       if (NewBlocks.size() > WorklistEntries) {
1277         // Make sure to clean up the Visited map before continuing on to
1278         // PredTranslationFailure.
1279         for (unsigned i = 0; i < NewBlocks.size(); i++)
1280           Visited.erase(NewBlocks[i]);
1281         GotWorklistLimit = true;
1282         goto PredTranslationFailure;
1283       }
1284       WorklistEntries -= NewBlocks.size();
1285       Worklist.append(NewBlocks.begin(), NewBlocks.end());
1286       continue;
1287     }
1288 
1289     // We do need to do phi translation.  If we know ahead of time that we
1290     // can't phi translate this value, don't even try.
1291     if (!Pointer.IsPotentiallyPHITranslatable())
1292       goto PredTranslationFailure;
1293 
1294     // We may have added values to the cache list before this PHI translation.
1295     // If so, we haven't done anything to ensure that the cache remains sorted.
1296     // Sort it now (if needed) so that recursive invocations of
1297     // getNonLocalPointerDepFromBB and other routines that could reuse the cache
1298     // value will only see properly sorted cache arrays.
1299     if (Cache && NumSortedEntries != Cache->size()) {
1300       SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1301       NumSortedEntries = Cache->size();
1302     }
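         // Null out Cache before recursing below: the recursive calls can grow
         // NonLocalPointerDeps and reallocate its entries, which would leave
         // this pointer dangling.  It is refreshed from CacheKey afterwards.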
1303     Cache = nullptr;
1304 
1305     PredList.clear();
1306     for (BasicBlock *Pred : PredCache.get(BB)) {
1307       PredList.push_back(std::make_pair(Pred, Pointer));
1308 
1309       // Get the PHI translated pointer in this predecessor.  This can fail if
1310       // the pointer is not translatable, in which case getAddr() returns null.
1311       PHITransAddr &PredPointer = PredList.back().second;
1312       PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
1313       Value *PredPtrVal = PredPointer.getAddr();
1314 
1315       // Check to see if we have already visited this pred block with another
1316       // pointer.  If so, we can't do this lookup.  This failure can occur
1317       // with PHI translation when a critical edge exists and the PHI node in
1318       // the successor translates to a pointer value different than the
1319       // pointer the block was first analyzed with.
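           // Illustrative scenario (hypothetical CFG): Pred branches to both BB
           // and BB2, so Pred->BB is a critical edge.  An earlier walk through
           // BB2 may have visited Pred with the original pointer, while the phi
           // in BB now translates it to a different value in Pred.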
1320       std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
1321           Visited.insert(std::make_pair(Pred, PredPtrVal));
1322 
1323       if (!InsertRes.second) {
1324         // We found the pred; take it off the list of preds to visit.
1325         PredList.pop_back();
1326 
1327         // If the predecessor was visited with PredPtr, then we already did
1328         // the analysis and can ignore it.
1329         if (InsertRes.first->second == PredPtrVal)
1330           continue;
1331 
1332         // Otherwise, the block was previously analyzed with a different
1333         // pointer.  We can't represent the result of this case, so we just
1334         // treat this as a phi translation failure.
1335 
1336         // Make sure to clean up the Visited map before continuing on to
1337         // PredTranslationFailure.
1338         for (const auto &PredEntry : PredList)
1339           Visited.erase(PredEntry.first);
1340 
1341         goto PredTranslationFailure;
1342       }
1343     }
1344 
1345     // Actually process results here; this needs to be a separate loop to avoid
1346     // calling getNonLocalPointerDepFromBB for blocks we don't want to return
1347     // any results for.  (getNonLocalPointerDepFromBB will modify our
1348     // data structures in ways the code after the PredTranslationFailure label
1349     // doesn't expect.)
1350     for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
1351       BasicBlock *Pred = PredList[i].first;
1352       PHITransAddr &PredPointer = PredList[i].second;
1353       Value *PredPtrVal = PredPointer.getAddr();
1354 
1355       bool CanTranslate = true;
1356       // If PHI translation was unable to find an available pointer in this
1357       // predecessor, then we have to assume that the pointer is clobbered in
1358       // that predecessor.  We can still do PRE of the load, which would insert
1359       // a computation of the pointer in this predecessor.
1360       if (!PredPtrVal)
1361         CanTranslate = false;
1362 
1363       // FIXME: it is entirely possible that PHI translating will end up with
1364       // the same value.  Consider PHI translating something like:
1365       // X = phi [x, bb1], [y, bb2].  PHI translating for bb1 doesn't *need*
1366       // to recurse here, pedantically speaking.
1367 
1368       // If getNonLocalPointerDepFromBB fails here, that means the cached
1369       // result conflicted with the Visited list; we have to conservatively
1370       // assume it is unknown, but this also does not block PRE of the load.
1371       if (!CanTranslate ||
1372           !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
1373                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
1374                                       Pred, Result, Visited)) {
1375         // Add the entry to the Result list.
1376         NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
1377         Result.push_back(Entry);
1378 
1379         // Since we had a phi translation failure, the cache for CacheKey won't
1380         // include all of the entries that we need to immediately satisfy future
1381         // queries.  Mark this in NonLocalPointerDeps by resetting the
1382         // BBSkipFirstBlockPair.  This forces later reuse of the cached value
1383         // to do more work, but ensures the phi translation failure isn't missed.
1384         NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
1385         NLPI.Pair = BBSkipFirstBlockPair();
1386         continue;
1387       }
1388     }
1389 
1390     // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
1391     CacheInfo = &NonLocalPointerDeps[CacheKey];
1392     Cache = &CacheInfo->NonLocalDeps;
1393     NumSortedEntries = Cache->size();
1394 
1395     // Since we did phi translation, the "Cache" set won't contain all of the
1396     // results for the query.  This is ok (we can still use it to accelerate
1397     // specific block queries) but we can't do the fastpath "return all
1398     // results from the set".  Clear out the indicator for this.
1399     CacheInfo->Pair = BBSkipFirstBlockPair();
1400     SkipFirstBlock = false;
1401     continue;
1402 
1403   PredTranslationFailure:
1404     // The following code is "failure"; we can't produce a sane translation
1405     // for the given block.  It assumes that we haven't modified any of
1406     // our data structures while processing the current block.
1407 
1408     if (!Cache) {
1409       // Refresh the CacheInfo/Cache pointer if it got invalidated.
1410       CacheInfo = &NonLocalPointerDeps[CacheKey];
1411       Cache = &CacheInfo->NonLocalDeps;
1412       NumSortedEntries = Cache->size();
1413     }
1414 
1415     // Since we failed phi translation, the "Cache" set won't contain all of the
1416     // results for the query.  This is ok (we can still use it to accelerate
1417     // specific block queries) but we can't do the fastpath "return all
1418     // results from the set".  Clear out the indicator for this.
1419     CacheInfo->Pair = BBSkipFirstBlockPair();
1420 
1421     // If *nothing* works, mark the pointer as unknown.
1422     //
1423     // If this is the magic first block, return this as a clobber of the whole
1424     // incoming value.  Since we can't phi translate to one of the predecessors,
1425     // we have to bail out.
1426     if (SkipFirstBlock)
1427       return false;
1428 
1429     // Results of invariant loads are not cached, so there is no need to
1430     // update cached information.
1431     if (!isInvariantLoad) {
1432       for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
1433         if (I.getBB() != BB)
1434           continue;
1435 
1436         assert((GotWorklistLimit || I.getResult().isNonLocal() ||
1437                 !DT.isReachableFromEntry(BB)) &&
1438                "Should only be here with transparent block");
1439 
1440         I.setResult(MemDepResult::getUnknown());
1441 
1443         break;
1444       }
1445     }
1446     (void)GotWorklistLimit;
1447     // Go ahead and report unknown dependence.
1448     Result.push_back(
1449         NonLocalDepResult(BB, MemDepResult::getUnknown(), Pointer.getAddr()));
1450   }
1451 
1452   // Okay, we're done now.  If we added new values to the cache, re-sort it.
1453   SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
1454   LLVM_DEBUG(AssertSorted(*Cache));
1455   return true;
1456 }
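     // Illustrative outcome of the walk above (hypothetical IR): for a load of
     // %p in a block with two predecessors that each store to %p, Result ends
     // up with one Def entry per predecessor, each paired with the (possibly
     // phi-translated) address that was used in that block.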
1457 
1458 /// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
1459 void MemoryDependenceResults::RemoveCachedNonLocalPointerDependencies(
1460     ValueIsLoadPair P) {
1461 
1462   // Most of the time this cache is empty.
1463   if (!NonLocalDefsCache.empty()) {
1464     auto it = NonLocalDefsCache.find(P.getPointer());
1465     if (it != NonLocalDefsCache.end()) {
1466       RemoveFromReverseMap(ReverseNonLocalDefsCache,
1467                            it->second.getResult().getInst(), P.getPointer());
1468       NonLocalDefsCache.erase(it);
1469     }
1470 
1471     if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
1472       auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
1473       if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
1474         for (const auto *entry : toRemoveIt->second)
1475           NonLocalDefsCache.erase(entry);
1476         ReverseNonLocalDefsCache.erase(toRemoveIt);
1477       }
1478     }
1479   }
1480 
1481   CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
1482   if (It == NonLocalPointerDeps.end())
1483     return;
1484 
1485   // Remove all of the entries in the BB->val map.  This involves removing
1486   // instructions from the reverse map.
1487   NonLocalDepInfo &PInfo = It->second.NonLocalDeps;
1488 
1489   for (const NonLocalDepEntry &Entry : PInfo) {
1490     Instruction *Target = Entry.getResult().getInst();
1491     if (!Target)
1492       continue; // Ignore non-local dep results.
1493     assert(Target->getParent() == Entry.getBB());
1494 
1495     // Eliminating the dirty entry from the cache, so update the reverse info.
1496     RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
1497   }
1498 
1499   // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
1500   NonLocalPointerDeps.erase(It);
1501 }
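     // Note: the reverse map exists precisely so the removal above touches only
     // the entries recorded for P, instead of scanning every cached query; the
     // RemoveFromReverseMap calls keep the two directions in sync.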
1502 
1503 void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
1504   // If Ptr isn't really a pointer, just ignore it.
1505   if (!Ptr->getType()->isPointerTy())
1506     return;
1507   // Flush store info for the pointer.
1508   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
1509   // Flush load info for the pointer.
1510   RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
1511   // Invalidate phis that use the pointer.
1512   PV.invalidateValue(Ptr);
1513 }
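     // Illustrative caller (hypothetical transform code): after replacing a
     // pointer, a pass would drop the entries keyed on the old value, e.g.
     //   OldPtr->replaceAllUsesWith(NewPtr);
     //   MD.invalidateCachedPointerInfo(OldPtr);
     // where MD is this MemoryDependenceResults instance.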
1514 
1515 void MemoryDependenceResults::invalidateCachedPredecessors() {
1516   PredCache.clear();
1517 }
1518 
1519 void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
1520   // Walk through the Non-local dependencies, removing this one as the value
1521   // for any cached queries.
1522   NonLocalDepMapType::iterator NLDI = NonLocalDeps.find(RemInst);
1523   if (NLDI != NonLocalDeps.end()) {
1524     NonLocalDepInfo &BlockMap = NLDI->second.first;
1525     for (auto &Entry : BlockMap)
1526       if (Instruction *Inst = Entry.getResult().getInst())
1527         RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
1528     NonLocalDeps.erase(NLDI);
1529   }
1530 
1531   // If we have a cached local dependence query for this instruction, remove it.
1532   LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
1533   if (LocalDepEntry != LocalDeps.end()) {
1534     // Remove us from DepInst's reverse set now that the local dep info is gone.
1535     if (Instruction *Inst = LocalDepEntry->second.getInst())
1536       RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);
1537 
1538     // Remove this local dependency info.
1539     LocalDeps.erase(LocalDepEntry);
1540   }
1541 
1542   // If we have any cached dependencies on this instruction, remove
1543   // them.
1544 
1545   // If the instruction is a pointer, remove it from both the load info and the
1546   // store info.
1547   if (RemInst->getType()->isPointerTy()) {
1548     RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
1549     RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
1550   } else {
1551     // Otherwise, if the instruction is in the map directly, it must be a load.
1552     // Remove it.
1553     auto toRemoveIt = NonLocalDefsCache.find(RemInst);
1554     if (toRemoveIt != NonLocalDefsCache.end()) {
1555       assert(isa<LoadInst>(RemInst) &&
1556              "only load instructions should be added directly");
1557       const Instruction *DepV = toRemoveIt->second.getResult().getInst();
1558       ReverseNonLocalDefsCache.find(DepV)->second.erase(RemInst);
1559       NonLocalDefsCache.erase(toRemoveIt);
1560     }
1561   }
1562 
1563   // Loop over all of the things that depend on the instruction we're removing.
1564   SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;
1565 
1566   // If we find RemInst as a clobber or Def in any of the maps for other values,
1567   // we need to replace its entry with a dirty version of the instruction after
1568   // it.  If RemInst is a terminator, we use a null dirty value.
1569   //
1570   // Using a dirty version of the instruction after RemInst saves having to scan
1571   // the entire block to get to this point.
1572   MemDepResult NewDirtyVal;
1573   if (!RemInst->isTerminator())
1574     NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
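       // E.g. (illustrative): if RemInst is a store that some cached load
       // depends on, that load's entry becomes "dirty at the instruction after
       // RemInst", so its next query rescans from there instead of rescanning
       // the whole block.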
1575 
1576   ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
1577   if (ReverseDepIt != ReverseLocalDeps.end()) {
1578     // RemInst can't be the terminator if it has local stuff depending on it.
1579     assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() &&
1580            "Nothing can locally depend on a terminator");
1581 
1582     for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
1583       assert(InstDependingOnRemInst != RemInst &&
1584              "Already removed our local dep info");
1585 
1586       LocalDeps[InstDependingOnRemInst] = NewDirtyVal;
1587 
1588       // Remember that these now depend on NewDirtyVal's instruction.
1589       assert(NewDirtyVal.getInst() &&
1590              "There is no way something else can have "
1591              "a local dep on this if it is a terminator!");
1592       ReverseDepsToAdd.push_back(
1593           std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
1594     }
1595 
1596     ReverseLocalDeps.erase(ReverseDepIt);
1597 
1598     // Add new reverse deps after scanning the set, to avoid invalidating the
1599     // set while it is being scanned.
1600     while (!ReverseDepsToAdd.empty()) {
1601       ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
1602           ReverseDepsToAdd.back().second);
1603       ReverseDepsToAdd.pop_back();
1604     }
1605   }
1606 
1607   ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
1608   if (ReverseDepIt != ReverseNonLocalDeps.end()) {
1609     for (Instruction *I : ReverseDepIt->second) {
1610       assert(I != RemInst && "Already removed NonLocalDep info for RemInst");
1611 
1612       PerInstNLInfo &INLD = NonLocalDeps[I];
1613       // The information is now dirty!
1614       INLD.second = true;
1615 
1616       for (auto &Entry : INLD.first) {
1617         if (Entry.getResult().getInst() != RemInst)
1618           continue;
1619 
1620         // Convert to a dirty entry for the subsequent instruction.
1621         Entry.setResult(NewDirtyVal);
1622 
1623         if (Instruction *NextI = NewDirtyVal.getInst())
1624           ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
1625       }
1626     }
1627 
1628     ReverseNonLocalDeps.erase(ReverseDepIt);
1629 
1630     // Add new reverse deps after scanning the set, to avoid invalidating it.
1631     while (!ReverseDepsToAdd.empty()) {
1632       ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
1633           ReverseDepsToAdd.back().second);
1634       ReverseDepsToAdd.pop_back();
1635     }
1636   }
1637 
1638   // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
1639   // value in the NonLocalPointerDeps info.
1640   ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
1641       ReverseNonLocalPtrDeps.find(RemInst);
1642   if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
1643     SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
1644         ReversePtrDepsToAdd;
1645 
1646     for (ValueIsLoadPair P : ReversePtrDepIt->second) {
1647       assert(P.getPointer() != RemInst &&
1648              "Already removed NonLocalPointerDeps info for RemInst");
1649 
1650       NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;
1651 
1652       // The cache is not valid for any specific block anymore.
1653       NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();
1654 
1655       // Update any entries for RemInst to use the instruction after it.
1656       for (auto &Entry : NLPDI) {
1657         if (Entry.getResult().getInst() != RemInst)
1658           continue;
1659 
1660         // Convert to a dirty entry for the subsequent instruction.
1661         Entry.setResult(NewDirtyVal);
1662 
1663         if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
1664           ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
1665       }
1666 
1667       // Re-sort the NonLocalDepInfo.  Changing the dirty entry to its
1668       // subsequent value may break the sort order.
1669       llvm::sort(NLPDI);
1670     }
1671 
1672     ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);
1673 
1674     while (!ReversePtrDepsToAdd.empty()) {
1675       ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
1676           ReversePtrDepsToAdd.back().second);
1677       ReversePtrDepsToAdd.pop_back();
1678     }
1679   }
1680 
1681   // Invalidate phis that use the removed instruction.
1682   PV.invalidateValue(RemInst);
1683 
1684   assert(!NonLocalDeps.count(RemInst) && "RemInst got reinserted?");
1685   LLVM_DEBUG(verifyRemoved(RemInst));
1686 }
1687 
1688 /// Verify that the specified instruction does not occur in our internal data
1689 /// structures.
1690 ///
1691 /// This function verifies by asserting in debug builds.
1692 void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
1693 #ifndef NDEBUG
1694   for (const auto &DepKV : LocalDeps) {
1695     assert(DepKV.first != D && "Inst occurs in data structures");
1696     assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
1697   }
1698 
1699   for (const auto &DepKV : NonLocalPointerDeps) {
1700     assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
1701     for (const auto &Entry : DepKV.second.NonLocalDeps)
1702       assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
1703   }
1704 
1705   for (const auto &DepKV : NonLocalDeps) {
1706     assert(DepKV.first != D && "Inst occurs in data structures");
1707     const PerInstNLInfo &INLD = DepKV.second;
1708     for (const auto &Entry : INLD.first)
1709       assert(Entry.getResult().getInst() != D &&
1710              "Inst occurs in data structures");
1711   }
1712 
1713   for (const auto &DepKV : ReverseLocalDeps) {
1714     assert(DepKV.first != D && "Inst occurs in data structures");
1715     for (Instruction *Inst : DepKV.second)
1716       assert(Inst != D && "Inst occurs in data structures");
1717   }
1718 
1719   for (const auto &DepKV : ReverseNonLocalDeps) {
1720     assert(DepKV.first != D && "Inst occurs in data structures");
1721     for (Instruction *Inst : DepKV.second)
1722       assert(Inst != D && "Inst occurs in data structures");
1723   }
1724 
1725   for (const auto &DepKV : ReverseNonLocalPtrDeps) {
1726     assert(DepKV.first != D && "Inst occurs in rev NLPD map");
1727 
1728     for (ValueIsLoadPair P : DepKV.second)
1729       assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
1730              "Inst occurs in ReverseNonLocalPtrDeps map");
1731   }
1732 #endif
1733 }
1734 
1735 AnalysisKey MemoryDependenceAnalysis::Key;
1736 
1737 MemoryDependenceAnalysis::MemoryDependenceAnalysis()
1738     : DefaultBlockScanLimit(BlockScanLimit) {}
1739 
1740 MemoryDependenceResults
1741 MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
1742   auto &AA = AM.getResult<AAManager>(F);
1743   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1744   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1745   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1746   auto &PV = AM.getResult<PhiValuesAnalysis>(F);
1747   return MemoryDependenceResults(AA, AC, TLI, DT, PV, DefaultBlockScanLimit);
1748 }
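     // Illustrative new-pass-manager usage (hypothetical client code; FAM, F,
     // and SomeMemInst stand in for the caller's own values):
     //   auto &MD = FAM.getResult<MemoryDependenceAnalysis>(F);
     //   MemDepResult Dep = MD.getDependency(SomeMemInst);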
1749 
1750 char MemoryDependenceWrapperPass::ID = 0;
1751 
1752 INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
1753                       "Memory Dependence Analysis", false, true)
1754 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1755 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
1756 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1757 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1758 INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
1759 INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
1760                     "Memory Dependence Analysis", false, true)
1761 
1762 MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
1763   initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
1764 }
1765 
1766 MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;
1767 
1768 void MemoryDependenceWrapperPass::releaseMemory() {
1769   MemDep.reset();
1770 }
1771 
1772 void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1773   AU.setPreservesAll();
1774   AU.addRequired<AssumptionCacheTracker>();
1775   AU.addRequired<DominatorTreeWrapperPass>();
1776   AU.addRequired<PhiValuesWrapperPass>();
1777   AU.addRequiredTransitive<AAResultsWrapperPass>();
1778   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1779 }
1780 
1781 bool MemoryDependenceResults::invalidate(Function &F, const PreservedAnalyses &PA,
1782                                FunctionAnalysisManager::Invalidator &Inv) {
1783   // Check whether our analysis is preserved.
1784   auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
1785   if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
1786     // If not, give up now.
1787     return true;
1788 
1789   // Check whether the analyses we depend on became invalid for any reason.
1790   if (Inv.invalidate<AAManager>(F, PA) ||
1791       Inv.invalidate<AssumptionAnalysis>(F, PA) ||
1792       Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
1793       Inv.invalidate<PhiValuesAnalysis>(F, PA))
1794     return true;
1795 
1796   // Otherwise this analysis result remains valid.
1797   return false;
1798 }
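     // Illustrative (hypothetical pass code): a transform that keeps this
     // result valid must say so explicitly, e.g.
     //   PreservedAnalyses PA;
     //   PA.preserve<MemoryDependenceAnalysis>();
     //   return PA;
     // anything less falls into the invalidation paths above.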
1799 
1800 unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
1801   return DefaultBlockScanLimit;
1802 }
1803 
1804 bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
1805   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
1806   auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1807   auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1808   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1809   auto &PV = getAnalysis<PhiValuesWrapperPass>().getResult();
1810   MemDep.emplace(AA, AC, TLI, DT, PV, BlockScanLimit);
1811   return false;
1812 }
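     // Illustrative legacy-pass-manager usage (hypothetical client pass):
     //   AU.addRequired<MemoryDependenceWrapperPass>();  // in getAnalysisUsage
     //   auto &MD = getAnalysis<MemoryDependenceWrapperPass>().getMemDep();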
1813