xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/BasicAliasAnalysis.cpp (revision a7dea1671b87c07d2d266f836bfa8b58efc7c134)
//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This will allow us to more-aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C) which is
/// common enough to worry about.
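///
/// For example (an illustrative sketch, not code from this file): on a
/// 32-bit target, C code such as
///   long long i = ...; p[i] = 0;
/// performs its index arithmetic in i64 even though pointers are 32 bits, so
/// decomposing in 64-bit APInts avoids losing the high bits of such indices.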
static cl::opt<bool> ForceAtLeast64Bits("basicaa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basicaa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. This affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The maximum search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions must use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(
    const Value *V,
    SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr) {
  SmallDenseMap<const Value *, bool, 8>::iterator CacheIt;
  if (IsCapturedCache) {
    bool Inserted;
    std::tie(CacheIt, Inserted) = IsCapturedCache->insert({V, false});
    if (!Inserted)
      // Found cached result, return it!
      return CacheIt->second;
  }

  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V)) {
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    if (IsCapturedCache)
      CacheIt->second = Ret;
    return Ret;
  }

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function.  Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
      if (IsCapturedCache)
        CacheIt->second = Ret;
      return Ret;
    }

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meaning of "object" is slightly different in the following
  // contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  //  Consider this example:
  //     char *p = (char*)malloc(100)
  //     char *q = p+80;
  //
  //  In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //    - either rewind the pointer q to the base-address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
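///
/// For example (hypothetical values): for an argument declared
/// dereferenceable(16) that is queried with a precise 4-byte location, the
/// minimal extent is max(16, 4) = 16 bytes. With dereferenceable_or_null and
/// NullIsValidLoc, only the 4 precisely-accessed bytes can be assumed.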
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer.
  bool CanBeNull;
  uint64_t DerefBytes = V.getPointerDereferenceableBytes(DL, CanBeNull);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to be
  // accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether we looked through any sign or zero extends.  The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
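///
/// For example (an illustrative decomposition, assuming no wrapping):
///   "shl i64 %x, 2" is analyzed as 4*%x + 0
///   "add i64 %y, 8" is analyzed as 1*%y + 8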
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
        // analyze it.
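        // For example (illustrative): in "(%x << 2) | 1" the low two bits of
        // "%x << 2" are known zero, so the 'or' behaves exactly like
        // "(%x << 2) + 1" and can be decomposed as an add.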
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets.  The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
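///
/// For example (hypothetical values): with 64-bit APInts and a 32-bit
/// PointerSize, an Offset of 0xFFFFFFFF is shifted left by 32 and then
/// arithmetically shifted back, yielding -1; i.e., the offset is
/// sign-extended from the pointer's width.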
static APInt adjustToPointerSize(APInt Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
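///
/// For example (an illustrative decomposition):
///   getelementptr inbounds %struct.S, %struct.S* %base, i64 %i, i32 1
/// yields Base = %base, a constant StructOffset for field 1, and one
/// VarIndices entry pairing %i with the allocation size of %struct.S as its
/// scale.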
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking knows about special capturing properties of some
        // intrinsics, such as launder.invariant.group, that can't be expressed
        // with attributes but that nevertheless return an aliasing pointer.
        // Because some analyses may assume that a nocapture pointer is not
        // returned from such an intrinsic (the function would otherwise have
        // to be marked with the 'returned' attribute), it is crucial to use
        // this helper so that we stay in sync with CaptureTracking. Not using
        // it may cause miscompilations where two aliasing pointers are assumed
        // to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
          DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
          (DL.getTypeAllocSize(GTI.getIndexedType()) *
            CIdx->getValue().sextOrSelf(MaxPointerSize))
              .sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize, DL.getTypeAllocSize(GTI.getIndexedType()));
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
                                 Scale.sext(MaxPointerSize*2);
      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call.  This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset.  This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME Consider handling this in InferFunctionAttrs.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocA, LocB));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocB, LocA));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags, AAQI);

  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that the call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments.  If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
                                                MemoryLocation(Object), AAQI);
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object'; continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below; MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // If no operand aliases, reset the Must bit. It is added back below if at
    // least one operand aliases and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved the mod/ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
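  // For example (illustrative): given "memcpy(%dst, %src, %n)", if Loc
  // must-aliases %src then Loc cannot overlap %dst, so the call at most
  // reads Loc (Ref); symmetrically, must-aliasing %dst yields Mod.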
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc, AAQI)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc, AAQI)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // the rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
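///
/// For example (an illustrative case this function can disambiguate):
///   %f0 = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 0
///   %f1 = getelementptr inbounds %struct.S, %struct.S* %p, i64 0, i32 1
/// index distinct fields of the same struct, so the two accesses cannot
/// alias.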
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type.  If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
    GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    {
      Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
      Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
      if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
        // If one of the indices is a PHI node, be safe and only use
        // computeKnownBits so we don't make any assumptions about the
        // relationships between the two indices. This is important if we're
        // asking about values from different loop iterations. See PR32314.
        // TODO: We may be able to change the check so we only do this when
        // we definitely looked through a PHINode.
        if (GEP1LastIdx != GEP2LastIdx &&
            GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
          KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
          KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
          if (Known1.Zero.intersects(Known2.One) ||
              Known1.One.intersects(Known2.Zero))
            return NoAlias;
        }
      } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
        return NoAlias;
    }
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  if (C1->getValue().getActiveBits() > 64 ||
      C2->getValue().getActiveBits() > 64)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint.  Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}
1239 
1240 // If a we have (a) a GEP and (b) a pointer based on an alloca, and the
1241 // beginning of the object the GEP points would have a negative offset with
1242 // repsect to the alloca, that means the GEP can not alias pointer (b).
1243 // Note that the pointer based on the alloca may not be a GEP. For
1244 // example, it may be the alloca itself.
1245 // The same applies if (b) is based on a GlobalVariable. Note that just being
1246 // based on isIdentifiedObject() is not enough - we need an identified object
1247 // that does not permit access to negative offsets. For example, a negative
1248 // offset from a noalias argument or call can be inbounds w.r.t the actual
1249 // underlying object.
1250 //
1251 // For example, consider:
1252 //
1253 //   struct { int f0, int f1, ...} foo;
1254 //   foo alloca;
1255 //   foo* random = bar(alloca);
1256 //   int *f0 = &alloca.f0
1257 //   int *f1 = &random->f1;
1258 //
1259 // Which is lowered, approximately, to:
1260 //
1261 //  %alloca = alloca %struct.foo
1262 //  %random = call %struct.foo* @random(%struct.foo* %alloca)
1263 //  %f0 = getelementptr inbounds %struct.foo, %struct.foo* %alloca, i32 0, i32 0
1264 //  %f1 = getelementptr inbounds %struct.foo, %struct.foo* %random, i32 0, i32 1
1265 //
1266 // Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
1267 // by %alloca. Since the %f1 GEP is inbounds, that means %random must also
1268 // point into the same object. But since %f0 points to the beginning of %alloca,
1269 // the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
1270 // than (%alloca - 1), and so is not inbounds, a contradiction.
1271 bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
1272       const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
1273       LocationSize MaybeObjectAccessSize) {
1274   // If the object access size is unknown, or the GEP isn't inbounds, bail.
1275   if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
1276     return false;
1277 
1278   const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();
1279 
1280   // We need the object to be an alloca or a global variable, and we want to
1281   // know the offset of the pointer from the object precisely, so no variable
1282   // indices are allowed.
1283   if (!(isa<AllocaInst>(DecompObject.Base) ||
1284         isa<GlobalVariable>(DecompObject.Base)) ||
1285       !DecompObject.VarIndices.empty())
1286     return false;
1287 
1288   APInt ObjectBaseOffset = DecompObject.StructOffset +
1289                            DecompObject.OtherOffset;
1290 
1291   // If the GEP has no variable indices, we know the precise offset
1292   // from the base and can use it below. If the GEP has variable indices,
1293   // we cannot compute an exact offset to reason about aliasing, so
1294   // return false in that case.
1295   if (!DecompGEP.VarIndices.empty())
1296     return false;
1297 
1298   APInt GEPBaseOffset = DecompGEP.StructOffset;
1299   GEPBaseOffset += DecompGEP.OtherOffset;
1300 
1301   return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
1302 }
1303 
1304 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1305 /// another pointer.
1306 ///
1307 /// We know that V1 is a GEP, but we don't know anything about V2.
1308 /// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
1309 /// V2.
1310 AliasResult BasicAAResult::aliasGEP(
1311     const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
1312     const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
1313     const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1314   DecomposedGEP DecompGEP1, DecompGEP2;
1315   unsigned MaxPointerSize = getMaxPointerSize(DL);
1316   DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
1317   DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
1318 
1319   bool GEP1MaxLookupReached =
1320     DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
1321   bool GEP2MaxLookupReached =
1322     DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
1323 
1324   APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
1325   APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
1326 
1327   assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1328          "DecomposeGEPExpression returned a result different from "
1329          "GetUnderlyingObject");
1330 
1331   // If the GEP's offset relative to its base is such that the base would
1332   // fall below the start of the object underlying V2, then the GEP and V2
1333   // cannot alias.
1334   if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1335       isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
1336     return NoAlias;
1337   // If we have two GEP instructions with must-alias'ing or no-alias'ing base
1338   // pointers, figure out whether the indices of the GEPs tell us anything
1339   // about the derived pointers.
1340   if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
1341     // Check for the GEP base being at a negative offset, this time in the other
1342     // direction.
1343     if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1344         isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1345       return NoAlias;
1346     // Do the base pointers alias?
1347     AliasResult BaseAlias =
1348         aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
1349                    UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);
1350 
1351     // Check for geps of non-aliasing underlying pointers where the offsets are
1352     // identical.
1353     if ((BaseAlias == MayAlias) && V1Size == V2Size) {
1354       // Do the base pointers alias assuming type and size.
1355       AliasResult PreciseBaseAlias = aliasCheck(
1356           UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
1357       if (PreciseBaseAlias == NoAlias) {
1358         // See if the computed offset from the common pointer tells us about the
1359         // relation of the resulting pointer.
1360         // If the max search depth is reached, the result is undefined.
1361         if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1362           return MayAlias;
1363 
1364         // Same offsets.
1365         if (GEP1BaseOffset == GEP2BaseOffset &&
1366             DecompGEP1.VarIndices == DecompGEP2.VarIndices)
1367           return NoAlias;
1368       }
1369     }
1370 
1371     // If we get a No or May, return it immediately; no amount of further
1372     // analysis will improve this situation.
1373     if (BaseAlias != MustAlias) {
1374       assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1375       return BaseAlias;
1376     }
1377 
1378     // Otherwise, we have a MustAlias.  Since the base pointers alias each other
1379     // exactly, see if the computed offset from the common pointer tells us
1380     // about the relation of the resulting pointer.
1381     // If we know the two GEPs are based off of the exact same pointer (and not
1382     // just the same underlying object), see if that tells us anything about
1383     // the resulting pointers.
1384     if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1385             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1386         GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
1387       AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
1388       // If we couldn't find anything interesting, don't abandon just yet.
1389       if (R != MayAlias)
1390         return R;
1391     }
1392 
1393     // If the max search depth is reached, the result is undefined.
1394     if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1395       return MayAlias;
1396 
1397     // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1398     // symbolic difference.
1399     GEP1BaseOffset -= GEP2BaseOffset;
1400     GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1401 
1402   } else {
1403     // Check to see if these two pointers are related by the getelementptr
1404     // instruction.  If one pointer is a GEP of the other with a non-zero
1405     // offset, we know they cannot alias.
1406 
1407     // If both accesses are unknown size, we can't do anything useful here.
1408     if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
1409       return MayAlias;
1410 
1411     AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
1412                                AAMDNodes(), V2, LocationSize::unknown(),
1413                                V2AAInfo, AAQI, nullptr, UnderlyingV2);
1414     if (R != MustAlias) {
1415       // If V2 may alias the GEP base pointer, conservatively return MayAlias.
1416       // If V2 is known not to alias GEP base pointer, then the two values
1417       // cannot alias per GEP semantics: "Any memory access must be done through
1418       // a pointer value associated with an address range of the memory access,
1419       // otherwise the behavior is undefined.".
1420       assert(R == NoAlias || R == MayAlias);
1421       return R;
1422     }
1423 
1424     // If the max search depth is reached, the result is undefined.
1425     if (GEP1MaxLookupReached)
1426       return MayAlias;
1427   }
1428 
1429   // In the two-GEP case, if there is no difference in the offsets of the
1430   // computed pointers, the resultant pointers are a must alias.  This
1431   // happens, for example, when we have two lexically identical GEPs.
1432   //
1433   // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
1434   // must-aliases the GEP, the end result is a must alias as well.
1435   if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
1436     return MustAlias;
1437 
1438   // If there is a constant difference between the pointers, but the difference
1439   // is less than the size of the associated memory object, then we know
1440   // that the objects are partially overlapping.  If the difference is
1441   // greater, we know they do not overlap.
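       // For instance, with 4-byte accesses on both sides, a constant
       // difference of 2 yields PartialAlias, while a difference of 8 or more
       // yields NoAlias.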
1442   if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
1443     if (GEP1BaseOffset.sge(0)) {
1444       if (V2Size != LocationSize::unknown()) {
1445         if (GEP1BaseOffset.ult(V2Size.getValue()))
1446           return PartialAlias;
1447         return NoAlias;
1448       }
1449     } else {
1450       // We have the situation where:
1451       // +                +
1452       // | BaseOffset     |
1453       // ---------------->|
1454       // |-->V1Size       |-------> V2Size
1455       // GEP1             V2
1456       // We need to know that V2Size is not unknown, otherwise we might have
1457       // stripped a gep with a negative index ('gep <ptr>, -1, ...').
1458       if (V1Size != LocationSize::unknown() &&
1459           V2Size != LocationSize::unknown()) {
1460         if ((-GEP1BaseOffset).ult(V1Size.getValue()))
1461           return PartialAlias;
1462         return NoAlias;
1463       }
1464     }
1465   }
1466 
1467   if (!DecompGEP1.VarIndices.empty()) {
1468     APInt Modulo(MaxPointerSize, 0);
1469     bool AllPositive = true;
1470     for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1471 
1472       // Try to distinguish something like &A[i][1] against &A[42][0].
1473       // OR together the scales; their least significant set bit is isolated
1474       // after the loop. We don't need std::abs here (even if a scale is
1475       // negative) because negation preserves the lowest set bit.
1476       Modulo |= DecompGEP1.VarIndices[i].Scale;
1477 
1478       if (AllPositive) {
1479         // If the Value could change between cycles, then any reasoning about
1480         // the Value this cycle may not hold in the next cycle. We'll just
1481         // give up if we can't determine conditions that hold for every cycle:
1482         const Value *V = DecompGEP1.VarIndices[i].V;
1483 
1484         KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
1485         bool SignKnownZero = Known.isNonNegative();
1486         bool SignKnownOne = Known.isNegative();
1487 
1488         // Zero-extension widens the variable, and so forces the sign
1489         // bit to zero.
1490         bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
1491         SignKnownZero |= IsZExt;
1492         SignKnownOne &= !IsZExt;
1493 
1494         // If the variable begins with a zero then we know it's
1495         // positive, regardless of whether the value is signed or
1496         // unsigned.
1497         APInt Scale = DecompGEP1.VarIndices[i].Scale;
1498         AllPositive =
1499             (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
1500       }
1501     }
1502 
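         // Isolate the least significant set bit: Modulo & (Modulo - 1)
         // clears that bit, and XOR'ing with the original value leaves only
         // it; e.g. 0b10100 becomes 0b00100.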
1503     Modulo = Modulo ^ (Modulo & (Modulo - 1));
1504 
1505     // We can compute the difference between the two addresses
1506     // mod Modulo. Check whether that difference guarantees that the
1507     // two locations do not alias.
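         // For example, with a stride of 4 (Modulo == 4) and ModOffset == 2,
         // 2-byte accesses at the two pointers cannot overlap: one access
         // occupies bytes [0, 2) and the other bytes [2, 4) of each stride.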
1508     APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
1509     if (V1Size != LocationSize::unknown() &&
1510         V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
1511         (Modulo - ModOffset).uge(V1Size.getValue()))
1512       return NoAlias;
1513 
1514     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
1515     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
1516     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
1517     if (AllPositive && GEP1BaseOffset.sgt(0) &&
1518         V2Size != LocationSize::unknown() &&
1519         GEP1BaseOffset.uge(V2Size.getValue()))
1520       return NoAlias;
1521 
1522     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
1523                                 GEP1BaseOffset, &AC, DT))
1524       return NoAlias;
1525   }
1526 
1527   // Statically, we can see that the base objects are the same, but the
1528   // pointers have dynamic offsets which we can't resolve. And none of our
1529   // little tricks above worked.
1530   return MayAlias;
1531 }
1532 
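     /// Merge two alias results conservatively: equal results are kept as-is,
     /// PartialAlias combined with MustAlias gives PartialAlias, and any other
     /// mix degrades to MayAlias.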
1533 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1534   // If the results agree, take it.
1535   if (A == B)
1536     return A;
1537   // A mix of PartialAlias and MustAlias is PartialAlias.
1538   if ((A == PartialAlias && B == MustAlias) ||
1539       (B == PartialAlias && A == MustAlias))
1540     return PartialAlias;
1541   // Otherwise, we don't know anything.
1542   return MayAlias;
1543 }
1544 
1545 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1546 /// against another.
1547 AliasResult
1548 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1549                            const AAMDNodes &SIAAInfo, const Value *V2,
1550                            LocationSize V2Size, const AAMDNodes &V2AAInfo,
1551                            const Value *UnderV2, AAQueryInfo &AAQI) {
1552   // If the values are Selects with the same condition, we can do a more precise
1553   // check: just check for aliases between the values on corresponding arms.
1554   if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1555     if (SI->getCondition() == SI2->getCondition()) {
1556       AliasResult Alias =
1557           aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
1558                      V2Size, V2AAInfo, AAQI);
1559       if (Alias == MayAlias)
1560         return MayAlias;
1561       AliasResult ThisAlias =
1562           aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
1563                      SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
1564       return MergeAliasResults(ThisAlias, Alias);
1565     }
1566 
1567   // If both arms of the Select node NoAlias or MustAlias V2, then return
1568   // NoAlias / MustAlias. Otherwise, return MayAlias.
1569   AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
1570                                  SISize, SIAAInfo, AAQI, UnderV2);
1571   if (Alias == MayAlias)
1572     return MayAlias;
1573 
1574   AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
1575                                      SISize, SIAAInfo, AAQI, UnderV2);
1576   return MergeAliasResults(ThisAlias, Alias);
1577 }
1578 
1579 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1580 /// another.
1581 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1582                                     const AAMDNodes &PNAAInfo, const Value *V2,
1583                                     LocationSize V2Size,
1584                                     const AAMDNodes &V2AAInfo,
1585                                     const Value *UnderV2, AAQueryInfo &AAQI) {
1586   // Track phi nodes we have visited. We use this information when we determine
1587   // value equivalence.
1588   VisitedPhiBBs.insert(PN->getParent());
1589 
1590   // If the values are PHIs in the same block, we can do a check that is
1591   // both more precise and more efficient: just check for aliases between
1592   // the values on corresponding edges.
1593   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1594     if (PN2->getParent() == PN->getParent()) {
1595       AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
1596                                 MemoryLocation(V2, V2Size, V2AAInfo));
1597       if (PN > V2)
1598         std::swap(Locs.first, Locs.second);
1599       // Analyse the PHIs' inputs under the assumption that the PHIs are
1600       // NoAlias.
1601       // If the PHIs are May/MustAlias there must be (recursively) an input
1602       // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
1603       // there must be an operation on the PHIs within the PHIs' value cycle
1604       // that causes a MayAlias.
1605       // Pretend the phis do not alias.
1606       AliasResult Alias = NoAlias;
1607       AliasResult OrigAliasResult;
1608       {
1609         // Limited lifetime iterator invalidated by the aliasCheck call below.
1610         auto CacheIt = AAQI.AliasCache.find(Locs);
1611         assert((CacheIt != AAQI.AliasCache.end()) &&
1612                "There must exist an entry for the phi node");
1613         OrigAliasResult = CacheIt->second;
1614         CacheIt->second = NoAlias;
1615       }
1616 
1617       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1618         AliasResult ThisAlias =
1619             aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
1620                        PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
1621                        V2Size, V2AAInfo, AAQI);
1622         Alias = MergeAliasResults(ThisAlias, Alias);
1623         if (Alias == MayAlias)
1624           break;
1625       }
1626 
1627       // Reset if speculation failed.
1628       if (Alias != NoAlias) {
1629         auto Pair =
1630             AAQI.AliasCache.insert(std::make_pair(Locs, OrigAliasResult));
1631         assert(!Pair.second && "Entry must have existed");
1632         Pair.first->second = OrigAliasResult;
1633       }
1634       return Alias;
1635     }
1636 
1637   SmallVector<Value *, 4> V1Srcs;
1638   bool isRecursive = false;
1639   if (PV)  {
1640     // If we have PhiValues then use it to get the underlying phi values.
1641     const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
1642     // If we have more phi values than the search depth then return MayAlias
1643     // conservatively to avoid a compile-time explosion. The worst possible
1644     // case is when both sides are PHI nodes, in which case this is O(m x n)
1645     // time, where 'm' and 'n' are the number of PHI sources.
1646     if (PhiValueSet.size() > MaxLookupSearchDepth)
1647       return MayAlias;
1648     // Add the values to V1Srcs
1649     for (Value *PV1 : PhiValueSet) {
1650       if (EnableRecPhiAnalysis) {
1651         if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1652           // Check whether the incoming value is a GEP that advances the pointer
1653           // result of this PHI node (e.g. in a loop). If this is the case, we
1654           // would recurse and always get a MayAlias. Handle this case specially
1655           // below.
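               // A typical instance (illustrative IR) is a pointer induction
               // variable:
               //   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
               //   %p.next = getelementptr inbounds i8, i8* %p, i64 1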
1656           if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1657               isa<ConstantInt>(PV1GEP->idx_begin())) {
1658             isRecursive = true;
1659             continue;
1660           }
1661         }
1662       }
1663       V1Srcs.push_back(PV1);
1664     }
1665   } else {
1666     // If we don't have PhiValues then just look at the phi's own operands.
1667     // FIXME: Remove this once we can guarantee that PhiValues is always available.
1668     SmallPtrSet<Value *, 4> UniqueSrc;
1669     for (Value *PV1 : PN->incoming_values()) {
1670       if (isa<PHINode>(PV1))
1671         // If any of the sources is itself a PHI, return MayAlias conservatively
1672         // to avoid a compile-time explosion. The worst possible case is when
1673         // both sides are PHI nodes, in which case this is O(m x n) time, where
1674         // 'm' and 'n' are the number of PHI sources.
1675         return MayAlias;
1676 
1677       if (EnableRecPhiAnalysis)
1678         if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
1679           // Check whether the incoming value is a GEP that advances the pointer
1680           // result of this PHI node (e.g. in a loop). If this is the case, we
1681           // would recurse and always get a MayAlias. Handle this case specially
1682           // below.
1683           if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
1684               isa<ConstantInt>(PV1GEP->idx_begin())) {
1685             isRecursive = true;
1686             continue;
1687           }
1688         }
1689 
1690       if (UniqueSrc.insert(PV1).second)
1691         V1Srcs.push_back(PV1);
1692     }
1693   }
1694 
1695   // If V1Srcs is empty then the phi has no underlying non-phi value. This
1696   // should only be possible in blocks unreachable from the entry block, but
1697   // return MayAlias just in case.
1698   if (V1Srcs.empty())
1699     return MayAlias;
1700 
1701   // If this PHI node is recursive, set the size of the accessed memory to
1702   // unknown to represent all the possible values the GEP could advance the
1703   // pointer to.
1704   if (isRecursive)
1705     PNSize = LocationSize::unknown();
1706 
1707   AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
1708                                  PNAAInfo, AAQI, UnderV2);
1709 
1710   // Early exit if the check of the first PHI source against V2 is MayAlias.
1711   // Other results are not possible.
1712   if (Alias == MayAlias)
1713     return MayAlias;
1714 
1715   // If all sources of the PHI node NoAlias or MustAlias V2, then return
1716   // NoAlias / MustAlias. Otherwise, return MayAlias.
1717   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1718     Value *V = V1Srcs[i];
1719 
1720     AliasResult ThisAlias =
1721         aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, AAQI, UnderV2);
1722     Alias = MergeAliasResults(ThisAlias, Alias);
1723     if (Alias == MayAlias)
1724       break;
1725   }
1726 
1727   return Alias;
1728 }
1729 
1730 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1731 /// array references.
1732 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1733                                       AAMDNodes V1AAInfo, const Value *V2,
1734                                       LocationSize V2Size, AAMDNodes V2AAInfo,
1735                                       AAQueryInfo &AAQI, const Value *O1,
1736                                       const Value *O2) {
1737   // If either of the memory references is empty, it doesn't matter what the
1738   // pointer values are.
1739   if (V1Size.isZero() || V2Size.isZero())
1740     return NoAlias;
1741 
1742   // Strip off any casts if they exist.
1743   V1 = V1->stripPointerCastsAndInvariantGroups();
1744   V2 = V2->stripPointerCastsAndInvariantGroups();
1745 
1746   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1747   // value for undef that aliases nothing in the program.
1748   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1749     return NoAlias;
1750 
1751   // Are we checking for alias of the same value?
1752   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1753   // different iterations. We must therefore make sure that this is not the
1754   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1755   // happen by looking at the visited phi nodes and making sure they cannot
1756   // reach the value.
1757   if (isValueEqualInPotentialCycles(V1, V2))
1758     return MustAlias;
1759 
1760   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1761     return NoAlias; // Scalars cannot alias each other
1762 
1763   // Figure out what objects these things are pointing to if we can.
1764   if (O1 == nullptr)
1765     O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
1766 
1767   if (O2 == nullptr)
1768     O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
1769 
1770   // Null values in the default address space don't point to any object, so they
1771   // don't alias any other pointer.
1772   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1773     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1774       return NoAlias;
1775   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1776     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1777       return NoAlias;
1778 
1779   if (O1 != O2) {
1780     // If V1/V2 point to two different objects, we know that we have no alias.
1781     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1782       return NoAlias;
1783 
1784     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1785     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1786         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1787       return NoAlias;
1788 
1789     // Function arguments can't alias with things that are known to be
1790     // unambiguously identified at the function level.
1791     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1792         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1793       return NoAlias;
1794 
1795     // If one pointer is the result of a call/invoke or load and the other is a
1796     // non-escaping local object within the same function, then we know the
1797     // object couldn't escape to a point where the call could return it.
1798     //
1799     // Note that if the pointers are in different functions, there are a
1800     // variety of complications. A call with a nocapture argument may still
1801     // temporarily store the nocapture argument's value in a temporary memory
1802     // location if that memory location doesn't escape. Or it may pass a
1803     // nocapture value to other functions as long as they don't capture it.
1804     if (isEscapeSource(O1) &&
1805         isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
1806       return NoAlias;
1807     if (isEscapeSource(O2) &&
1808         isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
1809       return NoAlias;
1810   }
1811 
1812   // If the size of one access is larger than the entire object on the other
1813   // side, then we know such behavior is undefined and can assume no alias.
1814   bool NullIsValidLocation = NullPointerIsDefined(&F);
1815   if ((isObjectSmallerThan(
1816           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1817           TLI, NullIsValidLocation)) ||
1818       (isObjectSmallerThan(
1819           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1820           TLI, NullIsValidLocation)))
1821     return NoAlias;
1822 
1823   // Check the cache before climbing up use-def chains. This also terminates
1824   // otherwise infinitely recursive queries.
1825   AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
1826                             MemoryLocation(V2, V2Size, V2AAInfo));
1827   if (V1 > V2)
1828     std::swap(Locs.first, Locs.second);
1829   std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
1830       AAQI.AliasCache.try_emplace(Locs, MayAlias);
1831   if (!Pair.second)
1832     return Pair.first->second;
1833 
1834   // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
1835   // GEP can't simplify, we don't even look at the PHI cases.
1836   if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
1837     std::swap(V1, V2);
1838     std::swap(V1Size, V2Size);
1839     std::swap(O1, O2);
1840     std::swap(V1AAInfo, V2AAInfo);
1841   }
1842   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1843     AliasResult Result =
1844         aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
1845     if (Result != MayAlias) {
1846       auto ItInsPair = AAQI.AliasCache.insert(std::make_pair(Locs, Result));
1847       assert(!ItInsPair.second && "Entry must have existed");
1848       ItInsPair.first->second = Result;
1849       return Result;
1850     }
1851   }
1852 
1853   if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
1854     std::swap(V1, V2);
1855     std::swap(O1, O2);
1856     std::swap(V1Size, V2Size);
1857     std::swap(V1AAInfo, V2AAInfo);
1858   }
1859   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1860     AliasResult Result =
1861         aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
1862     if (Result != MayAlias) {
1863       Pair = AAQI.AliasCache.try_emplace(Locs, Result);
1864       assert(!Pair.second && "Entry must have existed");
1865       return Pair.first->second = Result;
1866     }
1867   }
1868 
1869   if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
1870     std::swap(V1, V2);
1871     std::swap(O1, O2);
1872     std::swap(V1Size, V2Size);
1873     std::swap(V1AAInfo, V2AAInfo);
1874   }
1875   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1876     AliasResult Result =
1877         aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
1878     if (Result != MayAlias) {
1879       Pair = AAQI.AliasCache.try_emplace(Locs, Result);
1880       assert(!Pair.second && "Entry must have existed");
1881       return Pair.first->second = Result;
1882     }
1883   }
1884 
1885   // If both pointers are pointing into the same object and one of them
1886   // accesses the entire object, then the accesses must overlap in some way.
1887   if (O1 == O2)
1888     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1889         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1890          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) {
1891       Pair = AAQI.AliasCache.try_emplace(Locs, PartialAlias);
1892       assert(!Pair.second && "Entry must have existed");
1893       return Pair.first->second = PartialAlias;
1894     }
1895 
1896   // Recurse back into the best AA results we have, potentially with refined
1897   // memory locations. We have already ensured that BasicAA has a MayAlias
1898   // cache result for these, so any recursion back into BasicAA won't loop.
1899   AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
1900   Pair = AAQI.AliasCache.try_emplace(Locs, Result);
1901   assert(!Pair.second && "Entry must have existed");
1902   return Pair.first->second = Result;
1903 }
1904 
1905 /// Check whether two Values can be considered equivalent.
1906 ///
1907 /// In addition to pointer equivalence of \p V and \p V2 this checks whether
1908 /// they cannot be part of a cycle in the value graph by looking at all
1909 /// visited phi nodes and making sure that the phis cannot reach the value. We
1910 /// have to do this because we are looking through phi nodes (that is, we say
1911 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
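     /// As an illustration (not taken from the code below), for a loop phi
     ///   %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
     /// with %p.next = getelementptr i8, i8* %p, i64 1, the single SSA value
     /// %p denotes a different address on each iteration, so pointer equality
     /// of the Values alone is not sufficient.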
1912 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1913                                                   const Value *V2) {
1914   if (V != V2)
1915     return false;
1916 
1917   const Instruction *Inst = dyn_cast<Instruction>(V);
1918   if (!Inst)
1919     return true;
1920 
1921   if (VisitedPhiBBs.empty())
1922     return true;
1923 
1924   if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
1925     return false;
1926 
1927   // Make sure that the visited phis cannot reach the Value. This ensures that
1928   // the Values cannot come from different iterations of a potential cycle the
1929   // phi nodes could be involved in.
1930   for (auto *P : VisitedPhiBBs)
1931     if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
1932       return false;
1933 
1934   return true;
1935 }
1936 
1937 /// Computes the symbolic difference between two decomposed GEPs.
1938 ///
1939 /// Dest and Src are the variable indices from two decomposed GetElementPtr
1940 /// instructions GEP1 and GEP2 which have common base pointers.
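     /// For instance (illustrative values): with Dest = { 4*%i, 2*%j } and
     /// Src = { 4*%i, 8*%k }, the common 4*%i term cancels and Dest becomes
     /// { 2*%j, -8*%k }.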
1941 void BasicAAResult::GetIndexDifference(
1942     SmallVectorImpl<VariableGEPIndex> &Dest,
1943     const SmallVectorImpl<VariableGEPIndex> &Src) {
1944   if (Src.empty())
1945     return;
1946 
1947   for (unsigned i = 0, e = Src.size(); i != e; ++i) {
1948     const Value *V = Src[i].V;
1949     unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
1950     APInt Scale = Src[i].Scale;
1951 
1952     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1953     // than a few variable indexes.
1954     // than a few variable indices.
1955       if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
1956           Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
1957         continue;
1958 
1959       // If we found it, subtract off Scale V's from the entry in Dest.  If
1960       // the resulting scale is zero, remove the entry.
1961       if (Dest[j].Scale != Scale)
1962         Dest[j].Scale -= Scale;
1963       else
1964         Dest.erase(Dest.begin() + j);
1965       Scale = 0;
1966       break;
1967     }
1968 
1969     // If we didn't consume this entry, add it to the end of the Dest list.
1970     if (!!Scale) {
1971       VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
1972       Dest.push_back(Entry);
1973     }
1974   }
1975 }
1976 
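     /// Heuristic, sketched: handles exactly two variable indices with
     /// opposite scales, e.g. the difference of &A[i] and &A[i + 1]. If
     /// GetLinearExpression shows that both index values are the same value
     /// up to a constant offset, the pointers differ by a computable minimum
     /// distance (modulo wrapping), which may be enough to prove the two
     /// accesses disjoint.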
1977 bool BasicAAResult::constantOffsetHeuristic(
1978     const SmallVectorImpl<VariableGEPIndex> &VarIndices,
1979     LocationSize MaybeV1Size, LocationSize MaybeV2Size, APInt BaseOffset,
1980     AssumptionCache *AC, DominatorTree *DT) {
1981   if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
1982       MaybeV2Size == LocationSize::unknown())
1983     return false;
1984 
1985   const uint64_t V1Size = MaybeV1Size.getValue();
1986   const uint64_t V2Size = MaybeV2Size.getValue();
1987 
1988   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
1989 
1990   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
1991       Var0.Scale != -Var1.Scale)
1992     return false;
1993 
1994   unsigned Width = Var1.V->getType()->getIntegerBitWidth();
1995 
1996   // We'll strip off the extensions of Var0 and Var1 and do another round
1997   // of GetLinearExpression decomposition. For example, if Var0 is
1998   // zext(%x + 1) we should get V0 == %x and V0Offset == 1.
1999 
2000   APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
2001       V1Offset(Width, 0);
2002   bool NSW = true, NUW = true;
2003   unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
2004   const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
2005                                         V0SExtBits, DL, 0, AC, DT, NSW, NUW);
2006   NSW = true;
2007   NUW = true;
2008   const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
2009                                         V1SExtBits, DL, 0, AC, DT, NSW, NUW);
2010 
2011   if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
2012       V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
2013     return false;
2014 
2015   // We have a hit - Var0 and Var1 only differ by a constant offset!
2016 
2017   // If we've been sext'ed then zext'd, the maximum difference between Var0
2018   // and Var1 is possible to calculate, but we're just interested in the absolute
2019   // minimum difference between the two. The minimum distance may occur due to
2020   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
2021   // the minimum distance between %i and %i + 5 is 3.
2022   APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
2023   MinDiff = APIntOps::umin(MinDiff, Wrapped);
2024   APInt MinDiffBytes =
2025     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
2026 
2027   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
2028   // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
2029   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
2030   // V2Size can fit in the MinDiffBytes gap.
2031   return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
2032          MinDiffBytes.uge(V2Size + BaseOffset.abs());
2033 }
2034 
2035 //===----------------------------------------------------------------------===//
2036 // BasicAliasAnalysis Pass
2037 //===----------------------------------------------------------------------===//
2038 
2039 AnalysisKey BasicAA::Key;
2040 
2041 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
2042   return BasicAAResult(F.getParent()->getDataLayout(),
2043                        F,
2044                        AM.getResult<TargetLibraryAnalysis>(F),
2045                        AM.getResult<AssumptionAnalysis>(F),
2046                        &AM.getResult<DominatorTreeAnalysis>(F),
2047                        AM.getCachedResult<LoopAnalysis>(F),
2048                        AM.getCachedResult<PhiValuesAnalysis>(F));
2049 }
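     // Illustrative usage (assumes a standard LLVM 'opt' binary; 'input.ll'
     // is a placeholder): the results of this analysis can be inspected with
     // the alias-analysis evaluator, e.g.
     //   opt -aa-pipeline=basic-aa -passes=aa-eval \
     //       -print-all-alias-modref-info -disable-output input.ll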
2050 
2051 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
2052   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
2053 }
2054 
2055 char BasicAAWrapperPass::ID = 0;
2056 
2057 void BasicAAWrapperPass::anchor() {}
2058 
2059 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
2060                       "Basic Alias Analysis (stateless AA impl)", false, true)
2061 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
2062 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2063 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2064 INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
2065                     "Basic Alias Analysis (stateless AA impl)", false, true)
2066 
2067 FunctionPass *llvm::createBasicAAWrapperPass() {
2068   return new BasicAAWrapperPass();
2069 }
2070 
2071 bool BasicAAWrapperPass::runOnFunction(Function &F) {
2072   auto &ACT = getAnalysis<AssumptionCacheTracker>();
2073   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
2074   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
2075   auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
2076   auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();
2077 
2078   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
2079                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
2080                                  &DTWP.getDomTree(),
2081                                  LIWP ? &LIWP->getLoopInfo() : nullptr,
2082                                  PVWP ? &PVWP->getResult() : nullptr));
2083 
2084   return false;
2085 }
2086 
2087 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2088   AU.setPreservesAll();
2089   AU.addRequired<AssumptionCacheTracker>();
2090   AU.addRequired<DominatorTreeWrapperPass>();
2091   AU.addRequired<TargetLibraryInfoWrapperPass>();
2092   AU.addUsedIfAvailable<PhiValuesWrapperPass>();
2093 }
2094 
2095 BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
2096   return BasicAAResult(
2097       F.getParent()->getDataLayout(), F,
2098       P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
2099       P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
2100 }
2101