//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <optional>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(true));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. Hitting it affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V or UnknownSize if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return TypeSize::getFixed(Size);
  return std::nullopt;
}
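
// For example, for "%a = alloca [100 x i8]" this wrapper returns
// TypeSize::getFixed(100), while for a pointer whose allocation cannot be
// identified (such as an arbitrary function argument) it returns
// std::nullopt.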

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  //  Consider this example:
  //     char *p = (char*)malloc(100)
  //     char *q = p+80;
  //
  //  In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  //  However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //    - either rewind the pointer q to the base address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
    V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed, and thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::getFixed(DerefBytes);
}
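
// For example, for an argument declared "ptr dereferenceable(16) %p" (where
// null is not a valid location), a query on %p with an imprecise location
// size still yields a minimal extent of 16 bytes; with a precise 32-byte
// location size, the minimal extent grows to 32 bytes.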

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBefore(const Value *Object,
                                            const Instruction *I, bool OrAt) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
                         const LoopInfo *LI) {
  BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return Succs.empty() ||
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
}

bool EarliestEscapeInfo::isNotCapturedBefore(const Value *Object,
                                             const Instruction *I, bool OrAt) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  if (I == Iter.first->second) {
    if (OrAt)
      return false;
    return isNotInCycle(I, &DT, LI);
  }

  return !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
}
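
// An illustrative sketch of the logic above: if an alloca %obj is first
// captured by "store ptr %obj, ptr @g" at instruction C, then queries at
// instructions that C cannot reach return true; a query at C itself returns
// false when OrAt is set (and otherwise depends on whether C sits in a
// cycle); and queries at instructions reachable from C return false.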

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};
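
// A worked example of the encoding above (for illustration): for an i16
// value V with TruncBits = 8, SExtBits = 4 and ZExtBits = 4, the struct
// represents zext(sext(trunc(V to i8) to i12) to i16), so getBitWidth() is
// 16 - 8 + 4 + 4 = 16 and evaluateWith(0x00FF) computes
// trunc -> 0xFF, sext -> 0xFFF, zext -> 0x0FFF.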

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
}

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X.  Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(),
                               SimplifyQuery(DL, DT, AC, BOp)))
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}
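
// An illustrative sketch of the decomposition above: for
//   %a = add nsw i64 %x, 4
//   %e = shl nsw i64 %a, 1
// analyzing CastedValue(%e) yields the linear expression %x * 2 + 8 with
// IsNSW set, while an expression whose nowrap flags do not permit
// distributing the casts is returned as-is.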

/// Ensure that a pointer offset fits in an integer of size IndexSize
/// (in bits) when that size is smaller than the maximum index size. This is
/// an issue, for example, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum index size is 64b.
static void adjustToIndexSize(APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  if (ShiftBits != 0) {
    Offset <<= ShiftBits;
    Offset.ashrInPlace(ShiftBits);
  }
}
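
// Worked example (for illustration): with a 64-bit Offset of
// 0x00000000FFFFFFFF and IndexSize = 32, ShiftBits is 32; shifting left and
// then arithmetic-shifting right reinterprets the low 32 bits as a signed
// value, giving 0xFFFFFFFFFFFFFFFF (i.e. -1), which matches two's complement
// wrapping in the narrower index type.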

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
}

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (std::nullopt iff expression doesn't involve any geps)
  std::optional<bool> InBounds;

  void dump() const {
    print(dbgs());
    dbgs() << "\n";
  }
  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};


/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with a returns attribute), it is crucial to
        // use this function because it should stay in sync with
        // CaptureTracking. Not using it may cause weird miscompilations where
        // two aliasing pointers are assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }

    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == std::nullopt)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }

      TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          LE.IsNSW = false; // We cannot guarantee nsw for the merge.
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // index size.
      adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
                                  /* IsNegated */ false};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset)
      adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
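
// An illustrative sketch of the result shape: for
//   %p = getelementptr inbounds [10 x i32], ptr %base, i64 0, i64 %i
// the decomposition is Base = %base, Offset = 0, and a single variable index
// for %i with Scale = 4 (the i32 element size), with InBounds set to true.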

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return ModRefInfo::ModRef;
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return ModRefInfo::ModRef;
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return ModRefInfo::ModRef;
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return ModRefInfo::ModRef;

  return Result;
}
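
// For example, a location based on a "noalias readonly" argument is masked
// down to ModRefInfo::Ref (the memory may be read but not modified while the
// function executes), while a location based on a constant global contributes
// no effects at all, since a global constant can never be mutated.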

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return ModRefInfo::ModRef;
}
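
// For example, for a call to "declare void @f(ptr readonly %p)", querying
// ArgIdx 0 returns ModRefInfo::Ref; writeonly would give ModRefInfo::Mod,
// readnone ModRefInfo::NoModRef, and an unannotated pointer argument the
// conservative ModRefInfo::ModRef.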

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // A call can access a locally allocated object either because it is passed as
  // an argument to the call, or because it has escaped prior to the call.
  //
  // Make sure the object has not escaped here, and then check that none of the
  // call arguments alias the object below.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBefore(Object, Call, /*OrAt*/ false)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      if (!(*CI)->getType()->isPointerTy())
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        Result |= ModRefInfo::Mod;
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // Be conservative.
  return ModRefInfo::ModRef;
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // Be conservative.
  return ModRefInfo::ModRef;
}

/// Return true if we know V to be the base address of the corresponding
/// memory object.  This implies that any address less than V must be out of
/// bounds for the underlying object.  Note that just being isIdentifiedObject()
/// is not enough; for example, a negative offset from a noalias argument or
/// call can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers.  Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  // TODO: Remove !isScalable() once BasicAA fully support scalable location
  // size
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && !V2Size.isScalable() &&
      DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && !V1Size.isScalable() &&
        DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // Bail on analysing scalable LocationSize
  if (V1Size.isScalable() || V2Size.isScalable())
    return AliasResult::MayAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping.  If the difference is
  // greater, we know they do not overlap.
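  // For example, if GEP1 = V2 + 20 and the V2 access is 24 bytes wide, the
  // accesses overlap in bytes [20, 24) of V2 and the result is PartialAlias
  // (with the exact offset recorded when the right access nests inside the
  // left); if instead GEP1 = V2 + 24 or more, the accesses cannot overlap.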
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const uint64_t LSize = VLeftSize.getValue();
    if (Off.ult(LSize)) {
      // Conservatively drop processing if a phi was visited and/or offset is
      // too big.
      AliasResult AR = AliasResult::PartialAlias;
      if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
          (Off + VRightSize.getValue()).ule(LSize)) {
        // Memory referenced by the right pointer is nested inside the left.
        // Save the offset in the cache. Note that the offset was originally
        // estimated as GEP1 - V2, but AliasResult stores the shift that
        // represents GEP1 + Offset = V2.
        AR.setOffset(-Off.getSExtValue());
        AR.swap(Swapped);
      }
      return AR;
    }
    return AliasResult::NoAlias;
  }

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      CR = CR.smul_sat(ConstantRange(Scale));
    else
      CR = CR.smul_fast(ConstantRange(Scale));

    if (Index.IsNegated)
      OffsetRange = OffsetRange.sub(CR);
    else
      OffsetRange = OffsetRange.add(CR);
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
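  // Worked example: for accesses at 4*i + 2 (size 2) and at offset 0
  // (size 2), GCD = 4 and ModOffset = 2; modulo 4 the first access covers
  // [2, 4) and the second [0, 2), so they cannot overlap and the result is
  // NoAlias.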
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  unsigned BW = OffsetRange.getBitWidth();
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
        // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
        // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                        DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      AliasResult Alias =
          AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                         MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias =
          AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                         MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node alias V2 as NoAlias or MustAlias, return
  // NoAlias / MustAlias accordingly. Otherwise, return MayAlias.
1328   AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1329                                      MemoryLocation(V2, V2Size), AAQI);
1330   if (Alias == AliasResult::MayAlias)
1331     return AliasResult::MayAlias;
1332 
1333   AliasResult ThisAlias =
1334       AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1335                      MemoryLocation(V2, V2Size), AAQI);
1336   return MergeAliasResults(ThisAlias, Alias);
1337 }
1338 
1339 /// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
1340 /// another.
1341 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1342                                     const Value *V2, LocationSize V2Size,
1343                                     AAQueryInfo &AAQI) {
1344   if (!PN->getNumIncomingValues())
1345     return AliasResult::NoAlias;
1346   // If the values are PHIs in the same block, we can do a more precise and
1347   // more efficient check: just check for aliases between the values on
1348   // corresponding edges.
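  // As an illustrative sketch (hypothetical IR):
  //   %pn  = phi ptr [ %a, %bb1 ], [ %b, %bb2 ]
  //   %pn2 = phi ptr [ %c, %bb1 ], [ %d, %bb2 ]
  // Both phis select values for the same incoming edge, so alias(%pn, %pn2)
  // reduces to merging alias(%a, %c) with alias(%b, %d).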
1349   if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1350     if (PN2->getParent() == PN->getParent()) {
1351       std::optional<AliasResult> Alias;
1352       for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1353         AliasResult ThisAlias = AAQI.AAR.alias(
1354             MemoryLocation(PN->getIncomingValue(i), PNSize),
1355             MemoryLocation(
1356                 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1357             AAQI);
1358         if (Alias)
1359           *Alias = MergeAliasResults(*Alias, ThisAlias);
1360         else
1361           Alias = ThisAlias;
1362         if (*Alias == AliasResult::MayAlias)
1363           break;
1364       }
1365       return *Alias;
1366     }
1367 
1368   SmallVector<Value *, 4> V1Srcs;
1369   // If a phi operand recurses back to the phi, we can still determine NoAlias
1370   // if we don't alias the underlying objects of the other phi operands, as we
1371   // know that the recursive phi needs to be based on them in some way.
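  // As an illustrative sketch, a simple pointer induction variable
  // (hypothetical IR):
  //   %p      = phi ptr [ %base, %entry ], [ %p.next, %loop ]
  //   %p.next = getelementptr i8, ptr %p, i64 4
  // %p.next recurses back to the phi, so %base is the only non-phi source
  // left to compare against the other pointer.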
1372   bool isRecursive = false;
1373   auto CheckForRecPhi = [&](Value *PV) {
1374     if (!EnableRecPhiAnalysis)
1375       return false;
1376     if (getUnderlyingObject(PV) == PN) {
1377       isRecursive = true;
1378       return true;
1379     }
1380     return false;
1381   };
1382 
1383   SmallPtrSet<Value *, 4> UniqueSrc;
1384   Value *OnePhi = nullptr;
1385   for (Value *PV1 : PN->incoming_values()) {
1386     // Skip the phi itself being the incoming value.
1387     if (PV1 == PN)
1388       continue;
1389 
1390     if (isa<PHINode>(PV1)) {
1391       if (OnePhi && OnePhi != PV1) {
1392         // To control potential compile time explosion, we choose to be
1393         // conservative when we have more than one Phi input.  It is important
1394         // that we handle the single phi case as that lets us handle LCSSA
1395         // phi nodes and (combined with the recursive phi handling) simple
1396         // pointer induction variable patterns.
1397         return AliasResult::MayAlias;
1398       }
1399       OnePhi = PV1;
1400     }
1401 
1402     if (CheckForRecPhi(PV1))
1403       continue;
1404 
1405     if (UniqueSrc.insert(PV1).second)
1406       V1Srcs.push_back(PV1);
1407   }
1408 
1409   if (OnePhi && UniqueSrc.size() > 1)
1410     // Out of an abundance of caution, allow only the trivial LCSSA and
1411     // recursive phi cases.
1412     return AliasResult::MayAlias;
1413 
1414   // If V1Srcs is empty, the phi has no underlying non-phi
1415   // value. This should only be possible in blocks unreachable from the entry
1416   // block, but return MayAlias just in case.
1417   if (V1Srcs.empty())
1418     return AliasResult::MayAlias;
1419 
1420   // If this PHI node is recursive, indicate that the pointer may be moved
1421   // across iterations. We can only prove NoAlias if different underlying
1422   // objects are involved.
1423   if (isRecursive)
1424     PNSize = LocationSize::beforeOrAfterPointer();
1425 
1426   // In the recursive alias queries below, we may compare values from two
1427   // different loop iterations.
1428   SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);
1429 
1430   AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
1431                                      MemoryLocation(V2, V2Size), AAQI);
1432 
1433   // Early exit if the check of the first PHI source against V2 is MayAlias.
1434   // Other results are not possible.
1435   if (Alias == AliasResult::MayAlias)
1436     return AliasResult::MayAlias;
1437   // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1438   // remain valid for all elements, so we conservatively return MayAlias.
1439   if (isRecursive && Alias != AliasResult::NoAlias)
1440     return AliasResult::MayAlias;
1441 
1442   // If all sources of the PHI node are NoAlias or MustAlias with V2, return
1443   // NoAlias / MustAlias accordingly. Otherwise, return MayAlias.
1444   for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
1445     Value *V = V1Srcs[i];
1446 
1447     AliasResult ThisAlias = AAQI.AAR.alias(
1448         MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
1449     Alias = MergeAliasResults(ThisAlias, Alias);
1450     if (Alias == AliasResult::MayAlias)
1451       break;
1452   }
1453 
1454   return Alias;
1455 }
1456 
1457 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1458 /// array references.
1459 AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
1460                                       const Value *V2, LocationSize V2Size,
1461                                       AAQueryInfo &AAQI,
1462                                       const Instruction *CtxI) {
1463   // If either of the memory references is empty, it doesn't matter what the
1464   // pointer values are.
1465   if (V1Size.isZero() || V2Size.isZero())
1466     return AliasResult::NoAlias;
1467 
1468   // Strip off any casts if they exist.
1469   V1 = V1->stripPointerCastsForAliasAnalysis();
1470   V2 = V2->stripPointerCastsForAliasAnalysis();
1471 
1472   // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1473   // value for undef that aliases nothing in the program.
1474   if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
1475     return AliasResult::NoAlias;
1476 
1477   // Are we checking for alias of the same value?
1478   // Because we look 'through' phi nodes, we could look at "Value" pointers from
1479   // different iterations. We must therefore make sure that this is not the
1480   // case. The function isValueEqualInPotentialCycles ensures that this cannot
1481   // happen by looking at the visited phi nodes and making sure they cannot
1482   // reach the value.
1483   if (isValueEqualInPotentialCycles(V1, V2, AAQI))
1484     return AliasResult::MustAlias;
1485 
1486   if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
1487     return AliasResult::NoAlias; // Scalars cannot alias each other
1488 
1489   // Figure out what objects these things are pointing to if we can.
1490   const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
1491   const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);
1492 
1493   // Null values in the default address space don't point to any object, so they
1494   // don't alias any other pointer.
1495   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
1496     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1497       return AliasResult::NoAlias;
1498   if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
1499     if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
1500       return AliasResult::NoAlias;
1501 
1502   if (O1 != O2) {
1503     // If V1/V2 point to two different objects, we know that we have no alias.
1504     if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
1505       return AliasResult::NoAlias;
1506 
1507     // Constant pointers can't alias with non-const isIdentifiedObject objects.
1508     if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
1509         (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
1510       return AliasResult::NoAlias;
1511 
1512     // Function arguments can't alias with things that are known to be
1513     // unambiguously identified at the function level.
1514     if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
1515         (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
1516       return AliasResult::NoAlias;
1517 
1518     // If one pointer is the result of a call/invoke or load and the other is a
1519     // non-escaping local object within the same function, then we know the
1520     // object couldn't escape to a point where the call could return it.
1521     //
1522     // Note that if the pointers are in different functions, there are a
1523     // variety of complications. A call with a nocapture argument may still
1524     // temporarily store the nocapture argument's value in a temporary memory
1525     // location if that memory location doesn't escape. Or it may pass a
1526     // nocapture value to other functions as long as they don't capture it.
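    // For example, a pointer returned by an opaque call cannot alias a local
    // alloca whose address was not captured before the call: the callee had
    // no way to obtain the alloca's address.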
1527     if (isEscapeSource(O1) &&
1528         AAQI.CI->isNotCapturedBefore(O2, cast<Instruction>(O1), /*OrAt*/ true))
1529       return AliasResult::NoAlias;
1530     if (isEscapeSource(O2) &&
1531         AAQI.CI->isNotCapturedBefore(O1, cast<Instruction>(O2), /*OrAt*/ true))
1532       return AliasResult::NoAlias;
1533   }
1534 
1535   // If the size of one access is larger than the entire object on the other
1536   // side, then we know such behavior is undefined and can assume no alias.
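  // For example, a 16-byte access cannot lie entirely within a 4-byte global;
  // if it overlapped the global at all, the access would read or write past
  // the object, which is undefined behavior.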
1537   bool NullIsValidLocation = NullPointerIsDefined(&F);
1538   if ((isObjectSmallerThan(
1539           O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
1540           TLI, NullIsValidLocation)) ||
1541       (isObjectSmallerThan(
1542           O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
1543           TLI, NullIsValidLocation)))
1544     return AliasResult::NoAlias;
1545 
1546   if (EnableSeparateStorageAnalysis) {
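    // Look for "separate_storage" assumption operand bundles on O1, e.g.
    // (hypothetical IR):
    //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]
    // which asserts that %p and %q point into disjoint allocations.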
1547     for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
1548       if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
1549         continue;
1550 
1551       AssumeInst *Assume = cast<AssumeInst>(Elem);
1552       OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
1553       if (OBU.getTagName() == "separate_storage") {
1554         assert(OBU.Inputs.size() == 2);
1555         const Value *Hint1 = OBU.Inputs[0].get();
1556         const Value *Hint2 = OBU.Inputs[1].get();
1557         // This is often a no-op; instcombine rewrites this for us. No-op
1558         // getUnderlyingObject calls are fast, though.
1559         const Value *HintO1 = getUnderlyingObject(Hint1);
1560         const Value *HintO2 = getUnderlyingObject(Hint2);
1561 
1562         auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
1563           if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
1564             return isValidAssumeForContext(Assume, PtrI, DT,
1565                                            /* AllowEphemerals */ true);
1566           }
1567           if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
1568             const Instruction *FirstI =
1569                 &*PtrA->getParent()->getEntryBlock().begin();
1570             return isValidAssumeForContext(Assume, FirstI, DT,
1571                                            /* AllowEphemerals */ true);
1572           }
1573           return false;
1574         };
1575 
1576         if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
1577           // Note that we go back to V1 and V2 for the
1578           // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
1579           // so strictly more assumptions are valid for them.
1580           if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
1581                                                /* AllowEphemerals */ true)) ||
1582               ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
1583             return AliasResult::NoAlias;
1584           }
1585         }
1586       }
1587     }
1588   }
1589 
1590   // If one of the accesses may be before the accessed pointer, canonicalize this
1591   // by using unknown after-pointer sizes for both accesses. This is
1592   // equivalent, because regardless of which pointer is lower, one of them
1593   // will always come after the other, as long as the underlying objects aren't
1594   // disjoint. We do this so that the rest of BasicAA does not have to deal
1595   // with accesses before the base pointer, and to improve cache utilization by
1596   // merging equivalent states.
1597   if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
1598     V1Size = LocationSize::afterPointer();
1599     V2Size = LocationSize::afterPointer();
1600   }
1601 
1602   // FIXME: If this depth limit is hit, then we may cache sub-optimal results
1603   // for recursive queries. For this reason, this limit is chosen to be large
1604   // enough to be very rarely hit, while still being small enough to avoid
1605   // stack overflows.
1606   if (AAQI.Depth >= 512)
1607     return AliasResult::MayAlias;
1608 
1609   // Check the cache before climbing up use-def chains. This also terminates
1610   // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
1611   // cache key, because some cases where MayBeCrossIteration==false returns
1612   // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
1613   AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
1614                             {V2, V2Size, AAQI.MayBeCrossIteration});
1615   const bool Swapped = V1 > V2;
1616   if (Swapped)
1617     std::swap(Locs.first, Locs.second);
1618   const auto &Pair = AAQI.AliasCache.try_emplace(
1619       Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
1620   if (!Pair.second) {
1621     auto &Entry = Pair.first->second;
1622     if (!Entry.isDefinitive()) {
1623       // Remember that we used an assumption.
1624       ++Entry.NumAssumptionUses;
1625       ++AAQI.NumAssumptionUses;
1626     }
1627     // Cache contains sorted {V1,V2} pairs but we should return original order.
1628     auto Result = Entry.Result;
1629     Result.swap(Swapped);
1630     return Result;
1631   }
1632 
1633   int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
1634   unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
1635   AliasResult Result =
1636       aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
1637 
1638   auto It = AAQI.AliasCache.find(Locs);
1639   assert(It != AAQI.AliasCache.end() && "Must be in cache");
1640   auto &Entry = It->second;
1641 
1642   // Check whether a NoAlias assumption has been used, but disproven.
1643   bool AssumptionDisproven =
1644       Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
1645   if (AssumptionDisproven)
1646     Result = AliasResult::MayAlias;
1647 
1648   // This is a definitive result now, when considered as a root query.
1649   AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
1650   Entry.Result = Result;
1651   // Cache contains sorted {V1,V2} pairs.
1652   Entry.Result.swap(Swapped);
1653   Entry.NumAssumptionUses = -1;
1654 
1655   // If the assumption has been disproven, remove any results that may have
1656   // been based on this assumption. Do this after the Entry updates above to
1657   // avoid iterator invalidation.
1658   if (AssumptionDisproven)
1659     while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
1660       AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
1661 
1662   // The result may still be based on assumptions higher up in the chain.
1663   // Remember it, so it can be purged from the cache later.
1664   if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
1665       Result != AliasResult::MayAlias)
1666     AAQI.AssumptionBasedResults.push_back(Locs);
1667   return Result;
1668 }
1669 
1670 AliasResult BasicAAResult::aliasCheckRecursive(
1671     const Value *V1, LocationSize V1Size,
1672     const Value *V2, LocationSize V2Size,
1673     AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
1674   if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
1675     AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
1676     if (Result != AliasResult::MayAlias)
1677       return Result;
1678   } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
1679     AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
1680     Result.swap();
1681     if (Result != AliasResult::MayAlias)
1682       return Result;
1683   }
1684 
1685   if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
1686     AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
1687     if (Result != AliasResult::MayAlias)
1688       return Result;
1689   } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
1690     AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
1691     Result.swap();
1692     if (Result != AliasResult::MayAlias)
1693       return Result;
1694   }
1695 
1696   if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
1697     AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
1698     if (Result != AliasResult::MayAlias)
1699       return Result;
1700   } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
1701     AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
1702     Result.swap();
1703     if (Result != AliasResult::MayAlias)
1704       return Result;
1705   }
1706 
1707   // If both pointers are pointing into the same object and one of them
1708   // accesses the entire object, then the accesses must overlap in some way.
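  // For example, if O1 is a 16-byte alloca and the V1 access is known to
  // cover all 16 bytes, any access to the same alloca through V2 must
  // overlap it at least partially.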
1709   if (O1 == O2) {
1710     bool NullIsValidLocation = NullPointerIsDefined(&F);
1711     if (V1Size.isPrecise() && V2Size.isPrecise() &&
1712         (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
1713          isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
1714       return AliasResult::PartialAlias;
1715   }
1716 
1717   return AliasResult::MayAlias;
1718 }
1719 
1720 /// Check whether two Values can be considered equivalent.
1721 ///
1722 /// If the values may come from different cycle iterations, this will also
1723 /// check that the values are not part of a cycle. We have to do this because we
1724 /// are looking through phi nodes, that is we say
1725 /// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
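/// For example, a pointer defined inside a loop is the same Value on every
/// iteration but may hold a different address each time around; V == V2 alone
/// is therefore not sufficient when MayBeCrossIteration is set.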
1726 bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
1727                                                   const Value *V2,
1728                                                   const AAQueryInfo &AAQI) {
1729   if (V != V2)
1730     return false;
1731 
1732   if (!AAQI.MayBeCrossIteration)
1733     return true;
1734 
1735   // Non-instructions and instructions in the entry block cannot be part of
1736   // a loop.
1737   const Instruction *Inst = dyn_cast<Instruction>(V);
1738   if (!Inst || Inst->getParent()->isEntryBlock())
1739     return true;
1740 
1741   return isNotInCycle(Inst, DT, /*LI*/ nullptr);
1742 }
1743 
1744 /// Computes the symbolic difference between two decomposed GEPs.
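/// As an illustrative sketch: subtracting Src = {Offset: 4, +2 * %i} from
/// Dest = {Offset: 8, +2 * %i} cancels the matching variable index, leaving
/// Dest = {Offset: 4}; Src indices with no match in Dest are appended to
/// Dest in negated form.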
1745 void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
1746                                            const DecomposedGEP &SrcGEP,
1747                                            const AAQueryInfo &AAQI) {
1748   DestGEP.Offset -= SrcGEP.Offset;
1749   for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
1750     // Find V in Dest.  This is N^2, but pointer indices almost never have more
1751     // than a few variable indices.
1752     bool Found = false;
1753     for (auto I : enumerate(DestGEP.VarIndices)) {
1754       VariableGEPIndex &Dest = I.value();
1755       if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) ||
1756           !Dest.Val.hasSameCastsAs(Src.Val))
1757         continue;
1758 
1759       // Normalize IsNegated if we're going to lose the NSW flag anyway.
1760       if (Dest.IsNegated) {
1761         Dest.Scale = -Dest.Scale;
1762         Dest.IsNegated = false;
1763         Dest.IsNSW = false;
1764       }
1765 
1766       // If we found it, subtract off Scale V's from the entry in Dest.  If it
1767       // goes to zero, remove the entry.
1768       if (Dest.Scale != Src.Scale) {
1769         Dest.Scale -= Src.Scale;
1770         Dest.IsNSW = false;
1771       } else {
1772         DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
1773       }
1774       Found = true;
1775       break;
1776     }
1777 
1778     // If we didn't consume this entry, add it to the end of the Dest list.
1779     if (!Found) {
1780       VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
1781                                 /* IsNegated */ true};
1782       DestGEP.VarIndices.push_back(Entry);
1783     }
1784   }
1785 }
1786 
1787 bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
1788                                             LocationSize MaybeV1Size,
1789                                             LocationSize MaybeV2Size,
1790                                             AssumptionCache *AC,
1791                                             DominatorTree *DT,
1792                                             const AAQueryInfo &AAQI) {
1793   if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
1794       !MaybeV2Size.hasValue())
1795     return false;
1796 
1797   const uint64_t V1Size = MaybeV1Size.getValue();
1798   const uint64_t V2Size = MaybeV2Size.getValue();
1799 
1800   const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];
1801 
1802   if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
1803       !Var0.hasNegatedScaleOf(Var1) ||
1804       Var0.Val.V->getType() != Var1.Val.V->getType())
1805     return false;
1806 
1807   // We'll strip off the Extensions of Var0 and Var1 and do another round
1808   // of GetLinearExpression decomposition. For example, if Var0
1809   // is zext(%x + 1) we should get E0.Val == %x and E0.Offset == 1.
1810 
1811   LinearExpression E0 =
1812       GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
1813   LinearExpression E1 =
1814       GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
1815   if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
1816       !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
1817     return false;
1818 
1819   // We have a hit - Var0 and Var1 only differ by a constant offset!
1820 
1821   // If we've been sext'ed then zext'd, the maximum difference between Var0 and
1822   // Var1 is possible to calculate, but we're just interested in the absolute
1823   // minimum difference between the two. The minimum distance may occur due to
1824   // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
1825   // the minimum distance between %i and %i + 5 is 3.
1826   APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
1827   MinDiff = APIntOps::umin(MinDiff, Wrapped);
1828   APInt MinDiffBytes =
1829     MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
1830 
1831   // We can't definitely say whether GEP1 is before or after V2 due to wrapping
1832   // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
1833   // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
1834   // V2Size can fit in the MinDiffBytes gap.
1835   return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
1836          MinDiffBytes.uge(V2Size + GEP.Offset.abs());
1837 }
1838 
1839 //===----------------------------------------------------------------------===//
1840 // BasicAliasAnalysis Pass
1841 //===----------------------------------------------------------------------===//
1842 
1843 AnalysisKey BasicAA::Key;
1844 
1845 BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
1846   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1847   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1848   auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
1849   return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT);
1850 }
1851 
1852 BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
1853   initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
1854 }
1855 
1856 char BasicAAWrapperPass::ID = 0;
1857 
1858 void BasicAAWrapperPass::anchor() {}
1859 
1860 INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
1861                       "Basic Alias Analysis (stateless AA impl)", true, true)
1862 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1863 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1864 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1865 INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
1866                     "Basic Alias Analysis (stateless AA impl)", true, true)
1867 
1868 FunctionPass *llvm::createBasicAAWrapperPass() {
1869   return new BasicAAWrapperPass();
1870 }
1871 
1872 bool BasicAAWrapperPass::runOnFunction(Function &F) {
1873   auto &ACT = getAnalysis<AssumptionCacheTracker>();
1874   auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
1875   auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
1876 
1877   Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
1878                                  TLIWP.getTLI(F), ACT.getAssumptionCache(F),
1879                                  &DTWP.getDomTree()));
1880 
1881   return false;
1882 }
1883 
1884 void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
1885   AU.setPreservesAll();
1886   AU.addRequiredTransitive<AssumptionCacheTracker>();
1887   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
1888   AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
1889 }
1890