xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/LoopAccessAnalysis.cpp (revision c66ec88fed842fbaad62c30d510644ceb7bd2d71)
//===- LoopAccessAnalysis.cpp - Loop Access Analysis Implementation --------==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The implementation of the loop memory dependence analysis that was
// originally developed for the loop vectorizer.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "loop-accesses"

static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
                    cl::desc("Sets the SIMD width. Zero is autoselect."),
                    cl::location(VectorizerParams::VectorizationFactor));
unsigned VectorizerParams::VectorizationFactor;

static cl::opt<unsigned, true>
VectorizationInterleave("force-vector-interleave", cl::Hidden,
                        cl::desc("Sets the vectorization interleave count. "
                                 "Zero is autoselect."),
                        cl::location(
                            VectorizerParams::VectorizationInterleave));
unsigned VectorizerParams::VectorizationInterleave;

static cl::opt<unsigned, true> RuntimeMemoryCheckThreshold(
    "runtime-memory-check-threshold", cl::Hidden,
    cl::desc("When performing memory disambiguation checks at runtime do not "
             "generate more than this number of comparisons (default = 8)."),
    cl::location(VectorizerParams::RuntimeMemoryCheckThreshold), cl::init(8));
unsigned VectorizerParams::RuntimeMemoryCheckThreshold;
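
// Illustrative usage (not part of the original source): with the legacy pass
// manager these knobs can be set on the opt command line, e.g.
//   opt -loop-vectorize -force-vector-width=4 -force-vector-interleave=2 \
//       -runtime-memory-check-threshold=16 -S input.ll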

/// The maximum number of comparisons allowed while trying to merge runtime
/// memory checks.
static cl::opt<unsigned> MemoryCheckMergeThreshold(
    "memory-check-merge-threshold", cl::Hidden,
    cl::desc("Maximum number of comparisons done when trying to merge "
             "runtime memory checks. (default = 100)"),
    cl::init(100));

/// Maximum SIMD width.
const unsigned VectorizerParams::MaxVectorWidth = 64;

/// We collect dependences up to this threshold.
static cl::opt<unsigned>
    MaxDependences("max-dependences", cl::Hidden,
                   cl::desc("Maximum number of dependences collected by "
                            "loop-access analysis (default = 100)"),
                   cl::init(100));

/// This enables versioning on the strides of symbolically striding memory
/// accesses in code like the following.
///   for (i = 0; i < N; ++i)
///     A[i * Stride1] += B[i * Stride2] ...
///
/// Will be roughly translated to
///    if (Stride1 == 1 && Stride2 == 1) {
///      for (i = 0; i < N; i+=4)
///       A[i:i+3] += ...
///    } else
///      ...
static cl::opt<bool> EnableMemAccessVersioning(
    "enable-mem-access-versioning", cl::init(true), cl::Hidden,
    cl::desc("Enable symbolic stride memory access versioning"));

/// Enable store-to-load forwarding conflict detection. This option can
/// be disabled for correctness testing.
static cl::opt<bool> EnableForwardingConflictDetection(
    "store-to-load-forwarding-conflict-detection", cl::Hidden,
    cl::desc("Enable conflict detection in loop-access analysis"),
    cl::init(true));

bool VectorizerParams::isInterleaveForced() {
  return ::VectorizationInterleave.getNumOccurrences() > 0;
}

Value *llvm::stripIntegerCast(Value *V) {
  if (auto *CI = dyn_cast<CastInst>(V))
    if (CI->getOperand(0)->getType()->isIntegerTy())
      return CI->getOperand(0);
  return V;
}

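// Illustrative example (not in the original source): given
//   for (i = 0; i < n; i++)
//     A[i * Stride] = ...;
// with PtrToStride mapping the store's pointer operand to %Stride, the
// function below records the predicate "%Stride == 1" in PSE and returns the
// pointer's SCEV with the stride replaced by one, e.g.
// {%A,+,ElemSize}<%loop> instead of {%A,+,(ElemSize * %Stride)}<%loop>.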
const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                            const ValueToValueMap &PtrToStride,
                                            Value *Ptr, Value *OrigPtr) {
  const SCEV *OrigSCEV = PSE.getSCEV(Ptr);

  // If there is an entry in the map, return the SCEV of the pointer with the
  // symbolic stride replaced by one.
  ValueToValueMap::const_iterator SI =
      PtrToStride.find(OrigPtr ? OrigPtr : Ptr);
  if (SI != PtrToStride.end()) {
    Value *StrideVal = SI->second;

    // Strip casts.
    StrideVal = stripIntegerCast(StrideVal);

    ScalarEvolution *SE = PSE.getSE();
    const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
    const auto *CT =
        static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

    PSE.addPredicate(*SE->getEqualPredicate(U, CT));
    auto *Expr = PSE.getSCEV(Ptr);

    LLVM_DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV
                      << " by: " << *Expr << "\n");
    return Expr;
  }

  // Otherwise, just return the SCEV of the original pointer.
  return OrigSCEV;
}

RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
    unsigned Index, RuntimePointerChecking &RtCheck)
    : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
      Low(RtCheck.Pointers[Index].Start) {
  Members.push_back(Index);
}

/// Calculate the Start and End points of a memory access.
/// Let's assume A is the first access and B is a memory access on the N-th
/// loop iteration. Then B is calculated as:
///   B = A + Step*N .
/// The Step value may be positive or negative.
/// N is the calculated back-edge taken count:
///     N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
/// The Start and End points are calculated in the following way:
/// Start = UMIN(A, B) ; End = UMAX(A, B) + SizeOfElt,
/// where SizeOfElt is the size of a single memory access in bytes.
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
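///
/// Worked example (illustrative, not in the original source): for A = %base,
/// Step = 4 (an i32 access) and N = 255, B = %base + 1020, so
///   Start = UMIN(%base, %base + 1020) = %base
///   End   = UMAX(%base, %base + 1020) + 4 = %base + 1024.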
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
                                    unsigned DepSetId, unsigned ASId,
                                    const ValueToValueMap &Strides,
                                    PredicatedScalarEvolution &PSE) {
  // Get the stride-replaced SCEV.
  const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
  ScalarEvolution *SE = PSE.getSE();

  const SCEV *ScStart;
  const SCEV *ScEnd;

  if (SE->isLoopInvariant(Sc, Lp))
    ScStart = ScEnd = Sc;
  else {
    const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
    assert(AR && "Invalid addrec expression");
    const SCEV *Ex = PSE.getBackedgeTakenCount();

    ScStart = AR->getStart();
    ScEnd = AR->evaluateAtIteration(Ex, *SE);
    const SCEV *Step = AR->getStepRecurrence(*SE);

    // For expressions with a negative step, the upper bound is ScStart and
    // the lower bound is ScEnd.
    if (const auto *CStep = dyn_cast<SCEVConstant>(Step)) {
      if (CStep->getValue()->isNegative())
        std::swap(ScStart, ScEnd);
    } else {
      // Fallback case: the step is not constant, but we can still
      // get the upper and lower bounds of the interval by using min/max
      // expressions.
      ScStart = SE->getUMinExpr(ScStart, ScEnd);
      ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
    }
    // Add the size of the pointed-to element to ScEnd.
    unsigned EltSize =
      Ptr->getType()->getPointerElementType()->getScalarSizeInBits() / 8;
    const SCEV *EltSizeSCEV = SE->getConstant(ScEnd->getType(), EltSize);
    ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
  }

  Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, Sc);
}

SmallVector<RuntimePointerCheck, 4>
RuntimePointerChecking::generateChecks() const {
  SmallVector<RuntimePointerCheck, 4> Checks;

  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    for (unsigned J = I + 1; J < CheckingGroups.size(); ++J) {
      const RuntimeCheckingPtrGroup &CGI = CheckingGroups[I];
      const RuntimeCheckingPtrGroup &CGJ = CheckingGroups[J];

      if (needsChecking(CGI, CGJ))
        Checks.push_back(std::make_pair(&CGI, &CGJ));
    }
  }
  return Checks;
}

void RuntimePointerChecking::generateChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  assert(Checks.empty() && "Checks is not empty");
  groupChecks(DepCands, UseDependencies);
  Checks = generateChecks();
}

bool RuntimePointerChecking::needsChecking(
    const RuntimeCheckingPtrGroup &M, const RuntimeCheckingPtrGroup &N) const {
  for (unsigned I = 0, EI = M.Members.size(); EI != I; ++I)
    for (unsigned J = 0, EJ = N.Members.size(); EJ != J; ++J)
      if (needsChecking(M.Members[I], N.Members[J]))
        return true;
  return false;
}

/// Compare \p I and \p J and return the minimum.
/// Return nullptr in case we couldn't find an answer.
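/// For example (illustrative): for I = (%a + 4) and J = (%a + 8), the
/// difference J - I is the constant 4, which is not negative, so I is
/// returned; if the difference is not a SCEVConstant, nullptr is returned.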
static const SCEV *getMinFromExprs(const SCEV *I, const SCEV *J,
                                   ScalarEvolution *SE) {
  const SCEV *Diff = SE->getMinusSCEV(J, I);
  const SCEVConstant *C = dyn_cast<const SCEVConstant>(Diff);

  if (!C)
    return nullptr;
  if (C->getValue()->isNegative())
    return J;
  return I;
}

bool RuntimeCheckingPtrGroup::addPointer(unsigned Index) {
  const SCEV *Start = RtCheck.Pointers[Index].Start;
  const SCEV *End = RtCheck.Pointers[Index].End;

  // Compare the starts and ends with the known minimum and maximum
  // of this set. We need to know how we compare against the min/max
  // of the set in order to be able to emit memchecks.
  const SCEV *Min0 = getMinFromExprs(Start, Low, RtCheck.SE);
  if (!Min0)
    return false;

  const SCEV *Min1 = getMinFromExprs(End, High, RtCheck.SE);
  if (!Min1)
    return false;

  // Update the low bound expression if we've found a new min value.
  if (Min0 == Start)
    Low = Start;

  // Update the high bound expression if we've found a new max value.
  if (Min1 != End)
    High = End;

  Members.push_back(Index);
  return true;
}

void RuntimePointerChecking::groupChecks(
    MemoryDepChecker::DepCandidates &DepCands, bool UseDependencies) {
  // We build the groups from dependency candidates equivalence classes
  // because:
  //    - We know that pointers in the same equivalence class share
  //      the same underlying object and therefore there is a chance
  //      that we can compare pointers
  //    - We wouldn't be able to merge two pointers for which we need
  //      to emit a memcheck. The classes in DepCands are already
  //      conveniently built such that no two pointers in the same
  //      class need checking against each other.

  // We use the following (greedy) algorithm to construct the groups
  // For every pointer in the equivalence class:
  //   For each existing group:
  //   - if the difference between this pointer and the min/max bounds
  //     of the group is a constant, then make the pointer part of the
  //     group and update the min/max bounds of that group as required.
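  //
  // Illustrative example (not in the original source): if one equivalence
  // class contains %a, %a + 4 and %a + 8, the first pointer seeds a group and
  // the constant differences let addPointer fold the other two into it,
  // widening the group's [Low, High) bounds. A later check against a group
  // built around %b then costs one comparison instead of three.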

  CheckingGroups.clear();

  // If we need to check two pointers to the same underlying object
  // with a non-constant difference, we shouldn't perform any pointer
  // grouping with those pointers. This is because we can easily get
  // into cases where the resulting check would return false, even when
  // the accesses are safe.
  //
  // The following example shows this:
  // for (i = 0; i < 1000; ++i)
  //   a[5000 + i * m] = a[i] + a[i + 9000]
  //
  // Here grouping gives a check of (5000, 5000 + 1000 * m) against
  // (0, 10000) which is always false. However, if m is 1, there is no
  // dependence. Not grouping the checks for a[i] and a[i + 9000] allows
  // us to perform an accurate check in this case.
  //
  // The above case requires that we have an UnknownDependence between
  // accesses to the same underlying object. This cannot happen unless
  // FoundNonConstantDistanceDependence is set, and therefore UseDependencies
  // is also false. In this case we will use the fallback path and create
  // separate checking groups for all pointers.

  // If we don't have the dependency partitions, construct a new
  // checking pointer group for each pointer. This is also required
  // for correctness, because in this case we can have checking between
  // pointers to the same underlying object.
  if (!UseDependencies) {
    for (unsigned I = 0; I < Pointers.size(); ++I)
      CheckingGroups.push_back(RuntimeCheckingPtrGroup(I, *this));
    return;
  }

  unsigned TotalComparisons = 0;

  DenseMap<Value *, unsigned> PositionMap;
  for (unsigned Index = 0; Index < Pointers.size(); ++Index)
    PositionMap[Pointers[Index].PointerValue] = Index;

  // We need to keep track of what pointers we've already seen so we
  // don't process them twice.
  SmallSet<unsigned, 2> Seen;

  // Go through all equivalence classes, get the "pointer check groups"
  // and add them to the overall solution. We use the order in which accesses
  // appear in 'Pointers' to enforce determinism.
  for (unsigned I = 0; I < Pointers.size(); ++I) {
    // We've seen this pointer before, and therefore already processed
    // its equivalence class.
    if (Seen.count(I))
      continue;

    MemoryDepChecker::MemAccessInfo Access(Pointers[I].PointerValue,
                                           Pointers[I].IsWritePtr);

    SmallVector<RuntimeCheckingPtrGroup, 2> Groups;
    auto LeaderI = DepCands.findValue(DepCands.getLeaderValue(Access));

    // Because DepCands is constructed by visiting accesses in the order in
    // which they appear in alias sets (which is deterministic) and the
    // iteration order within an equivalence class member is only dependent on
    // the order in which unions and insertions are performed on the
    // equivalence class, the iteration order is deterministic.
    for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
         MI != ME; ++MI) {
      auto PointerI = PositionMap.find(MI->getPointer());
      assert(PointerI != PositionMap.end() &&
             "pointer in equivalence class not found in PositionMap");
      unsigned Pointer = PointerI->second;
      bool Merged = false;
      // Mark this pointer as seen.
      Seen.insert(Pointer);

      // Go through all the existing sets and see if we can find one
      // which can include this pointer.
      for (RuntimeCheckingPtrGroup &Group : Groups) {
        // Don't perform more than a certain number of comparisons.
        // This should limit the cost of grouping the pointers to something
        // reasonable.  If we do end up hitting this threshold, the algorithm
        // will create separate groups for all remaining pointers.
        if (TotalComparisons > MemoryCheckMergeThreshold)
          break;

        TotalComparisons++;

        if (Group.addPointer(Pointer)) {
          Merged = true;
          break;
        }
      }

      if (!Merged)
        // We couldn't add this pointer to any existing set or the threshold
        // for the number of comparisons has been reached. Create a new group
        // to hold the current pointer.
        Groups.push_back(RuntimeCheckingPtrGroup(Pointer, *this));
    }

    // We've computed the grouped checks for this partition.
    // Save the results and continue with the next one.
    llvm::copy(Groups, std::back_inserter(CheckingGroups));
  }
}

bool RuntimePointerChecking::arePointersInSamePartition(
    const SmallVectorImpl<int> &PtrToPartition, unsigned PtrIdx1,
    unsigned PtrIdx2) {
  return (PtrToPartition[PtrIdx1] != -1 &&
          PtrToPartition[PtrIdx1] == PtrToPartition[PtrIdx2]);
}

bool RuntimePointerChecking::needsChecking(unsigned I, unsigned J) const {
  const PointerInfo &PointerI = Pointers[I];
  const PointerInfo &PointerJ = Pointers[J];

  // No need to check if two readonly pointers intersect.
  if (!PointerI.IsWritePtr && !PointerJ.IsWritePtr)
    return false;

  // Only need to check pointers between two different dependency sets.
  if (PointerI.DependencySetId == PointerJ.DependencySetId)
    return false;

  // Only need to check pointers in the same alias set.
  if (PointerI.AliasSetId != PointerJ.AliasSetId)
    return false;

  return true;
}

void RuntimePointerChecking::printChecks(
    raw_ostream &OS, const SmallVectorImpl<RuntimePointerCheck> &Checks,
    unsigned Depth) const {
  unsigned N = 0;
  for (const auto &Check : Checks) {
    const auto &First = Check.first->Members, &Second = Check.second->Members;

    OS.indent(Depth) << "Check " << N++ << ":\n";

    OS.indent(Depth + 2) << "Comparing group (" << Check.first << "):\n";
    for (unsigned K = 0; K < First.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[First[K]].PointerValue << "\n";

    OS.indent(Depth + 2) << "Against group (" << Check.second << "):\n";
    for (unsigned K = 0; K < Second.size(); ++K)
      OS.indent(Depth + 2) << *Pointers[Second[K]].PointerValue << "\n";
  }
}

void RuntimePointerChecking::print(raw_ostream &OS, unsigned Depth) const {

  OS.indent(Depth) << "Run-time memory checks:\n";
  printChecks(OS, Checks, Depth);

  OS.indent(Depth) << "Grouped accesses:\n";
  for (unsigned I = 0; I < CheckingGroups.size(); ++I) {
    const auto &CG = CheckingGroups[I];

    OS.indent(Depth + 2) << "Group " << &CG << ":\n";
    OS.indent(Depth + 4) << "(Low: " << *CG.Low << " High: " << *CG.High
                         << ")\n";
    for (unsigned J = 0; J < CG.Members.size(); ++J) {
      OS.indent(Depth + 6) << "Member: " << *Pointers[CG.Members[J]].Expr
                           << "\n";
    }
  }
}

namespace {

/// Analyses memory accesses in a loop.
///
/// Checks whether run time pointer checks are needed and builds sets for data
/// dependence checking.
class AccessAnalysis {
public:
  /// Read or write access location.
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;

  AccessAnalysis(const DataLayout &Dl, Loop *TheLoop, AAResults *AA,
                 LoopInfo *LI, MemoryDepChecker::DepCandidates &DA,
                 PredicatedScalarEvolution &PSE)
      : DL(Dl), TheLoop(TheLoop), AST(*AA), LI(LI), DepCands(DA),
        IsRTCheckAnalysisNeeded(false), PSE(PSE) {}

  /// Register a load and whether it is only read from.
  void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::unknown(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, false));
    if (IsReadOnly)
      ReadOnlyPtr.insert(Ptr);
  }

  /// Register a store.
  void addStore(MemoryLocation &Loc) {
    Value *Ptr = const_cast<Value*>(Loc.Ptr);
    AST.add(Ptr, LocationSize::unknown(), Loc.AATags);
    Accesses.insert(MemAccessInfo(Ptr, true));
  }

  /// Check if we can emit a run-time no-alias check for \p Access.
  ///
  /// Returns true if we can emit a run-time no alias check for \p Access.
  /// If we can check this access, this also adds it to a dependence set and
  /// adds a run-time check for it to \p RtCheck. If \p Assume is true,
  /// we will attempt to use additional run-time checks in order to get
  /// the bounds of the pointer.
  bool createCheckForAccess(RuntimePointerChecking &RtCheck,
                            MemAccessInfo Access,
                            const ValueToValueMap &Strides,
                            DenseMap<Value *, unsigned> &DepSetId,
                            Loop *TheLoop, unsigned &RunningDepId,
                            unsigned ASId, bool ShouldCheckStride,
                            bool Assume);

  /// Check whether we can check the pointers at runtime for
  /// non-intersection.
  ///
  /// Returns true if we need no check or if we do and we can generate them
  /// (i.e. the pointers have computable bounds).
  bool canCheckPtrAtRT(RuntimePointerChecking &RtCheck, ScalarEvolution *SE,
                       Loop *TheLoop, const ValueToValueMap &Strides,
                       bool ShouldCheckWrap = false);

  /// Goes over all memory accesses, checks whether a RT check is needed
  /// and builds sets of dependent accesses.
  void buildDependenceSets() {
    processMemAccesses();
  }

  /// Initial processing of memory accesses determined that we need to
  /// perform dependency checking.
  ///
  /// Note that this can later be cleared if we retry memcheck analysis without
  /// dependency checking (i.e. FoundNonConstantDistanceDependence).
  bool isDependencyCheckNeeded() { return !CheckDeps.empty(); }

  /// We decided that no dependence analysis would be used.  Reset the state.
  void resetDepChecks(MemoryDepChecker &DepChecker) {
    CheckDeps.clear();
    DepChecker.clearDependences();
  }

  MemAccessInfoList &getDependenciesToCheck() { return CheckDeps; }

private:
  typedef SetVector<MemAccessInfo> PtrAccessSet;

  /// Go over all memory accesses and check whether runtime pointer checks
  /// are needed and build sets of dependency check candidates.
  void processMemAccesses();

  /// Set of all accesses.
  PtrAccessSet Accesses;

  const DataLayout &DL;

  /// The loop being checked.
  const Loop *TheLoop;

  /// List of accesses that need a further dependence check.
  MemAccessInfoList CheckDeps;

  /// Set of pointers that are read only.
  SmallPtrSet<Value*, 16> ReadOnlyPtr;

  /// An alias set tracker to partition the access set by underlying object
  /// and intrinsic property (such as TBAA metadata).
  AliasSetTracker AST;

  LoopInfo *LI;

  /// Sets of potentially dependent accesses - members of one set share an
  /// underlying pointer. The set "CheckDeps" identifies which sets really need
  /// a dependence check.
  MemoryDepChecker::DepCandidates &DepCands;

  /// Initial processing of memory accesses determined that we may need
  /// to add memchecks.  Perform the analysis to determine the necessary checks.
  ///
  /// Note that this is different from isDependencyCheckNeeded.  When we retry
  /// memcheck analysis without dependency checking
  /// (i.e. FoundNonConstantDistanceDependence), isDependencyCheckNeeded is
  /// cleared while this remains set if we have potentially dependent accesses.
  bool IsRTCheckAnalysisNeeded;

  /// The SCEV predicate containing all the SCEV-related assumptions.
  PredicatedScalarEvolution &PSE;
};

} // end anonymous namespace

/// Check whether a pointer can participate in a runtime bounds check.
/// If \p Assume, try harder to prove that we can compute the bounds of \p Ptr
/// by adding run-time checks (overflow checks) if necessary.
static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
                                const ValueToValueMap &Strides, Value *Ptr,
                                Loop *L, bool Assume) {
  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);

  // The bounds for a loop-invariant pointer are trivial.
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);

  if (!AR && Assume)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR)
    return false;

  return AR->isAffine();
}

/// Check whether a pointer address cannot wrap.
static bool isNoWrap(PredicatedScalarEvolution &PSE,
                     const ValueToValueMap &Strides, Value *Ptr, Loop *L) {
  const SCEV *PtrScev = PSE.getSCEV(Ptr);
  if (PSE.getSE()->isLoopInvariant(PtrScev, L))
    return true;

  int64_t Stride = getPtrStride(PSE, Ptr, L, Strides);
  if (Stride == 1 || PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW))
    return true;

  return false;
}

bool AccessAnalysis::createCheckForAccess(RuntimePointerChecking &RtCheck,
                                          MemAccessInfo Access,
                                          const ValueToValueMap &StridesMap,
                                          DenseMap<Value *, unsigned> &DepSetId,
                                          Loop *TheLoop, unsigned &RunningDepId,
                                          unsigned ASId, bool ShouldCheckWrap,
                                          bool Assume) {
  Value *Ptr = Access.getPointer();

  if (!hasComputableBounds(PSE, StridesMap, Ptr, TheLoop, Assume))
    return false;

  // When we run after a failing dependency check we have to make sure
  // we don't have wrapping pointers.
  if (ShouldCheckWrap && !isNoWrap(PSE, StridesMap, Ptr, TheLoop)) {
    auto *Expr = PSE.getSCEV(Ptr);
    if (!Assume || !isa<SCEVAddRecExpr>(Expr))
      return false;
    PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
  }

  // The id of the dependence set.
  unsigned DepId;

  if (isDependencyCheckNeeded()) {
    Value *Leader = DepCands.getLeaderValue(Access).getPointer();
    unsigned &LeaderId = DepSetId[Leader];
    if (!LeaderId)
      LeaderId = RunningDepId++;
    DepId = LeaderId;
  } else
    // Each access has its own dependence set.
    DepId = RunningDepId++;

  bool IsWrite = Access.getInt();
  RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
  LLVM_DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');

  return true;
}

bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
                                     ScalarEvolution *SE, Loop *TheLoop,
                                     const ValueToValueMap &StridesMap,
                                     bool ShouldCheckWrap) {
  // Find pointers with computable bounds. We are going to use this information
  // to place a runtime bound check.
  bool CanDoRT = true;

  bool MayNeedRTCheck = false;
  if (!IsRTCheckAnalysisNeeded) return true;

  bool IsDepCheckNeeded = isDependencyCheckNeeded();

  // We assign a consecutive ID to accesses from different alias sets.
  // Accesses between different groups don't need to be checked.
  unsigned ASId = 0;
  for (auto &AS : AST) {
    int NumReadPtrChecks = 0;
    int NumWritePtrChecks = 0;
    bool CanDoAliasSetRT = true;
    ++ASId;

    // We assign consecutive IDs to accesses from different dependence sets.
    // Accesses within the same set don't need a runtime check.
    unsigned RunningDepId = 1;
    DenseMap<Value *, unsigned> DepSetId;

    SmallVector<MemAccessInfo, 4> Retries;

    // First, count how many write and read accesses are in the alias set. Also
    // collect MemAccessInfos for later.
    SmallVector<MemAccessInfo, 4> AccessInfos;
    for (auto A : AS) {
      Value *Ptr = A.getValue();
      bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));

      if (IsWrite)
        ++NumWritePtrChecks;
      else
        ++NumReadPtrChecks;
      AccessInfos.emplace_back(Ptr, IsWrite);
    }

    // We do not need runtime checks for this alias set if there are no writes,
    // or if there is a single write and no reads.
    if (NumWritePtrChecks == 0 ||
        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
      assert((AS.size() <= 1 ||
              all_of(AS,
                     [this](auto AC) {
                       MemAccessInfo AccessWrite(AC.getValue(), true);
                       return DepCands.findValue(AccessWrite) == DepCands.end();
                     })) &&
             "Can only skip updating CanDoRT below, if all entries in AS "
             "are reads or there is at most 1 entry");
      continue;
    }

    for (auto &Access : AccessInfos) {
      if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
                                RunningDepId, ASId, ShouldCheckWrap, false)) {
        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
                          << *Access.getPointer() << '\n');
        Retries.push_back(Access);
        CanDoAliasSetRT = false;
      }
    }

    // Note that this function computes CanDoRT and MayNeedRTCheck
    // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
    // we have a pointer for which we couldn't find the bounds but we don't
    // actually need to emit any checks so it does not matter.
    //
    // We need runtime checks for this alias set, if there are at least 2
    // dependence sets (in which case RunningDepId > 2) or if we need to re-try
    // any bound checks (because in that case the number of dependence sets is
    // incomplete).
    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

    // We need to perform run-time alias checks, but some pointers had bounds
    // that couldn't be checked.
    if (NeedsAliasSetRTCheck && !CanDoAliasSetRT) {
      // Reset the CanDoAliasSetRT flag and retry all accesses that have failed.
      // We know that we need these checks, so we can now be more aggressive
      // and add further checks if required (overflow checks).
      CanDoAliasSetRT = true;
      for (auto Access : Retries)
        if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId,
                                  TheLoop, RunningDepId, ASId,
                                  ShouldCheckWrap, /*Assume=*/true)) {
          CanDoAliasSetRT = false;
          break;
        }
    }

    CanDoRT &= CanDoAliasSetRT;
    MayNeedRTCheck |= NeedsAliasSetRTCheck;
    ++ASId;
  }

  // If the pointers that we would use for the bounds comparison have different
  // address spaces, assume the values aren't directly comparable, so we can't
  // use them for the runtime check. We also have to assume they could
  // overlap. In the future there should be metadata for whether address spaces
  // are disjoint.
  unsigned NumPointers = RtCheck.Pointers.size();
  for (unsigned i = 0; i < NumPointers; ++i) {
    for (unsigned j = i + 1; j < NumPointers; ++j) {
      // Only need to check pointers between two different dependency sets.
      if (RtCheck.Pointers[i].DependencySetId ==
          RtCheck.Pointers[j].DependencySetId)
        continue;
      // Only need to check pointers in the same alias set.
      if (RtCheck.Pointers[i].AliasSetId != RtCheck.Pointers[j].AliasSetId)
        continue;

      Value *PtrI = RtCheck.Pointers[i].PointerValue;
      Value *PtrJ = RtCheck.Pointers[j].PointerValue;

      unsigned ASi = PtrI->getType()->getPointerAddressSpace();
      unsigned ASj = PtrJ->getType()->getPointerAddressSpace();
      if (ASi != ASj) {
        LLVM_DEBUG(
            dbgs() << "LAA: Runtime check would require comparison between"
                      " different address spaces\n");
        return false;
      }
    }
  }

  if (MayNeedRTCheck && CanDoRT)
    RtCheck.generateChecks(DepCands, IsDepCheckNeeded);

  LLVM_DEBUG(dbgs() << "LAA: We need to do " << RtCheck.getNumberOfChecks()
                    << " pointer comparisons.\n");

  // If we can do run-time checks, but there are no checks, no runtime checks
  // are needed. This can happen when all pointers point to the same underlying
  // object for example.
  RtCheck.Need = CanDoRT ? RtCheck.getNumberOfChecks() != 0 : MayNeedRTCheck;

  bool CanDoRTIfNeeded = !RtCheck.Need || CanDoRT;
  if (!CanDoRTIfNeeded)
    RtCheck.reset();
  return CanDoRTIfNeeded;
}

void AccessAnalysis::processMemAccesses() {
  // We process the set twice: first we process read-write pointers, last we
  // process read-only pointers. This allows us to skip dependence tests for
  // read-only pointers.
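  //
  // Illustrative example (not in the original source): in
  //   for (i = 0; i < n; i++) a[i] = a[i] + b[i];
  // b is only read, so after all writes have been seen b only needs to be
  // checked against the write to a, never against other reads.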

  LLVM_DEBUG(dbgs() << "LAA: Processing memory accesses...\n");
  LLVM_DEBUG(dbgs() << "  AST: "; AST.dump());
  LLVM_DEBUG(dbgs() << "LAA:   Accesses(" << Accesses.size() << "):\n");
  LLVM_DEBUG({
    for (auto A : Accesses)
      dbgs() << "\t" << *A.getPointer() << " (" <<
                (A.getInt() ? "write" : (ReadOnlyPtr.count(A.getPointer()) ?
                                         "read-only" : "read")) << ")\n";
  });

  // The AliasSetTracker has nicely partitioned our pointers by metadata
  // compatibility and potential for underlying-object overlap. As a result, we
  // only need to check for potential pointer dependencies within each alias
  // set.
  for (auto &AS : AST) {
    // Note that both the alias-set tracker and the alias sets themselves use
    // linked lists internally and so the iteration order here is deterministic
    // (matching the original instruction order within each set).

    bool SetHasWrite = false;

    // Map of pointers to last access encountered.
    typedef DenseMap<const Value*, MemAccessInfo> UnderlyingObjToAccessMap;
    UnderlyingObjToAccessMap ObjToLastAccess;

    // Set of accesses to check after all writes have been processed.
    PtrAccessSet DeferredAccesses;

    // Iterate over each alias set twice, once to process read/write pointers,
    // and then to process read-only pointers.
    for (int SetIteration = 0; SetIteration < 2; ++SetIteration) {
      bool UseDeferred = SetIteration > 0;
      PtrAccessSet &S = UseDeferred ? DeferredAccesses : Accesses;

      for (auto AV : AS) {
        Value *Ptr = AV.getValue();

        // For a single memory access in AliasSetTracker, Accesses may contain
        // both read and write, and they both need to be handled for CheckDeps.
        for (auto AC : S) {
          if (AC.getPointer() != Ptr)
            continue;

          bool IsWrite = AC.getInt();

          // If we're using the deferred access set, then it contains only
          // reads.
          bool IsReadOnlyPtr = ReadOnlyPtr.count(Ptr) && !IsWrite;
          if (UseDeferred && !IsReadOnlyPtr)
            continue;
          // Otherwise, the pointer must be in the PtrAccessSet, either as a
          // read or a write.
          assert(((IsReadOnlyPtr && UseDeferred) || IsWrite ||
                  S.count(MemAccessInfo(Ptr, false))) &&
                 "Alias-set pointer not in the access set?");

          MemAccessInfo Access(Ptr, IsWrite);
          DepCands.insert(Access);

          // Memorize read-only pointers for later processing and skip them in
          // the first round (they need to be checked after we have seen all
          // write pointers). Note: we also mark pointers that are not
          // consecutive as "read-only" pointers (so that we check
          // "a[b[i]] +="). Hence, we need the second check for "!IsWrite".
          if (!UseDeferred && IsReadOnlyPtr) {
            DeferredAccesses.insert(Access);
            continue;
          }

          // If this is a write, check other reads and writes for conflicts. If
          // this is a read, only check other writes for conflicts (but only if
          // there is no other write to the ptr - this is an optimization to
          // catch "a[i] = a[i] + " without having to do a dependence check).
          if ((IsWrite || IsReadOnlyPtr) && SetHasWrite) {
            CheckDeps.push_back(Access);
            IsRTCheckAnalysisNeeded = true;
          }

          if (IsWrite)
            SetHasWrite = true;

          // Create sets of pointers connected by a shared alias set and
          // underlying object.
          typedef SmallVector<const Value *, 16> ValueVector;
          ValueVector TempObjects;

          GetUnderlyingObjects(Ptr, TempObjects, DL, LI);
          LLVM_DEBUG(dbgs()
                     << "Underlying objects for pointer " << *Ptr << "\n");
          for (const Value *UnderlyingObj : TempObjects) {
            // nullptr never aliases; don't join sets for pointers that have
            // "null" in their UnderlyingObjects list.
            if (isa<ConstantPointerNull>(UnderlyingObj) &&
                !NullPointerIsDefined(
                    TheLoop->getHeader()->getParent(),
                    UnderlyingObj->getType()->getPointerAddressSpace()))
              continue;

            UnderlyingObjToAccessMap::iterator Prev =
                ObjToLastAccess.find(UnderlyingObj);
            if (Prev != ObjToLastAccess.end())
              DepCands.unionSets(Access, Prev->second);

            ObjToLastAccess[UnderlyingObj] = Access;
            LLVM_DEBUG(dbgs() << "  " << *UnderlyingObj << "\n");
          }
        }
      }
    }
  }
}

static bool isInBoundsGep(Value *Ptr) {
  if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr))
    return GEP->isInBounds();
  return false;
}

/// Return true if an AddRec pointer \p Ptr is unsigned non-wrapping,
/// i.e. monotonically increasing/decreasing.
static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
                           PredicatedScalarEvolution &PSE, const Loop *L) {
  // FIXME: This should probably only return true for NUW.
  if (AR->getNoWrapFlags(SCEV::NoWrapMask))
    return true;

  // Scalar evolution does not propagate the non-wrapping flags to values that
  // are derived from a non-wrapping induction variable because non-wrapping
  // could be flow-sensitive.
  //
  // Look through the potentially overflowing instruction to try to prove
  // non-wrapping for the *specific* value of Ptr.

  // The arithmetic implied by an inbounds GEP can't overflow.
  auto *GEP = dyn_cast<GetElementPtrInst>(Ptr);
  if (!GEP || !GEP->isInBounds())
    return false;

  // Make sure there is only one non-const index and analyze that.
  Value *NonConstIndex = nullptr;
  for (Value *Index : make_range(GEP->idx_begin(), GEP->idx_end()))
    if (!isa<ConstantInt>(Index)) {
      if (NonConstIndex)
        return false;
      NonConstIndex = Index;
    }
  if (!NonConstIndex)
    // The recurrence is on the pointer, ignore for now.
    return false;

  // The index in GEP is signed.  It is non-wrapping if it's derived from a NSW
  // AddRec using a NSW operation.
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(NonConstIndex))
    if (OBO->hasNoSignedWrap() &&
        // Assume the other operand is constant so that the AddRec can be
        // easily found.
        isa<ConstantInt>(OBO->getOperand(1))) {
      auto *OpScev = PSE.getSCEV(OBO->getOperand(0));

      if (auto *OpAR = dyn_cast<SCEVAddRecExpr>(OpScev))
        return OpAR->getLoop() == L && OpAR->getNoWrapFlags(SCEV::FlagNSW);
    }

  return false;
}

/// Check whether the access through \p Ptr has a constant stride.
int64_t llvm::getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr,
                           const Loop *Lp, const ValueToValueMap &StridesMap,
                           bool Assume, bool ShouldCheckWrap) {
  Type *Ty = Ptr->getType();
  assert(Ty->isPointerTy() && "Unexpected non-ptr");

  // Make sure that the pointer does not point to aggregate types.
  auto *PtrTy = cast<PointerType>(Ty);
  if (PtrTy->getElementType()->isAggregateType()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a pointer to a scalar type "
                      << *Ptr << "\n");
    return 0;
  }

  const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);

  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
  if (Assume && !AR)
    AR = PSE.getAsAddRec(Ptr);

  if (!AR) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not an AddRecExpr pointer " << *Ptr
                      << " SCEV: " << *PtrScev << "\n");
    return 0;
  }

  // The access function must stride over the innermost loop.
  if (Lp != AR->getLoop()) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not striding over innermost loop "
                      << *Ptr << " SCEV: " << *AR << "\n");
    return 0;
  }

  // The address calculation must not wrap. Otherwise, a dependence could be
  // inverted.
  // An inbounds getelementptr that is an AddRec with a unit stride
  // cannot wrap per definition. The unit stride requirement is checked later.
  // A getelementptr without an inbounds attribute and with unit stride would
  // have to access the pointer value "0" which is undefined behavior in
  // address space 0, therefore we can also vectorize this case.
  bool IsInBoundsGEP = isInBoundsGep(Ptr);
  bool IsNoWrapAddRec = !ShouldCheckWrap ||
    PSE.hasNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW) ||
    isNoWrapAddRec(Ptr, AR, PSE, Lp);
  if (!IsNoWrapAddRec && !IsInBoundsGEP &&
      NullPointerIsDefined(Lp->getHeader()->getParent(),
                           PtrTy->getAddressSpace())) {
    if (Assume) {
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
      IsNoWrapAddRec = true;
      LLVM_DEBUG(dbgs() << "LAA: Pointer may wrap in the address space:\n"
                        << "LAA:   Pointer: " << *Ptr << "\n"
                        << "LAA:   SCEV: " << *AR << "\n"
                        << "LAA:   Added an overflow assumption\n");
    } else {
      LLVM_DEBUG(
          dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
                 << *Ptr << " SCEV: " << *AR << "\n");
      return 0;
    }
  }

  // Check that the step is constant.
  const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());

  // Calculate the pointer stride and check if it is constant.
  const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
  if (!C) {
    LLVM_DEBUG(dbgs() << "LAA: Bad stride - Not a constant strided " << *Ptr
                      << " SCEV: " << *AR << "\n");
    return 0;
  }

  auto &DL = Lp->getHeader()->getModule()->getDataLayout();
  int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
  const APInt &APStepVal = C->getAPInt();

  // Huge step value - give up.
  if (APStepVal.getBitWidth() > 64)
    return 0;

  int64_t StepVal = APStepVal.getSExtValue();

  // Strided access.
  int64_t Stride = StepVal / Size;
  int64_t Rem = StepVal % Size;
  if (Rem)
    return 0;
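
  // Illustrative example (not in the original source): for an i32 element
  // (Size = 4), StepVal = 8 yields Stride = 2, while StepVal = 6 leaves
  // Rem = 2 and the access is rejected above as non-strided.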

  // If the SCEV could wrap but we have an inbounds gep with a unit stride we
  // know we can't "wrap around the address space". In case of address space
  // zero we know that this won't happen without triggering undefined behavior.
  if (!IsNoWrapAddRec && Stride != 1 && Stride != -1 &&
      (IsInBoundsGEP || !NullPointerIsDefined(Lp->getHeader()->getParent(),
                                              PtrTy->getAddressSpace()))) {
    if (Assume) {
      // We can avoid this case by adding a run-time check.
      LLVM_DEBUG(dbgs() << "LAA: Non unit strided pointer which is not either "
                        << "inbounds or in address space 0 may wrap:\n"
                        << "LAA:   Pointer: " << *Ptr << "\n"
                        << "LAA:   SCEV: " << *AR << "\n"
                        << "LAA:   Added an overflow assumption\n");
      PSE.setNoOverflow(Ptr, SCEVWrapPredicate::IncrementNUSW);
    } else
      return 0;
  }

  return Stride;
}

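/// Illustrative sketch of the contract (this comment is not in the original
/// source): given pointers with constant offsets 0, 8, 4 from VL[0],
/// sortPtrAccesses fills SortedIndices with {0, 2, 1}; if the input is
/// already sorted, SortedIndices is left empty. It returns false when any
/// pointer has a different address space or underlying object than VL[0],
/// a non-constant offset from it, or a duplicate offset.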
bool llvm::sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
                           ScalarEvolution &SE,
                           SmallVectorImpl<unsigned> &SortedIndices) {
  assert(llvm::all_of(
             VL, [](const Value *V) { return V->getType()->isPointerTy(); }) &&
         "Expected list of pointer operands.");
  SmallVector<std::pair<int64_t, Value *>, 4> OffValPairs;
  OffValPairs.reserve(VL.size());

  // Walk over the pointers, and map each of them to an offset relative to
  // the first pointer in the array.
  Value *Ptr0 = VL[0];
  const SCEV *Scev0 = SE.getSCEV(Ptr0);
  Value *Obj0 = GetUnderlyingObject(Ptr0, DL);

  llvm::SmallSet<int64_t, 4> Offsets;
  for (auto *Ptr : VL) {
    // TODO: Outline this code as a special, more time consuming, version of
    // the computeConstantDifference() function.
    if (Ptr->getType()->getPointerAddressSpace() !=
        Ptr0->getType()->getPointerAddressSpace())
      return false;
    // If a pointer refers to a different underlying object, bail - the
    // pointers are by definition incomparable.
    Value *CurrObj = GetUnderlyingObject(Ptr, DL);
    if (CurrObj != Obj0)
      return false;

    const SCEV *Scev = SE.getSCEV(Ptr);
    const auto *Diff = dyn_cast<SCEVConstant>(SE.getMinusSCEV(Scev, Scev0));
    // The pointers may not have a constant offset from each other, or SCEV
    // may just not be smart enough to figure out they do. Regardless,
    // there's nothing we can do.
    if (!Diff)
      return false;

    // Bail out if a pointer with the same offset has already been seen.
    int64_t Offset = Diff->getAPInt().getSExtValue();
    if (!Offsets.insert(Offset).second)
      return false;
    OffValPairs.emplace_back(Offset, Ptr);
  }
  SortedIndices.clear();
  SortedIndices.resize(VL.size());
  std::iota(SortedIndices.begin(), SortedIndices.end(), 0);

  // Sort the memory accesses by their offset from the first pointer.
  llvm::stable_sort(SortedIndices, [&](unsigned Left, unsigned Right) {
    return OffValPairs[Left].first < OffValPairs[Right].first;
  });

  // Check if the order is consecutive already.
  if (llvm::all_of(SortedIndices, [&SortedIndices](const unsigned I) {
        return I == SortedIndices[I];
      }))
    SortedIndices.clear();

  return true;
}

/// Take the address space operand from the Load/Store instruction.
/// Returns -1 if this is not a valid Load/Store instruction.
static unsigned getAddressSpaceOperand(Value *I) {
  if (LoadInst *L = dyn_cast<LoadInst>(I))
    return L->getPointerAddressSpace();
  if (StoreInst *S = dyn_cast<StoreInst>(I))
    return S->getPointerAddressSpace();
  return -1;
}

/// Returns true if the memory operations \p A and \p B are consecutive.
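/// For example (illustrative): two i32 loads from %p and from %p + 4 bytes
/// (a "getelementptr inbounds i32, i32* %p, i64 1") are consecutive, since
/// the offset delta equals the 4-byte store size of the element type.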
bool llvm::isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                               ScalarEvolution &SE, bool CheckType) {
  Value *PtrA = getLoadStorePointerOperand(A);
  Value *PtrB = getLoadStorePointerOperand(B);
  unsigned ASA = getAddressSpaceOperand(A);
  unsigned ASB = getAddressSpaceOperand(B);

  // Check that the address spaces match and that the pointers are valid.
  if (!PtrA || !PtrB || (ASA != ASB))
    return false;

  // Make sure that A and B are different pointers.
  if (PtrA == PtrB)
    return false;

  // Make sure that A and B have the same type if required.
  if (CheckType && PtrA->getType() != PtrB->getType())
    return false;

  unsigned IdxWidth = DL.getIndexSizeInBits(ASA);
  Type *Ty = cast<PointerType>(PtrA->getType())->getElementType();

  APInt OffsetA(IdxWidth, 0), OffsetB(IdxWidth, 0);
  PtrA = PtrA->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetA);
  PtrB = PtrB->stripAndAccumulateInBoundsConstantOffsets(DL, OffsetB);

  // Retrieve the address space again as pointer stripping now tracks through
  // `addrspacecast`.
  ASA = cast<PointerType>(PtrA->getType())->getAddressSpace();
  ASB = cast<PointerType>(PtrB->getType())->getAddressSpace();
  // Check that the address spaces match and that the pointers are valid.
  if (ASA != ASB)
    return false;

  IdxWidth = DL.getIndexSizeInBits(ASA);
  OffsetA = OffsetA.sextOrTrunc(IdxWidth);
  OffsetB = OffsetB.sextOrTrunc(IdxWidth);

  APInt Size(IdxWidth, DL.getTypeStoreSize(Ty));

  //  OffsetDelta = OffsetB - OffsetA;
  const SCEV *OffsetSCEVA = SE.getConstant(OffsetA);
  const SCEV *OffsetSCEVB = SE.getConstant(OffsetB);
  const SCEV *OffsetDeltaSCEV = SE.getMinusSCEV(OffsetSCEVB, OffsetSCEVA);
  const APInt &OffsetDelta = cast<SCEVConstant>(OffsetDeltaSCEV)->getAPInt();

  // Check if they are based on the same pointer. That makes the offsets
  // sufficient.
  if (PtrA == PtrB)
    return OffsetDelta == Size;

  // Compute the base pointer delta needed to make the final delta equal to
  // the size.
  // BaseDelta = Size - OffsetDelta;
  const SCEV *SizeSCEV = SE.getConstant(Size);
  const SCEV *BaseDelta = SE.getMinusSCEV(SizeSCEV, OffsetDeltaSCEV);

  // Otherwise compute the distance with SCEV between the base pointers.
  const SCEV *PtrSCEVA = SE.getSCEV(PtrA);
  const SCEV *PtrSCEVB = SE.getSCEV(PtrB);
  const SCEV *X = SE.getAddExpr(PtrSCEVA, BaseDelta);
  return X == PtrSCEVB;
}

MemoryDepChecker::VectorizationSafetyStatus
MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
  switch (Type) {
  case NoDep:
  case Forward:
  case BackwardVectorizable:
    return VectorizationSafetyStatus::Safe;

  case Unknown:
    return VectorizationSafetyStatus::PossiblySafeWithRtChecks;
  case ForwardButPreventsForwarding:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return VectorizationSafetyStatus::Unsafe;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isBackward() const {
  switch (Type) {
  case NoDep:
  case Forward:
  case ForwardButPreventsForwarding:
  case Unknown:
    return false;

  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return true;
  }
  llvm_unreachable("unexpected DepType!");
}

bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
  return isBackward() || Type == Unknown;
}

bool MemoryDepChecker::Dependence::isForward() const {
  switch (Type) {
  case Forward:
  case ForwardButPreventsForwarding:
    return true;

  case NoDep:
  case Unknown:
  case BackwardVectorizable:
  case Backward:
  case BackwardVectorizableButPreventsForwarding:
    return false;
  }
  llvm_unreachable("unexpected DepType!");
}

1320 
1321 bool MemoryDepChecker::couldPreventStoreLoadForward(uint64_t Distance,
1322                                                     uint64_t TypeByteSize) {
1323   // If loads occur at a distance that is not a multiple of a feasible vector
1324   // factor store-load forwarding does not take place.
1325   // Positive dependences might cause troubles because vectorizing them might
1326   // prevent store-load forwarding making vectorized code run a lot slower.
1327   //   a[i] = a[i-3] ^ a[i-8];
1328   //   The stores to a[i:i+1] don't align with the stores to a[i-3:i-2] and
1329   //   hence on your typical architecture store-load forwarding does not take
1330   //   place. Vectorizing in such cases does not make sense.
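  //
  // Illustrative numbers (not in the original source): with TypeByteSize = 4
  // and Distance = 12 (the a[i-3] term above), the first candidate VF = 8
  // below already has Distance % VF = 4 and Distance / VF = 1 < 32, so
  // MaxVFWithoutSLForwardIssues drops to 4 < 2 * TypeByteSize and a
  // forwarding conflict is reported.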
1331   // Store-load forwarding distance.
1332 
1333   // After this many iterations store-to-load forwarding conflicts should not
1334   // cause any slowdowns.
1335   const uint64_t NumItersForStoreLoadThroughMemory = 8 * TypeByteSize;
1336   // Maximum vector factor.
1337   uint64_t MaxVFWithoutSLForwardIssues = std::min(
1338       VectorizerParams::MaxVectorWidth * TypeByteSize, MaxSafeDepDistBytes);
1339 
1340   // Compute the smallest VF at which the store and load would be misaligned.
1341   for (uint64_t VF = 2 * TypeByteSize; VF <= MaxVFWithoutSLForwardIssues;
1342        VF *= 2) {
1343     // If the number of vector iterations between the store and the load is
1344     // small, we could incur conflicts.
1345     if (Distance % VF && Distance / VF < NumItersForStoreLoadThroughMemory) {
1346       MaxVFWithoutSLForwardIssues = (VF >>= 1);
1347       break;
1348     }
1349   }
1350 
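  // A worked trace with hypothetical values: for TypeByteSize = 4 and
  // Distance = 12 (and MaxSafeDepDistBytes assumed large), the threshold
  // NumItersForStoreLoadThroughMemory is 32 and the loop starts at VF = 8.
  // Since 12 % 8 != 0 and 12 / 8 = 1 < 32, MaxVFWithoutSLForwardIssues drops
  // to 4, which is below 2 * TypeByteSize, so the check below reports a
  // possible store-load forwarding conflict.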
1351   if (MaxVFWithoutSLForwardIssues < 2 * TypeByteSize) {
1352     LLVM_DEBUG(
1353         dbgs() << "LAA: Distance " << Distance
1354                << " that could cause a store-load forwarding conflict\n");
1355     return true;
1356   }
1357 
1358   if (MaxVFWithoutSLForwardIssues < MaxSafeDepDistBytes &&
1359       MaxVFWithoutSLForwardIssues !=
1360           VectorizerParams::MaxVectorWidth * TypeByteSize)
1361     MaxSafeDepDistBytes = MaxVFWithoutSLForwardIssues;
1362   return false;
1363 }
1364 
1365 void MemoryDepChecker::mergeInStatus(VectorizationSafetyStatus S) {
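  // VectorizationSafetyStatus is assumed to be ordered from safest to least
  // safe, so keeping the larger value retains the most pessimistic status
  // seen so far.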
1366   if (Status < S)
1367     Status = S;
1368 }
1369 
1370 /// Given a non-constant (unknown) dependence distance \p Dist between two
1371 /// memory accesses that have the same stride (whose absolute value is given
1372 /// in \p Stride) and the same type size \p TypeByteSize, in a loop whose
1373 /// backedge-taken count is \p BackedgeTakenCount, check whether it is
1374 /// possible to prove statically that the dependence distance is larger
1375 /// than the range that the accesses will travel through the execution of
1376 /// the loop. If so, return true; false otherwise. This is useful, for
1377 /// example, in loops such as the following (PR31098):
1378 ///     for (i = 0; i < D; ++i) {
1379 ///                = out[i];
1380 ///       out[i+D] =
1381 ///     }
1382 static bool isSafeDependenceDistance(const DataLayout &DL, ScalarEvolution &SE,
1383                                      const SCEV &BackedgeTakenCount,
1384                                      const SCEV &Dist, uint64_t Stride,
1385                                      uint64_t TypeByteSize) {
1386 
1387   // If we can prove that
1388   //      (**) |Dist| > BackedgeTakenCount * Step
1389   // where Step is the absolute stride of the memory accesses in bytes,
1390   // then there is no dependence.
1391   //
1392   // Rationale:
1393   // We basically want to check if the absolute distance (|Dist/Step|)
1394   // is >= the loop iteration count (or > BackedgeTakenCount).
1395   // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
1396   // Section 4.2.1); Note, that for vectorization it is sufficient to prove
1397   // that the dependence distance is >= VF; This is checked elsewhere.
1398   // But in some cases we can prune unknown dependence distances early, and
1399   // even before selecting the VF, and without a runtime test, by comparing
1400   // the distance against the loop iteration count. Since the vectorized code
1401   // will be executed only if LoopCount >= VF, proving distance >= LoopCount
1402   // also guarantees that distance >= VF.
1403   //
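  // Worked example for the loop above (hypothetical values): with int
  // accesses (TypeByteSize = 4), Stride = 1, Dist = 4 * D and
  // BackedgeTakenCount = D - 1, we get Product = 4 * (D - 1), so
  // Dist - Product = 4 > 0 and the first isKnownPositive check below proves
  // independence without a runtime test.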
1404   const uint64_t ByteStride = Stride * TypeByteSize;
1405   const SCEV *Step = SE.getConstant(BackedgeTakenCount.getType(), ByteStride);
1406   const SCEV *Product = SE.getMulExpr(&BackedgeTakenCount, Step);
1407 
1408   const SCEV *CastedDist = &Dist;
1409   const SCEV *CastedProduct = Product;
1410   uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
1411   uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());
1412 
1413   // The dependence distance can be positive/negative, so we sign extend Dist;
1414   // The multiplication of the absolute stride in bytes and the
1415   // backedgeTakenCount is non-negative, so we zero extend Product.
1416   if (DistTypeSize > ProductTypeSize)
1417     CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
1418   else
1419     CastedDist = SE.getNoopOrSignExtend(&Dist, Product->getType());
1420 
1421   // Is  Dist - (BackedgeTakenCount * Step) > 0 ?
1422   // (If so, then we have proven (**) because |Dist| >= Dist)
1423   const SCEV *Minus = SE.getMinusSCEV(CastedDist, CastedProduct);
1424   if (SE.isKnownPositive(Minus))
1425     return true;
1426 
1427   // Second try: Is  -Dist - (BackedgeTakenCount * Step) > 0 ?
1428   // (If so, then we have proven (**) because |Dist| >= -1*Dist)
1429   const SCEV *NegDist = SE.getNegativeSCEV(CastedDist);
1430   Minus = SE.getMinusSCEV(NegDist, CastedProduct);
1431   if (SE.isKnownPositive(Minus))
1432     return true;
1433 
1434   return false;
1435 }
1436 
1437 /// Check the dependence for two accesses with the same stride \p Stride.
1438 /// \p Distance is the positive distance and \p TypeByteSize is type size in
1439 /// bytes.
1440 ///
1441 /// \returns true if they are independent.
1442 static bool areStridedAccessesIndependent(uint64_t Distance, uint64_t Stride,
1443                                           uint64_t TypeByteSize) {
1444   assert(Stride > 1 && "The stride must be greater than 1");
1445   assert(TypeByteSize > 0 && "The type size in bytes must be non-zero");
1446   assert(Distance > 0 && "The distance must be non-zero");
1447 
1448   // Skip if the distance is not a multiple of the type byte size.
1449   if (Distance % TypeByteSize)
1450     return false;
1451 
1452   uint64_t ScaledDist = Distance / TypeByteSize;
1453 
1454   // No dependence if the scaled distance is not a multiple of the stride.
1455   // E.g.
1456   //      for (i = 0; i < 1024 ; i += 4)
1457   //        A[i+2] = A[i] + 1;
1458   //
1459   // Two accesses in memory (scaled distance is 2, stride is 4):
1460   //     | A[0] |      |      |      | A[4] |      |      |      |
1461   //     |      |      | A[2] |      |      |      | A[6] |      |
1462   //
1463   // E.g.
1464   //      for (i = 0; i < 1024 ; i += 3)
1465   //        A[i+4] = A[i] + 1;
1466   //
1467   // Two accesses in memory (scaled distance is 4, stride is 3):
1468   //     | A[0] |      |      | A[3] |      |      | A[6] |      |      |
1469   //     |      |      |      |      | A[4] |      |      | A[7] |      |
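  // In the first example above, ScaledDist % Stride = 2 % 4 = 2, which is
  // non-zero, so the accesses are reported independent; a result of zero
  // would mean the two access streams eventually hit the same elements.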
1470   return ScaledDist % Stride;
1471 }
1472 
1473 MemoryDepChecker::Dependence::DepType
1474 MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
1475                               const MemAccessInfo &B, unsigned BIdx,
1476                               const ValueToValueMap &Strides) {
1477   assert(AIdx < BIdx && "Must pass arguments in program order");
1478 
1479   Value *APtr = A.getPointer();
1480   Value *BPtr = B.getPointer();
1481   bool AIsWrite = A.getInt();
1482   bool BIsWrite = B.getInt();
1483 
1484   // Two reads are independent.
1485   if (!AIsWrite && !BIsWrite)
1486     return Dependence::NoDep;
1487 
1488   // We cannot check pointers in different address spaces.
1489   if (APtr->getType()->getPointerAddressSpace() !=
1490       BPtr->getType()->getPointerAddressSpace())
1491     return Dependence::Unknown;
1492 
1493   int64_t StrideAPtr = getPtrStride(PSE, APtr, InnermostLoop, Strides, true);
1494   int64_t StrideBPtr = getPtrStride(PSE, BPtr, InnermostLoop, Strides, true);
1495 
1496   const SCEV *Src = PSE.getSCEV(APtr);
1497   const SCEV *Sink = PSE.getSCEV(BPtr);
1498 
1499   // If the induction step is negative we have to invert source and sink of the
1500   // dependence.
1501   if (StrideAPtr < 0) {
1502     std::swap(APtr, BPtr);
1503     std::swap(Src, Sink);
1504     std::swap(AIsWrite, BIsWrite);
1505     std::swap(AIdx, BIdx);
1506     std::swap(StrideAPtr, StrideBPtr);
1507   }
1508 
1509   const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
1510 
1511   LLVM_DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
1512                     << "(Induction step: " << StrideAPtr << ")\n");
1513   LLVM_DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
1514                     << *InstMap[BIdx] << ": " << *Dist << "\n");
1515 
1516   // Need accesses with constant stride. We don't want to vectorize
1517   // "A[B[i]] += ..." and similar code or pointer arithmetic that could wrap in
1518   // the address space.
1519   if (!StrideAPtr || !StrideBPtr || StrideAPtr != StrideBPtr) {
1520     LLVM_DEBUG(dbgs() << "Pointer access with non-constant stride\n");
1521     return Dependence::Unknown;
1522   }
1523 
1524   Type *ATy = APtr->getType()->getPointerElementType();
1525   Type *BTy = BPtr->getType()->getPointerElementType();
1526   auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
1527   uint64_t TypeByteSize = DL.getTypeAllocSize(ATy);
1528   uint64_t Stride = std::abs(StrideAPtr);
1529   const SCEVConstant *C = dyn_cast<SCEVConstant>(Dist);
1530   if (!C) {
1531     if (TypeByteSize == DL.getTypeAllocSize(BTy) &&
1532         isSafeDependenceDistance(DL, *(PSE.getSE()),
1533                                  *(PSE.getBackedgeTakenCount()), *Dist, Stride,
1534                                  TypeByteSize))
1535       return Dependence::NoDep;
1536 
1537     LLVM_DEBUG(dbgs() << "LAA: Dependence because of non-constant distance\n");
1538     FoundNonConstantDistanceDependence = true;
1539     return Dependence::Unknown;
1540   }
1541 
1542   const APInt &Val = C->getAPInt();
1543   int64_t Distance = Val.getSExtValue();
1544 
1545   // Attempt to prove strided accesses independent.
1546   if (std::abs(Distance) > 0 && Stride > 1 && ATy == BTy &&
1547       areStridedAccessesIndependent(std::abs(Distance), Stride, TypeByteSize)) {
1548     LLVM_DEBUG(dbgs() << "LAA: Strided accesses are independent\n");
1549     return Dependence::NoDep;
1550   }
1551 
1552   // Negative distances are not plausible dependencies.
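  // For instance (a sketch):
  //   for (i = 0; i < N; i++) { a[i + 2] = x; y += a[i]; }
  // Here the source is the store to a[i + 2] and the sink is the load of
  // a[i], so Dist = -2 * sizeof(*a): each element is written two iterations
  // before it is read, i.e. a forward dependence.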
1553   if (Val.isNegative()) {
1554     bool IsTrueDataDependence = (AIsWrite && !BIsWrite);
1555     if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1556         (couldPreventStoreLoadForward(Val.abs().getZExtValue(), TypeByteSize) ||
1557          ATy != BTy)) {
1558       LLVM_DEBUG(dbgs() << "LAA: Forward but may prevent st->ld forwarding\n");
1559       return Dependence::ForwardButPreventsForwarding;
1560     }
1561 
1562     LLVM_DEBUG(dbgs() << "LAA: Dependence is negative\n");
1563     return Dependence::Forward;
1564   }
1565 
1566   // Write to the same location with the same size.
1567   // Could be improved to assert type sizes are the same (i32 == float, etc).
1568   if (Val == 0) {
1569     if (ATy == BTy)
1570       return Dependence::Forward;
1571     LLVM_DEBUG(
1572         dbgs() << "LAA: Zero dependence difference but different types\n");
1573     return Dependence::Unknown;
1574   }
1575 
1576   assert(Val.isStrictlyPositive() && "Expect a positive value");
1577 
1578   if (ATy != BTy) {
1579     LLVM_DEBUG(
1580         dbgs()
1581         << "LAA: ReadWrite-Write positive dependency with different types\n");
1582     return Dependence::Unknown;
1583   }
1584 
1585   // Bail out early if passed-in parameters make vectorization not feasible.
1586   unsigned ForcedFactor = (VectorizerParams::VectorizationFactor ?
1587                            VectorizerParams::VectorizationFactor : 1);
1588   unsigned ForcedUnroll = (VectorizerParams::VectorizationInterleave ?
1589                            VectorizerParams::VectorizationInterleave : 1);
1590   // The minimum number of iterations for a vectorized/unrolled version.
1591   unsigned MinNumIter = std::max(ForcedFactor * ForcedUnroll, 2U);
1592 
1593   // It's not vectorizable if the distance is smaller than the minimum distance
1594   // needed for a vectorized/unrolled version. Vectorizing one iteration in
1595   // front needs TypeByteSize * Stride. Vectorizing the last iteration needs
1596   // TypeByteSize (no need to add the last gap distance).
1597   //
1598   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1599   //      foo(int *A) {
1600   //        int *B = (int *)((char *)A + 14);
1601   //        for (i = 0 ; i < 1024 ; i += 2)
1602   //          B[i] = A[i] + 1;
1603   //      }
1604   //
1605   // Two accesses in memory (stride is 2):
1606   //     | A[0] |      | A[2] |      | A[4] |      | A[6] |      |
1607   //                              | B[0] |      | B[2] |      | B[4] |
1608   //
1609   // Distance needed for vectorizing iterations except the last one:
1610   // 4 * 2 * (MinNumIter - 1). Distance needed for the last iteration: 4.
1611   // So the minimum distance needed is: 4 * 2 * (MinNumIter - 1) + 4.
1612   //
1613   // If MinNumIter is 2, it is vectorizable as the minimum distance needed is
1614   // 12, which is less than the distance (14).
1615   //
1616   // If MinNumIter is 4 (say, if a user forces the vectorization factor to be
1617   // 4), the minimum distance needed is 28, which is greater than the
1618   // distance. It is not safe to vectorize.
1619   uint64_t MinDistanceNeeded =
1620       TypeByteSize * Stride * (MinNumIter - 1) + TypeByteSize;
1621   if (MinDistanceNeeded > static_cast<uint64_t>(Distance)) {
1622     LLVM_DEBUG(dbgs() << "LAA: Failure because of positive distance "
1623                       << Distance << '\n');
1624     return Dependence::Backward;
1625   }
1626 
1627   // Unsafe if the minimum distance needed is greater than max safe distance.
1628   if (MinDistanceNeeded > MaxSafeDepDistBytes) {
1629     LLVM_DEBUG(dbgs() << "LAA: Failure because it needs at least "
1630                       << MinDistanceNeeded << " size in bytes\n");
1631     return Dependence::Backward;
1632   }
1633 
1634   // Positive distance bigger than max vectorization factor.
1635   // FIXME: Should use max factor instead of max distance in bytes, which could
1636   // not handle different types.
1637   // E.g. Assume one char is 1 byte in memory and one int is 4 bytes.
1638   //      void foo (int *A, char *B) {
1639   //        for (unsigned i = 0; i < 1024; i++) {
1640   //          A[i+2] = A[i] + 1;
1641   //          B[i+2] = B[i] + 1;
1642   //        }
1643   //      }
1644   //
1645   // This case is currently unsafe according to the max safe distance. If we
1646   // analyze the two accesses on array B, the max safe dependence distance
1647   // is 2. Then we analyze the accesses on array A, whose minimum distance
1648   // needed is 8. Since 8 is greater than 2, vectorization is forbidden, even
1649   // though both A and B could actually be vectorized with a factor of 2.
1650   MaxSafeDepDistBytes =
1651       std::min(static_cast<uint64_t>(Distance), MaxSafeDepDistBytes);
1652 
1653   bool IsTrueDataDependence = (!AIsWrite && BIsWrite);
1654   if (IsTrueDataDependence && EnableForwardingConflictDetection &&
1655       couldPreventStoreLoadForward(Distance, TypeByteSize))
1656     return Dependence::BackwardVectorizableButPreventsForwarding;
1657 
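  // A worked example (hypothetical values): with MaxSafeDepDistBytes = 16,
  // TypeByteSize = 4 and Stride = 1, MaxVF below is 4 elements, i.e. a
  // 128-bit safe vector register width.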
1658   uint64_t MaxVF = MaxSafeDepDistBytes / (TypeByteSize * Stride);
1659   LLVM_DEBUG(dbgs() << "LAA: Positive distance " << Val.getSExtValue()
1660                     << " with max VF = " << MaxVF << '\n');
1661   uint64_t MaxVFInBits = MaxVF * TypeByteSize * 8;
1662   MaxSafeRegisterWidth = std::min(MaxSafeRegisterWidth, MaxVFInBits);
1663   return Dependence::BackwardVectorizable;
1664 }
1665 
1666 bool MemoryDepChecker::areDepsSafe(DepCandidates &AccessSets,
1667                                    MemAccessInfoList &CheckDeps,
1668                                    const ValueToValueMap &Strides) {
1669 
1670   MaxSafeDepDistBytes = -1;
1671   SmallPtrSet<MemAccessInfo, 8> Visited;
1672   for (MemAccessInfo CurAccess : CheckDeps) {
1673     if (Visited.count(CurAccess))
1674       continue;
1675 
1676     // Get the relevant memory access set.
1677     EquivalenceClasses<MemAccessInfo>::iterator I =
1678       AccessSets.findValue(AccessSets.getLeaderValue(CurAccess));
1679 
1680     // Check accesses within this set.
1681     EquivalenceClasses<MemAccessInfo>::member_iterator AI =
1682         AccessSets.member_begin(I);
1683     EquivalenceClasses<MemAccessInfo>::member_iterator AE =
1684         AccessSets.member_end();
1685 
1686     // Check every access pair.
1687     while (AI != AE) {
1688       Visited.insert(*AI);
1689       bool AIIsWrite = AI->getInt();
1690       // Check loads only against subsequent accesses, but check stores also
1691       // against other stores in the same equivalence class - to the same address.
1692       EquivalenceClasses<MemAccessInfo>::member_iterator OI =
1693           (AIIsWrite ? AI : std::next(AI));
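      // E.g. (a sketch): if the class members are a write W and a read R,
      // then for AI == W the iterator OI starts at W itself, so store-store
      // pairs to the same address are checked as well as W/R pairs; for
      // AI == R, OI starts after R, so a read is never paired with itself.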
1694       while (OI != AE) {
1695         // Check every accessing instruction pair in program order.
1696         for (std::vector<unsigned>::iterator I1 = Accesses[*AI].begin(),
1697              I1E = Accesses[*AI].end(); I1 != I1E; ++I1)
1698           // Scan all accesses of another equivalence class, but only the
1699           // subsequent accesses of the same equivalence class.
1700           for (std::vector<unsigned>::iterator
1701                    I2 = (OI == AI ? std::next(I1) : Accesses[*OI].begin()),
1702                    I2E = (OI == AI ? I1E : Accesses[*OI].end());
1703                I2 != I2E; ++I2) {
1704             auto A = std::make_pair(&*AI, *I1);
1705             auto B = std::make_pair(&*OI, *I2);
1706 
1707             assert(*I1 != *I2);
1708             if (*I1 > *I2)
1709               std::swap(A, B);
1710 
1711             Dependence::DepType Type =
1712                 isDependent(*A.first, A.second, *B.first, B.second, Strides);
1713             mergeInStatus(Dependence::isSafeForVectorization(Type));
1714 
1715             // Gather dependences unless we accumulated MaxDependences
1716             // dependences.  In that case return as soon as we find the first
1717             // unsafe dependence.  This puts a limit on this quadratic
1718             // algorithm.
1719             if (RecordDependences) {
1720               if (Type != Dependence::NoDep)
1721                 Dependences.push_back(Dependence(A.second, B.second, Type));
1722 
1723               if (Dependences.size() >= MaxDependences) {
1724                 RecordDependences = false;
1725                 Dependences.clear();
1726                 LLVM_DEBUG(dbgs()
1727                            << "Too many dependences, stopped recording\n");
1728               }
1729             }
1730             if (!RecordDependences && !isSafeForVectorization())
1731               return false;
1732           }
1733         ++OI;
1734       }
1735       ++AI;
1736     }
1737   }
1738 
1739   LLVM_DEBUG(dbgs() << "Total Dependences: " << Dependences.size() << "\n");
1740   return isSafeForVectorization();
1741 }
1742 
1743 SmallVector<Instruction *, 4>
1744 MemoryDepChecker::getInstructionsForAccess(Value *Ptr, bool isWrite) const {
1745   MemAccessInfo Access(Ptr, isWrite);
1746   auto &IndexVector = Accesses.find(Access)->second;
1747 
1748   SmallVector<Instruction *, 4> Insts;
1749   transform(IndexVector,
1750             std::back_inserter(Insts),
1751             [&](unsigned Idx) { return this->InstMap[Idx]; });
1752   return Insts;
1753 }
1754 
1755 const char *MemoryDepChecker::Dependence::DepName[] = {
1756     "NoDep", "Unknown", "Forward", "ForwardButPreventsForwarding", "Backward",
1757     "BackwardVectorizable", "BackwardVectorizableButPreventsForwarding"};
1758 
1759 void MemoryDepChecker::Dependence::print(
1760     raw_ostream &OS, unsigned Depth,
1761     const SmallVectorImpl<Instruction *> &Instrs) const {
1762   OS.indent(Depth) << DepName[Type] << ":\n";
1763   OS.indent(Depth + 2) << *Instrs[Source] << " -> \n";
1764   OS.indent(Depth + 2) << *Instrs[Destination] << "\n";
1765 }
1766 
1767 bool LoopAccessInfo::canAnalyzeLoop() {
1768   // We need to have a loop header.
1769   LLVM_DEBUG(dbgs() << "LAA: Found a loop in "
1770                     << TheLoop->getHeader()->getParent()->getName() << ": "
1771                     << TheLoop->getHeader()->getName() << '\n');
1772 
1773   // We can only analyze innermost loops.
1774   if (!TheLoop->empty()) {
1775     LLVM_DEBUG(dbgs() << "LAA: loop is not the innermost loop\n");
1776     recordAnalysis("NotInnerMostLoop") << "loop is not the innermost loop";
1777     return false;
1778   }
1779 
1780   // We must have a single backedge.
1781   if (TheLoop->getNumBackEdges() != 1) {
1782     LLVM_DEBUG(
1783         dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1784     recordAnalysis("CFGNotUnderstood")
1785         << "loop control flow is not understood by analyzer";
1786     return false;
1787   }
1788 
1789   // We must have a single exiting block.
1790   if (!TheLoop->getExitingBlock()) {
1791     LLVM_DEBUG(
1792         dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1793     recordAnalysis("CFGNotUnderstood")
1794         << "loop control flow is not understood by analyzer";
1795     return false;
1796   }
1797 
1798   // We only handle bottom-tested loops, i.e. loops in which the condition is
1799   // checked at the end of each iteration. With that we can assume that all
1800   // instructions in the loop are executed the same number of times.
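  // E.g. a rotated (do-while style) loop satisfies this requirement
  // (illustrative only):
  //   do { A[i] = B[i]; ++i; } while (i < n);  // latch performs the exit test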
1801   if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
1802     LLVM_DEBUG(
1803         dbgs() << "LAA: loop control flow is not understood by analyzer\n");
1804     recordAnalysis("CFGNotUnderstood")
1805         << "loop control flow is not understood by analyzer";
1806     return false;
1807   }
1808 
1809   // ScalarEvolution needs to be able to find the exit count.
1810   const SCEV *ExitCount = PSE->getBackedgeTakenCount();
1811   if (ExitCount == PSE->getSE()->getCouldNotCompute()) {
1812     recordAnalysis("CantComputeNumberOfIterations")
1813         << "could not determine number of loop iterations";
1814     LLVM_DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
1815     return false;
1816   }
1817 
1818   return true;
1819 }
1820 
1821 void LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
1822                                  const TargetLibraryInfo *TLI,
1823                                  DominatorTree *DT) {
1824   typedef SmallPtrSet<Value*, 16> ValueSet;
1825 
1826   // Holds the Load and Store instructions.
1827   SmallVector<LoadInst *, 16> Loads;
1828   SmallVector<StoreInst *, 16> Stores;
1829 
1830   // Holds all the different accesses in the loop.
1831   unsigned NumReads = 0;
1832   unsigned NumReadWrites = 0;
1833 
1834   bool HasComplexMemInst = false;
1835 
1836   // A runtime check is only legal to insert if there are no convergent calls.
1837   HasConvergentOp = false;
1838 
1839   PtrRtChecking->Pointers.clear();
1840   PtrRtChecking->Need = false;
1841 
1842   const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
1843 
1844   const bool EnableMemAccessVersioningOfLoop =
1845       EnableMemAccessVersioning &&
1846       !TheLoop->getHeader()->getParent()->hasOptSize();
1847 
1848   // For each block.
1849   for (BasicBlock *BB : TheLoop->blocks()) {
1850     // Scan the BB and collect legal loads and stores. Also detect any
1851     // convergent instructions.
1852     for (Instruction &I : *BB) {
1853       if (auto *Call = dyn_cast<CallBase>(&I)) {
1854         if (Call->isConvergent())
1855           HasConvergentOp = true;
1856       }
1857 
1858       // If this loop contains both a non-vectorizable memory instruction and
1859       // a convergent operation, there is no reason to continue the search.
1860       if (HasComplexMemInst && HasConvergentOp) {
1861         CanVecMem = false;
1862         return;
1863       }
1864 
1865       // Avoid hitting recordAnalysis multiple times.
1866       if (HasComplexMemInst)
1867         continue;
1868 
1869       // If this is a load, save it. If this instruction can read from memory
1870       // but is not a load, then we quit. Notice that we don't handle function
1871       // calls that read or write.
1872       if (I.mayReadFromMemory()) {
1873         // Many math library functions read the rounding mode. We will only
1874         // vectorize a loop if it contains known function calls that don't set
1875         // the flag. Therefore, it is safe to ignore this read from memory.
1876         auto *Call = dyn_cast<CallInst>(&I);
1877         if (Call && getVectorIntrinsicIDForCall(Call, TLI))
1878           continue;
1879 
1880         // If the function has an explicit vectorized counterpart, we can safely
1881         // assume that it can be vectorized.
1882         if (Call && !Call->isNoBuiltin() && Call->getCalledFunction() &&
1883             !VFDatabase::getMappings(*Call).empty())
1884           continue;
1885 
1886         auto *Ld = dyn_cast<LoadInst>(&I);
1887         if (!Ld) {
1888           recordAnalysis("CantVectorizeInstruction", Ld)
1889             << "instruction cannot be vectorized";
1890           HasComplexMemInst = true;
1891           continue;
1892         }
1893         if (!Ld->isSimple() && !IsAnnotatedParallel) {
1894           recordAnalysis("NonSimpleLoad", Ld)
1895               << "read with atomic ordering or volatile read";
1896           LLVM_DEBUG(dbgs() << "LAA: Found a non-simple load.\n");
1897           HasComplexMemInst = true;
1898           continue;
1899         }
1900         NumLoads++;
1901         Loads.push_back(Ld);
1902         DepChecker->addAccess(Ld);
1903         if (EnableMemAccessVersioningOfLoop)
1904           collectStridedAccess(Ld);
1905         continue;
1906       }
1907 
1908       // Save 'store' instructions. Abort if other instructions write to memory.
1909       if (I.mayWriteToMemory()) {
1910         auto *St = dyn_cast<StoreInst>(&I);
1911         if (!St) {
1912           recordAnalysis("CantVectorizeInstruction", St)
1913               << "instruction cannot be vectorized";
1914           HasComplexMemInst = true;
1915           continue;
1916         }
1917         if (!St->isSimple() && !IsAnnotatedParallel) {
1918           recordAnalysis("NonSimpleStore", St)
1919               << "write with atomic ordering or volatile write";
1920           LLVM_DEBUG(dbgs() << "LAA: Found a non-simple store.\n");
1921           HasComplexMemInst = true;
1922           continue;
1923         }
1924         NumStores++;
1925         Stores.push_back(St);
1926         DepChecker->addAccess(St);
1927         if (EnableMemAccessVersioningOfLoop)
1928           collectStridedAccess(St);
1929       }
1930     } // Next instr.
1931   } // Next block.
1932 
1933   if (HasComplexMemInst) {
1934     CanVecMem = false;
1935     return;
1936   }
1937 
1938   // Now we have two lists that hold the loads and the stores.
1939   // Next, we find the pointers that they use.
1940 
1941   // Check if we see any stores. If there are no stores, then we don't
1942   // care if the pointers are *restrict*.
1943   if (Stores.empty()) {
1944     LLVM_DEBUG(dbgs() << "LAA: Found a read-only loop!\n");
1945     CanVecMem = true;
1946     return;
1947   }
1948 
1949   MemoryDepChecker::DepCandidates DependentAccesses;
1950   AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
1951                           TheLoop, AA, LI, DependentAccesses, *PSE);
1952 
1953   // Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
1954   // multiple times on the same object. If the ptr is accessed twice, once
1955   // for read and once for write, it will only appear once (on the write
1956   // list). This is okay, since we are going to check for conflicts between
1957   // writes and between reads and writes, but not between reads and reads.
1958   ValueSet Seen;
1959 
1960   // Record uniform store addresses to identify if we have multiple stores
1961   // to the same address.
1962   ValueSet UniformStores;
1963 
1964   for (StoreInst *ST : Stores) {
1965     Value *Ptr = ST->getPointerOperand();
1966 
1967     if (isUniform(Ptr))
1968       HasDependenceInvolvingLoopInvariantAddress |=
1969           !UniformStores.insert(Ptr).second;
1970 
1971     // If we did *not* see this pointer before, insert it into the read-write
1972     // list. At this phase it is only a 'write' list.
1973     if (Seen.insert(Ptr).second) {
1974       ++NumReadWrites;
1975 
1976       MemoryLocation Loc = MemoryLocation::get(ST);
1977       // The TBAA metadata could have a control dependency on the predication
1978       // condition, so we cannot rely on it when determining whether or not we
1979       // need runtime pointer checks.
1980       if (blockNeedsPredication(ST->getParent(), TheLoop, DT))
1981         Loc.AATags.TBAA = nullptr;
1982 
1983       Accesses.addStore(Loc);
1984     }
1985   }
1986 
1987   if (IsAnnotatedParallel) {
1988     LLVM_DEBUG(
1989         dbgs() << "LAA: A loop annotated parallel, ignore memory dependency "
1990                << "checks.\n");
1991     CanVecMem = true;
1992     return;
1993   }
1994 
1995   for (LoadInst *LD : Loads) {
1996     Value *Ptr = LD->getPointerOperand();
1997     // If we did *not* see this pointer before, insert it to the
1998     // read list. If we *did* see it before, then it is already in
1999     // the read-write list. This allows us to vectorize expressions
2000     // such as A[i] += x;  Because the address of A[i] is a read-write
2001     // pointer. This only works if the index of A[i] is consecutive.
2002     // If the address of i is unknown (for example A[B[i]]) then we may
2003     // read a few words, modify, and write a few words, and some of the
2004     // words may be written to the same address.
2005     bool IsReadOnlyPtr = false;
2006     if (Seen.insert(Ptr).second ||
2007         !getPtrStride(*PSE, Ptr, TheLoop, SymbolicStrides)) {
2008       ++NumReads;
2009       IsReadOnlyPtr = true;
2010     }
2011 
2012     // See if there is an unsafe dependency between a load from a uniform
2013     // address and a store to the same uniform address.
2014     if (UniformStores.count(Ptr)) {
2015       LLVM_DEBUG(dbgs() << "LAA: Found an unsafe dependency between a uniform "
2016                            "load and uniform store to the same address!\n");
2017       HasDependenceInvolvingLoopInvariantAddress = true;
2018     }
2019 
2020     MemoryLocation Loc = MemoryLocation::get(LD);
2021     // The TBAA metadata could have a control dependency on the predication
2022     // condition, so we cannot rely on it when determining whether or not we
2023     // need runtime pointer checks.
2024     if (blockNeedsPredication(LD->getParent(), TheLoop, DT))
2025       Loc.AATags.TBAA = nullptr;
2026 
2027     Accesses.addLoad(Loc, IsReadOnlyPtr);
2028   }
2029 
2030   // If we write (or read-write) to a single destination and there are no
2031   // other reads in this loop then it is safe to vectorize.
2032   if (NumReadWrites == 1 && NumReads == 0) {
2033     LLVM_DEBUG(dbgs() << "LAA: Found a write-only loop!\n");
2034     CanVecMem = true;
2035     return;
2036   }
2037 
2038   // Build dependence sets and check whether we need a runtime pointer bounds
2039   // check.
2040   Accesses.buildDependenceSets();
2041 
2042   // Find pointers with computable bounds. We are going to use this information
2043   // to place a runtime bound check.
2044   bool CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, PSE->getSE(),
2045                                                   TheLoop, SymbolicStrides);
2046   if (!CanDoRTIfNeeded) {
2047     recordAnalysis("CantIdentifyArrayBounds") << "cannot identify array bounds";
2048     LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
2049                       << "the array bounds.\n");
2050     CanVecMem = false;
2051     return;
2052   }
2053 
2054   LLVM_DEBUG(
2055     dbgs() << "LAA: May be able to perform a memory runtime check if needed.\n");
2056 
2057   CanVecMem = true;
2058   if (Accesses.isDependencyCheckNeeded()) {
2059     LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
2060     CanVecMem = DepChecker->areDepsSafe(
2061         DependentAccesses, Accesses.getDependenciesToCheck(), SymbolicStrides);
2062     MaxSafeDepDistBytes = DepChecker->getMaxSafeDepDistBytes();
2063 
2064     if (!CanVecMem && DepChecker->shouldRetryWithRuntimeCheck()) {
2065       LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
2066 
2067       // Clear the dependency checks. We assume they are not needed.
2068       Accesses.resetDepChecks(*DepChecker);
2069 
2070       PtrRtChecking->reset();
2071       PtrRtChecking->Need = true;
2072 
2073       auto *SE = PSE->getSE();
2074       CanDoRTIfNeeded = Accesses.canCheckPtrAtRT(*PtrRtChecking, SE, TheLoop,
2075                                                  SymbolicStrides, true);
2076 
2077       // Check that we found the bounds for the pointer.
2078       if (!CanDoRTIfNeeded) {
2079         recordAnalysis("CantCheckMemDepsAtRunTime")
2080             << "cannot check memory dependencies at runtime";
2081         LLVM_DEBUG(dbgs() << "LAA: Can't vectorize with memory checks\n");
2082         CanVecMem = false;
2083         return;
2084       }
2085 
2086       CanVecMem = true;
2087     }
2088   }
2089 
2090   if (HasConvergentOp) {
2091     recordAnalysis("CantInsertRuntimeCheckWithConvergent")
2092       << "cannot add control dependency to convergent operation";
2093     LLVM_DEBUG(dbgs() << "LAA: We can't vectorize because a runtime check "
2094                          "would be needed with a convergent operation\n");
2095     CanVecMem = false;
2096     return;
2097   }
2098 
2099   if (CanVecMem)
2100     LLVM_DEBUG(
2101         dbgs() << "LAA: No unsafe dependent memory operations in loop.  We"
2102                << (PtrRtChecking->Need ? "" : " don't")
2103                << " need runtime memory checks.\n");
2104   else {
2105     recordAnalysis("UnsafeMemDep")
2106         << "unsafe dependent memory operations in loop. Use "
2107            "#pragma loop distribute(enable) to allow loop distribution "
2108            "to attempt to isolate the offending operations into a separate "
2109            "loop";
2110     LLVM_DEBUG(dbgs() << "LAA: unsafe dependent memory operations in loop\n");
2111   }
2112 }
2113 
2114 bool LoopAccessInfo::blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
2115                                            DominatorTree *DT)  {
2116   assert(TheLoop->contains(BB) && "Unknown block used");
2117 
2118   // Blocks that do not dominate the latch need predication.
2119   BasicBlock* Latch = TheLoop->getLoopLatch();
2120   return !DT->dominates(BB, Latch);
2121 }
2122 
2123 OptimizationRemarkAnalysis &LoopAccessInfo::recordAnalysis(StringRef RemarkName,
2124                                                            Instruction *I) {
2125   assert(!Report && "Multiple reports generated");
2126 
2127   Value *CodeRegion = TheLoop->getHeader();
2128   DebugLoc DL = TheLoop->getStartLoc();
2129 
2130   if (I) {
2131     CodeRegion = I->getParent();
2132     // If there is no debug location attached to the instruction, fall back
2133     // to using the loop's.
2134     if (I->getDebugLoc())
2135       DL = I->getDebugLoc();
2136   }
2137 
2138   Report = std::make_unique<OptimizationRemarkAnalysis>(DEBUG_TYPE, RemarkName,
2139                                                         DL, CodeRegion);
2140   return *Report;
2141 }
2142 
2143 bool LoopAccessInfo::isUniform(Value *V) const {
2144   auto *SE = PSE->getSE();
2145   // Since we rely on SCEV for uniformity, if the type is not SCEVable, it is
2146   // never considered uniform.
2147   // TODO: Is this really what we want? Even without FP SCEV, we may want some
2148   // trivially loop-invariant FP values to be considered uniform.
2149   if (!SE->isSCEVable(V->getType()))
2150     return false;
2151   return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
2152 }
2153 
2154 void LoopAccessInfo::collectStridedAccess(Value *MemAccess) {
2155   Value *Ptr = nullptr;
2156   if (LoadInst *LI = dyn_cast<LoadInst>(MemAccess))
2157     Ptr = LI->getPointerOperand();
2158   else if (StoreInst *SI = dyn_cast<StoreInst>(MemAccess))
2159     Ptr = SI->getPointerOperand();
2160   else
2161     return;
2162 
2163   Value *Stride = getStrideFromPointer(Ptr, PSE->getSE(), TheLoop);
2164   if (!Stride)
2165     return;
2166 
2167   LLVM_DEBUG(dbgs() << "LAA: Found a strided access that is a candidate for "
2168                        "versioning:");
2169   LLVM_DEBUG(dbgs() << "  Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
2170 
2171   // Avoid adding the "Stride == 1" predicate when we know that
2172   // Stride >= Trip-Count. Such a predicate will effectively optimize a single
2173   // or zero iteration loop, as Trip-Count <= Stride == 1.
2174   //
2175   // TODO: We are currently not making a very informed decision on when it is
2176   // beneficial to apply stride versioning. It might make more sense that the
2177   // users of this analysis (such as the vectorizer) will trigger it, based on
2178   // their specific cost considerations; For example, in cases where stride
2179   // versioning does not help resolving memory accesses/dependences, the
2180   // vectorizer should evaluate the cost of the runtime test, and the benefit
2181   // of various possible stride specializations, considering the alternatives
2182   // of using gather/scatters (if available).
2183 
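  // A sketch with hypothetical values: if Stride is 5 and the backedge-taken
  // count is 3 (trip count 4), then Stride >= TripCount, and under the
  // "Stride == 1" predicate the loop would execute at most once
  // (TripCount <= Stride == 1), so the check below skips versioning.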
2184   const SCEV *StrideExpr = PSE->getSCEV(Stride);
2185   const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
2186 
2187   // Match the types so we can compare the stride and the BETakenCount.
2188   // The Stride can be positive/negative, so we sign extend Stride;
2189   // The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
2190   const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
2191   uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
2192   uint64_t BETypeSize = DL.getTypeAllocSize(BETakenCount->getType());
2193   const SCEV *CastedStride = StrideExpr;
2194   const SCEV *CastedBECount = BETakenCount;
2195   ScalarEvolution *SE = PSE->getSE();
2196   if (BETypeSize >= StrideTypeSize)
2197     CastedStride = SE->getNoopOrSignExtend(StrideExpr, BETakenCount->getType());
2198   else
2199     CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
2200   const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
2201   // Since TripCount == BackEdgeTakenCount + 1, checking:
2202   // "Stride >= TripCount" is equivalent to checking:
2203   // Stride - BETakenCount > 0
2204   if (SE->isKnownPositive(StrideMinusBETaken)) {
2205     LLVM_DEBUG(
2206         dbgs() << "LAA: Stride>=TripCount; No point in versioning as the "
2207                   "Stride==1 predicate will imply that the loop executes "
2208                   "at most once.\n");
2209     return;
2210   }
2211   LLVM_DEBUG(dbgs() << "LAA: Found a strided access that we can version.");
2212 
2213   SymbolicStrides[Ptr] = Stride;
2214   StrideSet.insert(Stride);
2215 }
2216 
2217 LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
2218                                const TargetLibraryInfo *TLI, AAResults *AA,
2219                                DominatorTree *DT, LoopInfo *LI)
2220     : PSE(std::make_unique<PredicatedScalarEvolution>(*SE, *L)),
2221       PtrRtChecking(std::make_unique<RuntimePointerChecking>(SE)),
2222       DepChecker(std::make_unique<MemoryDepChecker>(*PSE, L)), TheLoop(L),
2223       NumLoads(0), NumStores(0), MaxSafeDepDistBytes(-1), CanVecMem(false),
2224       HasConvergentOp(false),
2225       HasDependenceInvolvingLoopInvariantAddress(false) {
2226   if (canAnalyzeLoop())
2227     analyzeLoop(AA, LI, TLI, DT);
2228 }
2229 
2230 void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
2231   if (CanVecMem) {
2232     OS.indent(Depth) << "Memory dependences are safe";
2233     if (MaxSafeDepDistBytes != -1ULL)
2234       OS << " with a maximum dependence distance of " << MaxSafeDepDistBytes
2235          << " bytes";
2236     if (PtrRtChecking->Need)
2237       OS << " with run-time checks";
2238     OS << "\n";
2239   }
2240 
2241   if (HasConvergentOp)
2242     OS.indent(Depth) << "Has convergent operation in loop\n";
2243 
2244   if (Report)
2245     OS.indent(Depth) << "Report: " << Report->getMsg() << "\n";
2246 
2247   if (auto *Dependences = DepChecker->getDependences()) {
2248     OS.indent(Depth) << "Dependences:\n";
2249     for (auto &Dep : *Dependences) {
2250       Dep.print(OS, Depth + 2, DepChecker->getMemoryInstructions());
2251       OS << "\n";
2252     }
2253   } else
2254     OS.indent(Depth) << "Too many dependences, not recorded\n";
2255 
2256   // List the pairs of accesses that need run-time checks to prove independence.
2257   PtrRtChecking->print(OS, Depth);
2258   OS << "\n";
2259 
2260   OS.indent(Depth) << "Non vectorizable stores to invariant address were "
2261                    << (HasDependenceInvolvingLoopInvariantAddress ? "" : "not ")
2262                    << "found in loop.\n";
2263 
2264   OS.indent(Depth) << "SCEV assumptions:\n";
2265   PSE->getUnionPredicate().print(OS, Depth);
2266 
2267   OS << "\n";
2268 
2269   OS.indent(Depth) << "Expressions re-written:\n";
2270   PSE->print(OS, Depth);
2271 }
2272 
2273 LoopAccessLegacyAnalysis::LoopAccessLegacyAnalysis() : FunctionPass(ID) {
2274   initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
2275 }
2276 
2277 const LoopAccessInfo &LoopAccessLegacyAnalysis::getInfo(Loop *L) {
2278   auto &LAI = LoopAccessInfoMap[L];
2279 
2280   if (!LAI)
2281     LAI = std::make_unique<LoopAccessInfo>(L, SE, TLI, AA, DT, LI);
2282 
2283   return *LAI.get();
2284 }
2285 
2286 void LoopAccessLegacyAnalysis::print(raw_ostream &OS, const Module *M) const {
2287   LoopAccessLegacyAnalysis &LAA = *const_cast<LoopAccessLegacyAnalysis *>(this);
2288 
2289   for (Loop *TopLevelLoop : *LI)
2290     for (Loop *L : depth_first(TopLevelLoop)) {
2291       OS.indent(2) << L->getHeader()->getName() << ":\n";
2292       auto &LAI = LAA.getInfo(L);
2293       LAI.print(OS, 4);
2294     }
2295 }
2296 
2297 bool LoopAccessLegacyAnalysis::runOnFunction(Function &F) {
2298   SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2299   auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
2300   TLI = TLIP ? &TLIP->getTLI(F) : nullptr;
2301   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2302   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2303   LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2304 
2305   return false;
2306 }
2307 
2308 void LoopAccessLegacyAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
2309   AU.addRequired<ScalarEvolutionWrapperPass>();
2310   AU.addRequired<AAResultsWrapperPass>();
2311   AU.addRequired<DominatorTreeWrapperPass>();
2312   AU.addRequired<LoopInfoWrapperPass>();
2313 
2314   AU.setPreservesAll();
2315 }
2316 
2317 char LoopAccessLegacyAnalysis::ID = 0;
2318 static const char laa_name[] = "Loop Access Analysis";
2319 #define LAA_NAME "loop-accesses"
2320 
2321 INITIALIZE_PASS_BEGIN(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
2322 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2323 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
2324 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2325 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2326 INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
2327 
2328 AnalysisKey LoopAccessAnalysis::Key;
2329 
2330 LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
2331                                        LoopStandardAnalysisResults &AR) {
2332   return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
2333 }
2334 
2335 namespace llvm {
2336 
2337   Pass *createLAAPass() {
2338     return new LoopAccessLegacyAnalysis();
2339   }
2340 
2341 } // end namespace llvm
2342