//===- LoopLoadElimination.cpp - Loop Load Elimination Pass ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a loop-aware load elimination pass.
//
// It uses LoopAccessAnalysis to identify loop-carried dependences with a
// distance of one between stores and loads.  These form the candidates for the
// transformation.  The source value of each store is then propagated to the
// users of the corresponding load.  This makes the load dead.
//
// The pass can also version the loop and add memchecks in order to prove that
// may-aliasing stores can't change the value in memory before it's read by the
// load.
//
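// As an illustrative sketch, a C loop of the shape below is the canonical
// candidate: the load of A[i] reads the value stored to A[i+1] on the
// previous iteration, so it can be replaced by a PHI that carries the stored
// value across the backedge:
//
//   for (unsigned i = 0; i < N - 1; i++)
//     A[i + 1] = A[i] + B[i];
//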
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopLoadElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <forward_list>
#include <tuple>
#include <utility>

using namespace llvm;

#define LLE_OPTION "loop-load-elim"
#define DEBUG_TYPE LLE_OPTION

static cl::opt<unsigned> CheckPerElim(
    "runtime-check-per-loop-load-elim", cl::Hidden,
    cl::desc("Max number of memchecks allowed per eliminated load on average"),
    cl::init(1));

static cl::opt<unsigned> LoadElimSCEVCheckThreshold(
    "loop-load-elimination-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Load Elimination"));
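
// Illustrative usage (the pass name "loop-load-elim" is an assumption about
// the new pass manager registration): both knobs above can be tuned from the
// opt command line, e.g.
//
//   opt -passes=loop-load-elim -runtime-check-per-loop-load-elim=2 \
//       -loop-load-elimination-scev-check-threshold=16 -S in.ll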

STATISTIC(NumLoopLoadEliminated, "Number of loads eliminated by LLE");

namespace {

/// Represent a store-to-load forwarding candidate.
struct StoreToLoadForwardingCandidate {
  LoadInst *Load;
  StoreInst *Store;

  StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
      : Load(Load), Store(Store) {}

  /// Return true if the dependence from the store to the load has a
  /// distance of one.  E.g. A[i+1] = A[i]
  bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE,
                                 Loop *L) const {
    Value *LoadPtr = Load->getPointerOperand();
    Value *StorePtr = Store->getPointerOperand();
    Type *LoadType = getLoadStoreType(Load);
    auto &DL = Load->getParent()->getModule()->getDataLayout();

    assert(LoadPtr->getType()->getPointerAddressSpace() ==
               StorePtr->getType()->getPointerAddressSpace() &&
           DL.getTypeSizeInBits(LoadType) ==
               DL.getTypeSizeInBits(getLoadStoreType(Store)) &&
           "Should be a known dependence");

    // Currently we only support accesses with unit stride.  FIXME: we should
    // be able to handle non-unit strides as well, as long as the stride is
    // equal to the dependence distance.
    if (getPtrStride(PSE, LoadType, LoadPtr, L).value_or(0) != 1 ||
        getPtrStride(PSE, LoadType, StorePtr, L).value_or(0) != 1)
      return false;

    unsigned TypeByteSize = DL.getTypeAllocSize(const_cast<Type *>(LoadType));

    auto *LoadPtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(LoadPtr));
    auto *StorePtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(StorePtr));

    // We don't need to check non-wrapping here because a forward/backward
    // dependence wouldn't be valid if these weren't monotonic accesses.
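    // For example, for A[i+1] = A[i] with a 4-byte element type,
    // StorePtrSCEV - LoadPtrSCEV folds to the constant 4, which equals
    // TypeByteSize, so the candidate is accepted.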
    auto *Dist = cast<SCEVConstant>(
        PSE.getSE()->getMinusSCEV(StorePtrSCEV, LoadPtrSCEV));
    const APInt &Val = Dist->getAPInt();
    return Val == TypeByteSize;
  }

  Value *getLoadPtr() const { return Load->getPointerOperand(); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const StoreToLoadForwardingCandidate &Cand) {
    OS << *Cand.Store << " -->\n";
    OS.indent(2) << *Cand.Load << "\n";
    return OS;
  }
#endif
};

} // end anonymous namespace

/// Check if the store dominates all latches, so that, as long as there is no
/// intervening store, this value will be loaded in the next iteration.
static bool doesStoreDominateAllLatches(BasicBlock *StoreBlock, Loop *L,
                                        DominatorTree *DT) {
  SmallVector<BasicBlock *, 8> Latches;
  L->getLoopLatches(Latches);
  return llvm::all_of(Latches, [&](const BasicBlock *Latch) {
    return DT->dominates(StoreBlock, Latch);
  });
}

/// Return true if the load is not executed on all paths in the loop.
static bool isLoadConditional(LoadInst *Load, Loop *L) {
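  // The header is the only block guaranteed to execute on every iteration,
  // so a load in any other block may be guarded by control flow within the
  // loop.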
  return Load->getParent() != L->getHeader();
}

namespace {

/// The per-loop class that does most of the work.
class LoadEliminationForLoop {
public:
  LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI,
                         DominatorTree *DT, BlockFrequencyInfo *BFI,
                         ProfileSummaryInfo *PSI)
      : L(L), LI(LI), LAI(LAI), DT(DT), BFI(BFI), PSI(PSI), PSE(LAI.getPSE()) {}

  /// Look through the loop-carried and loop-independent dependences in
  /// this loop and find store->load dependences.
  ///
  /// Note that no candidate is returned if LAA has failed to analyze the loop
  /// (e.g. if it's not bottom-tested, contains volatile memops, etc.)
  std::forward_list<StoreToLoadForwardingCandidate>
  findStoreToLoadDependences(const LoopAccessInfo &LAI) {
    std::forward_list<StoreToLoadForwardingCandidate> Candidates;

    const auto *Deps = LAI.getDepChecker().getDependences();
    if (!Deps)
      return Candidates;

    // Find store->load dependences (consequently, true dependences).  Both
    // lexically forward and backward dependences qualify.  Disqualify loads
    // that have other unknown dependences.

    SmallPtrSet<Instruction *, 4> LoadsWithUnknownDependence;

    for (const auto &Dep : *Deps) {
      Instruction *Source = Dep.getSource(LAI);
      Instruction *Destination = Dep.getDestination(LAI);

      if (Dep.Type == MemoryDepChecker::Dependence::Unknown) {
        if (isa<LoadInst>(Source))
          LoadsWithUnknownDependence.insert(Source);
        if (isa<LoadInst>(Destination))
          LoadsWithUnknownDependence.insert(Destination);
        continue;
      }


      if (Dep.isBackward())
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first.  (The direction is given by the
        // DepType.)
        std::swap(Source, Destination);
      else
        assert(Dep.isForward() && "Needs to be a forward dependence");

      auto *Store = dyn_cast<StoreInst>(Source);
      if (!Store)
        continue;
      auto *Load = dyn_cast<LoadInst>(Destination);
      if (!Load)
        continue;

      // Only propagate if the stored value is bit/pointer castable to the
      // loaded type.
      if (!CastInst::isBitOrNoopPointerCastable(
              getLoadStoreType(Store), getLoadStoreType(Load),
              Store->getParent()->getModule()->getDataLayout()))
        continue;

      Candidates.emplace_front(Load, Store);
    }

    if (!LoadsWithUnknownDependence.empty())
      Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) {
        return LoadsWithUnknownDependence.count(C.Load);
      });

    return Candidates;
  }

  /// Return the index of the instruction according to program order.
  unsigned getInstrIndex(Instruction *Inst) {
    auto I = InstOrder.find(Inst);
    assert(I != InstOrder.end() && "No index for instruction");
    return I->second;
  }

  /// If a load has multiple candidates associated (i.e. different
  /// stores), it means that it could be forwarding from multiple stores
  /// depending on control flow.  Remove these candidates.
  ///
  /// Here, we rely on LAA to include the relevant loop-independent dependences.
  /// LAA is known to omit these in the very simple case when the read and the
  /// write within an alias set always take place using the *same* pointer.
  ///
  /// However, we know that this is not the case here, i.e. we can rely on LAA
  /// to provide us with loop-independent dependences for the cases we're
  /// interested in.  Consider for example the case where a loop-independent
  /// dependence S1->S2 invalidates the forwarding S3->S2.
  ///
  ///         A[i]   = ...   (S1)
  ///         ...    = A[i]  (S2)
  ///         A[i+1] = ...   (S3)
  ///
  /// LAA will perform dependence analysis here because there are two
  /// *different* pointers involved in the same alias set (&A[i] and &A[i+1]).
  void removeDependencesFromMultipleStores(
      std::forward_list<StoreToLoadForwardingCandidate> &Candidates) {
    // If Store is nullptr it means that we have multiple stores forwarding to
    // this load.
    using LoadToSingleCandT =
        DenseMap<LoadInst *, const StoreToLoadForwardingCandidate *>;
    LoadToSingleCandT LoadToSingleCand;

    for (const auto &Cand : Candidates) {
      bool NewElt;
      LoadToSingleCandT::iterator Iter;

      std::tie(Iter, NewElt) =
          LoadToSingleCand.insert(std::make_pair(Cand.Load, &Cand));
      if (!NewElt) {
        const StoreToLoadForwardingCandidate *&OtherCand = Iter->second;
        // Already multiple stores forward to this load.
        if (OtherCand == nullptr)
          continue;

        // Handle the very basic case when the two stores are in the same block
        // so deciding which one forwards is easy.  The later one forwards as
        // long as they both have a dependence distance of one to the load.
        if (Cand.Store->getParent() == OtherCand->Store->getParent() &&
            Cand.isDependenceDistanceOfOne(PSE, L) &&
            OtherCand->isDependenceDistanceOfOne(PSE, L)) {
          // They are in the same block, the later one will forward to the load.
          if (getInstrIndex(OtherCand->Store) < getInstrIndex(Cand.Store))
            OtherCand = &Cand;
        } else
          OtherCand = nullptr;
      }
    }

    Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) {
      if (LoadToSingleCand[Cand.Load] != &Cand) {
        LLVM_DEBUG(
            dbgs() << "Removing from candidates: \n"
                   << Cand
                   << "  The load may have multiple stores forwarding to "
                   << "it\n");
        return true;
      }
      return false;
    });
  }

  /// Given two pointer operations, identified by their RuntimePointerChecking
  /// indices, return true if they require an alias check.
  ///
  /// We need a check if one is a pointer for a candidate load and the other is
  /// a pointer for a possibly intervening store.
  bool needsChecking(unsigned PtrIdx1, unsigned PtrIdx2,
                     const SmallPtrSetImpl<Value *> &PtrsWrittenOnFwdingPath,
                     const SmallPtrSetImpl<Value *> &CandLoadPtrs) {
    Value *Ptr1 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx1).PointerValue;
    Value *Ptr2 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx2).PointerValue;
    return ((PtrsWrittenOnFwdingPath.count(Ptr1) && CandLoadPtrs.count(Ptr2)) ||
            (PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1)));
  }

  /// Return pointers that are possibly written to on the path from a
  /// forwarding store to a load.
  ///
  /// These pointers need to be alias-checked against the forwarding candidates.
  SmallPtrSet<Value *, 4> findPointersWrittenOnForwardingPath(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {
    // From FirstStore to LastLoad neither of the elimination candidate loads
    // should overlap with any of the stores.
    //
    // E.g.:
    //
    // st1 C[i]
    // ld1 B[i] <-------,
    // ld0 A[i] <----,  |              * LastLoad
    // ...           |  |
    // st2 E[i]      |  |
    // st3 B[i+1] -- | -'              * FirstStore
    // st0 A[i+1] ---'
    // st4 D[i]
    //
    // st0 forwards to ld0 if the accesses in st4 and st1 don't overlap with
    // ld0.

    LoadInst *LastLoad =
        std::max_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Load) < getInstrIndex(B.Load);
                         })
            ->Load;
    StoreInst *FirstStore =
        std::min_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Store) <
                                  getInstrIndex(B.Store);
                         })
            ->Store;

    // We're looking for stores after the first forwarding store until the end
    // of the loop, then from the beginning of the loop until the last
    // forwarded-to load.  Collect the pointers of those stores.
    SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath;

    auto InsertStorePtr = [&](Instruction *I) {
      if (auto *S = dyn_cast<StoreInst>(I))
        PtrsWrittenOnFwdingPath.insert(S->getPointerOperand());
    };
    const auto &MemInstrs = LAI.getDepChecker().getMemoryInstructions();
    std::for_each(MemInstrs.begin() + getInstrIndex(FirstStore) + 1,
                  MemInstrs.end(), InsertStorePtr);
    std::for_each(MemInstrs.begin(), &MemInstrs[getInstrIndex(LastLoad)],
                  InsertStorePtr);

    return PtrsWrittenOnFwdingPath;
  }

  /// Determine the pointer alias checks to prove that there are no
  /// intervening stores.
  SmallVector<RuntimePointerCheck, 4> collectMemchecks(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {

    SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath =
        findPointersWrittenOnForwardingPath(Candidates);

    // Collect the pointers of the candidate loads.
    SmallPtrSet<Value *, 4> CandLoadPtrs;
    for (const auto &Candidate : Candidates)
      CandLoadPtrs.insert(Candidate.getLoadPtr());

    const auto &AllChecks = LAI.getRuntimePointerChecking()->getChecks();
    SmallVector<RuntimePointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerCheck &Check) {
              for (auto PtrIdx1 : Check.first->Members)
                for (auto PtrIdx2 : Check.second->Members)
                  if (needsChecking(PtrIdx1, PtrIdx2, PtrsWrittenOnFwdingPath,
                                    CandLoadPtrs))
                    return true;
              return false;
            });

    LLVM_DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size()
                      << "):\n");
    LLVM_DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));

    return Checks;
  }

  /// Perform the transformation for a candidate.
  void
  propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand,
                                  SCEVExpander &SEE) {
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    Value *Ptr = Cand.Load->getPointerOperand();
    auto *PtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(Ptr));
    auto *PH = L->getLoopPreheader();
    assert(PH && "Preheader should exist!");
    Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
                                          PH->getTerminator());
    Value *Initial = new LoadInst(
        Cand.Load->getType(), InitialPtr, "load_initial",
        /* isVolatile */ false, Cand.Load->getAlign(), PH->getTerminator());

    PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                   &L->getHeader()->front());
    PHI->addIncoming(Initial, PH);

    Type *LoadType = Initial->getType();
    Type *StoreType = Cand.Store->getValueOperand()->getType();
    auto &DL = Cand.Load->getParent()->getModule()->getDataLayout();
    (void)DL;

    assert(DL.getTypeSizeInBits(LoadType) == DL.getTypeSizeInBits(StoreType) &&
           "The type sizes should match!");

    Value *StoreValue = Cand.Store->getValueOperand();
    if (LoadType != StoreType)
      StoreValue = CastInst::CreateBitOrPointerCast(
          StoreValue, LoadType, "store_forward_cast", Cand.Store);

    PHI->addIncoming(StoreValue, L->getLoopLatch());

    Cand.Load->replaceAllUsesWith(PHI);
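    // The original load is now dead; it is intentionally left in place here
    // and cleaned up by later dead-code elimination.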
  }

  /// Top-level driver for each loop: find store->load forwarding
  /// candidates, add run-time checks and perform transformation.
  bool processLoop() {
    LLVM_DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
                      << "\" checking " << *L << "\n");

    // Look for store-to-load forwarding cases across the
    // backedge. E.g.:
    //
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    // First start with store->load dependences.
    auto StoreToLoadDependences = findStoreToLoadDependences(LAI);
    if (StoreToLoadDependences.empty())
      return false;

    // Generate an index for each load and store according to the original
    // program order.  This will be used later.
    InstOrder = LAI.getDepChecker().generateInstructionOrderMap();

    // To keep things simple for now, remove those where the load is potentially
    // fed by multiple stores.
    removeDependencesFromMultipleStores(StoreToLoadDependences);
    if (StoreToLoadDependences.empty())
      return false;

    // Filter the candidates further.
    SmallVector<StoreToLoadForwardingCandidate, 4> Candidates;
    for (const StoreToLoadForwardingCandidate &Cand : StoreToLoadDependences) {
      LLVM_DEBUG(dbgs() << "Candidate " << Cand);

      // Make sure that the stored value is available everywhere in the loop in
      // the next iteration.
      if (!doesStoreDominateAllLatches(Cand.Store->getParent(), L, DT))
        continue;

      // If the load is conditional we can't hoist its 0-iteration instance to
      // the preheader because that would make it unconditional.  Thus we would
      // access a memory location that the original loop did not access.
      if (isLoadConditional(Cand.Load, L))
        continue;

      // Check whether the SCEV difference is the same as the induction step,
      // thus we load the value in the next iteration.
      if (!Cand.isDependenceDistanceOfOne(PSE, L))
        continue;

      assert(isa<SCEVAddRecExpr>(PSE.getSCEV(Cand.Load->getPointerOperand())) &&
             "Loading from something other than indvar?");
      assert(
          isa<SCEVAddRecExpr>(PSE.getSCEV(Cand.Store->getPointerOperand())) &&
          "Storing to something other than indvar?");

      Candidates.push_back(Cand);
      LLVM_DEBUG(
          dbgs()
          << Candidates.size()
          << ". Valid store-to-load forwarding across the loop backedge\n");
    }
    if (Candidates.empty())
      return false;

    // Check intervening may-alias stores.  These need runtime checks for alias
    // disambiguation.
    SmallVector<RuntimePointerCheck, 4> Checks = collectMemchecks(Candidates);

    // Too many checks are likely to outweigh the benefits of forwarding.
    if (Checks.size() > Candidates.size() * CheckPerElim) {
      LLVM_DEBUG(dbgs() << "Too many run-time checks needed.\n");
      return false;
    }

    if (LAI.getPSE().getPredicate().getComplexity() >
        LoadElimSCEVCheckThreshold) {
      LLVM_DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
      return false;
    }

    if (!L->isLoopSimplifyForm()) {
      LLVM_DEBUG(dbgs() << "Loop is not in loop-simplify form\n");
      return false;
    }

    if (!Checks.empty() || !LAI.getPSE().getPredicate().isAlwaysTrue()) {
      if (LAI.hasConvergentOp()) {
        LLVM_DEBUG(dbgs() << "Versioning is needed but not allowed with "
                             "convergent calls\n");
        return false;
      }

      auto *HeaderBB = L->getHeader();
      auto *F = HeaderBB->getParent();
      bool OptForSize = F->hasOptSize() ||
                        llvm::shouldOptimizeForSize(HeaderBB, PSI, BFI,
                                                    PGSOQueryType::IRPass);
      if (OptForSize) {
        LLVM_DEBUG(
            dbgs() << "Versioning is needed but not allowed when optimizing "
                      "for size.\n");
        return false;
      }

      // Point of no-return, start the transformation.  First, version the loop
      // if necessary.

      LoopVersioning LV(LAI, Checks, L, LI, DT, PSE.getSE());
      LV.versionLoop();

      // After versioning, some of the candidates' pointers could stop being
      // SCEVAddRecs. We need to filter them out.
      auto NoLongerGoodCandidate = [this](
          const StoreToLoadForwardingCandidate &Cand) {
        return !isa<SCEVAddRecExpr>(
                    PSE.getSCEV(Cand.Load->getPointerOperand())) ||
               !isa<SCEVAddRecExpr>(
                    PSE.getSCEV(Cand.Store->getPointerOperand()));
      };
      llvm::erase_if(Candidates, NoLongerGoodCandidate);
    }

    // Next, propagate the value stored by the store to the users of the load.
    // Also for the first iteration, generate the initial value of the load.
    SCEVExpander SEE(*PSE.getSE(), L->getHeader()->getModule()->getDataLayout(),
                     "storeforward");
    for (const auto &Cand : Candidates)
      propagateStoredValueToLoadUsers(Cand, SEE);
    NumLoopLoadEliminated += Candidates.size();

    return true;
  }

private:
  Loop *L;

  /// Maps the load/store instructions to their index according to
  /// program order.
  DenseMap<Instruction *, unsigned> InstOrder;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo &LAI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;
  PredicatedScalarEvolution PSE;
};

} // end anonymous namespace

static bool eliminateLoadsAcrossLoops(Function &F, LoopInfo &LI,
                                      DominatorTree &DT,
                                      BlockFrequencyInfo *BFI,
                                      ProfileSummaryInfo *PSI,
                                      ScalarEvolution *SE, AssumptionCache *AC,
                                      LoopAccessInfoManager &LAIs) {
  // Build up a worklist of inner-loops to transform to avoid iterator
  // invalidation.
  // FIXME: This logic comes from other passes that actually change the loop
  // nest structure. It isn't clear this is necessary (or useful) for a pass
  // which merely optimizes the use of loads in a loop.
  SmallVector<Loop *, 8> Worklist;

  bool Changed = false;

  for (Loop *TopLevelLoop : LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      Changed |= simplifyLoop(L, &DT, &LI, SE, AC, /*MSSAU*/ nullptr, false);
      // We only handle inner-most loops.
      if (L->isInnermost())
        Worklist.push_back(L);
    }

  // Now walk the identified inner loops.
  for (Loop *L : Worklist) {
    // Match historical behavior
    if (!L->isRotatedForm() || !L->getExitingBlock())
      continue;
    // The actual work is performed by LoadEliminationForLoop.
    LoadEliminationForLoop LEL(L, &LI, LAIs.getInfo(*L), &DT, BFI, PSI);
    Changed |= LEL.processLoop();
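    // Once the IR has changed, drop the cached loop access info so that stale
    // results are not reused for subsequent loops.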
    if (Changed)
      LAIs.clear();
  }
  return Changed;
}

namespace {

/// The pass.  Most of the work is delegated to the per-loop
/// LoadEliminationForLoop class.
class LoopLoadElimination : public FunctionPass {
public:
  static char ID;

  LoopLoadElimination() : FunctionPass(ID) {
    initializeLoopLoadEliminationPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &LAIs = getAnalysis<LoopAccessLegacyAnalysis>().getLAIs();
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    auto *BFI = (PSI && PSI->hasProfileSummary()) ?
                &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
                nullptr;
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();

    // Process each loop nest in the function.
    return eliminateLoadsAcrossLoops(F, LI, DT, BFI, PSI, SE, /*AC*/ nullptr,
                                     LAIs);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopLoadElimination::ID;

static const char LLE_name[] = "Loop Load Elimination";

INITIALIZE_PASS_BEGIN(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_END(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)

FunctionPass *llvm::createLoopLoadEliminationPass() {
  return new LoopLoadElimination();
}

PreservedAnalyses LoopLoadEliminationPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  // There are no loops in the function. Return before computing other expensive
  // analyses.
  if (LI.empty())
    return PreservedAnalyses::all();
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  auto *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  auto *BFI = (PSI && PSI->hasProfileSummary()) ?
      &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;
  LoopAccessInfoManager &LAIs = AM.getResult<LoopAccessAnalysis>(F);

  bool Changed = eliminateLoadsAcrossLoops(F, LI, DT, BFI, PSI, &SE, &AC, LAIs);

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  return PA;
}
749