//===- LoopLoadElimination.cpp - Loop Load Elimination Pass ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a loop-aware load elimination pass.
//
// It uses LoopAccessAnalysis to identify loop-carried dependences with a
// distance of one between stores and loads.  These form the candidates for the
// transformation.  The source value of each store is then propagated to the
// users of the corresponding load.  This makes the load dead.
//
// The pass can also version the loop and add memchecks in order to prove that
// may-aliasing stores can't change the value in memory before it's read by the
// load.
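//
// For example (a sketch, assuming a simple C loop):
//
//   for (unsigned i = 0; i < n; i++)
//     A[i + 1] = A[i] + B[i];
//
// Here the load of A[i] reads the value stored to A[i + 1] on the previous
// iteration, so it can be replaced by a PHI of the forwarded store value,
// with A[0] loaded once in the preheader.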
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopLoadElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/SizeOpts.h"
#include <algorithm>
#include <cassert>
#include <forward_list>
#include <set>
#include <tuple>
#include <utility>

using namespace llvm;

#define LLE_OPTION "loop-load-elim"
#define DEBUG_TYPE LLE_OPTION

static cl::opt<unsigned> CheckPerElim(
    "runtime-check-per-loop-load-elim", cl::Hidden,
    cl::desc("Max number of memchecks allowed per eliminated load on average"),
    cl::init(1));

static cl::opt<unsigned> LoadElimSCEVCheckThreshold(
    "loop-load-elimination-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Load Elimination"));

STATISTIC(NumLoopLoadEliminated, "Number of loads eliminated by LLE");

namespace {

/// Represent a store-to-load forwarding candidate.
struct StoreToLoadForwardingCandidate {
  LoadInst *Load;
  StoreInst *Store;

  StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
      : Load(Load), Store(Store) {}

  /// Return true if the dependence from the store to the load has a
  /// distance of one.  E.g. A[i+1] = A[i]
  bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE,
                                 Loop *L) const {
    Value *LoadPtr = Load->getPointerOperand();
    Value *StorePtr = Store->getPointerOperand();
    Type *LoadType = getLoadStoreType(Load);

    assert(LoadPtr->getType()->getPointerAddressSpace() ==
               StorePtr->getType()->getPointerAddressSpace() &&
           LoadType == getLoadStoreType(Store) &&
           "Should be a known dependence");

    // Currently we only support accesses with unit stride.  FIXME: we should
    // be able to handle non-unit stride as well, as long as the stride is
    // equal to the dependence distance.
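    // (For example, A[2*i+2] = A[2*i] has stride 2 and a dependence distance
    // of 2.)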
    if (getPtrStride(PSE, LoadType, LoadPtr, L) != 1 ||
        getPtrStride(PSE, LoadType, StorePtr, L) != 1)
      return false;

    auto &DL = Load->getParent()->getModule()->getDataLayout();
    unsigned TypeByteSize = DL.getTypeAllocSize(const_cast<Type *>(LoadType));

    auto *LoadPtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(LoadPtr));
    auto *StorePtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(StorePtr));

    // We don't need to check non-wrapping here because forward/backward
    // dependence wouldn't be valid if these weren't monotonic accesses.
    auto *Dist = cast<SCEVConstant>(
        PSE.getSE()->getMinusSCEV(StorePtrSCEV, LoadPtrSCEV));
    const APInt &Val = Dist->getAPInt();
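    // A distance of exactly one element means that the store in iteration i
    // writes the location that the load reads in iteration i+1.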
    return Val == TypeByteSize;
  }

  Value *getLoadPtr() const { return Load->getPointerOperand(); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const StoreToLoadForwardingCandidate &Cand) {
    OS << *Cand.Store << " -->\n";
    OS.indent(2) << *Cand.Load << "\n";
    return OS;
  }
#endif
};

} // end anonymous namespace

/// Check if the store dominates all latches, so that, as long as there is no
/// intervening store, this value will be loaded in the next iteration.
static bool doesStoreDominateAllLatches(BasicBlock *StoreBlock, Loop *L,
                                        DominatorTree *DT) {
  SmallVector<BasicBlock *, 8> Latches;
  L->getLoopLatches(Latches);
  return llvm::all_of(Latches, [&](const BasicBlock *Latch) {
    return DT->dominates(StoreBlock, Latch);
  });
}

/// Return true if the load is not executed on all paths in the loop.
static bool isLoadConditional(LoadInst *Load, Loop *L) {
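  // Only the loop header is guaranteed to execute on every iteration.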
  return Load->getParent() != L->getHeader();
}

namespace {

/// The per-loop class that does most of the work.
class LoadEliminationForLoop {
public:
  LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI,
                         DominatorTree *DT, BlockFrequencyInfo *BFI,
                         ProfileSummaryInfo* PSI)
      : L(L), LI(LI), LAI(LAI), DT(DT), BFI(BFI), PSI(PSI), PSE(LAI.getPSE()) {}

  /// Look through the loop-carried and loop-independent dependences in
  /// this loop and find store->load dependences.
  ///
  /// Note that no candidate is returned if LAA has failed to analyze the loop
  /// (e.g. if it's not bottom-tested, contains volatile memops, etc.)
  std::forward_list<StoreToLoadForwardingCandidate>
  findStoreToLoadDependences(const LoopAccessInfo &LAI) {
    std::forward_list<StoreToLoadForwardingCandidate> Candidates;

    const auto *Deps = LAI.getDepChecker().getDependences();
    if (!Deps)
      return Candidates;

    // Find store->load dependences (which are consequently true dependences).
    // Both lexically forward and backward dependences qualify.  Disqualify
    // loads that have other unknown dependences.

    SmallPtrSet<Instruction *, 4> LoadsWithUnknownDependence;

    for (const auto &Dep : *Deps) {
      Instruction *Source = Dep.getSource(LAI);
      Instruction *Destination = Dep.getDestination(LAI);

      if (Dep.Type == MemoryDepChecker::Dependence::Unknown) {
        if (isa<LoadInst>(Source))
          LoadsWithUnknownDependence.insert(Source);
        if (isa<LoadInst>(Destination))
          LoadsWithUnknownDependence.insert(Destination);
        continue;
      }

      if (Dep.isBackward())
        // Note that the designations source and destination follow the program
        // order, i.e. source is always first.  (The direction is given by the
        // DepType.)
        std::swap(Source, Destination);
      else
        assert(Dep.isForward() && "Needs to be a forward dependence");

      auto *Store = dyn_cast<StoreInst>(Source);
      if (!Store)
        continue;
      auto *Load = dyn_cast<LoadInst>(Destination);
      if (!Load)
        continue;

      // Only propagate the value if the accesses have the same type.
      if (Store->getPointerOperandType() != Load->getPointerOperandType())
        continue;

      Candidates.emplace_front(Load, Store);
    }

    if (!LoadsWithUnknownDependence.empty())
      Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) {
        return LoadsWithUnknownDependence.count(C.Load);
      });

    return Candidates;
  }

  /// Return the index of the instruction according to program order.
  unsigned getInstrIndex(Instruction *Inst) {
    auto I = InstOrder.find(Inst);
    assert(I != InstOrder.end() && "No index for instruction");
    return I->second;
  }

  /// If a load has multiple candidates associated (i.e. different
  /// stores), it means that it could be forwarding from multiple stores
  /// depending on control flow.  Remove these candidates.
  ///
  /// Here, we rely on LAA to include the relevant loop-independent dependences.
  /// LAA is known to omit these in the very simple case when the read and the
  /// write within an alias set always take place using the *same* pointer.
  ///
  /// However, we know that this is not the case here, i.e. we can rely on LAA
  /// to provide us with loop-independent dependences for the cases we're
  /// interested in.  Consider for example the case where a loop-independent
  /// dependence S1->S2 invalidates the forwarding S3->S2.
  ///
  ///         A[i]   = ...   (S1)
  ///         ...    = A[i]  (S2)
  ///         A[i+1] = ...   (S3)
  ///
  /// LAA will perform dependence analysis here because there are two
  /// *different* pointers involved in the same alias set (&A[i] and &A[i+1]).
  void removeDependencesFromMultipleStores(
      std::forward_list<StoreToLoadForwardingCandidate> &Candidates) {
    // If Store is nullptr it means that we have multiple stores forwarding to
    // this load.
    using LoadToSingleCandT =
        DenseMap<LoadInst *, const StoreToLoadForwardingCandidate *>;
    LoadToSingleCandT LoadToSingleCand;

    for (const auto &Cand : Candidates) {
      bool NewElt;
      LoadToSingleCandT::iterator Iter;

      std::tie(Iter, NewElt) =
          LoadToSingleCand.insert(std::make_pair(Cand.Load, &Cand));
      if (!NewElt) {
        const StoreToLoadForwardingCandidate *&OtherCand = Iter->second;
        // Already multiple stores forward to this load.
        if (OtherCand == nullptr)
          continue;

        // Handle the very basic case when the two stores are in the same block
        // so deciding which one forwards is easy.  The later one forwards as
        // long as they both have a dependence distance of one to the load.
        if (Cand.Store->getParent() == OtherCand->Store->getParent() &&
            Cand.isDependenceDistanceOfOne(PSE, L) &&
            OtherCand->isDependenceDistanceOfOne(PSE, L)) {
          // They are in the same block, the later one will forward to the load.
          if (getInstrIndex(OtherCand->Store) < getInstrIndex(Cand.Store))
            OtherCand = &Cand;
        } else
          OtherCand = nullptr;
      }
    }

    Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) {
      if (LoadToSingleCand[Cand.Load] != &Cand) {
        LLVM_DEBUG(
            dbgs() << "Removing from candidates: \n"
                   << Cand
                   << "  The load may have multiple stores forwarding to "
                   << "it\n");
        return true;
      }
      return false;
    });
  }

  /// Given two pointer operations, identified by their RuntimePointerChecking
  /// indices, return true if they require an alias check.
  ///
  /// We need a check if one is a pointer for a candidate load and the other is
  /// a pointer for a possibly intervening store.
  bool needsChecking(unsigned PtrIdx1, unsigned PtrIdx2,
                     const SmallPtrSetImpl<Value *> &PtrsWrittenOnFwdingPath,
                     const SmallPtrSetImpl<Value *> &CandLoadPtrs) {
    Value *Ptr1 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx1).PointerValue;
    Value *Ptr2 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx2).PointerValue;
    return ((PtrsWrittenOnFwdingPath.count(Ptr1) && CandLoadPtrs.count(Ptr2)) ||
            (PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1)));
  }

  /// Return pointers that are possibly written to on the path from a
  /// forwarding store to a load.
  ///
  /// These pointers need to be alias-checked against the forwarding candidates.
  SmallPtrSet<Value *, 4> findPointersWrittenOnForwardingPath(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {
    // From FirstStore to LastLoad, none of the elimination candidate loads
    // should overlap with any of the stores.
    //
    // E.g.:
    //
    // st1 C[i]
    // ld1 B[i] <-------,
    // ld0 A[i] <----,  |              * LastLoad
    // ...           |  |
    // st2 E[i]      |  |
    // st3 B[i+1] -- | -'              * FirstStore
    // st0 A[i+1] ---'
    // st4 D[i]
    //
    // st0 forwards to ld0 if the accesses in st4 and st1 don't overlap with
    // ld0.

    LoadInst *LastLoad =
        std::max_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Load) < getInstrIndex(B.Load);
                         })
            ->Load;
    StoreInst *FirstStore =
        std::min_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Store) <
                                  getInstrIndex(B.Store);
                         })
            ->Store;

    // We're looking for stores after the first forwarding store until the end
    // of the loop, then from the beginning of the loop until the last
    // forwarded-to load.  Collect the pointers of those stores.
    SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath;

    auto InsertStorePtr = [&](Instruction *I) {
      if (auto *S = dyn_cast<StoreInst>(I))
        PtrsWrittenOnFwdingPath.insert(S->getPointerOperand());
    };
    const auto &MemInstrs = LAI.getDepChecker().getMemoryInstructions();
    std::for_each(MemInstrs.begin() + getInstrIndex(FirstStore) + 1,
                  MemInstrs.end(), InsertStorePtr);
    std::for_each(MemInstrs.begin(), &MemInstrs[getInstrIndex(LastLoad)],
                  InsertStorePtr);

    return PtrsWrittenOnFwdingPath;
  }

  /// Determine the pointer alias checks to prove that there are no
  /// intervening stores.
  SmallVector<RuntimePointerCheck, 4> collectMemchecks(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {

    SmallPtrSet<Value *, 4> PtrsWrittenOnFwdingPath =
        findPointersWrittenOnForwardingPath(Candidates);

    // Collect the pointers of the candidate loads.
    SmallPtrSet<Value *, 4> CandLoadPtrs;
    for (const auto &Candidate : Candidates)
      CandLoadPtrs.insert(Candidate.getLoadPtr());

    const auto &AllChecks = LAI.getRuntimePointerChecking()->getChecks();
    SmallVector<RuntimePointerCheck, 4> Checks;

    copy_if(AllChecks, std::back_inserter(Checks),
            [&](const RuntimePointerCheck &Check) {
              for (auto PtrIdx1 : Check.first->Members)
                for (auto PtrIdx2 : Check.second->Members)
                  if (needsChecking(PtrIdx1, PtrIdx2, PtrsWrittenOnFwdingPath,
                                    CandLoadPtrs))
                    return true;
              return false;
            });

    LLVM_DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size()
                      << "):\n");
    LLVM_DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));

    return Checks;
  }

  /// Perform the transformation for a candidate.
  void
  propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand,
                                  SCEVExpander &SEE) {
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    Value *Ptr = Cand.Load->getPointerOperand();
    auto *PtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(Ptr));
    auto *PH = L->getLoopPreheader();
    assert(PH && "Preheader should exist!");
    Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
                                          PH->getTerminator());
    Value *Initial = new LoadInst(
        Cand.Load->getType(), InitialPtr, "load_initial",
        /* isVolatile */ false, Cand.Load->getAlign(), PH->getTerminator());

    PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                   &L->getHeader()->front());
    PHI->addIncoming(Initial, PH);
    PHI->addIncoming(Cand.Store->getOperand(0), L->getLoopLatch());

    Cand.Load->replaceAllUsesWith(PHI);
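    // Cand.Load is now dead; its removal is left to later cleanup passes.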
  }

  /// Top-level driver for each loop: find store->load forwarding
  /// candidates, add run-time checks and perform transformation.
  bool processLoop() {
    LLVM_DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
                      << "\" checking " << *L << "\n");

    // Look for store-to-load forwarding cases across the
    // backedge. E.g.:
    //
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    // First start with store->load dependences.
    auto StoreToLoadDependences = findStoreToLoadDependences(LAI);
    if (StoreToLoadDependences.empty())
      return false;

    // Generate an index for each load and store according to the original
    // program order.  This will be used later.
    InstOrder = LAI.getDepChecker().generateInstructionOrderMap();

    // To keep things simple for now, remove those where the load is potentially
    // fed by multiple stores.
    removeDependencesFromMultipleStores(StoreToLoadDependences);
    if (StoreToLoadDependences.empty())
      return false;

    // Filter the candidates further.
    SmallVector<StoreToLoadForwardingCandidate, 4> Candidates;
    for (const StoreToLoadForwardingCandidate &Cand : StoreToLoadDependences) {
      LLVM_DEBUG(dbgs() << "Candidate " << Cand);

      // Make sure that the stored value is available everywhere in the loop in
      // the next iteration.
      if (!doesStoreDominateAllLatches(Cand.Store->getParent(), L, DT))
        continue;

      // If the load is conditional we can't hoist its 0-iteration instance to
      // the preheader because that would make it unconditional.  Thus we would
      // access a memory location that the original loop did not access.
      if (isLoadConditional(Cand.Load, L))
        continue;

      // Check whether the SCEV difference is the same as the induction step,
      // so the stored value is the one loaded in the next iteration.
      if (!Cand.isDependenceDistanceOfOne(PSE, L))
        continue;

      assert(isa<SCEVAddRecExpr>(PSE.getSCEV(Cand.Load->getPointerOperand())) &&
             "Loading from something other than indvar?");
      assert(
          isa<SCEVAddRecExpr>(PSE.getSCEV(Cand.Store->getPointerOperand())) &&
          "Storing to something other than indvar?");

      Candidates.push_back(Cand);
      LLVM_DEBUG(
          dbgs()
          << Candidates.size()
          << ". Valid store-to-load forwarding across the loop backedge\n");
    }
    if (Candidates.empty())
      return false;

    // Check intervening may-alias stores.  These need runtime checks for alias
    // disambiguation.
    SmallVector<RuntimePointerCheck, 4> Checks = collectMemchecks(Candidates);

    // Too many checks are likely to outweigh the benefits of forwarding.
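    // (With the default CheckPerElim of 1, this allows at most one memcheck
    // per eliminated load on average.)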
    if (Checks.size() > Candidates.size() * CheckPerElim) {
      LLVM_DEBUG(dbgs() << "Too many run-time checks needed.\n");
      return false;
    }

    if (LAI.getPSE().getUnionPredicate().getComplexity() >
        LoadElimSCEVCheckThreshold) {
      LLVM_DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
      return false;
    }

    if (!L->isLoopSimplifyForm()) {
      LLVM_DEBUG(dbgs() << "Loop is not in loop-simplify form\n");
      return false;
    }

    if (!Checks.empty() || !LAI.getPSE().getUnionPredicate().isAlwaysTrue()) {
      if (LAI.hasConvergentOp()) {
        LLVM_DEBUG(dbgs() << "Versioning is needed but not allowed with "
                             "convergent calls\n");
        return false;
      }

      auto *HeaderBB = L->getHeader();
      auto *F = HeaderBB->getParent();
      bool OptForSize = F->hasOptSize() ||
                        llvm::shouldOptimizeForSize(HeaderBB, PSI, BFI,
                                                    PGSOQueryType::IRPass);
      if (OptForSize) {
        LLVM_DEBUG(
            dbgs() << "Versioning is needed but not allowed when optimizing "
                      "for size.\n");
        return false;
      }

      // Point of no return, start the transformation.  First, version the loop
      // if necessary.

      LoopVersioning LV(LAI, Checks, L, LI, DT, PSE.getSE());
      LV.versionLoop();
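      // versionLoop() emits the collected runtime checks and clones the loop;
      // the transformed version only executes when the checks pass.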

      // After versioning, some of the candidates' pointers could stop being
      // SCEVAddRecs. We need to filter them out.
      auto NoLongerGoodCandidate = [this](
          const StoreToLoadForwardingCandidate &Cand) {
        return !isa<SCEVAddRecExpr>(
                    PSE.getSCEV(Cand.Load->getPointerOperand())) ||
               !isa<SCEVAddRecExpr>(
                    PSE.getSCEV(Cand.Store->getPointerOperand()));
      };
      llvm::erase_if(Candidates, NoLongerGoodCandidate);
    }

    // Next, propagate the value stored by the store to the users of the load.
    // Also for the first iteration, generate the initial value of the load.
    SCEVExpander SEE(*PSE.getSE(), L->getHeader()->getModule()->getDataLayout(),
                     "storeforward");
    for (const auto &Cand : Candidates)
      propagateStoredValueToLoadUsers(Cand, SEE);
    NumLoopLoadEliminated += Candidates.size();

    return true;
  }

private:
  Loop *L;

  /// Maps the load/store instructions to their index according to
  /// program order.
  DenseMap<Instruction *, unsigned> InstOrder;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo &LAI;
  DominatorTree *DT;
  BlockFrequencyInfo *BFI;
  ProfileSummaryInfo *PSI;
  PredicatedScalarEvolution PSE;
};

} // end anonymous namespace

static bool
eliminateLoadsAcrossLoops(Function &F, LoopInfo &LI, DominatorTree &DT,
                          BlockFrequencyInfo *BFI, ProfileSummaryInfo *PSI,
                          ScalarEvolution *SE, AssumptionCache *AC,
                          function_ref<const LoopAccessInfo &(Loop &)> GetLAI) {
  // Build up a worklist of inner-loops to transform to avoid iterator
  // invalidation.
  // FIXME: This logic comes from other passes that actually change the loop
  // nest structure. It isn't clear this is necessary (or useful) for a pass
  // which merely optimizes the use of loads in a loop.
  SmallVector<Loop *, 8> Worklist;

  bool Changed = false;

  for (Loop *TopLevelLoop : LI)
    for (Loop *L : depth_first(TopLevelLoop)) {
      Changed |= simplifyLoop(L, &DT, &LI, SE, AC, /*MSSAU*/ nullptr, false);
      // We only handle inner-most loops.
      if (L->isInnermost())
        Worklist.push_back(L);
    }

  // Now walk the identified inner loops.
  for (Loop *L : Worklist) {
    // Match historical behavior
    if (!L->isRotatedForm() || !L->getExitingBlock())
      continue;
    // The actual work is performed by LoadEliminationForLoop.
    LoadEliminationForLoop LEL(L, &LI, GetLAI(*L), &DT, BFI, PSI);
    Changed |= LEL.processLoop();
  }
  return Changed;
}

namespace {

/// The pass.  Most of the work is delegated to the per-loop
/// LoadEliminationForLoop class.
class LoopLoadElimination : public FunctionPass {
public:
  static char ID;

  LoopLoadElimination() : FunctionPass(ID) {
    initializeLoopLoadEliminationPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto &LAA = getAnalysis<LoopAccessLegacyAnalysis>();
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto *PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
    auto *BFI = (PSI && PSI->hasProfileSummary()) ?
                &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI() :
                nullptr;
    auto *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();

    // Process each loop nest in the function.
    return eliminateLoadsAcrossLoops(
        F, LI, DT, BFI, PSI, SE, /*AC*/ nullptr,
        [&LAA](Loop &L) -> const LoopAccessInfo & { return LAA.getInfo(&L); });
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequiredID(LoopSimplifyID);
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessLegacyAnalysis>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char LoopLoadElimination::ID;

static const char LLE_name[] = "Loop Load Elimination";

INITIALIZE_PASS_BEGIN(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessLegacyAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(ProfileSummaryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBlockFrequencyInfoPass)
INITIALIZE_PASS_END(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)

FunctionPass *llvm::createLoopLoadEliminationPass() {
  return new LoopLoadElimination();
}

PreservedAnalyses LoopLoadEliminationPass::run(Function &F,
                                               FunctionAnalysisManager &AM) {
  auto &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
  auto *PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
  auto *BFI = (PSI && PSI->hasProfileSummary()) ?
      &AM.getResult<BlockFrequencyAnalysis>(F) : nullptr;

  auto &LAM = AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
  bool Changed = eliminateLoadsAcrossLoops(
      F, LI, DT, BFI, PSI, &SE, &AC, [&](Loop &L) -> const LoopAccessInfo & {
        LoopStandardAnalysisResults AR = {AA,  AC,  DT,      LI,      SE,
                                          TLI, TTI, nullptr, nullptr, nullptr};
        return LAM.getResult<LoopAccessAnalysis>(L, AR);
      });

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  return PA;
}