1 //===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs loop invariant code motion, attempting to remove as much
10 // code from the body of a loop as possible.  It does this by either hoisting
11 // code into the preheader block, or by sinking code to the exit blocks if it is
12 // safe.  This pass also promotes must-aliased memory locations in the loop to
13 // live in registers, thus hoisting and sinking "invariant" loads and stores.
14 //
15 // Hoisting operations out of loops is a canonicalization transform.  It
16 // enables and simplifies subsequent optimizations in the middle-end.
17 // Rematerialization of hoisted instructions to reduce register pressure is the
18 // responsibility of the back-end, which has more accurate information about
19 // register pressure and also handles other optimizations than LICM that
20 // increase live-ranges.
21 //
22 // This pass uses alias analysis for two purposes:
23 //
24 //  1. Moving loop invariant loads and calls out of loops.  If we can determine
25 //     that a load or call inside of a loop never aliases anything stored to,
26 //     we can hoist it or sink it like any other instruction.
27 //  2. Scalar Promotion of Memory - If there is a store instruction inside of
28 //     the loop, we try to move the store to happen AFTER the loop instead of
29 //     inside of the loop.  This can only happen if a few conditions are true:
30 //       A. The pointer stored through is loop invariant
31 //       B. There are no stores or loads in the loop which _may_ alias the
32 //          pointer.  There are no calls in the loop which mod/ref the pointer.
33 //     If these conditions are true, we can promote the loads and stores in the
34 //     loop of the pointer to use a temporary alloca'd variable.  We then use
35 //     the SSAUpdater to construct the appropriate SSA form for the value.
36 //
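// As a purely illustrative sketch (not drawn from this file), given a loop
// such as
//
//   for (unsigned i = 0; i != n; ++i)
//     out[i] = a * b;            // 'a * b' is loop invariant
//
// hoisting moves the multiply into the preheader so it executes only once:
//
//   unsigned tmp = a * b;        // hoisted by LICM
//   for (unsigned i = 0; i != n; ++i)
//     out[i] = tmp;
//
// Likewise, when the promotion conditions above hold, scalar promotion turns
// a loop that repeatedly loads from and stores to an invariant pointer 'p'
//
//   for (unsigned i = 0; i != n; ++i)
//     *p += in[i];
//
// into a register accumulation with a single load before the loop and a
// single store after it:
//
//   unsigned tmp = *p;
//   for (unsigned i = 0; i != n; ++i)
//     tmp += in[i];
//   *p = tmp;
//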
37 //===----------------------------------------------------------------------===//
38 
39 #include "llvm/Transforms/Scalar/LICM.h"
40 #include "llvm/ADT/SetOperations.h"
41 #include "llvm/ADT/Statistic.h"
42 #include "llvm/Analysis/AliasAnalysis.h"
43 #include "llvm/Analysis/AliasSetTracker.h"
44 #include "llvm/Analysis/BasicAliasAnalysis.h"
45 #include "llvm/Analysis/BlockFrequencyInfo.h"
46 #include "llvm/Analysis/CaptureTracking.h"
47 #include "llvm/Analysis/ConstantFolding.h"
48 #include "llvm/Analysis/GlobalsModRef.h"
49 #include "llvm/Analysis/GuardUtils.h"
50 #include "llvm/Analysis/LazyBlockFrequencyInfo.h"
51 #include "llvm/Analysis/Loads.h"
52 #include "llvm/Analysis/LoopInfo.h"
53 #include "llvm/Analysis/LoopIterator.h"
54 #include "llvm/Analysis/LoopPass.h"
55 #include "llvm/Analysis/MemoryBuiltins.h"
56 #include "llvm/Analysis/MemorySSA.h"
57 #include "llvm/Analysis/MemorySSAUpdater.h"
58 #include "llvm/Analysis/MustExecute.h"
59 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
60 #include "llvm/Analysis/ScalarEvolution.h"
61 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
62 #include "llvm/Analysis/TargetLibraryInfo.h"
63 #include "llvm/Analysis/ValueTracking.h"
64 #include "llvm/IR/CFG.h"
65 #include "llvm/IR/Constants.h"
66 #include "llvm/IR/DataLayout.h"
67 #include "llvm/IR/DebugInfoMetadata.h"
68 #include "llvm/IR/DerivedTypes.h"
69 #include "llvm/IR/Dominators.h"
70 #include "llvm/IR/Instructions.h"
71 #include "llvm/IR/IntrinsicInst.h"
72 #include "llvm/IR/LLVMContext.h"
73 #include "llvm/IR/Metadata.h"
74 #include "llvm/IR/PatternMatch.h"
75 #include "llvm/IR/PredIteratorCache.h"
76 #include "llvm/InitializePasses.h"
77 #include "llvm/Support/CommandLine.h"
78 #include "llvm/Support/Debug.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Scalar.h"
81 #include "llvm/Transforms/Scalar/LoopPassManager.h"
82 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
83 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
84 #include "llvm/Transforms/Utils/Local.h"
85 #include "llvm/Transforms/Utils/LoopUtils.h"
86 #include "llvm/Transforms/Utils/SSAUpdater.h"
87 #include <algorithm>
88 #include <utility>
89 using namespace llvm;
90 
91 #define DEBUG_TYPE "licm"
92 
93 STATISTIC(NumCreatedBlocks, "Number of blocks created");
94 STATISTIC(NumClonedBranches, "Number of branches cloned");
95 STATISTIC(NumSunk, "Number of instructions sunk out of loop");
96 STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
97 STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
98 STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
99 STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
100 
101 /// Memory promotion is enabled by default.
102 static cl::opt<bool>
103     DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
104                      cl::desc("Disable memory promotion in LICM pass"));
105 
106 static cl::opt<bool> ControlFlowHoisting(
107     "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
108     cl::desc("Enable control flow (and PHI) hoisting in LICM"));
109 
110 static cl::opt<unsigned> HoistSinkColdnessThreshold(
111     "licm-coldness-threshold", cl::Hidden, cl::init(4),
112     cl::desc("Relative coldness threshold of hoisting/sinking destination "
113              "block for LICM to be considered beneficial"));
114 
115 static cl::opt<uint32_t> MaxNumUsesTraversed(
116     "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
117     cl::desc("Max num uses visited for identifying load "
118              "invariance in loop using invariant start (default = 8)"));
119 
120 // Experimental option to allow imprecision in LICM in pathological cases, in
121 // exchange for faster compile. This is to be removed if MemorySSA starts to
122 // address the same issue. This flag applies only when LICM uses MemorySSA
123 // instead of AliasSetTracker. LICM calls MemorySSAWalker's
124 // getClobberingMemoryAccess, up to the value of the Cap, getting perfect
125 // accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
126 // which may not be precise, since optimizeUses is capped. The result is
127 // correct, but we may not get as "far up" as possible to get which access is
128 // clobbering the one queried.
129 cl::opt<unsigned> llvm::SetLicmMssaOptCap(
130     "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
131     cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
132              "for faster compile. Caps the MemorySSA clobbering calls."));
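
// An illustrative invocation (new pass manager syntax; the input file name is
// made up):
//
//   opt -passes='loop-mssa(licm)' -licm-mssa-optimization-cap=200 in.ll -S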
133 
134 // Experimentally, memory promotion carries less importance than sinking and
135 // hoisting. Limit when we do promotion when using MemorySSA, in order to save
136 // compile time.
137 cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
138     "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
139     cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
140              "effect. When MSSA in LICM is enabled, then this is the maximum "
141              "number of accesses allowed to be present in a loop in order to "
142              "enable memory promotion."));
143 
144 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
145 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
146                                   const LoopSafetyInfo *SafetyInfo,
147                                   TargetTransformInfo *TTI, bool &FreeInLoop,
148                                   bool LoopNestMode);
149 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
150                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
151                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
152                   OptimizationRemarkEmitter *ORE);
153 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
154                  BlockFrequencyInfo *BFI, const Loop *CurLoop,
155                  ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
156                  OptimizationRemarkEmitter *ORE);
157 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
158                                            const DominatorTree *DT,
159                                            const TargetLibraryInfo *TLI,
160                                            const Loop *CurLoop,
161                                            const LoopSafetyInfo *SafetyInfo,
162                                            OptimizationRemarkEmitter *ORE,
163                                            const Instruction *CtxI = nullptr);
164 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
165                                      AliasSetTracker *CurAST, Loop *CurLoop,
166                                      AAResults *AA);
167 static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
168                                              Loop *CurLoop, Instruction &I,
169                                              SinkAndHoistLICMFlags &Flags);
170 static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
171                                               MemoryUse &MU);
172 static Instruction *cloneInstructionInExitBlock(
173     Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
174     const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
175 
176 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
177                              MemorySSAUpdater *MSSAU);
178 
179 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
180                                   ICFLoopSafetyInfo &SafetyInfo,
181                                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
182 
183 static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
184                                 function_ref<void(Instruction *)> Fn);
185 static SmallVector<SmallSetVector<Value *, 8>, 0>
186 collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);
187 
188 namespace {
189 struct LoopInvariantCodeMotion {
190   bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
191                  BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
192                  TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
193                  OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
194 
195   LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
196                           unsigned LicmMssaNoAccForPromotionCap)
197       : LicmMssaOptCap(LicmMssaOptCap),
198         LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}
199 
200 private:
201   unsigned LicmMssaOptCap;
202   unsigned LicmMssaNoAccForPromotionCap;
203 };
204 
205 struct LegacyLICMPass : public LoopPass {
206   static char ID; // Pass identification, replacement for typeid
207   LegacyLICMPass(
208       unsigned LicmMssaOptCap = SetLicmMssaOptCap,
209       unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
210       : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
211     initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
212   }
213 
214   bool runOnLoop(Loop *L, LPPassManager &LPM) override {
215     if (skipLoop(L))
216       return false;
217 
218     LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
219                       << L->getHeader()->getNameOrAsOperand() << "\n");
220 
221     auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
222     MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
223     bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
224     BlockFrequencyInfo *BFI =
225         hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
226                        : nullptr;
227     // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
228     // pass. Function analyses need to be preserved across loop transformations
229     // but ORE cannot be preserved (see comment before the pass definition).
230     OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
231     return LICM.runOnLoop(
232         L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
233         &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
234         &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
235         &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
236             *L->getHeader()->getParent()),
237         &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
238             *L->getHeader()->getParent()),
239         SE ? &SE->getSE() : nullptr, MSSA, &ORE);
240   }
241 
242   /// This transformation requires natural loop information & requires that
243   /// loop preheaders be inserted into the CFG...
244   ///
245   void getAnalysisUsage(AnalysisUsage &AU) const override {
246     AU.addPreserved<DominatorTreeWrapperPass>();
247     AU.addPreserved<LoopInfoWrapperPass>();
248     AU.addRequired<TargetLibraryInfoWrapperPass>();
249     AU.addRequired<MemorySSAWrapperPass>();
250     AU.addPreserved<MemorySSAWrapperPass>();
251     AU.addRequired<TargetTransformInfoWrapperPass>();
252     getLoopAnalysisUsage(AU);
253     LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
254     AU.addPreserved<LazyBlockFrequencyInfoPass>();
255     AU.addPreserved<LazyBranchProbabilityInfoPass>();
256   }
257 
258 private:
259   LoopInvariantCodeMotion LICM;
260 };
261 } // namespace
262 
263 PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
264                                 LoopStandardAnalysisResults &AR, LPMUpdater &) {
265   if (!AR.MSSA)
266     report_fatal_error("LICM requires MemorySSA (loop-mssa)");
267 
268   // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
269   // pass.  Function analyses need to be preserved across loop transformations
270   // but ORE cannot be preserved (see comment before the pass definition).
271   OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
272 
273   LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
274   if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
275                       &AR.SE, AR.MSSA, &ORE))
276     return PreservedAnalyses::all();
277 
278   auto PA = getLoopPassPreservedAnalyses();
279 
280   PA.preserve<DominatorTreeAnalysis>();
281   PA.preserve<LoopAnalysis>();
282   PA.preserve<MemorySSAAnalysis>();
283 
284   return PA;
285 }
286 
287 PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
288                                  LoopStandardAnalysisResults &AR,
289                                  LPMUpdater &) {
290   if (!AR.MSSA)
291     report_fatal_error("LNICM requires MemorySSA (loop-mssa)");
292 
293   // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
294   // pass.  Function analyses need to be preserved across loop transformations
295   // but ORE cannot be preserved (see comment before the pass definition).
296   OptimizationRemarkEmitter ORE(LN.getParent());
297 
298   LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
299 
300   Loop &OutermostLoop = LN.getOutermostLoop();
301   bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, AR.BFI,
302                                 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
303 
304   if (!Changed)
305     return PreservedAnalyses::all();
306 
307   auto PA = getLoopPassPreservedAnalyses();
308 
309   PA.preserve<DominatorTreeAnalysis>();
310   PA.preserve<LoopAnalysis>();
311   PA.preserve<MemorySSAAnalysis>();
312 
313   return PA;
314 }
315 
316 char LegacyLICMPass::ID = 0;
317 INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
318                       false, false)
319 INITIALIZE_PASS_DEPENDENCY(LoopPass)
320 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
321 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
322 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
323 INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
324 INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
325                     false)
326 
327 Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
328 Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
329                            unsigned LicmMssaNoAccForPromotionCap) {
330   return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
331 }
332 
333 llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
334                                                    MemorySSA *MSSA)
335     : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
336                             IsSink, L, MSSA) {}
337 
338 llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
339     unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
340     Loop *L, MemorySSA *MSSA)
341     : LicmMssaOptCap(LicmMssaOptCap),
342       LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
343       IsSink(IsSink) {
344   assert(((L != nullptr) == (MSSA != nullptr)) &&
345          "Unexpected values for SinkAndHoistLICMFlags");
346   if (!MSSA)
347     return;
348 
349   unsigned AccessCapCount = 0;
350   for (auto *BB : L->getBlocks())
351     if (const auto *Accesses = MSSA->getBlockAccesses(BB))
352       for (const auto &MA : *Accesses) {
353         (void)MA;
354         ++AccessCapCount;
355         if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
356           NoOfMemAccTooLarge = true;
357           return;
358         }
359       }
360 }
361 
362 /// Hoist expressions out of the specified loop. Note that alias info for the
363 /// inner loop is not preserved, so it is not a good idea to run LICM multiple
364 /// times on one loop.
365 bool LoopInvariantCodeMotion::runOnLoop(
366     Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
367     BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
368     ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
369     bool LoopNestMode) {
370   bool Changed = false;
371 
372   assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
373 
374   // If this loop has metadata indicating that LICM is not to be performed then
375   // just exit.
376   if (hasDisableLICMTransformsHint(L)) {
377     return false;
378   }
379 
380   // Don't sink stores from loops with coroutine suspend instructions.
381   // LICM would sink instructions into the default destination of
382   // the coroutine switch. The default destination of the switch is to
383   // handle the case where the coroutine is suspended, by which point the
384   // coroutine frame may have been destroyed. No instruction can be sunk there.
385   // FIXME: This would unfortunately hurt the performance of coroutines; however,
386   // there is currently no general solution for this. Similar issues could also
387   // potentially happen in other passes where instructions are being moved
388   // across that edge.
389   bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
390     return llvm::any_of(*BB, [](Instruction &I) {
391       IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
392       return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
393     });
394   });
395 
396   MemorySSAUpdater MSSAU(MSSA);
397   SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
398                               /*IsSink=*/true, L, MSSA);
399 
400   // Get the preheader block to move instructions into...
401   BasicBlock *Preheader = L->getLoopPreheader();
402 
403   // Compute loop safety information.
404   ICFLoopSafetyInfo SafetyInfo;
405   SafetyInfo.computeLoopSafetyInfo(L);
406 
407   // We want to visit all of the instructions in this loop... that are not part
408   // of our subloops (they have already had their invariants hoisted out of
409   // their loop, into this loop, so there is no need to process the BODIES of
410   // the subloops).
411   //
412   // Traverse the body of the loop in depth first order on the dominator tree so
413   // that we are guaranteed to see definitions before we see uses.  This allows
414   // us to sink instructions in one pass, without iteration.  After sinking
415   // instructions, we perform another pass to hoist them out of the loop.
416   if (L->hasDedicatedExits())
417     Changed |= LoopNestMode
418                    ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI,
419                                            DT, BFI, TLI, TTI, L, &MSSAU,
420                                            &SafetyInfo, Flags, ORE)
421                    : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI,
422                                 TLI, TTI, L, &MSSAU, &SafetyInfo, Flags, ORE);
423   Flags.setIsSink(false);
424   if (Preheader)
425     Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
426                            &MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode);
427 
428   // Now that all loop invariants have been removed from the loop, promote any
429   // memory references to scalars that we can.
430   // Don't sink stores from loops without dedicated block exits. Exits
431   // containing indirect branches are not transformed by loop simplify,
432   // so make sure we catch that. An additional load may be generated in the
433   // preheader for the SSA updater, so also avoid sinking when no preheader
434   // is available.
435   if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
436       !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
437     // Figure out the loop exits and their insertion points
438     SmallVector<BasicBlock *, 8> ExitBlocks;
439     L->getUniqueExitBlocks(ExitBlocks);
440 
441     // We can't insert into a catchswitch.
442     bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
443       return isa<CatchSwitchInst>(Exit->getTerminator());
444     });
445 
446     if (!HasCatchSwitch) {
447       SmallVector<Instruction *, 8> InsertPts;
448       SmallVector<MemoryAccess *, 8> MSSAInsertPts;
449       InsertPts.reserve(ExitBlocks.size());
450       MSSAInsertPts.reserve(ExitBlocks.size());
451       for (BasicBlock *ExitBlock : ExitBlocks) {
452         InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
453         MSSAInsertPts.push_back(nullptr);
454       }
455 
456       PredIteratorCache PIC;
457 
458       // Promoting one set of accesses may make the pointers for another set
459       // loop invariant, so run this in a loop (with the MaybePromotable set
460       // decreasing in size over time).
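      // Illustrative example: if the loop repeatedly loads a pointer 'q' from
      // an invariant location '*pp' and stores through 'q', promoting '*pp'
      // to a register makes 'q' loop invariant, which can in turn make the
      // accesses through 'q' promotable on a subsequent round.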
461       bool Promoted = false;
462       bool LocalPromoted;
463       do {
464         LocalPromoted = false;
465         for (const SmallSetVector<Value *, 8> &PointerMustAliases :
466              collectPromotionCandidates(MSSA, AA, L)) {
467           LocalPromoted |= promoteLoopAccessesToScalars(
468               PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC,
469               LI, DT, TLI, L, &MSSAU, &SafetyInfo, ORE);
470         }
471         Promoted |= LocalPromoted;
472       } while (LocalPromoted);
473 
474       // Once we have promoted values across the loop body we have to
475       // recursively reform LCSSA as any nested loop may now have values defined
476       // within the loop used in the outer loop.
477       // FIXME: This is really heavy handed. It would be a bit better to use an
478       // SSAUpdater strategy during promotion that was LCSSA aware and reformed
479       // it as it went.
480       if (Promoted)
481         formLCSSARecursively(*L, *DT, LI, SE);
482 
483       Changed |= Promoted;
484     }
485   }
486 
487   // Check that neither this loop nor its parent have had LCSSA broken. LICM is
488   // specifically moving instructions across the loop boundary and so it is
489   // especially in need of sanity checking here.
490   assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
491   assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
492          "Parent loop not left in LCSSA form after LICM!");
493 
494   if (VerifyMemorySSA)
495     MSSA->verifyMemorySSA();
496 
497   if (Changed && SE)
498     SE->forgetLoopDispositions(L);
499   return Changed;
500 }
501 
502 /// Walk the specified region of the CFG (defined by all blocks dominated by
503 /// the specified block, and that are in the current loop) in reverse depth
504 /// first order w.r.t the DominatorTree.  This allows us to visit uses before
505 /// definitions, allowing us to sink a loop body in one pass without iteration.
506 ///
507 bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
508                       DominatorTree *DT, BlockFrequencyInfo *BFI,
509                       TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
510                       Loop *CurLoop, MemorySSAUpdater *MSSAU,
511                       ICFLoopSafetyInfo *SafetyInfo,
512                       SinkAndHoistLICMFlags &Flags,
513                       OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
514 
515   // Verify inputs.
516   assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
517          CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
518          "Unexpected input to sinkRegion.");
519 
520   // We want to visit children before parents. We will enqueue all the parents
521   // before their children in the worklist and process the worklist in reverse
522   // order.
523   SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
524 
525   bool Changed = false;
526   for (DomTreeNode *DTN : reverse(Worklist)) {
527     BasicBlock *BB = DTN->getBlock();
528     // Only need to process the contents of this block if it is not part of a
529     // subloop (which would already have been processed).
530     if (inSubLoop(BB, CurLoop, LI))
531       continue;
532 
533     for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
534       Instruction &I = *--II;
535 
536       // The instruction is not used in the loop if it is dead.  In this case,
537       // we just delete it instead of sinking it.
538       if (isInstructionTriviallyDead(&I, TLI)) {
539         LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
540         salvageKnowledge(&I);
541         salvageDebugInfo(I);
542         ++II;
543         eraseInstruction(I, *SafetyInfo, MSSAU);
544         Changed = true;
545         continue;
546       }
547 
548       // Check to see if we can sink this instruction to the exit blocks
549       // of the loop.  We can do this if all of the users of the instruction are
550       // outside of the loop.  In this case, it doesn't even matter if the
551       // operands of the instruction are loop invariant.
552       //
553       bool FreeInLoop = false;
554       bool LoopNestMode = OutermostLoop != nullptr;
555       if (!I.mayHaveSideEffects() &&
556           isNotUsedOrFreeInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
557                                 SafetyInfo, TTI, FreeInLoop, LoopNestMode) &&
558           canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/nullptr, MSSAU, true,
559                              &Flags, ORE)) {
560         if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
561           if (!FreeInLoop) {
562             ++II;
563             salvageDebugInfo(I);
564             eraseInstruction(I, *SafetyInfo, MSSAU);
565           }
566           Changed = true;
567         }
568       }
569     }
570   }
571   if (VerifyMemorySSA)
572     MSSAU->getMemorySSA()->verifyMemorySSA();
573   return Changed;
574 }
575 
576 bool llvm::sinkRegionForLoopNest(
577     DomTreeNode *N, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
578     BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
579     Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
580     SinkAndHoistLICMFlags &Flags, OptimizationRemarkEmitter *ORE) {
581 
582   bool Changed = false;
583   SmallPriorityWorklist<Loop *, 4> Worklist;
584   Worklist.insert(CurLoop);
585   appendLoopsToWorklist(*CurLoop, Worklist);
586   while (!Worklist.empty()) {
587     Loop *L = Worklist.pop_back_val();
588     Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI,
589                           TTI, L, MSSAU, SafetyInfo, Flags, ORE, CurLoop);
590   }
591   return Changed;
592 }
593 
594 namespace {
595 // This is a helper class for hoistRegion that allows it to hoist control flow
596 // so that it can hoist phis. The way this works is that we initially start
597 // hoisting to the loop preheader, and when we see a loop invariant branch
598 // we make note of this. When we then come to hoist an instruction that's
599 // conditional on such a branch we duplicate the branch and the relevant control
600 // flow, then hoist the instruction into the block corresponding to its original
601 // block in the duplicated control flow.
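//
// For example (an illustrative sketch, not code from this pass): given a loop
// invariant condition 'c' and loop invariant values 'x' and 'y',
//
//   loop:
//     v = c ? x : y;    // lowered as a branch on 'c' plus a phi of x and y
//     use(v);
//
// the branch on 'c', its two arms and the phi can all be duplicated in front
// of the loop, so that 'v' is computed once in what becomes the new preheader.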
602 class ControlFlowHoister {
603 private:
604   // Information about the loop we are hoisting from
605   LoopInfo *LI;
606   DominatorTree *DT;
607   Loop *CurLoop;
608   MemorySSAUpdater *MSSAU;
609 
610   // A map of blocks in the loop to the block their instructions will be hoisted
611   // to.
612   DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
613 
614   // The branches that we can hoist, mapped to the block that marks a
615   // convergence point of their control flow.
616   DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
617 
618 public:
619   ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
620                      MemorySSAUpdater *MSSAU)
621       : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
622 
623   void registerPossiblyHoistableBranch(BranchInst *BI) {
624     // We can only hoist conditional branches with loop invariant operands.
625     if (!ControlFlowHoisting || !BI->isConditional() ||
626         !CurLoop->hasLoopInvariantOperands(BI))
627       return;
628 
629     // The branch destinations need to be in the loop, and we don't gain
630     // anything by duplicating conditional branches with duplicate successors,
631     // as it's essentially the same as an unconditional branch.
632     BasicBlock *TrueDest = BI->getSuccessor(0);
633     BasicBlock *FalseDest = BI->getSuccessor(1);
634     if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
635         TrueDest == FalseDest)
636       return;
637 
638     // We can hoist BI if one branch destination is the successor of the other,
639     // or both have a common successor, which we check by seeing if the
640     // intersection of their successors is non-empty.
641     // TODO: This could be expanded to allowing branches where both ends
642     // eventually converge to a single block.
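    //
    // Illustrative shapes (BB = the block containing BI, S = common successor):
    //
    //   Triangle:  BB -> TrueDest -> S        Diamond:  BB -> TrueDest  -> S
    //              BB -------------> S                  BB -> FalseDest -> S
    //
    // In the triangle, S is FalseDest itself (a successor of the other arm);
    // in the diamond, TrueDest and FalseDest share the successor S.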
643     SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
644     TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
645     FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
646     BasicBlock *CommonSucc = nullptr;
647     if (TrueDestSucc.count(FalseDest)) {
648       CommonSucc = FalseDest;
649     } else if (FalseDestSucc.count(TrueDest)) {
650       CommonSucc = TrueDest;
651     } else {
652       set_intersect(TrueDestSucc, FalseDestSucc);
653       // If there's one common successor use that.
654       if (TrueDestSucc.size() == 1)
655         CommonSucc = *TrueDestSucc.begin();
656       // If there's more than one, pick whichever appears first in the block list
657       // (we can't use the value returned by TrueDestSucc.begin() as it's
658       // unpredictable which element gets returned).
659       else if (!TrueDestSucc.empty()) {
660         Function *F = TrueDest->getParent();
661         auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
662         auto It = llvm::find_if(*F, IsSucc);
663         assert(It != F->end() && "Could not find successor in function");
664         CommonSucc = &*It;
665       }
666     }
667     // The common successor has to be dominated by the branch, as otherwise
668     // there will be some other path to the successor that will not be
669     // controlled by this branch so any phi we hoist would be controlled by the
670     // wrong condition. This also takes care of avoiding hoisting of loop back
671     // edges.
672     // TODO: In some cases this could be relaxed if the successor is dominated
673     // by another block that's been hoisted and we can guarantee that the
674     // control flow has been replicated exactly.
675     if (CommonSucc && DT->dominates(BI, CommonSucc))
676       HoistableBranches[BI] = CommonSucc;
677   }
678 
679   bool canHoistPHI(PHINode *PN) {
680     // The phi must have loop invariant operands.
681     if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
682       return false;
683     // We can hoist phis if the block they are in is the target of hoistable
684     // branches which cover all of the predecessors of the block.
685     SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
686     BasicBlock *BB = PN->getParent();
687     for (BasicBlock *PredBB : predecessors(BB))
688       PredecessorBlocks.insert(PredBB);
689     // If we have fewer predecessor blocks than predecessors then the phi will
690     // have more than one incoming value for the same block, which we can't
691     // handle.
692     // TODO: This could be handled by erasing some of the duplicate incoming
693     // values.
694     if (PredecessorBlocks.size() != pred_size(BB))
695       return false;
696     for (auto &Pair : HoistableBranches) {
697       if (Pair.second == BB) {
698         // Which blocks are predecessors via this branch depends on whether the
699         // branch is triangle-like or diamond-like.
700         if (Pair.first->getSuccessor(0) == BB) {
701           PredecessorBlocks.erase(Pair.first->getParent());
702           PredecessorBlocks.erase(Pair.first->getSuccessor(1));
703         } else if (Pair.first->getSuccessor(1) == BB) {
704           PredecessorBlocks.erase(Pair.first->getParent());
705           PredecessorBlocks.erase(Pair.first->getSuccessor(0));
706         } else {
707           PredecessorBlocks.erase(Pair.first->getSuccessor(0));
708           PredecessorBlocks.erase(Pair.first->getSuccessor(1));
709         }
710       }
711     }
712     // PredecessorBlocks will now be empty if for every predecessor of BB we
713     // found a hoistable branch source.
714     return PredecessorBlocks.empty();
715   }
716 
717   BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
718     if (!ControlFlowHoisting)
719       return CurLoop->getLoopPreheader();
720     // If BB has already been hoisted, return that
721     if (HoistDestinationMap.count(BB))
722       return HoistDestinationMap[BB];
723 
724     // Check if this block is conditional based on a pending branch
725     auto HasBBAsSuccessor =
726         [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
727           return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
728                                        Pair.first->getSuccessor(1) == BB);
729         };
730     auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
731 
732     // If not involved in a pending branch, hoist to preheader
733     BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
734     if (It == HoistableBranches.end()) {
735       LLVM_DEBUG(dbgs() << "LICM using "
736                         << InitialPreheader->getNameOrAsOperand()
737                         << " as hoist destination for "
738                         << BB->getNameOrAsOperand() << "\n");
739       HoistDestinationMap[BB] = InitialPreheader;
740       return InitialPreheader;
741     }
742     BranchInst *BI = It->first;
743     assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
744                HoistableBranches.end() &&
745            "BB is expected to be the target of at most one branch");
746 
747     LLVMContext &C = BB->getContext();
748     BasicBlock *TrueDest = BI->getSuccessor(0);
749     BasicBlock *FalseDest = BI->getSuccessor(1);
750     BasicBlock *CommonSucc = HoistableBranches[BI];
751     BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
752 
753     // Create hoisted versions of blocks that currently don't have them
754     auto CreateHoistedBlock = [&](BasicBlock *Orig) {
755       if (HoistDestinationMap.count(Orig))
756         return HoistDestinationMap[Orig];
757       BasicBlock *New =
758           BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
759       HoistDestinationMap[Orig] = New;
760       DT->addNewBlock(New, HoistTarget);
761       if (CurLoop->getParentLoop())
762         CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
763       ++NumCreatedBlocks;
764       LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
765                         << " as hoist destination for " << Orig->getName()
766                         << "\n");
767       return New;
768     };
769     BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
770     BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
771     BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
772 
773     // Link up these blocks with branches.
774     if (!HoistCommonSucc->getTerminator()) {
775       // The new common successor we've generated will branch to whatever that
776       // hoist target branched to.
777       BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
778       assert(TargetSucc && "Expected hoist target to have a single successor");
779       HoistCommonSucc->moveBefore(TargetSucc);
780       BranchInst::Create(TargetSucc, HoistCommonSucc);
781     }
782     if (!HoistTrueDest->getTerminator()) {
783       HoistTrueDest->moveBefore(HoistCommonSucc);
784       BranchInst::Create(HoistCommonSucc, HoistTrueDest);
785     }
786     if (!HoistFalseDest->getTerminator()) {
787       HoistFalseDest->moveBefore(HoistCommonSucc);
788       BranchInst::Create(HoistCommonSucc, HoistFalseDest);
789     }
790 
791     // If BI is being cloned to what was originally the preheader then
792     // HoistCommonSucc will now be the new preheader.
793     if (HoistTarget == InitialPreheader) {
794       // Phis in the loop header now need to use the new preheader.
795       InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
796       MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
797           HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
798       // The new preheader dominates the loop header.
799       DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
800       DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
801       DT->changeImmediateDominator(HeaderNode, PreheaderNode);
802       // The preheader hoist destination is now the new preheader, with the
803       // exception of the hoist destination of this branch.
804       for (auto &Pair : HoistDestinationMap)
805         if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
806           Pair.second = HoistCommonSucc;
807     }
808 
809     // Now finally clone BI.
810     ReplaceInstWithInst(
811         HoistTarget->getTerminator(),
812         BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
813     ++NumClonedBranches;
814 
815     assert(CurLoop->getLoopPreheader() &&
816            "Hoisting blocks should not have destroyed preheader");
817     return HoistDestinationMap[BB];
818   }
819 };
820 } // namespace
821 
822 // Hoisting/sinking an instruction out of a loop isn't always beneficial. It's
823 // only worthwhile if the destination block is actually colder than the current
824 // block.
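//
// An illustrative example with the default threshold of 4: moving an
// instruction from a block with frequency 100 to a destination block with
// frequency 500 is rejected (500 / 4 > 100), while a destination with
// frequency 300 is accepted (300 / 4 <= 100).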
825 static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock,
826                                  OptimizationRemarkEmitter *ORE,
827                                  BlockFrequencyInfo *BFI) {
828   // Check block frequency only when runtime profile is available
829   // to avoid pathological cases. With a static profile, lean towards
830   // hoisting because it helps canonicalize the loop for the vectorizer.
831   if (!DstBlock->getParent()->hasProfileData())
832     return true;
833 
834   if (!HoistSinkColdnessThreshold || !BFI)
835     return true;
836 
837   BasicBlock *SrcBlock = I.getParent();
838   if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold >
839       BFI->getBlockFreq(SrcBlock).getFrequency()) {
840     ORE->emit([&]() {
841       return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I)
842              << "failed to sink or hoist instruction because containing block "
843                 "has lower frequency than destination block";
844     });
845     return false;
846   }
847 
848   return true;
849 }
850 
851 /// Walk the specified region of the CFG (defined by all blocks dominated by
852 /// the specified block, and that are in the current loop) in depth first
853 /// order w.r.t the DominatorTree.  This allows us to visit definitions before
854 /// uses, allowing us to hoist a loop body in one pass without iteration.
855 ///
856 bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
857                        DominatorTree *DT, BlockFrequencyInfo *BFI,
858                        TargetLibraryInfo *TLI, Loop *CurLoop,
859                        MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
860                        ICFLoopSafetyInfo *SafetyInfo,
861                        SinkAndHoistLICMFlags &Flags,
862                        OptimizationRemarkEmitter *ORE, bool LoopNestMode) {
863   // Verify inputs.
864   assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
865          CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
866          "Unexpected input to hoistRegion.");
867 
868   ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
869 
870   // Keep track of instructions that have been hoisted, as they may need to be
871   // re-hoisted if they end up not dominating all of their uses.
872   SmallVector<Instruction *, 16> HoistedInstructions;
873 
874   // For PHI hoisting to work we need to hoist blocks before their successors.
875   // We can do this by iterating through the blocks in the loop in reverse
876   // post-order.
877   LoopBlocksRPO Worklist(CurLoop);
878   Worklist.perform(LI);
879   bool Changed = false;
880   for (BasicBlock *BB : Worklist) {
881     // Only need to process the contents of this block if it is not part of a
882     // subloop (which would already have been processed).
883     if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
884       continue;
885 
886     for (Instruction &I : llvm::make_early_inc_range(*BB)) {
887       // Try constant folding this instruction.  If all the operands are
888       // constants, it is technically hoistable, but it would be better to
889       // just fold it.
890       if (Constant *C = ConstantFoldInstruction(
891               &I, I.getModule()->getDataLayout(), TLI)) {
892         LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
893                           << '\n');
894         // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
895         I.replaceAllUsesWith(C);
896         if (isInstructionTriviallyDead(&I, TLI))
897           eraseInstruction(I, *SafetyInfo, MSSAU);
898         Changed = true;
899         continue;
900       }
901 
902       // Try hoisting the instruction out to the preheader.  We can only do
903       // this if all of the operands of the instruction are loop invariant and
904       // if it is safe to hoist the instruction. We also check block frequency
905       // to make sure instruction only gets hoisted into colder blocks.
906       // TODO: It may be safe to hoist if we are hoisting to a conditional block
907       // and we have accurately duplicated the control flow from the loop header
908       // to that block.
909       if (CurLoop->hasLoopInvariantOperands(&I) &&
910           canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/ nullptr, MSSAU,
911                              true, &Flags, ORE) &&
912           worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) &&
913           isSafeToExecuteUnconditionally(
914               I, DT, TLI, CurLoop, SafetyInfo, ORE,
915               CurLoop->getLoopPreheader()->getTerminator())) {
916         hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
917               MSSAU, SE, ORE);
918         HoistedInstructions.push_back(&I);
919         Changed = true;
920         continue;
921       }
922 
923       // Attempt to remove floating point division out of the loop by
924       // converting it to a reciprocal multiplication.
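      // Illustrative sketch: with an invariant divisor 'd' and reciprocal
      // fast-math allowed, 'x / d' in the loop becomes 'r = 1.0 / d' (hoisted
      // to the preheader) followed by 'x * r' in the loop body.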
925       if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
926           CurLoop->isLoopInvariant(I.getOperand(1))) {
927         auto Divisor = I.getOperand(1);
928         auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
929         auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
930         ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
931         SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
932         ReciprocalDivisor->insertBefore(&I);
933 
934         auto Product =
935             BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
936         Product->setFastMathFlags(I.getFastMathFlags());
937         SafetyInfo->insertInstructionTo(Product, I.getParent());
938         Product->insertAfter(&I);
939         I.replaceAllUsesWith(Product);
940         eraseInstruction(I, *SafetyInfo, MSSAU);
941 
942         hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
943               SafetyInfo, MSSAU, SE, ORE);
944         HoistedInstructions.push_back(ReciprocalDivisor);
945         Changed = true;
946         continue;
947       }
948 
949       auto IsInvariantStart = [&](Instruction &I) {
950         using namespace PatternMatch;
951         return I.use_empty() &&
952                match(&I, m_Intrinsic<Intrinsic::invariant_start>());
953       };
954       auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
955         return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
956                SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
957       };
958       if ((IsInvariantStart(I) || isGuard(&I)) &&
959           CurLoop->hasLoopInvariantOperands(&I) &&
960           MustExecuteWithoutWritesBefore(I)) {
961         hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
962               MSSAU, SE, ORE);
963         HoistedInstructions.push_back(&I);
964         Changed = true;
965         continue;
966       }
967 
968       if (PHINode *PN = dyn_cast<PHINode>(&I)) {
969         if (CFH.canHoistPHI(PN)) {
970           // Redirect incoming blocks first to ensure that we create hoisted
971           // versions of those blocks before we hoist the phi.
972           for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
973             PN->setIncomingBlock(
974                 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
975           hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
976                 MSSAU, SE, ORE);
977           assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
978           Changed = true;
979           continue;
980         }
981       }
982 
983       // Remember possibly hoistable branches so we can actually hoist them
984       // later if needed.
985       if (BranchInst *BI = dyn_cast<BranchInst>(&I))
986         CFH.registerPossiblyHoistableBranch(BI);
987     }
988   }
989 
990   // If we hoisted instructions to a conditional block they may not dominate
991   // their uses that weren't hoisted (such as phis where some operands are not
992   // loop invariant). If so make them unconditional by moving them to their
993   // immediate dominator. We iterate through the instructions in reverse order
994   // which ensures that when we rehoist an instruction we rehoist its operands,
995   // and also keep track of where in the block we are rehoisting to, to make sure
996   // that we rehoist instructions before the instructions that use them.
997   Instruction *HoistPoint = nullptr;
998   if (ControlFlowHoisting) {
999     for (Instruction *I : reverse(HoistedInstructions)) {
1000       if (!llvm::all_of(I->uses(),
1001                         [&](Use &U) { return DT->dominates(I, U); })) {
1002         BasicBlock *Dominator =
1003             DT->getNode(I->getParent())->getIDom()->getBlock();
1004         if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1005           if (HoistPoint)
1006             assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1007                    "New hoist point expected to dominate old hoist point");
1008           HoistPoint = Dominator->getTerminator();
1009         }
1010         LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1011                           << HoistPoint->getParent()->getNameOrAsOperand()
1012                           << ": " << *I << "\n");
1013         moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
1014         HoistPoint = I;
1015         Changed = true;
1016       }
1017     }
1018   }
1019   if (VerifyMemorySSA)
1020     MSSAU->getMemorySSA()->verifyMemorySSA();
1021 
1022     // Now that we've finished hoisting make sure that LI and DT are still
1023     // valid.
1024 #ifdef EXPENSIVE_CHECKS
1025   if (Changed) {
1026     assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1027            "Dominator tree verification failed");
1028     LI->verify(*DT);
1029   }
1030 #endif
1031 
1032   return Changed;
1033 }
1034 
1035 // Return true if LI is invariant within scope of the loop. LI is invariant if
1036 // CurLoop is dominated by an invariant.start representing the same memory
1037 // location and size as the memory location LI loads from, and also the
1038 // invariant.start has no uses.
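//
// An illustrative IR sketch (the value names are made up): if a block outside
// the loop that dominates the loop header (such as the preheader) contains
//
//   %i = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p)
//
// and %i itself has no uses, then a load of at most 4 bytes from %p inside
// the loop can be treated as invariant.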
1039 static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
1040                                   Loop *CurLoop) {
1041   Value *Addr = LI->getOperand(0);
1042   const DataLayout &DL = LI->getModule()->getDataLayout();
1043   const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1044 
1045   // It is not currently possible for clang to generate an invariant.start
1046   // intrinsic with scalable vector types because we don't support thread local
1047   // sizeless types and we don't permit sizeless types in structs or classes.
1048   // Furthermore, even if support is added for this in future the intrinsic
1049   // itself is defined to have a size of -1 for variable sized objects. This
1050   // makes it impossible to verify if the intrinsic envelops our region of
1051   // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1052   // types would have a -1 parameter, but the former is clearly double the size
1053   // of the latter.
1054   if (LocSizeInBits.isScalable())
1055     return false;
1056 
1057   // If the type is i8 addrspace(x)*, we know this is the type of the
1058   // llvm.invariant.start operand.
1059   auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
1060                                      LI->getPointerAddressSpace());
1061   unsigned BitcastsVisited = 0;
1062   // Look through bitcasts until we reach the i8* type (this is invariant.start
1063   // operand type).
1064   while (Addr->getType() != PtrInt8Ty) {
1065     auto *BC = dyn_cast<BitCastInst>(Addr);
1066     // Avoid traversing high number of bitcast uses.
1067     if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
1068       return false;
1069     Addr = BC->getOperand(0);
1070   }
1071   // If we've ended up at a global/constant, bail. We shouldn't be looking at
1072   // uselists for non-local Values in a loop pass.
1073   if (isa<Constant>(Addr))
1074     return false;
1075 
1076   unsigned UsesVisited = 0;
1077   // Traverse all uses of the load operand value, to see if invariant.start is
1078   // one of the uses, and whether it dominates the load instruction.
1079   for (auto *U : Addr->users()) {
1080     // Avoid traversing for Load operand with high number of users.
1081     if (++UsesVisited > MaxNumUsesTraversed)
1082       return false;
1083     IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1084     // If there are escaping uses of the invariant.start instruction, the load
1085     // may be non-invariant.
1086     if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1087         !II->use_empty())
1088       continue;
1089     ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1090     // The intrinsic supports having a -1 argument for variable sized objects
1091     // so we should check for that here.
1092     if (InvariantSize->isNegative())
1093       continue;
1094     uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1095     // Confirm the invariant.start location size contains the load operand size
1096     // in bits. Also, the invariant.start should dominate the load, and we
1097     // should not hoist the load out of a loop that contains this dominating
1098     // invariant.start.
1099     if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
1100         DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1101       return true;
1102   }
1103 
1104   return false;
1105 }
1106 
1107 namespace {
1108 /// Return true if-and-only-if we know how to (mechanically) both hoist and
1109 /// sink a given instruction out of a loop.  Does not address legality
1110 /// concerns such as aliasing or speculation safety.
1111 bool isHoistableAndSinkableInst(Instruction &I) {
1112   // Only these instructions are hoistable/sinkable.
1113   return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
1114           isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
1115           isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
1116           isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
1117           isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
1118           isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
1119           isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1120 }
1121 /// Return true if all of the alias sets within this AST are known not to
1122 /// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
1123 bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
1124                 const Loop *L) {
1125   if (CurAST) {
1126     for (AliasSet &AS : *CurAST) {
1127       if (!AS.isForwardingAliasSet() && AS.isMod()) {
1128         return false;
1129       }
1130     }
1131     return true;
1132   } else { /*MSSAU*/
1133     for (auto *BB : L->getBlocks())
1134       if (MSSAU->getMemorySSA()->getBlockDefs(BB))
1135         return false;
1136     return true;
1137   }
1138 }
1139 
1140 /// Return true if I is the only Instruction with a MemoryAccess in L.
1141 bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1142                         const MemorySSAUpdater *MSSAU) {
1143   for (auto *BB : L->getBlocks())
1144     if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
1145       int NotAPhi = 0;
1146       for (const auto &Acc : *Accs) {
1147         if (isa<MemoryPhi>(&Acc))
1148           continue;
1149         const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1150         if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1151           return false;
1152       }
1153     }
1154   return true;
1155 }
1156 } // namespace
1157 
1158 bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1159                               Loop *CurLoop, AliasSetTracker *CurAST,
1160                               MemorySSAUpdater *MSSAU,
1161                               bool TargetExecutesOncePerLoop,
1162                               SinkAndHoistLICMFlags *Flags,
1163                               OptimizationRemarkEmitter *ORE) {
1164   assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
1165          "Either AliasSetTracker or MemorySSA should be initialized.");
1166 
1167   // If we don't understand the instruction, bail early.
1168   if (!isHoistableAndSinkableInst(I))
1169     return false;
1170 
1171   MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
1172   if (MSSA)
1173     assert(Flags != nullptr && "Flags cannot be null.");
1174 
1175   // Loads have extra constraints we have to verify before we can hoist them.
1176   if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
1177     if (!LI->isUnordered())
1178       return false; // Don't sink/hoist volatile or ordered atomic loads!
1179 
1180     // Loads from constant memory are always safe to move, even if they end up
1181     // in the same alias set as something that ends up being modified.
1182     if (AA->pointsToConstantMemory(LI->getOperand(0)))
1183       return true;
1184     if (LI->hasMetadata(LLVMContext::MD_invariant_load))
1185       return true;
1186 
1187     if (LI->isAtomic() && !TargetExecutesOncePerLoop)
1188       return false; // Don't risk duplicating unordered loads
1189 
1190     // This checks for an invariant.start dominating the load.
1191     if (isLoadInvariantInLoop(LI, DT, CurLoop))
1192       return true;
1193 
1194     bool Invalidated;
1195     if (CurAST)
1196       Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
1197                                              CurLoop, AA);
1198     else
1199       Invalidated = pointerInvalidatedByLoopWithMSSA(
1200           MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
1201     // Check loop-invariant address because this may also be a sinkable load
1202     // whose address is not necessarily loop-invariant.
1203     if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1204       ORE->emit([&]() {
1205         return OptimizationRemarkMissed(
1206                    DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
1207                << "failed to move load with loop-invariant address "
1208                   "because the loop may invalidate its value";
1209       });
1210 
1211     return !Invalidated;
1212   } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1213     // Don't sink or hoist dbg info; it's legal, but not useful.
1214     if (isa<DbgInfoIntrinsic>(I))
1215       return false;
1216 
1217     // Don't sink calls which can throw.
1218     if (CI->mayThrow())
1219       return false;
1220 
1221     // The convergent attribute is used on operations that involve
1222     // inter-thread communication, whose results are implicitly affected by the
1223     // enclosing control flow. It is not safe to hoist or sink such operations
1224     // across control flow.
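    // (For instance, GPU barrier or cross-lane shuffle intrinsics are
    // typically marked convergent; illustrative examples only.)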
1225     if (CI->isConvergent())
1226       return false;
1227 
1228     using namespace PatternMatch;
1229     if (match(CI, m_Intrinsic<Intrinsic::assume>()))
1230       // Assumes don't actually alias anything or throw
1231       return true;
1232 
1233     if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
1234       // Widenable conditions don't actually alias anything or throw
1235       return true;
1236 
1237     // Handle simple cases by querying alias analysis.
1238     FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1239     if (Behavior == FMRB_DoesNotAccessMemory)
1240       return true;
1241     if (AAResults::onlyReadsMemory(Behavior)) {
1242       // A readonly argmemonly function only reads from memory pointed to by
1243       // its arguments with arbitrary offsets.  If we can prove there are no
1244       // writes to this memory in the loop, we can hoist or sink.
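      // (Illustrative example, not from this file: a call such as
      //    declare i32 @peek(i8* nocapture) readonly argmemonly
      //    %r = call i32 @peek(i8* %p)
      //  qualifies here, provided %p is never written inside the loop.)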
1245       if (AAResults::onlyAccessesArgPointees(Behavior)) {
1246         // TODO: expand to writeable arguments
1247         for (Value *Op : CI->args())
1248           if (Op->getType()->isPointerTy()) {
1249             bool Invalidated;
1250             if (CurAST)
1251               Invalidated = pointerInvalidatedByLoop(
1252                   MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
1253             else
1254               Invalidated = pointerInvalidatedByLoopWithMSSA(
1255                   MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1256                   *Flags);
1257             if (Invalidated)
1258               return false;
1259           }
1260         return true;
1261       }
1262 
1263       // If this call only reads from memory and there are no writes to memory
1264       // in the loop, we can hoist or sink the call as appropriate.
1265       if (isReadOnly(CurAST, MSSAU, CurLoop))
1266         return true;
1267     }
1268 
1269     // FIXME: This should use mod/ref information to see if we can hoist or
1270     // sink the call.
1271 
1272     return false;
1273   } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1274     // Fences alias (most) everything to provide ordering.  For the moment,
1275     // just give up if there are any other memory operations in the loop.
1276     if (CurAST) {
1277       auto Begin = CurAST->begin();
1278       assert(Begin != CurAST->end() && "must contain FI");
1279       if (std::next(Begin) != CurAST->end())
1280         // constant memory for instance, TODO: handle better
1281         return false;
1282       auto *UniqueI = Begin->getUniqueInstruction();
1283       if (!UniqueI)
1284         // other memory op, give up
1285         return false;
1286       (void)FI; // suppress unused variable warning
1287       assert(UniqueI == FI && "AS must contain FI");
1288       return true;
1289     } else // MSSAU
1290       return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1291   } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1292     if (!SI->isUnordered())
1293       return false; // Don't sink/hoist volatile or ordered atomic stores!
1294 
1295     // We can only hoist a store that we can prove writes a value which is not
1296     // read or overwritten within the loop.  For those cases, we fall back to
1297     // load/store promotion instead.  TODO: We can extend this to cases where
1298     // there is exactly one write to the location and that write dominates an
1299     // arbitrary number of reads in the loop.
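    // (Illustrative example: in a loop of the form
    //    for (...) { *p = c; }
    //  where p and c are loop-invariant and nothing else in the loop touches
    //  *p, the store can be moved out, subject to the execution-safety checks
    //  performed by the caller.)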
1300     if (CurAST) {
1301       auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1302 
1303       if (AS.isRef() || !AS.isMustAlias())
1304         // Quick exit test, handled by the full path below as well.
1305         return false;
1306       auto *UniqueI = AS.getUniqueInstruction();
1307       if (!UniqueI)
1308         // other memory op, give up
1309         return false;
1310       assert(UniqueI == SI && "AS must contain SI");
1311       return true;
1312     } else { // MSSAU
1313       if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1314         return true;
1315       // If there are more accesses than the Promotion cap or no "quota" to
1316       // check clobber, then give up as we're not walking a list that long.
1317       if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
1318         return false;
1319       // If there are interfering Uses (i.e. their defining access is in the
1320       // loop), or ordered loads (stored as Defs!), don't move this store.
1321       // Could do better here, but this is conservatively correct.
1322       // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1323       // moving accesses. Can also extend to dominating uses.
1324       auto *SIMD = MSSA->getMemoryAccess(SI);
1325       for (auto *BB : CurLoop->getBlocks())
1326         if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1327           for (const auto &MA : *Accesses)
1328             if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1329               auto *MD = MU->getDefiningAccess();
1330               if (!MSSA->isLiveOnEntryDef(MD) &&
1331                   CurLoop->contains(MD->getBlock()))
1332                 return false;
1333               // Disable hoisting past potentially interfering loads. Optimized
1334               // Uses may point to an access outside the loop, as getClobbering
1335               // checks the previous iteration when walking the backedge.
1336               // FIXME: More precise: no Uses that alias SI.
1337               if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
1338                 return false;
1339             } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1340               if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1341                 (void)LI; // Silence warning.
1342                 assert(!LI->isUnordered() && "Expected ordered load");
1343                 return false;
1344               }
1345               // Any call, while it may not be clobbering SI, may still be a use.
1346               if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1347                 // Check if the call may read from the memory location written
1348                 // to by SI. Check CI's attributes and arguments; the number of
1349                 // such checks performed is limited by the cap enforced above.
1350                 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1351                 if (isModOrRefSet(MRI))
1352                   return false;
1353               }
1354             }
1355         }
1356       auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1357       Flags->incrementClobberingCalls();
1358       // If there are no clobbering Defs in the loop, store is safe to hoist.
1359       return MSSA->isLiveOnEntryDef(Source) ||
1360              !CurLoop->contains(Source->getBlock());
1361     }
1362   }
1363 
1364   assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1365 
1366   // We've established mechanical ability and aliasing; it's up to the caller
1367   // to check fault safety.
1368   return true;
1369 }
1370 
1371 /// Returns true if a PHINode is trivially replaceable with an
1372 /// Instruction.
1373 /// This is true when all incoming values are that instruction.
1374 /// This pattern occurs most often with LCSSA PHI nodes.
1375 ///
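/// For example (illustrative), the LCSSA PHI
///   %v.lcssa = phi i32 [ %v, %exiting1 ], [ %v, %exiting2 ]
/// is trivially replaceable with %v.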
1376 static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1377   for (const Value *IncValue : PN.incoming_values())
1378     if (IncValue != &I)
1379       return false;
1380 
1381   return true;
1382 }
1383 
1384 /// Return true if the instruction is free in the loop.
1385 static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1386                          const TargetTransformInfo *TTI) {
1387 
1388   if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1389     if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
1390         TargetTransformInfo::TCC_Free)
1391       return false;
1392     // For a GEP, we cannot simply use getUserCost because currently it
1393     // optimistically assumes that a GEP will fold into the addressing mode
1394     // regardless of its users.
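    // (Illustrative example: a GEP whose only users are loads/stores in its
    //  own block, e.g.
    //    %addr = getelementptr i32, i32* %base, i64 %i
    //    %val  = load i32, i32* %addr
    //  is considered free because it folds into the addressing mode.)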
1395     const BasicBlock *BB = GEP->getParent();
1396     for (const User *U : GEP->users()) {
1397       const Instruction *UI = cast<Instruction>(U);
1398       if (CurLoop->contains(UI) &&
1399           (BB != UI->getParent() ||
1400            (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1401         return false;
1402     }
1403     return true;
1404   } else
1405     return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1406            TargetTransformInfo::TCC_Free;
1407 }
1408 
1409 /// Return true if the only users of this instruction are outside of
1410 /// the loop. If this is true, we can sink the instruction to the exit
1411 /// blocks of the loop.
1412 ///
1413 /// We also return true if the instruction could be folded away in lowering.
1414 /// (e.g.,  a GEP can be folded into a load as an addressing mode in the loop).
1415 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1416                                   const LoopSafetyInfo *SafetyInfo,
1417                                   TargetTransformInfo *TTI, bool &FreeInLoop,
1418                                   bool LoopNestMode) {
1419   const auto &BlockColors = SafetyInfo->getBlockColors();
1420   bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1421   for (const User *U : I.users()) {
1422     const Instruction *UI = cast<Instruction>(U);
1423     if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1424       const BasicBlock *BB = PN->getParent();
1425       // We cannot sink uses in catchswitches.
1426       if (isa<CatchSwitchInst>(BB->getTerminator()))
1427         return false;
1428 
1429       // We need to sink a callsite to a unique funclet.  Avoid sinking if the
1430       // phi use is too muddled.
1431       if (isa<CallInst>(I))
1432         if (!BlockColors.empty() &&
1433             BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1434           return false;
1435 
1436       if (LoopNestMode) {
1437         while (isa<PHINode>(UI) && UI->hasOneUser() &&
1438                UI->getNumOperands() == 1) {
1439           if (!CurLoop->contains(UI))
1440             break;
1441           UI = cast<Instruction>(UI->user_back());
1442         }
1443       }
1444     }
1445 
1446     if (CurLoop->contains(UI)) {
1447       if (IsFree) {
1448         FreeInLoop = true;
1449         continue;
1450       }
1451       return false;
1452     }
1453   }
1454   return true;
1455 }
1456 
1457 static Instruction *cloneInstructionInExitBlock(
1458     Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1459     const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1460   Instruction *New;
1461   if (auto *CI = dyn_cast<CallInst>(&I)) {
1462     const auto &BlockColors = SafetyInfo->getBlockColors();
1463 
1464     // Sinking call-sites need to be handled differently from other
1465     // instructions.  The cloned call-site needs a funclet bundle operand
1466     // appropriate for its location in the CFG.
1467     SmallVector<OperandBundleDef, 1> OpBundles;
1468     for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1469          BundleIdx != BundleEnd; ++BundleIdx) {
1470       OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1471       if (Bundle.getTagID() == LLVMContext::OB_funclet)
1472         continue;
1473 
1474       OpBundles.emplace_back(Bundle);
1475     }
1476 
1477     if (!BlockColors.empty()) {
1478       const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1479       assert(CV.size() == 1 && "non-unique color for exit block!");
1480       BasicBlock *BBColor = CV.front();
1481       Instruction *EHPad = BBColor->getFirstNonPHI();
1482       if (EHPad->isEHPad())
1483         OpBundles.emplace_back("funclet", EHPad);
1484     }
1485 
1486     New = CallInst::Create(CI, OpBundles);
1487   } else {
1488     New = I.clone();
1489   }
1490 
1491   ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1492   if (!I.getName().empty())
1493     New->setName(I.getName() + ".le");
1494 
1495   if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1496     // Create a new MemoryAccess and let MemorySSA set its defining access.
1497     MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1498         New, nullptr, New->getParent(), MemorySSA::Beginning);
1499     if (NewMemAcc) {
1500       if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1501         MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1502       else {
1503         auto *MemUse = cast<MemoryUse>(NewMemAcc);
1504         MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1505       }
1506     }
1507   }
1508 
1509   // Build LCSSA PHI nodes for any in-loop operands (if legal).  Note that
1510   // this is particularly cheap because we can rip off the PHI node that we're
1511   // replacing for the number and blocks of the predecessors.
1512   // OPT: If this shows up in a profile, we can instead finish sinking all
1513   // invariant instructions, and then walk their operands to re-establish
1514   // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1515   // sinking bottom-up.
1516   for (Use &Op : New->operands())
1517     if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1518       auto *OInst = cast<Instruction>(Op.get());
1519       PHINode *OpPN =
1520         PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1521                         OInst->getName() + ".lcssa", &ExitBlock.front());
1522       for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1523         OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1524       Op = OpPN;
1525     }
1526   return New;
1527 }
1528 
1529 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1530                              MemorySSAUpdater *MSSAU) {
1531   if (MSSAU)
1532     MSSAU->removeMemoryAccess(&I);
1533   SafetyInfo.removeInstruction(&I);
1534   I.eraseFromParent();
1535 }
1536 
1537 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1538                                   ICFLoopSafetyInfo &SafetyInfo,
1539                                   MemorySSAUpdater *MSSAU,
1540                                   ScalarEvolution *SE) {
1541   SafetyInfo.removeInstruction(&I);
1542   SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1543   I.moveBefore(&Dest);
1544   if (MSSAU)
1545     if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1546             MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1547       MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
1548                          MemorySSA::BeforeTerminator);
1549   if (SE)
1550     SE->forgetValue(&I);
1551 }
1552 
1553 static Instruction *sinkThroughTriviallyReplaceablePHI(
1554     PHINode *TPN, Instruction *I, LoopInfo *LI,
1555     SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1556     const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1557     MemorySSAUpdater *MSSAU) {
1558   assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1559          "Expect only trivially replaceable PHI");
1560   BasicBlock *ExitBlock = TPN->getParent();
1561   Instruction *New;
1562   auto It = SunkCopies.find(ExitBlock);
1563   if (It != SunkCopies.end())
1564     New = It->second;
1565   else
1566     New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1567         *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1568   return New;
1569 }
1570 
1571 static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1572   BasicBlock *BB = PN->getParent();
1573   if (!BB->canSplitPredecessors())
1574     return false;
1575   // It's not impossible to split EHPad blocks, but if BlockColors already exist
1576   // it requires updating BlockColors for all offspring blocks accordingly. By
1577   // skipping such corner cases, we keep the BlockColors update after splitting
1578   // a predecessor fairly simple.
1579   if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1580     return false;
1581   for (BasicBlock *BBPred : predecessors(BB)) {
1582     if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
1583         isa<CallBrInst>(BBPred->getTerminator()))
1584       return false;
1585   }
1586   return true;
1587 }
1588 
1589 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1590                                         LoopInfo *LI, const Loop *CurLoop,
1591                                         LoopSafetyInfo *SafetyInfo,
1592                                         MemorySSAUpdater *MSSAU) {
1593 #ifndef NDEBUG
1594   SmallVector<BasicBlock *, 32> ExitBlocks;
1595   CurLoop->getUniqueExitBlocks(ExitBlocks);
1596   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1597                                              ExitBlocks.end());
1598 #endif
1599   BasicBlock *ExitBB = PN->getParent();
1600   assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1601 
1602   // Split predecessors of the loop exit so that instructions in the loop are
1603   // exposed to exit blocks through trivially replaceable PHIs, while keeping
1604   // the loop in the canonical form where each predecessor of each exit block
1605   // is contained within the loop. For example, this will convert the loop below
1606   // from
1607   //
1608   // LB1:
1609   //   %v1 =
1610   //   br %LE, %LB2
1611   // LB2:
1612   //   %v2 =
1613   //   br %LE, %LB1
1614   // LE:
1615   //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1616   //
1617   // to
1618   //
1619   // LB1:
1620   //   %v1 =
1621   //   br %LE.split, %LB2
1622   // LB2:
1623   //   %v2 =
1624   //   br %LE.split2, %LB1
1625   // LE.split:
1626   //   %p1 = phi [%v1, %LB1]  <-- trivially replaceable
1627   //   br %LE
1628   // LE.split2:
1629   //   %p2 = phi [%v2, %LB2]  <-- trivially replaceable
1630   //   br %LE
1631   // LE:
1632   //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1633   //
1634   const auto &BlockColors = SafetyInfo->getBlockColors();
1635   SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1636   while (!PredBBs.empty()) {
1637     BasicBlock *PredBB = *PredBBs.begin();
1638     assert(CurLoop->contains(PredBB) &&
1639            "Expect all predecessors are in the loop");
1640     if (PN->getBasicBlockIndex(PredBB) >= 0) {
1641       BasicBlock *NewPred = SplitBlockPredecessors(
1642           ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1643       // Since we do not allow splitting EH-block with BlockColors in
1644       // canSplitPredecessors(), we can simply assign predecessor's color to
1645       // the new block.
1646       if (!BlockColors.empty())
1647         // Grab a reference to the ColorVector to be inserted before getting the
1648         // reference to the vector we are copying because inserting the new
1649         // element in BlockColors might cause the map to be reallocated.
1650         SafetyInfo->copyColors(NewPred, PredBB);
1651     }
1652     PredBBs.remove(PredBB);
1653   }
1654 }
1655 
1656 /// When an instruction is found to only be used outside of the loop, this
1657 /// function moves it to the exit blocks and patches up SSA form as needed.
1658 /// This method is guaranteed to remove the original instruction from its
1659 /// position, and may either delete it or move it to outside of the loop.
1660 ///
1661 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1662                  BlockFrequencyInfo *BFI, const Loop *CurLoop,
1663                  ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
1664                  OptimizationRemarkEmitter *ORE) {
1665   bool Changed = false;
1666   LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1667 
1668   // Iterate over users to be ready for actual sinking. Replace uses reached via
1669   // unreachable blocks with undef and make all user PHIs trivially replaceable.
1670   SmallPtrSet<Instruction *, 8> VisitedUsers;
1671   for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1672     auto *User = cast<Instruction>(*UI);
1673     Use &U = UI.getUse();
1674     ++UI;
1675 
1676     if (VisitedUsers.count(User) || CurLoop->contains(User))
1677       continue;
1678 
1679     if (!DT->isReachableFromEntry(User->getParent())) {
1680       U = UndefValue::get(I.getType());
1681       Changed = true;
1682       continue;
1683     }
1684 
1685     // The user must be a PHI node.
1686     PHINode *PN = cast<PHINode>(User);
1687 
1688     // Surprisingly, instructions can be used outside of loops without any
1689     // exits.  This can only happen in PHI nodes if the incoming block is
1690     // unreachable.
1691     BasicBlock *BB = PN->getIncomingBlock(U);
1692     if (!DT->isReachableFromEntry(BB)) {
1693       U = UndefValue::get(I.getType());
1694       Changed = true;
1695       continue;
1696     }
1697 
1698     VisitedUsers.insert(PN);
1699     if (isTriviallyReplaceablePHI(*PN, I))
1700       continue;
1701 
1702     if (!canSplitPredecessors(PN, SafetyInfo))
1703       return Changed;
1704 
1705     // Split predecessors of the PHI so that we can make users trivially
1706     // replaceable.
1707     splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1708 
1709     // Should rebuild the iterators, as they may be invalidated by
1710     // splitPredecessorsOfLoopExit().
1711     UI = I.user_begin();
1712     UE = I.user_end();
1713   }
1714 
1715   if (VisitedUsers.empty())
1716     return Changed;
1717 
1718   ORE->emit([&]() {
1719     return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1720            << "sinking " << ore::NV("Inst", &I);
1721   });
1722   if (isa<LoadInst>(I))
1723     ++NumMovedLoads;
1724   else if (isa<CallInst>(I))
1725     ++NumMovedCalls;
1726   ++NumSunk;
1727 
1728 #ifndef NDEBUG
1729   SmallVector<BasicBlock *, 32> ExitBlocks;
1730   CurLoop->getUniqueExitBlocks(ExitBlocks);
1731   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1732                                              ExitBlocks.end());
1733 #endif
1734 
1735   // Clones of this instruction. Don't create more than one per exit block!
1736   SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1737 
1738   // If this instruction is only used outside of the loop, then all users are
1739   // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1740   // the instruction.
1741   // First check if I is worth sinking for all uses. Sink only when it is
1742   // worthwhile across all uses.
1743   SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1744   SmallVector<PHINode *, 8> ExitPNs;
1745   for (auto *UI : Users) {
1746     auto *User = cast<Instruction>(UI);
1747 
1748     if (CurLoop->contains(User))
1749       continue;
1750 
1751     PHINode *PN = cast<PHINode>(User);
1752     assert(ExitBlockSet.count(PN->getParent()) &&
1753            "The LCSSA PHI is not in an exit block!");
1754     if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) {
1755       return Changed;
1756     }
1757 
1758     ExitPNs.push_back(PN);
1759   }
1760 
1761   for (auto *PN : ExitPNs) {
1762 
1763     // The PHI must be trivially replaceable.
1764     Instruction *New = sinkThroughTriviallyReplaceablePHI(
1765         PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1766     PN->replaceAllUsesWith(New);
1767     eraseInstruction(*PN, *SafetyInfo, nullptr);
1768     Changed = true;
1769   }
1770   return Changed;
1771 }
1772 
1773 /// When an instruction is found to use only loop-invariant operands and it
1774 /// is safe to hoist, this function is called to do the dirty work.
1775 ///
1776 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1777                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1778                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1779                   OptimizationRemarkEmitter *ORE) {
1780   LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1781                     << I << "\n");
1782   ORE->emit([&]() {
1783     return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1784                                                          << ore::NV("Inst", &I);
1785   });
1786 
1787   // Metadata can be dependent on conditions we are hoisting above.
1788   // Conservatively strip all metadata on the instruction unless we were
1789   // guaranteed to execute I if we entered the loop, in which case the metadata
1790   // is valid in the loop preheader.
1791   // Similarly, if I is a call and it is not guaranteed to execute in the loop,
1792   // then moving to the preheader means we should strip attributes on the call
1793   // that can cause UB since we may be hoisting above conditions that allowed
1794   // inferring those attributes. They may not be valid at the preheader.
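  // (Illustrative example: !nonnull or !range metadata on a load that was
  //  only executed under a guarding branch may no longer hold once the load
  //  is hoisted above that branch.)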
1795   if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1796       // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1797       // time in isGuaranteedToExecute if we don't actually have anything to
1798       // drop.  It is a compile time optimization, not required for correctness.
1799       !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1800     I.dropUndefImplyingAttrsAndUnknownMetadata();
1801 
1802   if (isa<PHINode>(I))
1803     // Move the new node to the end of the phi list in the destination block.
1804     moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1805   else
1806     // Move the new node to the destination block, before its terminator.
1807     moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1808 
1809   I.updateLocationAfterHoist();
1810 
1811   if (isa<LoadInst>(I))
1812     ++NumMovedLoads;
1813   else if (isa<CallInst>(I))
1814     ++NumMovedCalls;
1815   ++NumHoisted;
1816 }
1817 
1818 /// Only sink or hoist an instruction if it is not a trapping instruction,
1819 /// if it is known not to trap when moved to the preheader, or if it is a
1820 /// trapping instruction that is guaranteed to execute.
1821 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
1822                                            const DominatorTree *DT,
1823                                            const TargetLibraryInfo *TLI,
1824                                            const Loop *CurLoop,
1825                                            const LoopSafetyInfo *SafetyInfo,
1826                                            OptimizationRemarkEmitter *ORE,
1827                                            const Instruction *CtxI) {
1828   if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
1829     return true;
1830 
1831   bool GuaranteedToExecute =
1832       SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1833 
1834   if (!GuaranteedToExecute) {
1835     auto *LI = dyn_cast<LoadInst>(&Inst);
1836     if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1837       ORE->emit([&]() {
1838         return OptimizationRemarkMissed(
1839                    DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1840                << "failed to hoist load with loop-invariant address "
1841                   "because load is conditionally executed";
1842       });
1843   }
1844 
1845   return GuaranteedToExecute;
1846 }
1847 
1848 namespace {
1849 class LoopPromoter : public LoadAndStorePromoter {
1850   Value *SomePtr; // Designated pointer to store to.
1851   const SmallSetVector<Value *, 8> &PointerMustAliases;
1852   SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1853   SmallVectorImpl<Instruction *> &LoopInsertPts;
1854   SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1855   PredIteratorCache &PredCache;
1856   MemorySSAUpdater *MSSAU;
1857   LoopInfo &LI;
1858   DebugLoc DL;
1859   Align Alignment;
1860   bool UnorderedAtomic;
1861   AAMDNodes AATags;
1862   ICFLoopSafetyInfo &SafetyInfo;
1863 
1864   // We're about to add a use of V in a loop exit block.  Insert an LCSSA phi
1865   // (if legal) if doing so would add an out-of-loop use to an instruction
1866   // defined in-loop.
1867   Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1868     if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1869       return V;
1870 
1871     Instruction *I = cast<Instruction>(V);
1872     // We need to create an LCSSA PHI node for the incoming value and
1873     // store that.
1874     PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1875                                   I->getName() + ".lcssa", &BB->front());
1876     for (BasicBlock *Pred : PredCache.get(BB))
1877       PN->addIncoming(I, Pred);
1878     return PN;
1879   }
1880 
1881 public:
1882   LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1883                const SmallSetVector<Value *, 8> &PMA,
1884                SmallVectorImpl<BasicBlock *> &LEB,
1885                SmallVectorImpl<Instruction *> &LIP,
1886                SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1887                MemorySSAUpdater *MSSAU, LoopInfo &li, DebugLoc dl,
1888                Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1889                ICFLoopSafetyInfo &SafetyInfo)
1890       : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1891         LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1892         PredCache(PIC), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1893         Alignment(Alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1894         SafetyInfo(SafetyInfo) {}
1895 
1896   bool isInstInList(Instruction *I,
1897                     const SmallVectorImpl<Instruction *> &) const override {
1898     Value *Ptr;
1899     if (LoadInst *LI = dyn_cast<LoadInst>(I))
1900       Ptr = LI->getOperand(0);
1901     else
1902       Ptr = cast<StoreInst>(I)->getPointerOperand();
1903     return PointerMustAliases.count(Ptr);
1904   }
1905 
1906   void doExtraRewritesBeforeFinalDeletion() override {
1907     // Insert stores in the loop exit blocks.  Each exit block gets a
1908     // store of the live-out values that feed them.  Since we've already told
1909     // the SSA updater about the defs in the loop and the preheader
1910     // definition, it is all set and we can start using it.
1911     for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1912       BasicBlock *ExitBlock = LoopExitBlocks[i];
1913       Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1914       LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1915       Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1916       Instruction *InsertPos = LoopInsertPts[i];
1917       StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1918       if (UnorderedAtomic)
1919         NewSI->setOrdering(AtomicOrdering::Unordered);
1920       NewSI->setAlignment(Alignment);
1921       NewSI->setDebugLoc(DL);
1922       if (AATags)
1923         NewSI->setAAMetadata(AATags);
1924 
1925       MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1926       MemoryAccess *NewMemAcc;
1927       if (!MSSAInsertPoint) {
1928         NewMemAcc = MSSAU->createMemoryAccessInBB(
1929             NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1930       } else {
1931         NewMemAcc =
1932             MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1933       }
1934       MSSAInsertPts[i] = NewMemAcc;
1935       MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1936       // FIXME: true for safety, false may still be correct.
1937     }
1938   }
1939 
1940   void instructionDeleted(Instruction *I) const override {
1941     SafetyInfo.removeInstruction(I);
1942     MSSAU->removeMemoryAccess(I);
1943   }
1944 };
1945 
1946 bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1947                                  DominatorTree *DT) {
1948   // We can perform the captured-before check against any instruction in the
1949   // loop header, as the loop header is reachable from any instruction inside
1950   // the loop.
1951   // TODO: ReturnCaptures=true shouldn't be necessary here.
1952   return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1953                                      /* StoreCaptures */ true,
1954                                      L->getHeader()->getTerminator(), DT);
1955 }
1956 
1957 /// Return true iff we can prove that a caller of this function cannot inspect
1958 /// the contents of the provided object in a well-defined program.
1959 bool isKnownNonEscaping(Value *Object, const Loop *L,
1960                         const TargetLibraryInfo *TLI, DominatorTree *DT) {
1961   if (isa<AllocaInst>(Object))
1962     // Since the alloca goes out of scope, we know the caller can't retain a
1963     // reference to it and be well defined.  Thus, we don't need to check for
1964     // capture.
1965     return true;
1966 
1967   // For all other objects we need to know that the caller can't possibly
1968   // have gotten a reference to the object.  There are two components of
1969   // that:
1970   //   1) Object can't be escaped by this function.  This is what
1971   //      PointerMayBeCaptured checks.
1972   //   2) Object can't have been captured at definition site.  For this, we
1973   //      need to know the return value is noalias.  At the moment, we use a
1974   //      weaker condition and handle only AllocLikeFunctions (which are
1975   //      known to be noalias).  TODO
1976   return isAllocLikeFn(Object, TLI) &&
1977          isNotCapturedBeforeOrInLoop(Object, L, DT);
1978 }
1979 
1980 } // namespace
1981 
1982 /// Try to promote memory values to scalars by sinking stores out of the
1983 /// loop and moving loads to before the loop.  We do this by looping over
1984 /// the stores in the loop, looking for stores to Must pointers which are
1985 /// loop invariant.
1986 ///
1987 bool llvm::promoteLoopAccessesToScalars(
1988     const SmallSetVector<Value *, 8> &PointerMustAliases,
1989     SmallVectorImpl<BasicBlock *> &ExitBlocks,
1990     SmallVectorImpl<Instruction *> &InsertPts,
1991     SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1992     LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
1993     Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1994     OptimizationRemarkEmitter *ORE) {
1995   // Verify inputs.
1996   assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1997          SafetyInfo != nullptr &&
1998          "Unexpected Input to promoteLoopAccessesToScalars");
1999 
2000   Value *SomePtr = *PointerMustAliases.begin();
2001   BasicBlock *Preheader = CurLoop->getLoopPreheader();
2002 
2003   // It is not safe to promote a load/store from the loop if the load/store is
2004   // conditional.  For example, turning:
2005   //
2006   //    for () { if (c) *P += 1; }
2007   //
2008   // into:
2009   //
2010   //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
2011   //
2012   // is not safe, because *P may only be valid to access if 'c' is true.
2013   //
2014   // The safety property divides into two parts:
2015   // p1) The memory may not be dereferenceable on entry to the loop.  In this
2016   //    case, we can't insert the required load in the preheader.
2017   // p2) The memory model does not allow us to insert a store along any dynamic
2018   //    path which did not originally have one.
2019   //
2020   // If at least one store is guaranteed to execute, both properties are
2021   // satisfied, and promotion is legal.
2022   //
2023   // This, however, is not a necessary condition. Even if no store/load is
2024   // guaranteed to execute, we can still establish these properties.
2025   // We can establish (p1) by proving that hoisting the load into the preheader
2026   // is safe (i.e. proving dereferenceability on all paths through the loop). We
2027   // can use any access within the alias set to prove dereferenceability,
2028   // since they're all must alias.
2029   //
2030   // There are two ways to establish (p2):
2031   // a) Prove the location is thread-local. In this case the memory model
2032   // requirement does not apply, and stores are safe to insert.
2033   // b) Prove a store dominates every exit block. In this case, if an exit
2034   // block is reached, the original dynamic path would have taken us through
2035   // the store, so inserting a store into the exit block is safe. Note that this
2036   // is different from the store being guaranteed to execute. For instance,
2037   // if an exception is thrown on the first iteration of the loop, the original
2038   // store is never executed, but the exit blocks are not executed either.
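  // (Illustrative example for (b): in
  //    do { *p = v; } while (c);
  //  the store dominates the single exit block, so placing "*p = v" in that
  //  exit block does not add a store to any path that lacked one.)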
2039 
2040   bool DereferenceableInPH = false;
2041   bool SafeToInsertStore = false;
2042 
2043   SmallVector<Instruction *, 64> LoopUses;
2044 
2045   // We start with an alignment of one and try to find instructions that allow
2046   // us to prove better alignment.
2047   Align Alignment;
2048   // Keep track of which types of access we see
2049   bool SawUnorderedAtomic = false;
2050   bool SawNotAtomic = false;
2051   AAMDNodes AATags;
2052 
2053   const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2054 
2055   bool IsKnownThreadLocalObject = false;
2056   if (SafetyInfo->anyBlockMayThrow()) {
2057     // If a loop can throw, we have to insert a store along each unwind edge.
2058     // That said, we can't actually make the unwind edge explicit. Therefore,
2059     // we have to prove that the store is dead along the unwind edge.  We do
2060     // this by proving that the caller can't have a reference to the object
2061     // after return and thus can't possibly load from the object.
2062     Value *Object = getUnderlyingObject(SomePtr);
2063     if (!isKnownNonEscaping(Object, CurLoop, TLI, DT))
2064       return false;
2065     // Subtlety: Allocas aren't visible to callers, but *are* potentially
2066     // visible to other threads if captured and used during their lifetimes.
2067     IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
2068   }
2069 
2070   // Check that all of the pointers in the alias set have the same type.  We
2071   // cannot (yet) promote a memory location that is loaded and stored in
2072   // different sizes.  While we are at it, collect alignment and AA info.
2073   for (Value *ASIV : PointerMustAliases) {
2074     // Check that all of the pointers in the alias set have the same type.  We
2075     // cannot (yet) promote a memory location that is loaded and stored in
2076     // different sizes.
2077     if (SomePtr->getType() != ASIV->getType())
2078       return false;
2079 
2080     for (User *U : ASIV->users()) {
2081       // Ignore instructions that are outside the loop.
2082       Instruction *UI = dyn_cast<Instruction>(U);
2083       if (!UI || !CurLoop->contains(UI))
2084         continue;
2085 
2086       // If there is a non-load/store instruction in the loop, we can't promote
2087       // it.
2088       if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2089         if (!Load->isUnordered())
2090           return false;
2091 
2092         SawUnorderedAtomic |= Load->isAtomic();
2093         SawNotAtomic |= !Load->isAtomic();
2094 
2095         Align InstAlignment = Load->getAlign();
2096 
2097         // Note that proving a load safe to speculate requires proving
2098         // sufficient alignment at the target location.  Proving it guaranteed
2099         // to execute does as well.  Thus we can increase our guaranteed
2100         // alignment as well.
2101         if (!DereferenceableInPH || (InstAlignment > Alignment))
2102           if (isSafeToExecuteUnconditionally(*Load, DT, TLI, CurLoop,
2103                                              SafetyInfo, ORE,
2104                                              Preheader->getTerminator())) {
2105             DereferenceableInPH = true;
2106             Alignment = std::max(Alignment, InstAlignment);
2107           }
2108       } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2109         // Stores *of* the pointer are not interesting, only stores *to* the
2110         // pointer.
2111         if (UI->getOperand(1) != ASIV)
2112           continue;
2113         if (!Store->isUnordered())
2114           return false;
2115 
2116         SawUnorderedAtomic |= Store->isAtomic();
2117         SawNotAtomic |= !Store->isAtomic();
2118 
2119         // If the store is guaranteed to execute, both properties are satisfied.
2120         // We may want to check if a store is guaranteed to execute even if we
2121         // already know that promotion is safe, since it may have higher
2122         // alignment than any other guaranteed stores, in which case we can
2123         // raise the alignment on the promoted store.
2124         Align InstAlignment = Store->getAlign();
2125 
2126         if (!DereferenceableInPH || !SafeToInsertStore ||
2127             (InstAlignment > Alignment)) {
2128           if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
2129             DereferenceableInPH = true;
2130             SafeToInsertStore = true;
2131             Alignment = std::max(Alignment, InstAlignment);
2132           }
2133         }
2134 
2135         // If a store dominates all exit blocks, it is safe to sink.
2136         // As explained above, if an exit block was executed, a dominating
2137         // store must have been executed at least once, so we are not
2138         // introducing stores on paths that did not have them.
2139         // Note that this only looks at explicit exit blocks. If we ever
2140         // start sinking stores into unwind edges (see above), this will break.
2141         if (!SafeToInsertStore)
2142           SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2143             return DT->dominates(Store->getParent(), Exit);
2144           });
2145 
2146         // If the store is not guaranteed to execute, we may still get
2147         // deref info through it.
2148         if (!DereferenceableInPH) {
2149           DereferenceableInPH = isDereferenceableAndAlignedPointer(
2150               Store->getPointerOperand(), Store->getValueOperand()->getType(),
2151               Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
2152         }
2153       } else
2154         return false; // Not a load or store.
2155 
2156       // Merge the AA tags.
2157       if (LoopUses.empty()) {
2158         // On the first load/store, just take its AA tags.
2159         AATags = UI->getAAMetadata();
2160       } else if (AATags) {
2161         AATags = AATags.merge(UI->getAAMetadata());
2162       }
2163 
2164       LoopUses.push_back(UI);
2165     }
2166   }
2167 
2168   // If we found both an unordered atomic instruction and a non-atomic memory
2169   // access, bail.  We can't blindly promote non-atomic to atomic since we
2170   // might not be able to lower the result.  We can't downgrade since that
2171   // would violate the memory model.  Also, align 0 is an error for atomics.
2172   if (SawUnorderedAtomic && SawNotAtomic)
2173     return false;
2174 
2175   // If we're inserting an atomic load in the preheader, we must be able to
2176   // lower it.  We're only guaranteed to be able to lower naturally aligned
2177   // atomics.
2178   auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
2179   if (SawUnorderedAtomic &&
2180       Alignment < MDL.getTypeStoreSize(SomePtrElemType))
2181     return false;
2182 
2183   // If we couldn't prove we can hoist the load, bail.
2184   if (!DereferenceableInPH)
2185     return false;
2186 
2187   // We know we can hoist the load, but don't have a guaranteed store.
2188   // Check whether the location is thread-local. If it is, then we can insert
2189   // stores along paths which originally didn't have them without violating the
2190   // memory model.
2191   if (!SafeToInsertStore) {
2192     if (IsKnownThreadLocalObject)
2193       SafeToInsertStore = true;
2194     else {
2195       Value *Object = getUnderlyingObject(SomePtr);
2196       SafeToInsertStore =
2197           (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
2198           isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
2199     }
2200   }
2201 
2202   // If we've still failed to prove we can sink the store, give up.
2203   if (!SafeToInsertStore)
2204     return false;
2205 
2206   // Otherwise, this is safe to promote, let's do it!
2207   LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
2208                     << '\n');
2209   ORE->emit([&]() {
2210     return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2211                               LoopUses[0])
2212            << "Moving accesses to memory location out of the loop";
2213   });
2214   ++NumPromoted;
2215 
2216   // Look at all the loop uses, and try to merge their locations.
2217   std::vector<const DILocation *> LoopUsesLocs;
2218   for (auto U : LoopUses)
2219     LoopUsesLocs.push_back(U->getDebugLoc().get());
2220   auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2221 
2222   // We use the SSAUpdater interface to insert phi nodes as required.
2223   SmallVector<PHINode *, 16> NewPHIs;
2224   SSAUpdater SSA(&NewPHIs);
2225   LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2226                         InsertPts, MSSAInsertPts, PIC, MSSAU, *LI, DL,
2227                         Alignment, SawUnorderedAtomic, AATags, *SafetyInfo);
2228 
2229   // Set up the preheader to have a definition of the value.  It is the live-out
2230   // value from the preheader that uses in the loop will use.
2231   LoadInst *PreheaderLoad = new LoadInst(
2232       SomePtr->getType()->getPointerElementType(), SomePtr,
2233       SomePtr->getName() + ".promoted", Preheader->getTerminator());
2234   if (SawUnorderedAtomic)
2235     PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2236   PreheaderLoad->setAlignment(Alignment);
2237   PreheaderLoad->setDebugLoc(DebugLoc());
2238   if (AATags)
2239     PreheaderLoad->setAAMetadata(AATags);
2240   SSA.AddAvailableValue(Preheader, PreheaderLoad);
2241 
2242   MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2243       PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2244   MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2245   MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2246 
2247   if (VerifyMemorySSA)
2248     MSSAU->getMemorySSA()->verifyMemorySSA();
2249   // Rewrite all the loads in the loop and remember all the definitions from
2250   // stores in the loop.
2251   Promoter.run(LoopUses);
2252 
2253   if (VerifyMemorySSA)
2254     MSSAU->getMemorySSA()->verifyMemorySSA();
2255   // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2256   if (PreheaderLoad->use_empty())
2257     eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2258 
2259   return true;
2260 }
2261 
2262 static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2263                                 function_ref<void(Instruction *)> Fn) {
2264   for (const BasicBlock *BB : L->blocks())
2265     if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2266       for (const auto &Access : *Accesses)
2267         if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2268           Fn(MUD->getMemoryInst());
2269 }
2270 
2271 static SmallVector<SmallSetVector<Value *, 8>, 0>
2272 collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2273   AliasSetTracker AST(*AA);
2274 
2275   auto IsPotentiallyPromotable = [L](const Instruction *I) {
2276     if (const auto *SI = dyn_cast<StoreInst>(I))
2277       return L->isLoopInvariant(SI->getPointerOperand());
2278     if (const auto *LI = dyn_cast<LoadInst>(I))
2279       return L->isLoopInvariant(LI->getPointerOperand());
2280     return false;
2281   };
2282 
2283   // Populate AST with potentially promotable accesses and remember them in
2284   // AttemptingPromotion, so they can be skipped in the aliasing check below.
2285   SmallPtrSet<Value *, 16> AttemptingPromotion;
2286   foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2287     if (IsPotentiallyPromotable(I)) {
2288       AttemptingPromotion.insert(I);
2289       AST.add(I);
2290     }
2291   });
2292 
2293   // We're only interested in must-alias sets that contain a mod.
2294   SmallVector<const AliasSet *, 8> Sets;
2295   for (AliasSet &AS : AST)
2296     if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2297       Sets.push_back(&AS);
2298 
2299   if (Sets.empty())
2300     return {}; // Nothing to promote...
2301 
2302   // Discard any sets for which there is an aliasing non-promotable access.
2303   foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2304     if (AttemptingPromotion.contains(I))
2305       return;
2306 
2307     llvm::erase_if(Sets, [&](const AliasSet *AS) {
2308       return AS->aliasesUnknownInst(I, *AA);
2309     });
2310   });
2311 
2312   SmallVector<SmallSetVector<Value *, 8>, 0> Result;
2313   for (const AliasSet *Set : Sets) {
2314     SmallSetVector<Value *, 8> PointerMustAliases;
2315     for (const auto &ASI : *Set)
2316       PointerMustAliases.insert(ASI.getValue());
2317     Result.push_back(std::move(PointerMustAliases));
2318   }
2319 
2320   return Result;
2321 }
2322 
2323 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2324                                      AliasSetTracker *CurAST, Loop *CurLoop,
2325                                      AAResults *AA) {
2326   return CurAST->getAliasSetFor(MemLoc).isMod();
2327 }
2328 
2329 bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2330                                       Loop *CurLoop, Instruction &I,
2331                                       SinkAndHoistLICMFlags &Flags) {
2332   // For hoisting, use the walker to determine safety
2333   if (!Flags.getIsSink()) {
2334     MemoryAccess *Source;
2335     // See declaration of SetLicmMssaOptCap for usage details.
2336     if (Flags.tooManyClobberingCalls())
2337       Source = MU->getDefiningAccess();
2338     else {
2339       Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2340       Flags.incrementClobberingCalls();
2341     }
2342     return !MSSA->isLiveOnEntryDef(Source) &&
2343            CurLoop->contains(Source->getBlock());
2344   }
2345 
2346   // For sinking, we'd need to check all Defs below this use. The getClobbering
2347   // call will look on the backedge of the loop, but will check aliasing with
2348   // the instructions on the previous iteration.
2349   // For example:
2350   // for (i ... )
2351   //   load a[i]   ; MemoryUse(liveOnEntry)
2352   //   store a[i]  ; 1 = MemoryDef(2), with 2 = MemoryPhi for the loop
2353   //   i++;
2354   // The load sees no clobbering inside the loop, as the backedge alias check
2355   // does phi translation, and will check aliasing against store a[i-1].
2356   // However sinking the load outside the loop, below the store is incorrect.
2357 
2358   // For now, only sink if any Defs in the loop precede the use and are in the
2359   // same block.
2360   // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2361   // needs PostDominatorTreeAnalysis.
2362   // FIXME: More precise: no Defs that alias this Use.
2363   if (Flags.tooManyMemoryAccesses())
2364     return true;
2365   for (auto *BB : CurLoop->getBlocks())
2366     if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
2367       return true;
2368   // When sinking, the source block may not be part of the loop so check it.
2369   if (!CurLoop->contains(&I))
2370     return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);
2371 
2372   return false;
2373 }
2374 
2375 bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
2376                                        MemoryUse &MU) {
2377   if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2378     for (const auto &MA : *Accesses)
2379       if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2380         if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2381           return true;
2382   return false;
2383 }
2384 
2385 /// Little predicate that returns true if the specified basic block is in
2386 /// a subloop of the current one, not the current one itself.
2387 ///
2388 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2389   assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2390   return LI->getLoopFor(BB) != CurLoop;
2391 }
2392