//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible.  It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe.  This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform.  It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops.  If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop.  This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer.  There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable.  We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
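// For illustration (an example sketch, not code from this pass), hoisting
// turns:
//
//   for (int i = 0; i < n; ++i)
//     a[i] = x + y;          // 'x + y' is loop invariant
//
// into:
//
//   int t = x + y;           // computed once in the preheader
//   for (int i = 0; i < n; ++i)
//     a[i] = t;
//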
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<unsigned> HoistSinkColdnessThreshold(
    "licm-coldness-threshold", cl::Hidden, cl::init(4),
    cl::desc("Relative coldness threshold of hoisting/sinking destination "
             "block for LICM to be considered beneficial"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

// The default value of zero implies that we use the regular alias set tracker
// mechanism instead of the cross product using AA to identify aliasing of the
// memory location we are interested in.
static cl::opt<int>
LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
               cl::desc("How many instructions to cross product using AA"));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile times. This is to be removed if MemorySSA starts
// to address the same issue. This flag applies only when LICM uses MemorySSA
// instead of AliasSetTracker. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile times. Caps the MemorySSA clobbering calls."));
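// As a usage sketch (an illustrative invocation, not taken from this file),
// the cap can be tightened from the opt command line when compile time
// matters more than precision:
//   opt -passes=licm -licm-mssa-optimization-cap=50 input.ll -S -o out.ll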

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, const Loop *CurLoop,
                 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
                 OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop, Instruction &I,
                                             SinkAndHoistLICMFlags &Flags);
static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
                                              MemoryUse &MU);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;

  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA);
  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoopWithMSSA(Loop *L, AAResults *AA,
                                  MemorySSAUpdater *MSSAU);
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = EnableMSSALoopDependency
                          ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
                          : nullptr;
    bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
    BlockFrequencyInfo *BFI =
        hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                       : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent()),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent()),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (EnableMSSALoopDependency) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    AU.addPreserved<LazyBlockFrequencyInfoPass>();
    AU.addPreserved<LazyBranchProbabilityInfoPass>();
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass.  Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();

  return PA;
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
}
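// As a usage sketch (assumed legacy pass-manager client code, not part of
// this file), the factory above can be used like so:
//
//   legacy::PassManager PM;
//   PM.add(createLICMPass());   // or createLICMPass(OptCap, PromotionCap)
//   PM.run(M);                  // M is an llvm::Module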

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
                                                   MemorySSA *MSSA)
    : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
                            IsSink, L, MSSA) {}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop *L, MemorySSA *MSSA)
    : LicmMssaOptCap(LicmMssaOptCap),
      LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
      IsSink(IsSink) {
  assert(((L != nullptr) == (MSSA != nullptr)) &&
         "Unexpected values for SinkAndHoistLICMFlags");
  if (!MSSA)
    return;

  unsigned AccessCapCount = 0;
  for (auto *BB : L->getBlocks())
    if (const auto *Accesses = MSSA->getBlockAccesses(BB))
      for (const auto &MA : *Accesses) {
        (void)MA;
        ++AccessCapCount;
        if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
          NoOfMemAccTooLarge = true;
          return;
        }
      }
}

/// Hoist expressions out of the specified loop. Note that alias info for the
/// inner loop is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
    BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
    ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  std::unique_ptr<AliasSetTracker> CurAST;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  std::unique_ptr<SinkAndHoistLICMFlags> Flags;

  if (!MSSA) {
    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
    CurAST = collectAliasInfoForLoop(L, LI, AA);
    Flags = std::make_unique<SinkAndHoistLICMFlags>(
        LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true);
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
    Flags = std::make_unique<SinkAndHoistLICMFlags>(
        LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true, L, MSSA);
  }

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses.  This allows
  // us to sink instructions in one pass, without iteration.  After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |=
        sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, TTI, L,
                   CurAST.get(), MSSAU.get(), &SafetyInfo, *Flags.get(), ORE);
  Flags->setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
                           CurAST.get(), MSSAU.get(), SE, &SafetyInfo,
                           *Flags.get(), ORE);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags->tooManyMemoryAccesses()) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      if (MSSAU)
        MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        if (MSSAU)
          MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      bool Promoted = false;

      // Build an AST using MSSA.
      if (!CurAST.get())
        CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get());

      // Loop over all of the alias sets in the tracker object.
      for (AliasSet &AS : *CurAST) {
        // We can promote this alias set if it has a store, if it is a "Must"
        // alias set, if the pointer is loop invariant, and if we are not
        // eliminating any volatile loads or stores.
        if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
            !L->isLoopInvariant(AS.begin()->getValue()))
          continue;

        assert(
            !AS.empty() &&
            "Must alias set should have at least one pointer element in it!");

        SmallSetVector<Value *, 8> PointerMustAliases;
        for (const auto &ASI : AS)
          PointerMustAliases.insert(ASI.getValue());

        Promoted |= promoteLoopAccessesToScalars(
            PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
            DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
      }

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (MSSAU.get() && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree.  This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, BlockFrequencyInfo *BFI,
                      TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                      Loop *CurLoop, AliasSetTracker *CurAST,
                      MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // If the instruction is dead, we would otherwise try to sink it because
      // it isn't used in the loop; instead, just delete it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop.  We can do this if all of the users of the instruction are
      // outside of the loop.  In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FreeInLoop = false;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE)) {
        if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}

namespace {
// This is a helper class for hoistRegion, enabling it to hoist control flow
// so that it can hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
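//
// For example (an illustrative sketch; block names here are not tied to this
// file), a loop-invariant diamond inside the loop
//
//        cond                           preheader
//       /    \                          /       \
//   if.then  if.else     ==>      then.licm   else.licm
//       \    /                          \       /
//        join                      preheader.join -> loop header
//
// is replicated above the loop, so an instruction from if.then can be hoisted
// into then.licm, and a phi in join can be hoisted into preheader.join.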
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be hoisted
  // to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on if the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever the
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      if (MSSAU)
        MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
            HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

// Hoisting/sinking an instruction out of a loop isn't always beneficial. It's
// only worthwhile if the destination block is actually colder than the current
// block.
static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock,
                                 OptimizationRemarkEmitter *ORE,
                                 BlockFrequencyInfo *BFI) {
  // Check block frequency only when runtime profile is available
  // to avoid pathological cases. With static profile, lean towards
  // hoisting because it helps canonicalize the loop for the vectorizer.
  if (!DstBlock->getParent()->hasProfileData())
    return true;

  if (!HoistSinkColdnessThreshold || !BFI)
    return true;

  BasicBlock *SrcBlock = I.getParent();
  if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold >
      BFI->getBlockFreq(SrcBlock).getFrequency()) {
    ORE->emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I)
             << "failed to sink or hoist instruction because containing block "
                "has lower frequency than destination block";
    });
    return false;
  }

  return true;
}
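// As a worked example of the check above (illustrative numbers only): with
// the default threshold of 4, an instruction in a block of frequency 100 may
// be moved to a destination whose frequency is at most 400, since
// 400 / 4 = 100 is not greater than 100; a destination of frequency 500
// fails the check because 500 / 4 = 125 > 100.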

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree.  This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, BlockFrequencyInfo *BFI,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                       ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
      Instruction &I = *II++;
      // Try constant folding this instruction.  If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
                          << '\n');
        if (CurAST)
          CurAST->copyValue(&I, C);
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Try hoisting the instruction out to the preheader.  We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure the instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) &&
          isSafeToExecuteUnconditionally(
              I, DT, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator())) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to move floating point division out of the loop by
      // converting it to a reciprocal multiplication.
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }
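      // For example (an IR sketch under the arcp fast-math flag; value names
      // are illustrative), with loop-invariant %d the transform above rewrites
      //   %q = fdiv arcp double %x, %d
      // into
      //   %r = fdiv arcp double 1.000000e+00, %d  ; hoisted to the preheader
      //   %q = fmul arcp double %x, %r            ; stays in the loop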

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to, to make
  // sure that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting, make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within the scope of the loop. LI is
// invariant if CurLoop is dominated by an invariant.start representing the
// same memory location and size as the memory location LI loads from, and
// the invariant.start has no uses.
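//
// For example (an IR sketch with illustrative names), the load below
// qualifies because the invariant.start covers all four bytes of %p and
// properly dominates the loop header:
//
//   %p8 = bitcast i32* %p to i8*
//   call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p8) ; result unused
//   br label %loop
// loop:
//   %v = load i32, i32* %p
//   ...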
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in the future, the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If the type is i8 addrspace(x)*, we know this is the type of the
  // llvm.invariant.start operand.
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
                                     LI->getPointerAddressSpace());
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is invariant.start
  // operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop.  Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}
/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
                const Loop *L) {
  if (CurAST) {
    for (AliasSet &AS : *CurAST) {
      if (!AS.isForwardingAliasSet() && AS.isMod()) {
        return false;
      }
    }
    return true;
  } else { /*MSSAU*/
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater *MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
}

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, AliasSetTracker *CurAST,
                              MemorySSAUpdater *MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags *Flags,
                              OptimizationRemarkEmitter *ORE) {
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
  if (MSSA)
    assert(Flags != nullptr && "Flags cannot be null.");

  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    bool Invalidated;
    if (CurAST)
      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
                                             CurLoop, AA);
    else
      Invalidated = pointerInvalidatedByLoopWithMSSA(
          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

1191     // The convergent attribute is used on operations that involve inter-thread
1192     // communication, whose results are implicitly affected by the enclosing
1193     // control flow. It is not safe to hoist or sink such operations across
1194     // control flow.
1195     if (CI->isConvergent())
1196       return false;
1197 
1198     using namespace PatternMatch;
1199     if (match(CI, m_Intrinsic<Intrinsic::assume>()))
1200       // Assumes don't actually alias anything or throw
1201       return true;
1202 
1203     if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
1204       // Widenable conditions don't actually alias anything or throw
1205       return true;
1206 
1207     // Handle simple cases by querying alias analysis.
1208     FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1209     if (Behavior == FMRB_DoesNotAccessMemory)
1210       return true;
1211     if (AAResults::onlyReadsMemory(Behavior)) {
1212       // A readonly argmemonly function only reads from memory pointed to by
1213       // its arguments with arbitrary offsets.  If we can prove there are no
1214       // writes to this memory in the loop, we can hoist or sink.
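           // For example (illustrative), a readonly/argmemonly call such as
           //   %r = call i32 @memcmp(i8* %p, i8* %q, i64 %n)
           // reads only through %p and %q, so it is movable whenever neither
           // location is written within the loop.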
1215       if (AAResults::onlyAccessesArgPointees(Behavior)) {
1216         // TODO: expand to writeable arguments
1217         for (Value *Op : CI->arg_operands())
1218           if (Op->getType()->isPointerTy()) {
1219             bool Invalidated;
1220             if (CurAST)
1221               Invalidated = pointerInvalidatedByLoop(
1222                   MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
1223             else
1224               Invalidated = pointerInvalidatedByLoopWithMSSA(
1225                   MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1226                   *Flags);
1227             if (Invalidated)
1228               return false;
1229           }
1230         return true;
1231       }
1232 
1233       // If this call only reads from memory and there are no writes to memory
1234       // in the loop, we can hoist or sink the call as appropriate.
1235       if (isReadOnly(CurAST, MSSAU, CurLoop))
1236         return true;
1237     }
1238 
1239     // FIXME: This should use mod/ref information to see if we can hoist or
1240     // sink the call.
1241 
1242     return false;
1243   } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1244     // Fences alias (most) everything to provide ordering.  For the moment,
1245     // just give up if there are any other memory operations in the loop.
1246     if (CurAST) {
1247       auto Begin = CurAST->begin();
1248       assert(Begin != CurAST->end() && "must contain FI");
1249       if (std::next(Begin) != CurAST->end())
1250         // another alias set exists (e.g. constant memory); TODO: handle better
1251         return false;
1252       auto *UniqueI = Begin->getUniqueInstruction();
1253       if (!UniqueI)
1254         // other memory op, give up
1255         return false;
1256       (void)FI; // suppress unused variable warning
1257       assert(UniqueI == FI && "AS must contain FI");
1258       return true;
1259     } else // MSSAU
1260       return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1261   } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1262     if (!SI->isUnordered())
1263       return false; // Don't sink/hoist volatile or ordered atomic stores!
1264 
1265     // We can only hoist a store that we can prove writes a value which is not
1266     // read or overwritten within the loop.  For those cases, we fallback to
1267     // load store promotion instead.  TODO: We can extend this to cases where
1268     // there is exactly one write to the location and that write dominates an
1269     // arbitrary number of reads in the loop.
1270     if (CurAST) {
1271       auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1272 
1273       if (AS.isRef() || !AS.isMustAlias())
1274         // Quick exit test, handled by the full path below as well.
1275         return false;
1276       auto *UniqueI = AS.getUniqueInstruction();
1277       if (!UniqueI)
1278         // other memory op, give up
1279         return false;
1280       assert(UniqueI == SI && "AS must contain SI");
1281       return true;
1282     } else { // MSSAU
1283       if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1284         return true;
1285       // If there are more accesses than the Promotion cap or no "quota" to
1286       // check clobber, then give up as we're not walking a list that long.
1287       if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
1288         return false;
1289       // If there are interfering Uses (i.e. their defining access is in the
1290       // loop), or ordered loads (stored as Defs!), don't move this store.
1291       // Could do better here, but this is conservatively correct.
1292       // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1293       // moving accesses. Can also extend to dominating uses.
1294       auto *SIMD = MSSA->getMemoryAccess(SI);
1295       for (auto *BB : CurLoop->getBlocks())
1296         if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1297           for (const auto &MA : *Accesses)
1298             if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1299               auto *MD = MU->getDefiningAccess();
1300               if (!MSSA->isLiveOnEntryDef(MD) &&
1301                   CurLoop->contains(MD->getBlock()))
1302                 return false;
1303               // Disable hoisting past potentially interfering loads. Optimized
1304               // Uses may point to an access outside the loop, as getClobbering
1305               // checks the previous iteration when walking the backedge.
1306               // FIXME: More precise: no Uses that alias SI.
1307               if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
1308                 return false;
1309             } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1310               if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1311                 (void)LI; // Silence warning.
1312                 assert(!LI->isUnordered() &&
1312                        "Expected a volatile or ordered atomic load");
1313                 return false;
1314               }
1315               // Any call, while it may not be clobbering SI, may still be a use.
1316               if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1317                 // Check if the call may read from the memory location written
1318                 // to by SI. Check CI's attributes and arguments; the number of
1319                 // such checks performed is limited above by NoOfMemAccTooLarge.
1320                 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1321                 if (isModOrRefSet(MRI))
1322                   return false;
1323               }
1324             }
1325         }
1326       auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1327       Flags->incrementClobberingCalls();
1328       // If there are no clobbering Defs in the loop, store is safe to hoist.
1329       return MSSA->isLiveOnEntryDef(Source) ||
1330              !CurLoop->contains(Source->getBlock());
1331     }
1332   }
1333 
1334   assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1335 
1336   // We've established mechanical ability and aliasing; it's up to the caller
1337   // to check fault safety.
1338   return true;
1339 }
1340 
1341 /// Returns true if a PHINode is trivially replaceable with an
1342 /// Instruction.
1343 /// This is true when all incoming values are that instruction.
1344 /// This pattern occurs most often with LCSSA PHI nodes.
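     /// For example (illustrative IR), %lcssa below is trivially replaceable
     /// with %v, since %v is its only incoming value:
     ///   exit:
     ///     %lcssa = phi i32 [ %v, %loop.latch ]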
1345 ///
1346 static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1347   for (const Value *IncValue : PN.incoming_values())
1348     if (IncValue != &I)
1349       return false;
1350 
1351   return true;
1352 }
1353 
1354 /// Return true if the instruction is free in the loop.
1355 static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1356                          const TargetTransformInfo *TTI) {
1357 
1358   if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1359     if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
1360         TargetTransformInfo::TCC_Free)
1361       return false;
1362     // For a GEP, we cannot simply use getUserCost because currently it
1363     // optimistically assumes that a GEP will fold into the addressing mode
1364     // regardless of its users.
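         // Illustrative example: a GEP whose only users are loads or stores in
         // its own block usually folds into their addressing modes and costs
         // nothing, while a GEP live across blocks needs a real register.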
1365     const BasicBlock *BB = GEP->getParent();
1366     for (const User *U : GEP->users()) {
1367       const Instruction *UI = cast<Instruction>(U);
1368       if (CurLoop->contains(UI) &&
1369           (BB != UI->getParent() ||
1370            (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1371         return false;
1372     }
1373     return true;
1374   } else
1375     return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1376            TargetTransformInfo::TCC_Free;
1377 }
1378 
1379 /// Return true if the only users of this instruction are outside of
1380 /// the loop. If this is true, we can sink the instruction to the exit
1381 /// blocks of the loop.
1382 ///
1383 /// We also return true if the instruction could be folded away in lowering.
1384 /// (e.g.,  a GEP can be folded into a load as an addressing mode in the loop).
1385 static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1386                                   const LoopSafetyInfo *SafetyInfo,
1387                                   TargetTransformInfo *TTI, bool &FreeInLoop) {
1388   const auto &BlockColors = SafetyInfo->getBlockColors();
1389   bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1390   for (const User *U : I.users()) {
1391     const Instruction *UI = cast<Instruction>(U);
1392     if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1393       const BasicBlock *BB = PN->getParent();
1394       // We cannot sink uses in catchswitches.
1395       if (isa<CatchSwitchInst>(BB->getTerminator()))
1396         return false;
1397 
1398       // We need to sink a callsite to a unique funclet.  Avoid sinking if the
1399       // phi use is too muddled.
1400       if (isa<CallInst>(I))
1401         if (!BlockColors.empty() &&
1402             BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1403           return false;
1404     }
1405 
1406     if (CurLoop->contains(UI)) {
1407       if (IsFree) {
1408         FreeInLoop = true;
1409         continue;
1410       }
1411       return false;
1412     }
1413   }
1414   return true;
1415 }
1416 
1417 static Instruction *cloneInstructionInExitBlock(
1418     Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1419     const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1420   Instruction *New;
1421   if (auto *CI = dyn_cast<CallInst>(&I)) {
1422     const auto &BlockColors = SafetyInfo->getBlockColors();
1423 
1424     // Sinking call-sites need to be handled differently from other
1425     // instructions.  The cloned call-site needs a funclet bundle operand
1426     // appropriate for its location in the CFG.
1427     SmallVector<OperandBundleDef, 1> OpBundles;
1428     for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1429          BundleIdx != BundleEnd; ++BundleIdx) {
1430       OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1431       if (Bundle.getTagID() == LLVMContext::OB_funclet)
1432         continue;
1433 
1434       OpBundles.emplace_back(Bundle);
1435     }
1436 
1437     if (!BlockColors.empty()) {
1438       const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1439       assert(CV.size() == 1 && "non-unique color for exit block!");
1440       BasicBlock *BBColor = CV.front();
1441       Instruction *EHPad = BBColor->getFirstNonPHI();
1442       if (EHPad->isEHPad())
1443         OpBundles.emplace_back("funclet", EHPad);
1444     }
1445 
1446     New = CallInst::Create(CI, OpBundles);
1447   } else {
1448     New = I.clone();
1449   }
1450 
1451   ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1452   if (!I.getName().empty())
1453     New->setName(I.getName() + ".le");
1454 
1455   if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1456     // Create a new MemoryAccess and let MemorySSA set its defining access.
1457     MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1458         New, nullptr, New->getParent(), MemorySSA::Beginning);
1459     if (NewMemAcc) {
1460       if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1461         MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1462       else {
1463         auto *MemUse = cast<MemoryUse>(NewMemAcc);
1464         MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1465       }
1466     }
1467   }
1468 
1469   // Build LCSSA PHI nodes for any in-loop operands. Note that this is
1470   // particularly cheap because we can rip off the PHI node that we're
1471   // replacing for the number and blocks of the predecessors.
1472   // OPT: If this shows up in a profile, we can instead finish sinking all
1473   // invariant instructions, and then walk their operands to re-establish
1474   // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1475   // sinking bottom-up.
1476   for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
1477        ++OI)
1478     if (Instruction *OInst = dyn_cast<Instruction>(*OI))
1479       if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
1480         if (!OLoop->contains(&PN)) {
1481           PHINode *OpPN =
1482               PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1483                               OInst->getName() + ".lcssa", &ExitBlock.front());
1484           for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1485             OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1486           *OI = OpPN;
1487         }
1488   return New;
1489 }
1490 
1491 static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1492                              AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
1493   if (AST)
1494     AST->deleteValue(&I);
1495   if (MSSAU)
1496     MSSAU->removeMemoryAccess(&I);
1497   SafetyInfo.removeInstruction(&I);
1498   I.eraseFromParent();
1499 }
1500 
1501 static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1502                                   ICFLoopSafetyInfo &SafetyInfo,
1503                                   MemorySSAUpdater *MSSAU,
1504                                   ScalarEvolution *SE) {
1505   SafetyInfo.removeInstruction(&I);
1506   SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1507   I.moveBefore(&Dest);
1508   if (MSSAU)
1509     if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1510             MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1511       MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
1512                          MemorySSA::BeforeTerminator);
1513   if (SE)
1514     SE->forgetValue(&I);
1515 }
1516 
1517 static Instruction *sinkThroughTriviallyReplaceablePHI(
1518     PHINode *TPN, Instruction *I, LoopInfo *LI,
1519     SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1520     const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1521     MemorySSAUpdater *MSSAU) {
1522   assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1523          "Expect only trivially replaceable PHI");
1524   BasicBlock *ExitBlock = TPN->getParent();
1525   Instruction *New;
1526   auto It = SunkCopies.find(ExitBlock);
1527   if (It != SunkCopies.end())
1528     New = It->second;
1529   else
1530     New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1531         *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1532   return New;
1533 }
1534 
1535 static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1536   BasicBlock *BB = PN->getParent();
1537   if (!BB->canSplitPredecessors())
1538     return false;
1539   // It's not impossible to split EHPad blocks, but if BlockColors already exist
1540   // it requires updating BlockColors for all offspring blocks accordingly. By
1541   // skipping such a corner case, we can make updating BlockColors after
1542   // splitting predecessors fairly simple.
1543   if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1544     return false;
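       // Edges from indirectbr or callbr terminators cannot be split, so a PHI
       // with such a predecessor blocks the transform.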
1545   for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
1546     BasicBlock *BBPred = *PI;
1547     if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
1548         isa<CallBrInst>(BBPred->getTerminator()))
1549       return false;
1550   }
1551   return true;
1552 }
1553 
1554 static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1555                                         LoopInfo *LI, const Loop *CurLoop,
1556                                         LoopSafetyInfo *SafetyInfo,
1557                                         MemorySSAUpdater *MSSAU) {
1558 #ifndef NDEBUG
1559   SmallVector<BasicBlock *, 32> ExitBlocks;
1560   CurLoop->getUniqueExitBlocks(ExitBlocks);
1561   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1562                                              ExitBlocks.end());
1563 #endif
1564   BasicBlock *ExitBB = PN->getParent();
1565   assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1566 
1567   // Split predecessors of the loop exit so that instructions in the loop are
1568   // exposed to exit blocks through trivially replaceable PHIs while keeping the
1569   // loop in the canonical form where each predecessor of each exit block should
1570   // be contained within the loop. For example, this will convert the loop below
1571   // from
1572   //
1573   // LB1:
1574   //   %v1 =
1575   //   br %LE, %LB2
1576   // LB2:
1577   //   %v2 =
1578   //   br %LE, %LB1
1579   // LE:
1580   //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1581   //
1582   // to
1583   //
1584   // LB1:
1585   //   %v1 =
1586   //   br %LE.split, %LB2
1587   // LB2:
1588   //   %v2 =
1589   //   br %LE.split2, %LB1
1590   // LE.split:
1591   //   %p1 = phi [%v1, %LB1]  <-- trivially replaceable
1592   //   br %LE
1593   // LE.split2:
1594   //   %p2 = phi [%v2, %LB2]  <-- trivially replaceable
1595   //   br %LE
1596   // LE:
1597   //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1598   //
1599   const auto &BlockColors = SafetyInfo->getBlockColors();
1600   SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1601   while (!PredBBs.empty()) {
1602     BasicBlock *PredBB = *PredBBs.begin();
1603     assert(CurLoop->contains(PredBB) &&
1604            "Expect all predecessors are in the loop");
1605     if (PN->getBasicBlockIndex(PredBB) >= 0) {
1606       BasicBlock *NewPred = SplitBlockPredecessors(
1607           ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1608       // Since we do not allow splitting EH-block with BlockColors in
1609       // canSplitPredecessors(), we can simply assign predecessor's color to
1610       // the new block.
1611       if (!BlockColors.empty())
1612         // Grab a reference to the ColorVector to be inserted before getting the
1613         // reference to the vector we are copying because inserting the new
1614         // element in BlockColors might cause the map to be reallocated.
1615         SafetyInfo->copyColors(NewPred, PredBB);
1616     }
1617     PredBBs.remove(PredBB);
1618   }
1619 }
1620 
1621 /// When an instruction is found to only be used outside of the loop, this
1622 /// function moves it to the exit blocks and patches up SSA form as needed.
1623 /// This method is guaranteed to remove the original instruction from its
1624 /// position, and may either delete it or move it to outside of the loop.
1625 ///
1626 static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1627                  BlockFrequencyInfo *BFI, const Loop *CurLoop,
1628                  ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
1629                  OptimizationRemarkEmitter *ORE) {
1630   LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1631   ORE->emit([&]() {
1632     return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
1633            << "sinking " << ore::NV("Inst", &I);
1634   });
1635   bool Changed = false;
1636   if (isa<LoadInst>(I))
1637     ++NumMovedLoads;
1638   else if (isa<CallInst>(I))
1639     ++NumMovedCalls;
1640   ++NumSunk;
1641 
1642   // Iterate over users to be ready for actual sinking. Replace users in
1643   // unreachable blocks with undef and make all user PHIs trivially replaceable.
1644   SmallPtrSet<Instruction *, 8> VisitedUsers;
1645   for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1646     auto *User = cast<Instruction>(*UI);
1647     Use &U = UI.getUse();
1648     ++UI;
1649 
1650     if (VisitedUsers.count(User) || CurLoop->contains(User))
1651       continue;
1652 
1653     if (!DT->isReachableFromEntry(User->getParent())) {
1654       U = UndefValue::get(I.getType());
1655       Changed = true;
1656       continue;
1657     }
1658 
1659     // The user must be a PHI node.
1660     PHINode *PN = cast<PHINode>(User);
1661 
1662     // Surprisingly, instructions can be used outside of loops without any
1663     // exits.  This can only happen in PHI nodes if the incoming block is
1664     // unreachable.
1665     BasicBlock *BB = PN->getIncomingBlock(U);
1666     if (!DT->isReachableFromEntry(BB)) {
1667       U = UndefValue::get(I.getType());
1668       Changed = true;
1669       continue;
1670     }
1671 
1672     VisitedUsers.insert(PN);
1673     if (isTriviallyReplaceablePHI(*PN, I))
1674       continue;
1675 
1676     if (!canSplitPredecessors(PN, SafetyInfo))
1677       return Changed;
1678 
1679     // Split predecessors of the PHI so that we can make users trivially
1680     // replaceable.
1681     splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1682 
1683     // Should rebuild the iterators, as they may be invalidated by
1684     // splitPredecessorsOfLoopExit().
1685     UI = I.user_begin();
1686     UE = I.user_end();
1687   }
1688 
1689   if (VisitedUsers.empty())
1690     return Changed;
1691 
1692 #ifndef NDEBUG
1693   SmallVector<BasicBlock *, 32> ExitBlocks;
1694   CurLoop->getUniqueExitBlocks(ExitBlocks);
1695   SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1696                                              ExitBlocks.end());
1697 #endif
1698 
1699   // Clones of this instruction. Don't create more than one per exit block!
1700   SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1701 
1702   // If this instruction is only used outside of the loop, then all users are
1703   // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1704   // the instruction.
1705   // First check if I is worth sinking for all uses. Sink only when it is
1706   // worthwhile across all uses.
1707   SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1708   SmallVector<PHINode *, 8> ExitPNs;
1709   for (auto *UI : Users) {
1710     auto *User = cast<Instruction>(UI);
1711 
1712     if (CurLoop->contains(User))
1713       continue;
1714 
1715     PHINode *PN = cast<PHINode>(User);
1716     assert(ExitBlockSet.count(PN->getParent()) &&
1717            "The LCSSA PHI is not in an exit block!");
1718     if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) {
1719       return Changed;
1720     }
1721 
1722     ExitPNs.push_back(PN);
1723   }
1724 
1725   for (auto *PN : ExitPNs) {
1726 
1727     // The PHI must be trivially replaceable.
1728     Instruction *New = sinkThroughTriviallyReplaceablePHI(
1729         PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1730     PN->replaceAllUsesWith(New);
1731     eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
1732     Changed = true;
1733   }
1734   return Changed;
1735 }
1736 
1737 /// When an instruction is found to use only loop invariant operands and it
1738 /// is safe to hoist, this function is called to do the dirty work.
1739 ///
1740 static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1741                   BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1742                   MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1743                   OptimizationRemarkEmitter *ORE) {
1744   LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1745                     << I << "\n");
1746   ORE->emit([&]() {
1747     return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
1748                                                          << ore::NV("Inst", &I);
1749   });
1750 
1751   // Metadata can be dependent on conditions we are hoisting above.
1752   // Conservatively strip all metadata on the instruction unless we were
1753   // guaranteed to execute I if we entered the loop, in which case the metadata
1754   // is valid in the loop preheader.
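       // Illustrative example: a load tagged !nonnull under an "if (p != null)"
       // inside the loop must not keep that metadata once hoisted above the
       // check.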
1755   if (I.hasMetadataOtherThanDebugLoc() &&
1756       // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1757       // time in isGuaranteedToExecute if we don't actually have anything to
1758       // drop.  It is a compile time optimization, not required for correctness.
1759       !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1760     I.dropUnknownNonDebugMetadata();
1761 
1762   if (isa<PHINode>(I))
1763     // Move the new node to the end of the phi list in the destination block.
1764     moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1765   else
1766     // Move the new node to the destination block, before its terminator.
1767     moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1768 
1769   I.updateLocationAfterHoist();
1770 
1771   if (isa<LoadInst>(I))
1772     ++NumMovedLoads;
1773   else if (isa<CallInst>(I))
1774     ++NumMovedCalls;
1775   ++NumHoisted;
1776 }
1777 
1778 /// Only sink or hoist an instruction if it is not a trapping instruction,
1779 /// or if the instruction is known not to trap when moved to the preheader,
1780 /// or if it is a trapping instruction and is guaranteed to execute.
1781 static bool isSafeToExecuteUnconditionally(Instruction &Inst,
1782                                            const DominatorTree *DT,
1783                                            const Loop *CurLoop,
1784                                            const LoopSafetyInfo *SafetyInfo,
1785                                            OptimizationRemarkEmitter *ORE,
1786                                            const Instruction *CtxI) {
1787   if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
1788     return true;
1789 
1790   bool GuaranteedToExecute =
1791       SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1792 
1793   if (!GuaranteedToExecute) {
1794     auto *LI = dyn_cast<LoadInst>(&Inst);
1795     if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1796       ORE->emit([&]() {
1797         return OptimizationRemarkMissed(
1798                    DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
1799                << "failed to hoist load with loop-invariant address "
1800                   "because load is conditionally executed";
1801       });
1802   }
1803 
1804   return GuaranteedToExecute;
1805 }
1806 
1807 namespace {
1808 class LoopPromoter : public LoadAndStorePromoter {
1809   Value *SomePtr; // Designated pointer to store to.
1810   const SmallSetVector<Value *, 8> &PointerMustAliases;
1811   SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1812   SmallVectorImpl<Instruction *> &LoopInsertPts;
1813   SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1814   PredIteratorCache &PredCache;
1815   AliasSetTracker *AST;
1816   MemorySSAUpdater *MSSAU;
1817   LoopInfo &LI;
1818   DebugLoc DL;
1819   int Alignment;
1820   bool UnorderedAtomic;
1821   AAMDNodes AATags;
1822   ICFLoopSafetyInfo &SafetyInfo;
1823 
1824   Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1825     if (Instruction *I = dyn_cast<Instruction>(V))
1826       if (Loop *L = LI.getLoopFor(I->getParent()))
1827         if (!L->contains(BB)) {
1828           // We need to create an LCSSA PHI node for the incoming value and
1829           // store that.
1830           PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1831                                         I->getName() + ".lcssa", &BB->front());
1832           for (BasicBlock *Pred : PredCache.get(BB))
1833             PN->addIncoming(I, Pred);
1834           return PN;
1835         }
1836     return V;
1837   }
1838 
1839 public:
1840   LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1841                const SmallSetVector<Value *, 8> &PMA,
1842                SmallVectorImpl<BasicBlock *> &LEB,
1843                SmallVectorImpl<Instruction *> &LIP,
1844                SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1845                AliasSetTracker *ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
1846                DebugLoc dl, int alignment, bool UnorderedAtomic,
1847                const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
1848       : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1849         LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1850         PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1851         Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1852         SafetyInfo(SafetyInfo) {}
1853 
1854   bool isInstInList(Instruction *I,
1855                     const SmallVectorImpl<Instruction *> &) const override {
1856     Value *Ptr;
1857     if (LoadInst *LI = dyn_cast<LoadInst>(I))
1858       Ptr = LI->getOperand(0);
1859     else
1860       Ptr = cast<StoreInst>(I)->getPointerOperand();
1861     return PointerMustAliases.count(Ptr);
1862   }
1863 
1864   void doExtraRewritesBeforeFinalDeletion() override {
1865     // Insert stores in the loop exit blocks.  Each exit block gets a store
1866     // of the live-out value that feeds it.  Since we've already told
1867     // the SSA updater about the defs in the loop and the preheader
1868     // definition, it is all set and we can start using it.
1869     for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1870       BasicBlock *ExitBlock = LoopExitBlocks[i];
1871       Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1872       LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1873       Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1874       Instruction *InsertPos = LoopInsertPts[i];
1875       StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1876       if (UnorderedAtomic)
1877         NewSI->setOrdering(AtomicOrdering::Unordered);
1878       NewSI->setAlignment(Align(Alignment));
1879       NewSI->setDebugLoc(DL);
1880       if (AATags)
1881         NewSI->setAAMetadata(AATags);
1882 
1883       if (MSSAU) {
1884         MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1885         MemoryAccess *NewMemAcc;
1886         if (!MSSAInsertPoint) {
1887           NewMemAcc = MSSAU->createMemoryAccessInBB(
1888               NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1889         } else {
1890           NewMemAcc =
1891               MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1892         }
1893         MSSAInsertPts[i] = NewMemAcc;
1894         MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1895         // FIXME: true for safety, false may still be correct.
1896       }
1897     }
1898   }
1899 
1900   void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
1901     // Update alias analysis.
1902     if (AST)
1903       AST->copyValue(LI, V);
1904   }
1905   void instructionDeleted(Instruction *I) const override {
1906     SafetyInfo.removeInstruction(I);
1907     if (AST)
1908       AST->deleteValue(I);
1909     if (MSSAU)
1910       MSSAU->removeMemoryAccess(I);
1911   }
1912 };
1913 
1914 
1915 /// Return true iff we can prove that a caller of this function cannot inspect
1916 /// the contents of the provided object in a well-defined program.
1917 bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) {
1918   if (isa<AllocaInst>(Object))
1919     // Since the alloca goes out of scope, we know the caller can't retain a
1920     // reference to it and be well defined.  Thus, we don't need to check for
1921     // capture.
1922     return true;
1923 
1924   // For all other objects we need to know that the caller can't possibly
1925   // have gotten a reference to the object.  There are two components of
1926   // that:
1927   //   1) Object can't be escaped by this function.  This is what
1928   //      PointerMayBeCaptured checks.
1929   //   2) Object can't have been captured at definition site.  For this, we
1930   //      need to know the return value is noalias.  At the moment, we use a
1931   //      weaker condition and handle only AllocLikeFunctions (which are
1932   //      known to be noalias).  TODO
1933   return isAllocLikeFn(Object, TLI) &&
1934     !PointerMayBeCaptured(Object, true, true);
1935 }
1936 
1937 } // namespace
1938 
1939 /// Try to promote memory values to scalars by sinking stores out of the
1940 /// loop and moving loads to before the loop.  We do this by looping over
1941 /// the stores in the loop, looking for stores to Must pointers which are
1942 /// loop invariant.
1943 ///
1944 bool llvm::promoteLoopAccessesToScalars(
1945     const SmallSetVector<Value *, 8> &PointerMustAliases,
1946     SmallVectorImpl<BasicBlock *> &ExitBlocks,
1947     SmallVectorImpl<Instruction *> &InsertPts,
1948     SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1949     LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
1950     Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
1951     ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
1952   // Verify inputs.
1953   assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1954          SafetyInfo != nullptr &&
1955          "Unexpected Input to promoteLoopAccessesToScalars");
1956 
1957   Value *SomePtr = *PointerMustAliases.begin();
1958   BasicBlock *Preheader = CurLoop->getLoopPreheader();
1959 
1960   // It is not safe to promote a load/store from the loop if the load/store is
1961   // conditional.  For example, turning:
1962   //
1963   //    for () { if (c) *P += 1; }
1964   //
1965   // into:
1966   //
1967   //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
1968   //
1969   // is not safe, because *P may only be valid to access if 'c' is true.
1970   //
1971   // The safety property divides into two parts:
1972   // p1) The memory may not be dereferenceable on entry to the loop.  In this
1973   //    case, we can't insert the required load in the preheader.
1974   // p2) The memory model does not allow us to insert a store along any dynamic
1975   //    path which did not originally have one.
1976   //
1977   // If at least one store is guaranteed to execute, both properties are
1978   // satisfied, and promotion is legal.
1979   //
1980   // This, however, is not a necessary condition. Even if no store/load is
1981   // guaranteed to execute, we can still establish these properties.
1982   // We can establish (p1) by proving that hoisting the load into the preheader
1983   // is safe (i.e. proving dereferenceability on all paths through the loop). We
1984   // can use any access within the alias set to prove dereferenceability,
1985   // since they're all must alias.
1986   //
1987   // There are two ways to establish (p2):
1988   // a) Prove the location is thread-local. In this case the memory model
1989   // requirement does not apply, and stores are safe to insert.
1990   // b) Prove a store dominates every exit block. In this case, if an exit
1991   // block is reached, the original dynamic path would have taken us through
1992   // the store, so inserting a store into the exit block is safe. Note that this
1993   // is different from the store being guaranteed to execute. For instance,
1994   // if an exception is thrown on the first iteration of the loop, the original
1995   // store is never executed, but the exit blocks are not executed either.
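       //
       // Illustrative example of (b):
       //    do { may_throw(); *P = v; } while (c);
       // The store is not guaranteed to execute (may_throw may unwind first),
       // yet it dominates the sole normal exit, so a store inserted there only
       // runs on dynamic paths that already stored.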
1996 
1997   bool DereferenceableInPH = false;
1998   bool SafeToInsertStore = false;
1999 
2000   SmallVector<Instruction *, 64> LoopUses;
2001 
2002   // We start with an alignment of one and try to find instructions that allow
2003   // us to prove better alignment.
2004   Align Alignment;
2005   // Keep track of which types of access we see
2006   bool SawUnorderedAtomic = false;
2007   bool SawNotAtomic = false;
2008   AAMDNodes AATags;
2009 
2010   const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2011 
2012   bool IsKnownThreadLocalObject = false;
2013   if (SafetyInfo->anyBlockMayThrow()) {
2014     // If a loop can throw, we have to insert a store along each unwind edge.
2015     // That said, we can't actually make the unwind edge explicit. Therefore,
2016     // we have to prove that the store is dead along the unwind edge.  We do
2017     // this by proving that the caller can't have a reference to the object
2018     // after return and thus can't possibly load from the object.
2019     Value *Object = getUnderlyingObject(SomePtr);
2020     if (!isKnownNonEscaping(Object, TLI))
2021       return false;
2022     // Subtlety: Allocas aren't visible to callers, but *are* potentially
2023     // visible to other threads if captured and used during their lifetimes.
2024     IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
2025   }
2026 
2027   // Check that all of the pointers in the alias set have the same type.  We
2028   // cannot (yet) promote a memory location that is loaded and stored in
2029   // different sizes.  While we are at it, collect alignment and AA info.
2030   for (Value *ASIV : PointerMustAliases) {
2034     if (SomePtr->getType() != ASIV->getType())
2035       return false;
2036 
2037     for (User *U : ASIV->users()) {
2038       // Ignore instructions that are outside the loop.
2039       Instruction *UI = dyn_cast<Instruction>(U);
2040       if (!UI || !CurLoop->contains(UI))
2041         continue;
2042 
2043       // If there is a non-load/store instruction in the loop, we can't promote
2044       // the location.
2045       if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2046         if (!Load->isUnordered())
2047           return false;
2048 
2049         SawUnorderedAtomic |= Load->isAtomic();
2050         SawNotAtomic |= !Load->isAtomic();
2051 
2052         Align InstAlignment = Load->getAlign();
2053 
2054         // Note that proving a load safe to speculate requires proving
2055         // sufficient alignment at the target location.  Proving it guaranteed
2056         // to execute does as well.  Thus we can increase our guaranteed
2057         // alignment as well.
2058         if (!DereferenceableInPH || (InstAlignment > Alignment))
2059           if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo,
2060                                              ORE, Preheader->getTerminator())) {
2061             DereferenceableInPH = true;
2062             Alignment = std::max(Alignment, InstAlignment);
2063           }
2064       } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2065         // Stores *of* the pointer are not interesting, only stores *to* the
2066         // pointer.
2067         if (UI->getOperand(1) != ASIV)
2068           continue;
2069         if (!Store->isUnordered())
2070           return false;
2071 
2072         SawUnorderedAtomic |= Store->isAtomic();
2073         SawNotAtomic |= !Store->isAtomic();
2074 
2075         // If the store is guaranteed to execute, both properties are satisfied.
2076         // We may want to check if a store is guaranteed to execute even if we
2077         // already know that promotion is safe, since it may have higher
2078         // alignment than any other guaranteed stores, in which case we can
2079         // raise the alignment on the promoted store.
2080         Align InstAlignment = Store->getAlign();
2081 
2082         if (!DereferenceableInPH || !SafeToInsertStore ||
2083             (InstAlignment > Alignment)) {
2084           if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
2085             DereferenceableInPH = true;
2086             SafeToInsertStore = true;
2087             Alignment = std::max(Alignment, InstAlignment);
2088           }
2089         }
2090 
2091         // If a store dominates all exit blocks, it is safe to sink.
2092         // As explained above, if an exit block was executed, a dominating
2093         // store must have been executed at least once, so we are not
2094         // introducing stores on paths that did not have them.
2095         // Note that this only looks at explicit exit blocks. If we ever
2096         // start sinking stores into unwind edges (see above), this will break.
2097         if (!SafeToInsertStore)
2098           SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2099             return DT->dominates(Store->getParent(), Exit);
2100           });
2101 
2102         // If the store is not guaranteed to execute, we may still get
2103         // deref info through it.
2104         if (!DereferenceableInPH) {
2105           DereferenceableInPH = isDereferenceableAndAlignedPointer(
2106               Store->getPointerOperand(), Store->getValueOperand()->getType(),
2107               Store->getAlign(), MDL, Preheader->getTerminator(), DT);
2108         }
2109       } else
2110         return false; // Not a load or store.
2111 
2112       // Merge the AA tags.
2113       if (LoopUses.empty()) {
2114         // On the first load/store, just take its AA tags.
2115         UI->getAAMetadata(AATags);
2116       } else if (AATags) {
2117         UI->getAAMetadata(AATags, /* Merge = */ true);
2118       }
2119 
2120       LoopUses.push_back(UI);
2121     }
2122   }
2123 
2124   // If we found both an unordered atomic instruction and a non-atomic memory
2125   // access, bail.  We can't blindly promote non-atomic to atomic since we
2126   // might not be able to lower the result.  We can't downgrade since that
2127   // would violate the memory model.  Also, align 0 is an error for atomics.
2128   if (SawUnorderedAtomic && SawNotAtomic)
2129     return false;
2130 
2131   // If we're inserting an atomic load in the preheader, we must be able to
2132   // lower it.  We're only guaranteed to be able to lower naturally aligned
2133   // atomics.
2134   auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
2135   if (SawUnorderedAtomic &&
2136       Alignment < MDL.getTypeStoreSize(SomePtrElemType))
2137     return false;
2138 
2139   // If we couldn't prove we can hoist the load, bail.
2140   if (!DereferenceableInPH)
2141     return false;
2142 
2143   // We know we can hoist the load, but don't have a guaranteed store.
2144   // Check whether the location is thread-local. If it is, then we can insert
2145   // stores along paths which originally didn't have them without violating the
2146   // memory model.
2147   if (!SafeToInsertStore) {
2148     if (IsKnownThreadLocalObject)
2149       SafeToInsertStore = true;
2150     else {
2151       Value *Object = getUnderlyingObject(SomePtr);
2152       SafeToInsertStore =
2153           (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
2154           !PointerMayBeCaptured(Object, true, true);
2155     }
2156   }
2157 
2158   // If we've still failed to prove we can sink the store, give up.
2159   if (!SafeToInsertStore)
2160     return false;
2161 
2162   // Otherwise, this is safe to promote, let's do it!
2163   LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
2164                     << '\n');
2165   ORE->emit([&]() {
2166     return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2167                               LoopUses[0])
2168            << "Moving accesses to memory location out of the loop";
2169   });
2170   ++NumPromoted;
2171 
2172   // Look at all the loop uses, and try to merge their locations.
2173   std::vector<const DILocation *> LoopUsesLocs;
2174   for (auto U : LoopUses)
2175     LoopUsesLocs.push_back(U->getDebugLoc().get());
2176   auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2177 
2178   // We use the SSAUpdater interface to insert phi nodes as required.
2179   SmallVector<PHINode *, 16> NewPHIs;
2180   SSAUpdater SSA(&NewPHIs);
2181   LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2182                         InsertPts, MSSAInsertPts, PIC, CurAST, MSSAU, *LI, DL,
2183                         Alignment.value(), SawUnorderedAtomic, AATags,
2184                         *SafetyInfo);
2185 
2186   // Set up the preheader to have a definition of the value.  It is the live-out
2187   // value from the preheader that uses in the loop will use.
2188   LoadInst *PreheaderLoad = new LoadInst(
2189       SomePtr->getType()->getPointerElementType(), SomePtr,
2190       SomePtr->getName() + ".promoted", Preheader->getTerminator());
2191   if (SawUnorderedAtomic)
2192     PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2193   PreheaderLoad->setAlignment(Alignment);
2194   PreheaderLoad->setDebugLoc(DebugLoc());
2195   if (AATags)
2196     PreheaderLoad->setAAMetadata(AATags);
2197   SSA.AddAvailableValue(Preheader, PreheaderLoad);
2198 
2199   if (MSSAU) {
2200     MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2201         PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2202     MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2203     MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2204   }
2205 
2206   if (MSSAU && VerifyMemorySSA)
2207     MSSAU->getMemorySSA()->verifyMemorySSA();
2208   // Rewrite all the loads in the loop and remember all the definitions from
2209   // stores in the loop.
2210   Promoter.run(LoopUses);
2211 
2212   if (MSSAU && VerifyMemorySSA)
2213     MSSAU->getMemorySSA()->verifyMemorySSA();
2214   // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2215   if (PreheaderLoad->use_empty())
2216     eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);
2217 
2218   return true;
2219 }
2220 
2221 /// Returns an owning pointer to an alias set which incorporates aliasing info
2222 /// from L and all subloops of L.
2223 std::unique_ptr<AliasSetTracker>
2224 LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
2225                                                  AAResults *AA) {
2226   auto CurAST = std::make_unique<AliasSetTracker>(*AA);
2227 
2228   // Add everything from all the sub loops.
2229   for (Loop *InnerL : L->getSubLoops())
2230     for (BasicBlock *BB : InnerL->blocks())
2231       CurAST->add(*BB);
2232 
2233   // And merge in this loop (without anything from inner loops).
2234   for (BasicBlock *BB : L->blocks())
2235     if (LI->getLoopFor(BB) == L)
2236       CurAST->add(*BB);
2237 
2238   return CurAST;
2239 }
2240 
2241 std::unique_ptr<AliasSetTracker>
2242 LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA(
2243     Loop *L, AAResults *AA, MemorySSAUpdater *MSSAU) {
2244   auto *MSSA = MSSAU->getMemorySSA();
2245   auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L);
2246   CurAST->addAllInstructionsInLoopUsingMSSA();
2247   return CurAST;
2248 }
2249 
2250 static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2251                                      AliasSetTracker *CurAST, Loop *CurLoop,
2252                                      AAResults *AA) {
2253   // First check to see if any of the basic blocks in CurLoop invalidate MemLoc.
2254   bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();
2255 
2256   if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
2257     return isInvalidatedAccordingToAST;
2258 
2259   // Check with a diagnostic analysis if we can refine the information above.
2260   // This is to identify the limitations of using the AST.
2261   // The alias set mechanism used by LICM has a major weakness in that it
2262   // combines all things which may alias into a single set *before* asking
2263   // modref questions. As a result, a single readonly call within a loop will
2264   // collapse all loads and stores into a single alias set and report
2265   // invalidation if the loop contains any store. For example, readonly calls
2266   // with deopt states have this form and create a general alias set with all
2267   // loads and stores.  In order to get any LICM in loops containing possible
2268   // deopt states we need a more precise invalidation: checking the mod ref
2269   // info of each loop instruction against MemLoc. This has a complexity of
2270   // O(N^2), so currently, it is used only as a diagnostic tool since the
2271   // default value of LICMN2Threshold is zero.
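       //
       // Illustrative example: a loop containing one readonly call and one
       // unrelated store collapses into a single alias set that isMod(), so an
       // otherwise hoistable load of a third location is reported invalidated.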
2272 
2273   // Don't look at nested loops.
2274   if (CurLoop->begin() != CurLoop->end())
2275     return true;
2276 
2277   int N = 0;
2278   for (BasicBlock *BB : CurLoop->getBlocks())
2279     for (Instruction &I : *BB) {
2280       if (N >= LICMN2Theshold) {
2281         LLVM_DEBUG(dbgs() << "Aliasing N2 threshold exhausted for "
2282                           << *(MemLoc.Ptr) << "\n");
2283         return true;
2284       }
2285       N++;
2286       auto Res = AA->getModRefInfo(&I, MemLoc);
2287       if (isModSet(Res)) {
2288         LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
2289                           << *(MemLoc.Ptr) << "\n");
2290         return true;
2291       }
2292     }
2293   LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
2294   return false;
2295 }
2296 
2297 bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2298                                       Loop *CurLoop, Instruction &I,
2299                                       SinkAndHoistLICMFlags &Flags) {
2300   // For hoisting, use the walker to determine safety
2301   if (!Flags.getIsSink()) {
2302     MemoryAccess *Source;
2303     // See declaration of SetLicmMssaOptCap for usage details.
2304     if (Flags.tooManyClobberingCalls())
2305       Source = MU->getDefiningAccess();
2306     else {
2307       Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2308       Flags.incrementClobberingCalls();
2309     }
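         // The location is invalidated only when its nearest clobber lies
         // inside the loop; a live-on-entry or out-of-loop clobber cannot
         // change the loaded value between iterations.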
2310     return !MSSA->isLiveOnEntryDef(Source) &&
2311            CurLoop->contains(Source->getBlock());
2312   }
2313 
2314   // For sinking, we'd need to check all Defs below this use. The getClobbering
2315   // call will look on the backedge of the loop, but will check aliasing with
2316   // the instructions on the previous iteration.
2317   // For example:
2318   // for (i ... )
2319   //   load a[i] ( Use (LoE)
2320   //   store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2321   //   i++;
2322   // The load sees no clobbering inside the loop, as the backedge alias check
2323   // does phi translation, and will check aliasing against store a[i-1].
2324   // However sinking the load outside the loop, below the store is incorrect.
2325 
2326   // For now, only sink if there are no Defs in the loop, and the existing ones
2327   // precede the use and are in the same block.
2328   // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2329   // needs PostDominatorTreeAnalysis.
2330   // FIXME: More precise: no Defs that alias this Use.
2331   if (Flags.tooManyMemoryAccesses())
2332     return true;
2333   for (auto *BB : CurLoop->getBlocks())
2334     if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
2335       return true;
2336   // When sinking, the source block may not be part of the loop so check it.
2337   if (!CurLoop->contains(&I))
2338     return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);
2339 
2340   return false;
2341 }
2342 
2343 bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
2344                                        MemoryUse &MU) {
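       // A Def in any other block, or a Def in MU's block that does not come
       // before MU, may clobber the location after the use, so a sunk copy of
       // the use could observe a different value.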
2345   if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2346     for (const auto &MA : *Accesses)
2347       if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2348         if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2349           return true;
2350   return false;
2351 }
2352 
2353 /// Little predicate that returns true if the specified basic block is in
2354 /// a subloop of the current one, not the current one itself.
2355 ///
2356 static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2357   assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2358   return LI->getLoopFor(BB) != CurLoop;
2359 }
2360