//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles other optimizations than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant.
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value. A
//     sketch of both transforms is shown below.
//
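// As an illustrative sketch (hypothetical IR, not taken from a test), given
// loop-invariant values %a and %b and a loop-invariant address %p:
//
//   loop:
//     %x = add i32 %a, %b        ; invariant: hoistable to the preheader
//     %v = load i32, ptr %p
//     %v2 = add i32 %v, %x
//     store i32 %v2, ptr %p
//     br i1 %c, label %loop, label %exit
//
// hoisting moves the add of %a and %b into the preheader, and scalar
// promotion rewrites the load/store of %p into a PHI carried around the loop,
// with a single load in the preheader and a single store in the exit block.
//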
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopNestAnalysis.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>

using namespace llvm;

namespace llvm {
class LPMUpdater;
} // namespace llvm

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromotionCandidates, "Number of promotion candidates");
STATISTIC(NumLoadPromoted, "Number of load-only promotions");
STATISTIC(NumLoadStorePromoted, "Number of load and store promotions");
STATISTIC(NumMinMaxHoisted,
          "Number of min/max expressions hoisted out of the loop");
STATISTIC(NumGEPsHoisted,
          "Number of geps reassociated and hoisted out of the loop");
STATISTIC(NumAddSubHoisted, "Number of add/subtract expressions reassociated "
                            "and hoisted out of the loop");
STATISTIC(NumFPAssociationsHoisted, "Number of invariant FP expressions "
                                    "reassociated and hoisted out of the loop");
STATISTIC(NumIntAssociationsHoisted,
          "Number of invariant int expressions "
          "reassociated and hoisted out of the loop");
/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<bool>
    SingleThread("licm-force-thread-model-single", cl::Hidden, cl::init(false),
                 cl::desc("Force thread model single in LICM pass"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

static cl::opt<unsigned> FPAssociationUpperLimit(
    "licm-max-num-fp-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

cl::opt<unsigned> IntAssociationUpperLimit(
    "licm-max-num-int-reassociations", cl::init(5U), cl::Hidden,
    cl::desc(
        "Set upper limit for the number of transformations performed "
        "during a single round of hoisting the reassociated expressions."));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    AssumptionCache *AC, bool AllowSpeculation);
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup);
static bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA,
                                      MemoryUse &MU);
/// Aggregates various functions for hoisting computations out of a loop.
static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU);

static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
using PointersAndHasReadsOutsideSet =
    std::pair<SmallSetVector<Value *, 8>, bool>;
static SmallVector<PointersAndHasReadsOutsideSet, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 AssumptionCache *AC, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap,
                          bool LicmAllowSpeculation)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
        LicmAllowSpeculation(LicmAllowSpeculation) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool LicmAllowSpeculation;
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
      bool LicmAllowSpeculation = true)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                           LicmAllowSpeculation) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    Function *F = L->getHeader()->getParent();

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
        &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F),
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(*F),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*F),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information and requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    AU.addPreserved<LazyBlockFrequencyInfoPass>();
    AU.addPreserved<LazyBranchProbabilityInfoPass>();
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LICM requires MemorySSA (loop-mssa)",
                       /*GenCrashDiag=*/false);

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.AC, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << '>';
}

PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
                                 LoopStandardAnalysisResults &AR,
                                 LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LNICM requires MemorySSA (loop-mssa)",
                       /*GenCrashDiag=*/false);

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(LN.getParent());

  LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
                               Opts.AllowSpeculation);

  Loop &OutermostLoop = LN.getOutermostLoop();
  bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, &AR.AC,
                                &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);

  if (!Changed)
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

void LNICMPass::printPipeline(
    raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
  static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
      OS, MapClassName2PassName);

  OS << '<';
  OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
  OS << '>';
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop &L,
                                                   MemorySSA &MSSA)
    : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
                            IsSink, L, MSSA) {}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop &L, MemorySSA &MSSA)
    : LicmMssaOptCap(LicmMssaOptCap),
      LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
      IsSink(IsSink) {
  unsigned AccessCapCount = 0;
  for (auto *BB : L.getBlocks())
    if (const auto *Accesses = MSSA.getBlockAccesses(BB))
      for (const auto &MA : *Accesses) {
        (void)MA;
        ++AccessCapCount;
        if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
          NoOfMemAccTooLarge = true;
          return;
        }
      }
}

/// Hoist expressions out of the specified loop. Note that alias info for the
/// inner loop is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI,
                                        DominatorTree *DT, AssumptionCache *AC,
                                        TargetLibraryInfo *TLI,
                                        TargetTransformInfo *TTI,
                                        ScalarEvolution *SE, MemorySSA *MSSA,
                                        OptimizationRemarkEmitter *ORE,
                                        bool LoopNestMode) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  // Don't sink stores from loops with coroutine suspend instructions.
  // LICM would sink instructions into the default destination of
  // the coroutine switch. The default destination of the switch is to
  // handle the case where the coroutine is suspended, by which point the
  // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This unfortunately hurts the performance of coroutines; however,
  // there is currently no general solution for this. Similar issues could also
  // potentially happen in other passes where instructions are being moved
  // across that edge.
  bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
    return llvm::any_of(*BB, [](Instruction &I) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
    });
  });

  MemorySSAUpdater MSSAU(MSSA);
  SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                              /*IsSink=*/true, *L, *MSSA);

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |=
        LoopNestMode
            ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI, DT,
                                    TLI, TTI, L, MSSAU, &SafetyInfo, Flags, ORE)
            : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                         MSSAU, &SafetyInfo, Flags, ORE);
  Flags.setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, AC, TLI, L,
                           MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
                           LicmAllowSpeculation);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
    // Figure out the loop exits and their insertion points.
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<BasicBlock::iterator, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(ExitBlock->getFirstInsertionPt());
        MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      // Promoting one set of accesses may make the pointers for another set
      // loop invariant, so run this in a loop.
      bool Promoted = false;
      bool LocalPromoted;
      do {
        LocalPromoted = false;
        for (auto [PointerMustAliases, HasReadsOutsideSet] :
             collectPromotionCandidates(MSSA, AA, L)) {
          LocalPromoted |= promoteLoopAccessesToScalars(
              PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
              DT, AC, TLI, TTI, L, MSSAU, &SafetyInfo, ORE,
              LicmAllowSpeculation, HasReadsOutsideSet);
        }
        Promoted |= LocalPromoted;
      } while (LocalPromoted);

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent has had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of basic functional correctness checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions();
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // The instruction is not used in the loop if it is dead. In this case,
      // we just delete it instead of sinking it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all of the users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FoldableInLoop = false;
      bool LoopNestMode = OutermostLoop != nullptr;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFoldableInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
                                    SafetyInfo, TTI, FoldableInLoop,
                                    LoopNestMode) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE)) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FoldableInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  return Changed;
}

bool llvm::sinkRegionForLoopNest(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                                 DominatorTree *DT, TargetLibraryInfo *TLI,
                                 TargetTransformInfo *TTI, Loop *CurLoop,
                                 MemorySSAUpdater &MSSAU,
                                 ICFLoopSafetyInfo *SafetyInfo,
                                 SinkAndHoistLICMFlags &Flags,
                                 OptimizationRemarkEmitter *ORE) {

  bool Changed = false;
  SmallPriorityWorklist<Loop *, 4> Worklist;
  Worklist.insert(CurLoop);
  appendLoopsToWorklist(*CurLoop, Worklist);
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          MSSAU, SafetyInfo, Flags, ORE, CurLoop);
  }
  return Changed;
}

namespace {
// This is a helper class for hoistRegion, enabling it to hoist control flow so
// that phis can be hoisted. The way this works is that we initially start
// hoisting to the loop preheader, and when we see a loop invariant branch we
// make note of this. When we then come to hoist an instruction that's
// conditional on such a branch, we duplicate the branch and the relevant
// control flow, then hoist the instruction into the block corresponding to its
// original block in the duplicated control flow.
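// As an illustrative sketch (hypothetical block names): for a diamond in the
// loop body whose branch condition is loop invariant,
//
//        entry                preheader ---> entry.licm
//        /   \                              /          \
//   if.then  if.else   ==>        if.then.licm      if.else.licm
//        \   /                              \          /
//         end                               end.licm ---> loop
//
// the invariant branch is duplicated above the loop, hoistable instructions
// from if.then/if.else move into the matching *.licm blocks, end.licm becomes
// the new preheader, and phis in "end" can be hoisted into end.licm.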
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from.
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater &MSSAU;

  // A map of blocks in the loop to the block their instructions will be hoisted
  // to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater &MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as that's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that.
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch.
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader.
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them.
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever the
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
          HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, AssumptionCache *AC,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                       bool AllowSpeculation) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

    for (Instruction &I : llvm::make_early_inc_range(*BB)) {
      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure the instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, MSSAU, true, Flags, ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, TLI, CurLoop, SafetyInfo, ORE,
              Preheader->getTerminator(), AC, AllowSpeculation)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
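      // As a sketch (names hypothetical), with a loop-invariant divisor %d:
      //   %q = fdiv arcp float %x, %d
      // becomes
      //   %r = fdiv arcp float 1.0, %d   ; hoisted to the preheader
      //   %q = fmul arcp float %x, %r    ; remains in the loop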
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);
        ReciprocalDivisor->setDebugLoc(I.getDebugLoc());

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        Product->setDebugLoc(I.getDebugLoc());
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Try to reassociate instructions so that parts of the computation can
      // be done outside the loop.
      if (hoistArithmetics(I, *CurLoop, *SafetyInfo, MSSAU, AC, DT)) {
        Changed = true;
        continue;
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so, make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to make sure
  // that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, HoistPoint->getIterator(), *SafetyInfo, MSSAU,
                              SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within the scope of the loop. LI is invariant
// if CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
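// As an illustrative sketch (hypothetical IR), a load of %p inside the loop
// can be treated as invariant when a block dominating the loop contains:
//   %token = call ptr @llvm.invariant.start.p0(i64 4, ptr %p)
// with no uses of %token, and the region size (4 bytes here) covers the
// loaded type.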
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getPointerOperand();
  const DataLayout &DL = LI->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in future the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If we've ended up at a global/constant, bail. We shouldn't be looking at
  // uselists for non-local Values in a loop pass.
  if (isa<Constant>(Addr))
    return false;

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedValue() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}

/// Return true if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(const MemorySSAUpdater &MSSAU, const Loop *L) {
  for (auto *BB : L->getBlocks())
    if (MSSAU.getMemorySSA()->getBlockDefs(BB))
      return false;
  return true;
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater &MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU.getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

static MemoryAccess *getClobberingMemoryAccess(MemorySSA &MSSA,
                                               BatchAAResults &BAA,
                                               SinkAndHoistLICMFlags &Flags,
                                               MemoryUseOrDef *MA) {
  // See the declaration of SetLicmMssaOptCap for usage details.
  if (Flags.tooManyClobberingCalls())
    return MA->getDefiningAccess();

  MemoryAccess *Source =
      MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(MA, BAA);
  Flags.incrementClobberingCalls();
  return Source;
}
bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, MemorySSAUpdater &MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags &Flags,
                              OptimizationRemarkEmitter *ORE) {
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU.getMemorySSA();
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (!isModSet(AA->getModRefInfoMask(LI->getOperand(0))))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads.

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    auto MU = cast<MemoryUse>(MSSA->getMemoryAccess(LI));

    bool InvariantGroup = LI->hasMetadata(LLVMContext::MD_invariant_group);

    bool Invalidated = pointerInvalidatedByLoop(
        MSSA, MU, CurLoop, I, Flags, InvariantGroup);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // The convergent attribute is used on operations that involve inter-thread
    // communication, whose results are implicitly affected by the enclosing
    // control flow. It is not safe to hoist or sink such operations across
    // control flow.
    if (CI->isConvergent())
      return false;

    // FIXME: Current LLVM IR semantics don't work well with coroutines and
    // thread local globals. We currently treat getting the address of a thread
    // local global as not accessing memory, even though it may not be a
    // constant throughout a function with coroutines. Remove this check after
    // we better model semantics of thread local globals.
    if (CI->getFunction()->isPresplitCoroutine())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw.
      return true;

    // Handle simple cases by querying alias analysis.
    MemoryEffects Behavior = AA->getMemoryEffects(CI);

    if (Behavior.doesNotAccessMemory())
      return true;
    if (Behavior.onlyReadsMemory()) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (Behavior.onlyAccessesArgPointees()) {
        // TODO: expand to writeable arguments.
        for (Value *Op : CI->args())
          if (Op->getType()->isPointerTy() &&
              pointerInvalidatedByLoop(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
                  Flags, /*InvariantGroup=*/false))
            return false;
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
1265 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1266 // Fences alias (most) everything to provide ordering. For the moment,
1267 // just give up if there are any other memory operations in the loop.
1268 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1269 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1270 if (!SI->isUnordered())
1271 return false; // Don't sink/hoist volatile or ordered atomic store!
1272
1273 // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
      return true;
    // If there are more accesses than the Promotion cap, then give up as we're
    // not walking a list that long.
    if (Flags.tooManyMemoryAccesses())
      return false;

    auto *SIMD = MSSA->getMemoryAccess(SI);
    BatchAAResults BAA(*AA);
    auto *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, SIMD);
    // Make sure there are no clobbers inside the loop.
    if (!MSSA->isLiveOnEntryDef(Source) &&
        CurLoop->contains(Source->getBlock()))
      return false;

    // If there are interfering Uses (i.e. their defining access is in the
    // loop), or ordered loads (stored as Defs!), don't move this store.
    // Could do better here, but this is conservatively correct.
    // TODO: Cache set of Uses on the first walk in runOnLoop, update when
    // moving accesses. Can also extend to dominating uses.
    for (auto *BB : CurLoop->getBlocks())
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses)
          if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
            auto *MD = getClobberingMemoryAccess(*MSSA, BAA, Flags,
                                                 const_cast<MemoryUse *>(MU));
            if (!MSSA->isLiveOnEntryDef(MD) &&
                CurLoop->contains(MD->getBlock()))
              return false;
            // Disable hoisting past potentially interfering loads. Optimized
            // Uses may point to an access outside the loop, as getClobbering
            // checks the previous iteration when walking the backedge.
            // FIXME: More precise: no Uses that alias SI.
            if (!Flags.getIsSink() && !MSSA->dominates(SIMD, MU))
              return false;
          } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
            if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
              (void)LI; // Silence warning.
              assert(!LI->isUnordered() && "Expected ordered or volatile load");
              return false;
            }
            // Any call, even if it does not clobber SI, may still be a use.
            if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
              // Check if the call may read from the memory location written
              // to by SI. Check CI's attributes and arguments; the number of
              // such checks performed is limited above by NoOfMemAccTooLarge.
              ModRefInfo MRI = BAA.getModRefInfo(CI, MemoryLocation::get(SI));
              if (isModOrRefSet(MRI))
                return false;
            }
          }
      }
    return true;
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}

/// Return true if the instruction is foldable in the loop.
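/// A GEP is considered foldable here when it is itself free and all of its
/// in-loop users are loads or stores in the same basic block, so it can be
/// expected to fold into their addressing modes.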
static bool isFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                             const TargetTransformInfo *TTI) {
  if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    InstructionCost CostI =
        TTI->getInstructionCost(&I, TargetTransformInfo::TCK_SizeAndLatency);
    if (CostI != TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getInstructionCost because currently
    // it optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  }

  return false;
}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFoldableInLoop(const Instruction &I, const Loop *CurLoop,
                                      const LoopSafetyInfo *SafetyInfo,
                                      TargetTransformInfo *TTI,
                                      bool &FoldableInLoop, bool LoopNestMode) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFoldable = isFoldableInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;

      if (LoopNestMode) {
        while (isa<PHINode>(UI) && UI->hasOneUser() &&
               UI->getNumOperands() == 1) {
          if (!CurLoop->contains(UI))
            break;
          UI = cast<Instruction>(UI->user_back());
        }
      }
    }

    if (CurLoop->contains(UI)) {
      if (IsFoldable) {
        FoldableInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

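/// Clone \p I into \p ExitBlock at its first insertion point. For calls, the
/// funclet operand bundle is rebuilt to match the exit block's color, and a
/// MemorySSA access is created for the clone when one is needed. LCSSA PHIs
/// are created for any in-loop operands of the clone, mirroring the incoming
/// blocks of \p PN.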
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater &MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
    New->copyMetadata(*CI);
  } else {
    New = I.clone();
  }

  New->insertInto(&ExitBlock, ExitBlock.getFirstInsertionPt());
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU.getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    // After running some passes, MemorySSA might be outdated, and the
    // instruction `I` may have become a non-memory touching instruction.
    MemoryAccess *NewMemAcc = MSSAU.createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning,
        /*CreationMustSucceed=*/false);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU.insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU.insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
  // this is particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (Use &Op : New->operands())
    if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
      auto *OInst = cast<Instruction>(Op.get());
      PHINode *OpPN =
          PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                          OInst->getName() + ".lcssa");
      OpPN->insertBefore(ExitBlock.begin());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
      Op = OpPN;
    }
  return New;
}

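/// Erase \p I from its parent block, keeping the MemorySSA analysis and the
/// loop safety info in sync with the change.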
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU) {
  MSSAU.removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}

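/// Move \p I before \p Dest, updating the safety info, MemorySSA, and (when
/// available) ScalarEvolution's cached block and loop dispositions.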
static void moveInstructionBefore(Instruction &I, BasicBlock::iterator Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater &MSSAU,
                                  ScalarEvolution *SE) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest->getParent());
  I.moveBefore(*Dest->getParent(), Dest);
  if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
          MSSAU.getMemorySSA()->getMemoryAccess(&I)))
    MSSAU.moveToPlace(OldMemAcc, Dest->getParent(),
                      MemorySSA::BeforeTerminator);
  if (SE)
    SE->forgetBlockAndLoopDispositions(&I);
}

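/// Return a clone of \p I suitable to replace the trivially replaceable LCSSA
/// PHI \p TPN, reusing a previously created clone if one already exists for
/// the PHI's exit block.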
static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater &MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}

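/// Return true if it is safe to split all in-loop predecessors of the block
/// containing \p PN: the block must be splittable, must not be an EH pad once
/// block colors have been computed, and must have no indirectbr predecessors.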
static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already
  // exist it requires updating BlockColors for all offspring blocks
  // accordingly. By skipping such corner cases, we can make updating
  // BlockColors after splitting predecessors fairly simple.
  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
    return false;
  for (BasicBlock *BBPred : predecessors(BB)) {
    if (isa<IndirectBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit so that instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping
  // the loop in the canonical form where each predecessor of each exit block
  // should be contained within the loop. For example, this will convert the
  // loop below from
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // to
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1] <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2] <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
      BasicBlock *NewPred = SplitBlockPredecessors(
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-block with BlockColors in
      // canSplitPredecessors(), we can simply assign predecessor's color to
      // the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting the
        // reference to the vector we are copying because inserting the new
        // element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater &MSSAU, OptimizationRemarkEmitter *ORE) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");

  // Iterate over users to be ready for actual sinking. Replace uses in
  // unreachable blocks with poison and make all user PHIs trivially
  // replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = PoisonValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = PoisonValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, &MSSAU);

    // Should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  if (VisitedUsers.empty())
    return Changed;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  // First check if I is worth sinking for all uses. Sink only when it is worth
  // across all uses.
  SmallSetVector<User *, 8> Users(I.user_begin(), I.user_end());
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");

    // The PHI must be trivially replaceable.
    Instruction *New = sinkThroughTriviallyReplaceablePHI(
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    // As we sink the instruction out of the BB, drop its debug location.
    New->dropLocation();
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, MSSAU);
    Changed = true;
  }
  return Changed;
}

/// When an instruction that uses only loop-invariant operands is found to be
/// safe to hoist, this function is called to do the dirty work.
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater &MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
                    << I << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
                                                         << ore::NV("Inst", &I);
  });

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
  // Similarly, if I is a call and it is not guaranteed to execute in the loop,
  // then moving to the preheader means we should strip attributes on the call
  // that can cause UB since we may be hoisting above conditions that allowed
  // inferring those attributes. They may not be valid at the preheader.
  if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
      // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
      // time in isGuaranteedToExecute if we don't actually have anything to
      // drop. It is a compile time optimization, not required for correctness.
      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
    I.dropUBImplyingAttrsAndMetadata();

  if (isa<PHINode>(I))
    // Move the new node to the end of the phi list in the destination block.
    moveInstructionBefore(I, Dest->getFirstNonPHIIt(), *SafetyInfo, MSSAU, SE);
  else
    // Move the new node to the destination block, before its terminator.
    moveInstructionBefore(I, Dest->getTerminator()->getIterator(), *SafetyInfo,
                          MSSAU, SE);

  I.updateLocationAfterHoist();

  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumHoisted;
}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// if it is known not to trap when moved to the preheader, or if it is a
/// trapping instruction that is guaranteed to execute.
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    AssumptionCache *AC, bool AllowSpeculation) {
  if (AllowSpeculation &&
      isSafeToSpeculativelyExecute(&Inst, CtxI, AC, DT, TLI))
    return true;

  bool GuaranteedToExecute =
      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);

  if (!GuaranteedToExecute) {
    auto *LI = dyn_cast<LoadInst>(&Inst);
    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
               << "failed to hoist load with loop-invariant address "
                  "because load is conditionally executed";
      });
  }

  return GuaranteedToExecute;
}

namespace {
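// Helper that rewrites all in-loop loads and stores of a promoted pointer
// using SSAUpdater and, when legal, materializes stores of the live-out value
// in the loop exit blocks.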
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  SmallVectorImpl<BasicBlock::iterator> &LoopInsertPts;
  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
  PredIteratorCache &PredCache;
  MemorySSAUpdater &MSSAU;
  LoopInfo &LI;
  DebugLoc DL;
  Align Alignment;
  bool UnorderedAtomic;
  AAMDNodes AATags;
  ICFLoopSafetyInfo &SafetyInfo;
  bool CanInsertStoresInExitBlocks;
  ArrayRef<const Instruction *> Uses;

  // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
  // (if legal) if doing so would add an out-of-loop use to an instruction
  // defined in-loop.
  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
      return V;

    Instruction *I = cast<Instruction>(V);
    // We need to create an LCSSA PHI node for the incoming value and
    // store that.
    PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                  I->getName() + ".lcssa");
    PN->insertBefore(BB->begin());
    for (BasicBlock *Pred : PredCache.get(BB))
      PN->addIncoming(I, Pred);
    return PN;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<BasicBlock::iterator> &LIP,
               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
               MemorySSAUpdater &MSSAU, LoopInfo &li, DebugLoc dl,
               Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
               ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), LoopExitBlocks(LEB),
        LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), PredCache(PIC), MSSAU(MSSAU),
        LI(li), DL(std::move(dl)), Alignment(Alignment),
        UnorderedAtomic(UnorderedAtomic), AATags(AATags),
        SafetyInfo(SafetyInfo),
        CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks), Uses(Insts) {}

  void insertStoresInLoopExitBlocks() {
    // Insert stores in the loop exit blocks. Each exit block gets a
    // store of the live-out values that feed them. Since we've already told
    // the SSA updater about the defs in the loop and the preheader
    // definition, it is all set and we can start using it.
    DIAssignID *NewID = nullptr;
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      BasicBlock::iterator InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      if (UnorderedAtomic)
        NewSI->setOrdering(AtomicOrdering::Unordered);
      NewSI->setAlignment(Alignment);
      NewSI->setDebugLoc(DL);
      // Attach DIAssignID metadata to the new store, generating it on the
      // first loop iteration.
      if (i == 0) {
        // NewSI will have its DIAssignID set here if there are any stores in
        // Uses with a DIAssignID attachment. This merged ID will then be
        // attached to the other inserted stores (in the branch below).
        NewSI->mergeDIAssignID(Uses);
        NewID = cast_or_null<DIAssignID>(
            NewSI->getMetadata(LLVMContext::MD_DIAssignID));
      } else {
        // Attach the DIAssignID (or nullptr) merged from Uses in the branch
        // above.
        NewSI->setMetadata(LLVMContext::MD_DIAssignID, NewID);
      }

      if (AATags)
        NewSI->setAAMetadata(AATags);

      MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
      MemoryAccess *NewMemAcc;
      if (!MSSAInsertPoint) {
        NewMemAcc = MSSAU.createMemoryAccessInBB(
            NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
      } else {
        NewMemAcc =
            MSSAU.createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
      }
      MSSAInsertPts[i] = NewMemAcc;
      MSSAU.insertDef(cast<MemoryDef>(NewMemAcc), true);
      // FIXME: true for safety, false may still be correct.
    }
  }

  void doExtraRewritesBeforeFinalDeletion() override {
    if (CanInsertStoresInExitBlocks)
      insertStoresInLoopExitBlocks();
  }

  void instructionDeleted(Instruction *I) const override {
    SafetyInfo.removeInstruction(I);
    MSSAU.removeMemoryAccess(I);
  }

  bool shouldDelete(Instruction *I) const override {
    if (isa<StoreInst>(I))
      return CanInsertStoresInExitBlocks;
    return true;
  }
};

bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
                                 DominatorTree *DT) {
  // We can perform the captured-before check against any instruction in the
  // loop header, as the loop header is reachable from any instruction inside
  // the loop.
  // TODO: ReturnCaptures=true shouldn't be necessary here.
  return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
                                     /* StoreCaptures */ true,
                                     L->getHeader()->getTerminator(), DT);
}

/// Return true if we can prove that a caller cannot inspect the object if an
/// unwind occurs inside the loop.
bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
                                DominatorTree *DT) {
  bool RequiresNoCaptureBeforeUnwind;
  if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
    return false;

  return !RequiresNoCaptureBeforeUnwind ||
         isNotCapturedBeforeOrInLoop(Object, L, DT);
}

bool isThreadLocalObject(const Value *Object, const Loop *L, DominatorTree *DT,
                         TargetTransformInfo *TTI) {
  // The object must be function-local to start with, and then not captured
  // before/in the loop.
  return (isIdentifiedFunctionLocal(Object) &&
          isNotCapturedBeforeOrInLoop(Object, L, DT)) ||
         (TTI->isSingleThreaded() || SingleThread);
}

} // namespace

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
bool llvm::promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<BasicBlock::iterator> &InsertPts,
    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, AssumptionCache *AC,
    const TargetLibraryInfo *TLI, TargetTransformInfo *TTI, Loop *CurLoop,
    MemorySSAUpdater &MSSAU, ICFLoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, bool AllowSpeculation,
    bool HasReadsOutsideSet) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  LLVM_DEBUG({
    dbgs() << "Trying to promote set of must-aliased pointers:\n";
    for (Value *Ptr : PointerMustAliases)
      dbgs() << "  " << *Ptr << "\n";
  });
  ++NumPromotionCandidates;

  Value *SomePtr = *PointerMustAliases.begin();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It is not safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // p1) The memory may not be dereferenceable on entry to the loop. In this
  //     case, we can't insert the required load in the preheader.
  // p2) The memory model does not allow us to insert a store along any dynamic
  //     path which did not originally have one.
  //
  // If at least one store is guaranteed to execute, both properties are
  // satisfied, and promotion is legal.
  //
  // This, however, is not a necessary condition. Even if no store/load is
  // guaranteed to execute, we can still establish these properties.
  // We can establish (p1) by proving that hoisting the load into the preheader
  // is safe (i.e. proving dereferenceability on all paths through the loop). We
  // can use any access within the alias set to prove dereferenceability,
  // since they're all must alias.
  //
  // There are two ways to establish (p2):
  // a) Prove the location is thread-local. In this case the memory model
  // requirement does not apply, and stores are safe to insert.
  // b) Prove a store dominates every exit block. In this case, if an exit
  // block is reached, the original dynamic path would have taken us through
  // the store, so inserting a store into the exit block is safe. Note that this
  // is different from the store being guaranteed to execute. For instance,
  // if an exception is thrown on the first iteration of the loop, the original
  // store is never executed, but the exit blocks are not executed either.

  bool DereferenceableInPH = false;
  bool StoreIsGuaranteedToExecute = false;
  bool FoundLoadToPromote = false;
  // Goes from Unknown to either Safe or Unsafe, but can't switch between them.
  enum {
    StoreSafe,
    StoreUnsafe,
    StoreSafetyUnknown,
  } StoreSafety = StoreSafetyUnknown;

  SmallVector<Instruction *, 64> LoopUses;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  Align Alignment;
  // Keep track of which types of access we see.
  bool SawUnorderedAtomic = false;
  bool SawNotAtomic = false;
  AAMDNodes AATags;

  const DataLayout &MDL = Preheader->getDataLayout();

  // If there are reads outside the promoted set, then promoting stores is
  // definitely not safe.
  if (HasReadsOutsideSet)
    StoreSafety = StoreUnsafe;

  if (StoreSafety == StoreSafetyUnknown && SafetyInfo->anyBlockMayThrow()) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge. We do
    // this by proving that the caller can't have a reference to the object
    // after return and thus can't possibly load from the object.
    Value *Object = getUnderlyingObject(SomePtr);
    if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
      StoreSafety = StoreUnsafe;
  }

  // Check that all accesses to pointers in the alias set use the same type.
  // We cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and AA info.
  Type *AccessTy = nullptr;
  for (Value *ASIV : PointerMustAliases) {
    for (Use &U : ASIV->uses()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U.getUser());
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't promote
      // it.
      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        if (!Load->isUnordered())
          return false;

        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();
        FoundLoadToPromote = true;

        Align InstAlignment = Load->getAlign();

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location. Proving it guaranteed
        // to execute does as well. Thus we can increase our guaranteed
        // alignment as well.
        if (!DereferenceableInPH || (InstAlignment > Alignment))
          if (isSafeToExecuteUnconditionally(
                  *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
                  Preheader->getTerminator(), AC, AllowSpeculation)) {
            DereferenceableInPH = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
          continue;
        if (!Store->isUnordered())
          return false;

        SawUnorderedAtomic |= Store->isAtomic();
        SawNotAtomic |= !Store->isAtomic();

        // If the store is guaranteed to execute, both properties are satisfied.
        // We may want to check if a store is guaranteed to execute even if we
        // already know that promotion is safe, since it may have higher
        // alignment than any other guaranteed stores, in which case we can
        // raise the alignment on the promoted store.
        Align InstAlignment = Store->getAlign();
        bool GuaranteedToExecute =
            SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop);
        StoreIsGuaranteedToExecute |= GuaranteedToExecute;
        if (GuaranteedToExecute) {
          DereferenceableInPH = true;
          if (StoreSafety == StoreSafetyUnknown)
            StoreSafety = StoreSafe;
          Alignment = std::max(Alignment, InstAlignment);
        }

        // If a store dominates all exit blocks, it is safe to sink.
        // As explained above, if an exit block was executed, a dominating
        // store must have been executed at least once, so we are not
        // introducing stores on paths that did not have them.
        // Note that this only looks at explicit exit blocks. If we ever
        // start sinking stores into unwind edges (see above), this will break.
        if (StoreSafety == StoreSafetyUnknown &&
            llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
              return DT->dominates(Store->getParent(), Exit);
            }))
          StoreSafety = StoreSafe;

        // If the store is not guaranteed to execute, we may still get
        // deref info through it.
        if (!DereferenceableInPH) {
          DereferenceableInPH = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getValueOperand()->getType(),
              Store->getAlign(), MDL, Preheader->getTerminator(), AC, DT, TLI);
        }
      } else
        continue; // Not a load or store.

      if (!AccessTy)
        AccessTy = getLoadStoreType(UI);
      else if (AccessTy != getLoadStoreType(UI))
        return false;

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        AATags = UI->getAAMetadata();
      } else if (AATags) {
        AATags = AATags.merge(UI->getAAMetadata());
      }

      LoopUses.push_back(UI);
    }
  }

  // If we found both an unordered atomic instruction and a non-atomic memory
  // access, bail. We can't blindly promote non-atomic to atomic since we
  // might not be able to lower the result. We can't downgrade since that
  // would violate the memory model. Also, align 0 is an error for atomics.
  if (SawUnorderedAtomic && SawNotAtomic)
    return false;

  // If we're inserting an atomic load in the preheader, we must be able to
  // lower it. We're only guaranteed to be able to lower naturally aligned
  // atomics.
  if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
    return false;

  // If we couldn't prove we can hoist the load, bail.
  if (!DereferenceableInPH) {
    LLVM_DEBUG(dbgs() << "Not promoting: Not dereferenceable in preheader\n");
    return false;
  }

  // We know we can hoist the load, but don't have a guaranteed store.
  // Check whether the location is writable and thread-local. If it is, then we
  // can insert stores along paths which originally didn't have them without
  // violating the memory model.
  if (StoreSafety == StoreSafetyUnknown) {
    Value *Object = getUnderlyingObject(SomePtr);
    bool ExplicitlyDereferenceableOnly;
    if (isWritableObject(Object, ExplicitlyDereferenceableOnly) &&
        (!ExplicitlyDereferenceableOnly ||
         isDereferenceablePointer(SomePtr, AccessTy, MDL)) &&
        isThreadLocalObject(Object, CurLoop, DT, TTI))
      StoreSafety = StoreSafe;
  }

  // If we've still failed to prove we can sink the store, hoist the load
  // only, if possible.
  if (StoreSafety != StoreSafe && !FoundLoadToPromote)
    // If we cannot hoist the load either, give up.
    return false;

  // Let's do the promotion!
  if (StoreSafety == StoreSafe) {
    LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
                      << '\n');
    ++NumLoadStorePromoted;
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
                      << '\n');
    ++NumLoadPromoted;
  }

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
                              LoopUses[0])
           << "Moving accesses to memory location out of the loop";
  });

  // Look at all the loop uses, and try to merge their locations.
  std::vector<DILocation *> LoopUsesLocs;
  for (auto *U : LoopUses)
    LoopUsesLocs.push_back(U->getDebugLoc().get());
  auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode *, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, ExitBlocks, InsertPts,
                        MSSAInsertPts, PIC, MSSAU, *LI, DL, Alignment,
                        SawUnorderedAtomic, AATags, *SafetyInfo,
                        StoreSafety == StoreSafe);

  // Set up the preheader to have a definition of the value. It is the live-out
  // value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad = nullptr;
  if (FoundLoadToPromote || !StoreIsGuaranteedToExecute) {
    PreheaderLoad =
        new LoadInst(AccessTy, SomePtr, SomePtr->getName() + ".promoted",
                     Preheader->getTerminator()->getIterator());
    if (SawUnorderedAtomic)
      PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
    PreheaderLoad->setAlignment(Alignment);
    PreheaderLoad->setDebugLoc(DebugLoc());
    if (AATags)
      PreheaderLoad->setAAMetadata(AATags);

    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU.createMemoryAccessInBB(
        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
    MSSAU.insertUse(NewMemUse, /*RenameUses=*/true);
    SSA.AddAvailableValue(Preheader, PreheaderLoad);
  } else {
    SSA.AddAvailableValue(Preheader, PoisonValue::get(AccessTy));
  }

  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  if (VerifyMemorySSA)
    MSSAU.getMemorySSA()->verifyMemorySSA();
  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad && PreheaderLoad->use_empty())
    eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);

  return true;
}

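/// Apply \p Fn to every memory instruction in loop \p L, walking MemorySSA's
/// per-block access lists.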
static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn) {
  for (const BasicBlock *BB : L->blocks())
    if (const auto *Accesses = MSSA->getBlockAccesses(BB))
      for (const auto &Access : *Accesses)
        if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
          Fn(MUD->getMemoryInst());
}

// The bool indicates whether there might be reads outside the set, in which
// case only loads may be promoted.
static SmallVector<PointersAndHasReadsOutsideSet, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
  BatchAAResults BatchAA(*AA);
  AliasSetTracker AST(BatchAA);

  auto IsPotentiallyPromotable = [L](const Instruction *I) {
    if (const auto *SI = dyn_cast<StoreInst>(I))
      return L->isLoopInvariant(SI->getPointerOperand());
    if (const auto *LI = dyn_cast<LoadInst>(I))
      return L->isLoopInvariant(LI->getPointerOperand());
    return false;
  };

  // Populate AST with potentially promotable accesses.
  SmallPtrSet<Value *, 16> AttemptingPromotion;
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (IsPotentiallyPromotable(I)) {
      AttemptingPromotion.insert(I);
      AST.add(I);
    }
  });

  // We're only interested in must-alias sets that contain a mod.
  SmallVector<PointerIntPair<const AliasSet *, 1, bool>, 8> Sets;
  for (AliasSet &AS : AST)
    if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
      Sets.push_back({&AS, false});

  if (Sets.empty())
    return {}; // Nothing to promote...

  // Discard any sets for which there is an aliasing non-promotable access.
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (AttemptingPromotion.contains(I))
      return;

    llvm::erase_if(Sets, [&](PointerIntPair<const AliasSet *, 1, bool> &Pair) {
      ModRefInfo MR = Pair.getPointer()->aliasesUnknownInst(I, BatchAA);
      // Cannot promote if there are writes outside the set.
      if (isModSet(MR))
        return true;
      if (isRefSet(MR)) {
        // Remember reads outside the set.
        Pair.setInt(true);
        // If this is a mod-only set and there are reads outside the set,
        // we will not be able to promote, so bail out early.
        return !Pair.getPointer()->isRef();
      }
      return false;
    });
  });

  SmallVector<std::pair<SmallSetVector<Value *, 8>, bool>, 0> Result;
  for (auto [Set, HasReadsOutsideSet] : Sets) {
    SmallSetVector<Value *, 8> PointerMustAliases;
    for (const auto &MemLoc : *Set)
      PointerMustAliases.insert(const_cast<Value *>(MemLoc.Ptr));
    Result.emplace_back(std::move(PointerMustAliases), HasReadsOutsideSet);
  }

  return Result;
}

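/// Return true if the memory location read by \p MU may be clobbered within
/// \p CurLoop, which would prevent hoisting (or sinking, depending on
/// \p Flags) the instruction \p I out of the loop.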
static bool pointerInvalidatedByLoop(MemorySSA *MSSA, MemoryUse *MU,
                                     Loop *CurLoop, Instruction &I,
                                     SinkAndHoistLICMFlags &Flags,
                                     bool InvariantGroup) {
  // For hoisting, use the walker to determine safety
  if (!Flags.getIsSink()) {
    // If hoisting an invariant group, we only need to check that there
    // is no store to the loaded pointer between the start of the loop,
    // and the load (since all values must be the same).

    // This holds in either of two cases:
    // 1) the clobbering memory access is outside the loop, or
    // 2) the earliest access is the MemoryPhi at the loop header (i.e. the
    //    loaded memory is only defined by the phi node).

    BatchAAResults BAA(MSSA->getAA());
    MemoryAccess *Source = getClobberingMemoryAccess(*MSSA, BAA, Flags, MU);
    return !MSSA->isLiveOnEntryDef(Source) &&
           CurLoop->contains(Source->getBlock()) &&
           !(InvariantGroup && Source->getBlock() == CurLoop->getHeader() &&
             isa<MemoryPhi>(Source));
  }

  // For sinking, we'd need to check all Defs below this use. The getClobbering
  // call will look on the backedge of the loop, but will check aliasing with
  // the instructions on the previous iteration.
  // For example:
  //   for (i ... )
  //     load a[i]   ( Use (LoE) )
  //     store a[i]  ( 1 = Def (2), with 2 = Phi for the loop )
  //     i++;
  // The load sees no clobbering inside the loop, as the backedge alias check
  // does phi translation, and will check aliasing against store a[i-1].
  // However sinking the load outside the loop, below the store is incorrect.

  // For now, only sink if there are no Defs in the loop, and the existing ones
  // precede the use and are in the same block.
  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
  // needs PostDominatorTreeAnalysis.
  // FIXME: More precise: no Defs that alias this Use.
  if (Flags.tooManyMemoryAccesses())
    return true;
  for (auto *BB : CurLoop->getBlocks())
    if (pointerInvalidatedByBlock(*BB, *MSSA, *MU))
      return true;
  // When sinking, the source block may not be part of the loop so check it.
  if (!CurLoop->contains(&I))
    return pointerInvalidatedByBlock(*I.getParent(), *MSSA, *MU);

  return false;
}

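/// Return true if \p MU could be invalidated by a MemoryDef in \p BB, i.e.
/// unless every MemoryDef in \p BB is in the same block as \p MU and locally
/// dominates it.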
bool pointerInvalidatedByBlock(BasicBlock &BB, MemorySSA &MSSA, MemoryUse &MU) {
  if (const auto *Accesses = MSSA.getBlockDefs(&BB))
    for (const auto &MA : *Accesses)
      if (const auto *MD = dyn_cast<MemoryDef>(&MA))
        if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
          return true;
  return false;
}

/// Try to simplify things like (A < INV_1 AND A < INV_2) into (A <
/// min(INV_1, INV_2)), if INV_1 and INV_2 are both loop invariants and their
/// minimum can be computed outside of the loop, while A is not loop-invariant.
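///
/// For example (a sketch; value names are illustrative):
///   %c1 = icmp slt i32 %a, %inv1
///   %c2 = icmp slt i32 %a, %inv2
///   %r = and i1 %c1, %c2
/// becomes, with %m = llvm.smin(%inv1, %inv2) computed in the preheader:
///   %r = icmp slt i32 %a, %m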
static bool hoistMinMax(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                        MemorySSAUpdater &MSSAU) {
  bool Inverse = false;
  using namespace PatternMatch;
  Value *Cond1, *Cond2;
  if (match(&I, m_LogicalOr(m_Value(Cond1), m_Value(Cond2)))) {
    Inverse = true;
  } else if (match(&I, m_LogicalAnd(m_Value(Cond1), m_Value(Cond2)))) {
    // Do nothing
  } else
    return false;

  auto MatchICmpAgainstInvariant = [&](Value *C, ICmpInst::Predicate &P,
                                       Value *&LHS, Value *&RHS) {
    if (!match(C, m_OneUse(m_ICmp(P, m_Value(LHS), m_Value(RHS)))))
      return false;
    if (!LHS->getType()->isIntegerTy())
      return false;
    if (!ICmpInst::isRelational(P))
      return false;
    if (L.isLoopInvariant(LHS)) {
      std::swap(LHS, RHS);
      P = ICmpInst::getSwappedPredicate(P);
    }
    if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS))
      return false;
    if (Inverse)
      P = ICmpInst::getInversePredicate(P);
    return true;
  };
  ICmpInst::Predicate P1, P2;
  Value *LHS1, *LHS2, *RHS1, *RHS2;
  if (!MatchICmpAgainstInvariant(Cond1, P1, LHS1, RHS1) ||
      !MatchICmpAgainstInvariant(Cond2, P2, LHS2, RHS2))
    return false;
  if (P1 != P2 || LHS1 != LHS2)
    return false;

  // Everything is fine, we can do the transform.
  bool UseMin = ICmpInst::isLT(P1) || ICmpInst::isLE(P1);
  assert(
      (UseMin || ICmpInst::isGT(P1) || ICmpInst::isGE(P1)) &&
      "Relational predicate is either less (or equal) or greater (or equal)!");
  Intrinsic::ID id = ICmpInst::isSigned(P1)
                         ? (UseMin ? Intrinsic::smin : Intrinsic::smax)
                         : (UseMin ? Intrinsic::umin : Intrinsic::umax);
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  // We are about to create a new guaranteed use for RHS2 which might not exist
  // before (if it was a non-taken input of logical and/or instruction). If it
  // was poison, we need to freeze it. Note that no new uses of LHS1 and RHS1 are
  // introduced, so they don't need this.
  if (isa<SelectInst>(I))
    RHS2 = Builder.CreateFreeze(RHS2, RHS2->getName() + ".fr");
  Value *NewRHS = Builder.CreateBinaryIntrinsic(
      id, RHS1, RHS2, nullptr, StringRef("invariant.") +
                                   (ICmpInst::isSigned(P1) ? "s" : "u") +
                                   (UseMin ? "min" : "max"));
  Builder.SetInsertPoint(&I);
  ICmpInst::Predicate P = P1;
  if (Inverse)
    P = ICmpInst::getInversePredicate(P);
  Value *NewCond = Builder.CreateICmp(P, LHS1, NewRHS);
  NewCond->takeName(&I);
  I.replaceAllUsesWith(NewCond);
  eraseInstruction(I, SafetyInfo, MSSAU);
  eraseInstruction(*cast<Instruction>(Cond1), SafetyInfo, MSSAU);
  eraseInstruction(*cast<Instruction>(Cond2), SafetyInfo, MSSAU);
  return true;
}

/// Reassociate gep (gep ptr, idx1), idx2 to gep (gep ptr, idx2), idx1 if
/// this allows hoisting the inner GEP.
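///
/// For example (illustrative IR), with %inv loop-invariant and %var
/// loop-variant:
///   %src = getelementptr i8, ptr %base, i64 %var
///   %gep = getelementptr i8, ptr %src, i64 %inv
/// is rewritten so that a GEP by %inv is applied to %base first; that new
/// inner GEP is loop-invariant and can be hoisted to the preheader.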
static bool hoistGEP(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                     MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                     DominatorTree *DT) {
  auto *GEP = dyn_cast<GetElementPtrInst>(&I);
  if (!GEP)
    return false;

  auto *Src = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
  if (!Src || !Src->hasOneUse() || !L.contains(Src))
    return false;

  Value *SrcPtr = Src->getPointerOperand();
  auto LoopInvariant = [&](Value *V) { return L.isLoopInvariant(V); };
  if (!L.isLoopInvariant(SrcPtr) || !all_of(GEP->indices(), LoopInvariant))
    return false;

  // This can only happen if !AllowSpeculation, otherwise this would already be
  // handled.
  // FIXME: Should we respect AllowSpeculation in these reassociation folds?
  // The flag exists to prevent metadata dropping, which is not relevant here.
  if (all_of(Src->indices(), LoopInvariant))
    return false;

  // The swapped GEPs are inbounds if both original GEPs are inbounds
  // and the sign of the offsets is the same. For simplicity, only
  // handle both offsets being non-negative.
  const DataLayout &DL = GEP->getDataLayout();
  auto NonNegative = [&](Value *V) {
    return isKnownNonNegative(V, SimplifyQuery(DL, DT, AC, GEP));
  };
  bool IsInBounds = Src->isInBounds() && GEP->isInBounds() &&
                    all_of(Src->indices(), NonNegative) &&
                    all_of(GEP->indices(), NonNegative);

  BasicBlock *Preheader = L.getLoopPreheader();
  IRBuilder<> Builder(Preheader->getTerminator());
  Value *NewSrc = Builder.CreateGEP(GEP->getSourceElementType(), SrcPtr,
                                    SmallVector<Value *>(GEP->indices()),
                                    "invariant.gep", IsInBounds);
  Builder.SetInsertPoint(GEP);
  Value *NewGEP = Builder.CreateGEP(Src->getSourceElementType(), NewSrc,
                                    SmallVector<Value *>(Src->indices()), "gep",
                                    IsInBounds);
  GEP->replaceAllUsesWith(NewGEP);
  eraseInstruction(*GEP, SafetyInfo, MSSAU);
  eraseInstruction(*Src, SafetyInfo, MSSAU);
  return true;
}

/// Try to turn things like "LV + C1 < C2" into "LV < C2 - C1". Here
/// C1 and C2 are loop invariants and LV is a loop-variant.
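///
/// For example (illustrative, with nsw arithmetic):
///   %lhs = add nsw i32 %iv, %c1
///   %cmp = icmp slt i32 %lhs, %c2
/// becomes "%cmp = icmp slt i32 %iv, %sub", where "%sub = sub nsw i32 %c2,
/// %c1" is computed in the preheader, once the subtraction is proven not to
/// overflow.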
static bool hoistAdd(ICmpInst::Predicate Pred, Value *VariantLHS,
                     Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
                     ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
                     AssumptionCache *AC, DominatorTree *DT) {
  assert(ICmpInst::isSigned(Pred) && "Not supported yet!");
  assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
  assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");

  // Try to represent VariantLHS as a sum of invariant and variant operands.
  using namespace PatternMatch;
  Value *VariantOp, *InvariantOp;
  if (!match(VariantLHS, m_NSWAdd(m_Value(VariantOp), m_Value(InvariantOp))))
    return false;

  // LHS itself is a loop-variant, try to represent it in the form:
  // "VariantOp + InvariantOp". If it is possible, then we can reassociate.
  if (L.isLoopInvariant(VariantOp))
    std::swap(VariantOp, InvariantOp);
  if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
    return false;

  // In order to turn "LV + C1 < C2" into "LV < C2 - C1", we need to be able to
  // freely move values from left side of inequality to right side (just as in
  // normal linear arithmetic). Overflows make things much more complicated, so
  // we want to avoid this.
  auto &DL = L.getHeader()->getDataLayout();
  bool ProvedNoOverflowAfterReassociate =
      computeOverflowForSignedSub(InvariantRHS, InvariantOp,
                                  SimplifyQuery(DL, DT, AC, &ICmp)) ==
      llvm::OverflowResult::NeverOverflows;
  if (!ProvedNoOverflowAfterReassociate)
    return false;
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  Value *NewCmpOp = Builder.CreateSub(InvariantRHS, InvariantOp, "invariant.op",
                                      /*HasNUW*/ false, /*HasNSW*/ true);
  ICmp.setPredicate(Pred);
  ICmp.setOperand(0, VariantOp);
  ICmp.setOperand(1, NewCmpOp);
  eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
  return true;
}

/// Try to reassociate and hoist the following two patterns:
/// LV - C1 < C2 --> LV < C1 + C2,
/// C1 - LV < C2 --> LV > C1 - C2.
static bool hoistSub(ICmpInst::Predicate Pred, Value *VariantLHS,
                     Value *InvariantRHS, ICmpInst &ICmp, Loop &L,
                     ICFLoopSafetyInfo &SafetyInfo, MemorySSAUpdater &MSSAU,
                     AssumptionCache *AC, DominatorTree *DT) {
  assert(ICmpInst::isSigned(Pred) && "Not supported yet!");
  assert(!L.isLoopInvariant(VariantLHS) && "Precondition.");
  assert(L.isLoopInvariant(InvariantRHS) && "Precondition.");

2592 // Try to represent VariantLHS as sum of invariant and variant operands.
2593 using namespace PatternMatch;
2594 Value *VariantOp, *InvariantOp;
2595 if (!match(VariantLHS, m_NSWSub(m_Value(VariantOp), m_Value(InvariantOp))))
2596 return false;
2597
2598 bool VariantSubtracted = false;
2599 // LHS itself is a loop-variant, try to represent it in the form:
2600 // "VariantOp + InvariantOp". If it is possible, then we can reassociate. If
2601 // the variant operand goes with minus, we use a slightly different scheme.
2602 if (L.isLoopInvariant(VariantOp)) {
2603 std::swap(VariantOp, InvariantOp);
2604 VariantSubtracted = true;
2605 Pred = ICmpInst::getSwappedPredicate(Pred);
2606 }
2607 if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
2608 return false;
2609
2610 // In order to turn "LV - C1 < C2" into "LV < C2 + C1", we need to be able to
2611 // freely move values from left side of inequality to right side (just as in
2612 // normal linear arithmetics). Overflows make things much more complicated, so
2613 // we want to avoid this. Likewise, for "C1 - LV < C2" we need to prove that
2614 // "C1 - C2" does not overflow.
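  // For instance (editor's sketch, hypothetical i8 values): for
  // "C1 - LV <s C2" with C1 = -100 and C2 = 100, "C1 - C2" is -200, which
  // wraps in i8, so comparing LV against the wrapped value would be wrong.
  // The checks below reject such cases.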
  auto &DL = L.getHeader()->getDataLayout();
  SimplifyQuery SQ(DL, DT, AC, &ICmp);
  if (VariantSubtracted) {
    // C1 - LV < C2 --> LV > C1 - C2
    if (computeOverflowForSignedSub(InvariantOp, InvariantRHS, SQ) !=
        llvm::OverflowResult::NeverOverflows)
      return false;
  } else {
    // LV - C1 < C2 --> LV < C1 + C2
    if (computeOverflowForSignedAdd(InvariantOp, InvariantRHS, SQ) !=
        llvm::OverflowResult::NeverOverflows)
      return false;
  }
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  Value *NewCmpOp =
      VariantSubtracted
          ? Builder.CreateSub(InvariantOp, InvariantRHS, "invariant.op",
                              /*HasNUW*/ false, /*HasNSW*/ true)
          : Builder.CreateAdd(InvariantOp, InvariantRHS, "invariant.op",
                              /*HasNUW*/ false, /*HasNSW*/ true);
  ICmp.setPredicate(Pred);
  ICmp.setOperand(0, VariantOp);
  ICmp.setOperand(1, NewCmpOp);
  eraseInstruction(cast<Instruction>(*VariantLHS), SafetyInfo, MSSAU);
  return true;
}

/// Reassociate and hoist add/sub expressions.
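/// For example (editor's sketch): "(x + 40) <s 100", where the add is nsw and
/// 40 and 100 are loop-invariant, becomes "x <s 60", with "100 - 40"
/// materialized in the preheader (see hoistAdd above).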
static bool hoistAddSub(Instruction &I, Loop &L, ICFLoopSafetyInfo &SafetyInfo,
                        MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                        DominatorTree *DT) {
  using namespace PatternMatch;
  ICmpInst::Predicate Pred;
  Value *LHS, *RHS;
  if (!match(&I, m_ICmp(Pred, m_Value(LHS), m_Value(RHS))))
    return false;

  // TODO: Support unsigned predicates?
  if (!ICmpInst::isSigned(Pred))
    return false;

  // Put the variant operand in the LHS position.
  if (L.isLoopInvariant(LHS)) {
    std::swap(LHS, RHS);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }
  // We want to delete the initial operation after reassociation, so only do
  // this if it has no other uses.
  if (L.isLoopInvariant(LHS) || !L.isLoopInvariant(RHS) || !LHS->hasOneUse())
    return false;

  // TODO: We could use a smarter context, taking the common dominator of all
  // of I's users instead of I itself.
  if (hoistAdd(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
    return true;

  if (hoistSub(Pred, LHS, RHS, cast<ICmpInst>(I), L, SafetyInfo, MSSAU, AC, DT))
    return true;

  return false;
}

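/// Return true if I has opcode IntOpcode, or FPOpcode together with the
/// reassoc and nsz fast-math flags that allow FP reassociation.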
static bool isReassociableOp(Instruction *I, unsigned IntOpcode,
                             unsigned FPOpcode) {
  if (I->getOpcode() == IntOpcode)
    return true;
  if (I->getOpcode() == FPOpcode && I->hasAllowReassoc() &&
      I->hasNoSignedZeros())
    return true;
  return false;
}

/// Try to reassociate expressions like ((A1 * B1) + (A2 * B2) + ...) * C where
/// A1, A2, ... and C are loop invariants into expressions like
/// ((A1 * C * B1) + (A2 * C * B2) + ...) and hoist the (A1 * C), (A2 * C), ...
/// invariant expressions. This function returns true only if any hoisting has
/// actually occurred.
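/// For instance (editor's sketch; hypothetical IR names, fast-math flags
/// abbreviated):
///   %m1 = fmul fast float %A1, %B1   ; %A1 invariant, %B1 variant
///   %m2 = fmul fast float %A2, %B2   ; %A2 invariant, %B2 variant
///   %s  = fadd fast float %m1, %m2
///   %r  = fmul fast float %s, %C     ; %C invariant
/// is rewritten so that "%A1 * %C" and "%A2 * %C" are computed in the
/// preheader and %r becomes the fadd of the two rescaled multiplies.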
static bool hoistMulAddAssociation(Instruction &I, Loop &L,
                                   ICFLoopSafetyInfo &SafetyInfo,
                                   MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                                   DominatorTree *DT) {
  if (!isReassociableOp(&I, Instruction::Mul, Instruction::FMul))
    return false;
  Value *VariantOp = I.getOperand(0);
  Value *InvariantOp = I.getOperand(1);
  if (L.isLoopInvariant(VariantOp))
    std::swap(VariantOp, InvariantOp);
  if (L.isLoopInvariant(VariantOp) || !L.isLoopInvariant(InvariantOp))
    return false;
  Value *Factor = InvariantOp;

  // First, we need to make sure we should do the transformation.
  SmallVector<Use *> Changes;
  SmallVector<BinaryOperator *> Adds;
  SmallVector<BinaryOperator *> Worklist;
  if (BinaryOperator *VariantBinOp = dyn_cast<BinaryOperator>(VariantOp))
    Worklist.push_back(VariantBinOp);
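  // Walk the expression tree rooted at VariantOp: reassociable adds whose two
  // operands are themselves binary operators are looked through, and every
  // leaf must be a reassociable, loop-variant multiply with exactly one
  // loop-invariant operand, recorded in Changes.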
  while (!Worklist.empty()) {
    BinaryOperator *BO = Worklist.pop_back_val();
    if (!BO->hasOneUse())
      return false;
    if (isReassociableOp(BO, Instruction::Add, Instruction::FAdd) &&
        isa<BinaryOperator>(BO->getOperand(0)) &&
        isa<BinaryOperator>(BO->getOperand(1))) {
      Worklist.push_back(cast<BinaryOperator>(BO->getOperand(0)));
      Worklist.push_back(cast<BinaryOperator>(BO->getOperand(1)));
      Adds.push_back(BO);
      continue;
    }
    if (!isReassociableOp(BO, Instruction::Mul, Instruction::FMul) ||
        L.isLoopInvariant(BO))
      return false;
    Use &U0 = BO->getOperandUse(0);
    Use &U1 = BO->getOperandUse(1);
    if (L.isLoopInvariant(U0))
      Changes.push_back(&U0);
    else if (L.isLoopInvariant(U1))
      Changes.push_back(&U1);
    else
      return false;
    unsigned Limit = I.getType()->isIntOrIntVectorTy()
                         ? IntAssociationUpperLimit
                         : FPAssociationUpperLimit;
    if (Changes.size() > Limit)
      return false;
  }
  if (Changes.empty())
    return false;

  // Drop the poison flags for any adds we looked through: reassociation can
  // make an intermediate value overflow where the original did not, so nsw/nuw
  // are no longer guaranteed to hold.
  if (I.getType()->isIntOrIntVectorTy()) {
    for (auto *Add : Adds)
      Add->dropPoisonGeneratingFlags();
  }

  // We know we should do it, so perform the transformation.
  auto *Preheader = L.getLoopPreheader();
  assert(Preheader && "Loop is not in simplify form?");
  IRBuilder<> Builder(Preheader->getTerminator());
  for (auto *U : Changes) {
    assert(L.isLoopInvariant(U->get()));
    auto *Ins = cast<BinaryOperator>(U->getUser());
    Value *Mul;
    if (I.getType()->isIntOrIntVectorTy()) {
      Mul = Builder.CreateMul(U->get(), Factor, "factor.op.mul");
      // Drop the poison flags on the original multiply.
      Ins->dropPoisonGeneratingFlags();
    } else
      Mul = Builder.CreateFMulFMF(U->get(), Factor, Ins, "factor.op.fmul");

    // Rewrite the reassociable instruction.
    unsigned OpIdx = U->getOperandNo();
    auto *LHS = OpIdx == 0 ? Mul : Ins->getOperand(0);
    auto *RHS = OpIdx == 1 ? Mul : Ins->getOperand(1);
    auto *NewBO = BinaryOperator::Create(Ins->getOpcode(), LHS, RHS,
                                         Ins->getName() + ".reass", Ins);
    NewBO->copyIRFlags(Ins);
    if (VariantOp == Ins)
      VariantOp = NewBO;
    Ins->replaceAllUsesWith(NewBO);
    eraseInstruction(*Ins, SafetyInfo, MSSAU);
  }

  I.replaceAllUsesWith(VariantOp);
  eraseInstruction(I, SafetyInfo, MSSAU);
  return true;
}

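/// Dispatcher for the reassociation-based hoisting folds above: tries each
/// fold on I in turn, updates the statistics counters for the first one that
/// rewrites the instruction, and returns true if any fold succeeded.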
static bool hoistArithmetics(Instruction &I, Loop &L,
                             ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater &MSSAU, AssumptionCache *AC,
                             DominatorTree *DT) {
  // Optimize complex patterns, such as (x < INV1 && x < INV2), turning them
  // into (x < min(INV1, INV2)), and hoisting the invariant part of this
  // expression out of the loop.
  if (hoistMinMax(I, L, SafetyInfo, MSSAU)) {
    ++NumHoisted;
    ++NumMinMaxHoisted;
    return true;
  }

  // Try to hoist GEPs by reassociation.
  if (hoistGEP(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumGEPsHoisted;
    return true;
  }

  // Try to hoist add/sub's by reassociation.
  if (hoistAddSub(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    ++NumAddSubHoisted;
    return true;
  }

  bool IsInt = I.getType()->isIntOrIntVectorTy();
  if (hoistMulAddAssociation(I, L, SafetyInfo, MSSAU, AC, DT)) {
    ++NumHoisted;
    if (IsInt)
      ++NumIntAssociationsHoisted;
    else
      ++NumFPAssociationsHoisted;
    return true;
  }

  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}
