//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles optimizations other than LICM that
// increase live-ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue.
// This flag applies only when LICM uses MemorySSA
// instead of AliasSetTracker. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop,
                                  bool LoopNestMode);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, const Loop *CurLoop,
                 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
                 OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    bool AllowSpeculation);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop, Instruction &I,
                                             SinkAndHoistLICMFlags &Flags);
static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
                                              MemoryUse &MU);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
static SmallVector<SmallSetVector<Value *, 8>, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE,
                 MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
                 bool LoopNestMode = false);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap,
                          bool LicmAllowSpeculation)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
        LicmAllowSpeculation(LicmAllowSpeculation) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;
  bool LicmAllowSpeculation;
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
      bool LicmAllowSpeculation = true)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                           LicmAllowSpeculation) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
    bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
    BlockFrequencyInfo *BFI =
        hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                       : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent()),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent()),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    AU.addPreserved<LazyBlockFrequencyInfoPass>();
    AU.addPreserved<LazyBranchProbabilityInfoPass>();
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LICM requires MemorySSA (loop-mssa)");

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                               LicmAllowSpeculation);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
                                 LoopStandardAnalysisResults &AR,
                                 LPMUpdater &) {
  if (!AR.MSSA)
    report_fatal_error("LNICM requires MemorySSA (loop-mssa)");

  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(LN.getParent());

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                               LicmAllowSpeculation);

  Loop &OutermostLoop = LN.getOutermostLoop();
  bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, AR.BFI,
                                &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);

  if (!Changed)
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  PA.preserve<MemorySSAAnalysis>();

  return PA;
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap,
                           bool LicmAllowSpeculation) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                            LicmAllowSpeculation);
}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
                                                   MemorySSA *MSSA)
    : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
                            IsSink, L, MSSA) {}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop *L, MemorySSA *MSSA)
    : LicmMssaOptCap(LicmMssaOptCap),
      LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
      IsSink(IsSink) {
  assert(((L != nullptr) == (MSSA != nullptr)) &&
         "Unexpected values for SinkAndHoistLICMFlags");
  if (!MSSA)
    return;

  unsigned AccessCapCount = 0;
  for (auto *BB : L->getBlocks())
    if (const auto *Accesses = MSSA->getBlockAccesses(BB))
      for (const auto &MA : *Accesses) {
        (void)MA;
        ++AccessCapCount;
        if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
          NoOfMemAccTooLarge = true;
          return;
        }
      }
}

/// Hoist expressions out of the specified loop. Note, alias info for inner
/// loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
    BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
    ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
    bool LoopNestMode) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  // Don't sink stores from loops with coroutine suspend instructions.
  // LICM would sink instructions into the default destination of
  // the coroutine switch. The default destination of the switch is to
  // handle the case where the coroutine is suspended, by which point the
  // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This unfortunately hurts the performance of coroutines, however
  // there is currently no general solution for this. Similar issues could also
  // potentially happen in other passes where instructions are being moved
  // across that edge.
  bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
    return llvm::any_of(*BB, [](Instruction &I) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
    });
  });

  MemorySSAUpdater MSSAU(MSSA);
  SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                              /*IsSink=*/true, L, MSSA);

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |= LoopNestMode
                   ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI,
                                           DT, BFI, TLI, TTI, L, &MSSAU,
                                           &SafetyInfo, Flags, ORE)
                   : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI,
                                TLI, TTI, L, &MSSAU, &SafetyInfo, Flags, ORE);
  Flags.setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
                           &MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
                           LicmAllowSpeculation);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify,
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      // Promoting one set of accesses may make the pointers for another set
      // loop invariant, so run this in a loop (with the MaybePromotable set
      // decreasing in size over time).
      bool Promoted = false;
      bool LocalPromoted;
      do {
        LocalPromoted = false;
        for (const SmallSetVector<Value *, 8> &PointerMustAliases :
             collectPromotionCandidates(MSSA, AA, L)) {
          LocalPromoted |= promoteLoopAccessesToScalars(
              PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
              DT, TLI, L, &MSSAU, &SafetyInfo, ORE, LicmAllowSpeculation);
        }
        Promoted |= LocalPromoted;
      } while (LocalPromoted);

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of basic functional correctness checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, BlockFrequencyInfo *BFI,
                      TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                      Loop *CurLoop, MemorySSAUpdater *MSSAU,
                      ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");

  // We want to visit children before parents.
  // We will enqueue all the parents before their children in the worklist and
  // process the worklist in reverse order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // The instruction is not used in the loop if it is dead. In this case,
      // we just delete it instead of sinking it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all the users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FreeInLoop = false;
      bool LoopNestMode = OutermostLoop != nullptr;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFreeInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
                                SafetyInfo, TTI, FreeInLoop, LoopNestMode) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/ nullptr, MSSAU,
                             true, &Flags, ORE)) {
        if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}

bool llvm::sinkRegionForLoopNest(
    DomTreeNode *N, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
    BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
    Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
    SinkAndHoistLICMFlags &Flags, OptimizationRemarkEmitter *ORE) {

  bool Changed = false;
  SmallPriorityWorklist<Loop *, 4> Worklist;
  Worklist.insert(CurLoop);
  appendLoopsToWorklist(*CurLoop, Worklist);
  while (!Worklist.empty()) {
    Loop *L = Worklist.pop_back_val();
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI,
                          TTI, L, MSSAU, SafetyInfo, Flags, ORE, CurLoop);
  }
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be
  // hoisted to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one pick whichever appears first in the block list
      // (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
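    // To check this, collect all of the block's predecessors, then erase every
    // predecessor that is accounted for by a registered hoistable branch; the
    // phi is hoistable iff no predecessor remains unaccounted for.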
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on if the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
          HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, BlockFrequencyInfo *BFI,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                       ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode,
                       bool AllowSpeculation) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

    for (Instruction &I : llvm::make_early_inc_range(*BB)) {
      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
                          << '\n');
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, MSSAU);
        Changed = true;
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure the instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/ nullptr, MSSAU,
                             true, &Flags, ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, TLI, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator(), AllowSpeculation)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
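      // That is, for a loop-invariant divisor D, rewrite
      //   X = A / D    (inside the loop)
      // as
      //   R = 1.0 / D  (hoisted out of the loop)
      //   X = A * R    (inside the loop)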
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to, to make
  // sure that we rehoist instructions before the instructions that use them.
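  // HoistPoint is the position before which the next rehoisted instruction is
  // placed; since we visit HoistedInstructions in reverse, each instruction
  // ends up ahead of the already-rehoisted instructions that use it.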
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in the future, the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If the type is i8 addrspace(x)*, we know this is the type of the
  // llvm.invariant.start operand.
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
                                     LI->getPointerAddressSpace());
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is the
  // invariant.start operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing a high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }
  // If we've ended up at a global/constant, bail. We shouldn't be looking at
  // uselists for non-local Values in a loop pass.
  if (isa<Constant>(Addr))
    return false;

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing load operands with a high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}

/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
                const Loop *L) {
  if (CurAST) {
    for (AliasSet &AS : *CurAST) {
      if (!AS.isForwardingAliasSet() && AS.isMod()) {
        return false;
      }
    }
    return true;
  } else { /*MSSAU*/
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
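/// MemoryPhis are ignored when making this determination.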
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater *MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, AliasSetTracker *CurAST,
                              MemorySSAUpdater *MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags *Flags,
                              OptimizationRemarkEmitter *ORE) {
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
  if (MSSA)
    assert(Flags != nullptr && "Flags cannot be null.");

  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    bool Invalidated;
    if (CurAST)
      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
                                             CurLoop, AA);
    else
      Invalidated = pointerInvalidatedByLoopWithMSSA(
          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // The convergent attribute is used on operations that involve inter-thread
    // communication, whose results are implicitly affected by the enclosing
    // control flow. It is not safe to hoist or sink such operations across
    // control flow.
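    // (For example, a barrier or cross-lane operation assumes a particular set
    // of threads executes it together; moving it past a branch would change
    // that set.)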
    if (CI->isConvergent())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
      // Widenable conditions don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == FMRB_DoesNotAccessMemory)
      return true;
    if (AAResults::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (AAResults::onlyAccessesArgPointees(Behavior)) {
        // TODO: expand to writeable arguments
        for (Value *Op : CI->args())
          if (Op->getType()->isPointerTy()) {
            bool Invalidated;
            if (CurAST)
              Invalidated = pointerInvalidatedByLoop(
                  MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
            else
              Invalidated = pointerInvalidatedByLoopWithMSSA(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
                  *Flags);
            if (Invalidated)
              return false;
          }
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(CurAST, MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    if (CurAST) {
      auto Begin = CurAST->begin();
      assert(Begin != CurAST->end() && "must contain FI");
      if (std::next(Begin) != CurAST->end())
        // constant memory for instance, TODO: handle better
        return false;
      auto *UniqueI = Begin->getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      (void)FI; // suppress unused variable warning
      assert(UniqueI == FI && "AS must contain FI");
      return true;
    } else // MSSAU
      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load-store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (CurAST) {
      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));

      if (AS.isRef() || !AS.isMustAlias())
        // Quick exit test, handled by the full path below as well.
        return false;
      auto *UniqueI = AS.getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      assert(UniqueI == SI && "AS must contain SI");
      return true;
    } else { // MSSAU
      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
        return true;
      // If there are more accesses than the Promotion cap or no "quota" to
      // check clobber, then give up as we're not walking a list that long.
      if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
        return false;
      // If there are interfering Uses (i.e. their defining access is in the
      // loop), or ordered loads (stored as Defs!), don't move this store.
      // Could do better here, but this is conservatively correct.
      // TODO: Cache set of Uses on the first walk in runOnLoop, update when
      // moving accesses. Can also extend to dominating uses.
      auto *SIMD = MSSA->getMemoryAccess(SI);
      for (auto *BB : CurLoop->getBlocks())
        if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
          for (const auto &MA : *Accesses)
            if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
              auto *MD = MU->getDefiningAccess();
              if (!MSSA->isLiveOnEntryDef(MD) &&
                  CurLoop->contains(MD->getBlock()))
                return false;
              // Disable hoisting past potentially interfering loads. Optimized
              // Uses may point to an access outside the loop, as getClobbering
              // checks the previous iteration when walking the backedge.
              // FIXME: More precise: no Uses that alias SI.
              if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
                return false;
            } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
              if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
                (void)LI; // Silence warning.
                assert(!LI->isUnordered() && "Expected unordered load");
                return false;
              }
              // Any call, while it may not be clobbering SI, may still be a
              // use.
              if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
                // Check if the call may read from the memory location written
                // to by SI. Check CI's attributes and arguments; the number of
                // such checks performed is limited above by NoOfMemAccTooLarge.
                ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
                if (isModOrRefSet(MRI))
                  return false;
              }
            }
        }
      auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
      Flags->incrementClobberingCalls();
      // If there are no clobbering Defs in the loop, store is safe to hoist.
      return MSSA->isLiveOnEntryDef(Source) ||
             !CurLoop->contains(Source->getBlock());
    }
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}

/// Return true if the instruction is free in the loop.
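/// "Free" here means TTI reports the instruction as TCC_Free for size and
/// latency, i.e. it is expected to fold away in lowering (for example, a GEP
/// folded into the addressing mode of a load or store).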
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {

  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
        TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getUserCost because currently it
    // optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  } else
    return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
           TargetTransformInfo::TCC_Free;
}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop,
                                  bool LoopNestMode) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;

      if (LoopNestMode) {
        while (isa<PHINode>(UI) && UI->hasOneUser() &&
               UI->getNumOperands() == 1) {
          if (!CurLoop->contains(UI))
            break;
          UI = cast<Instruction>(UI->user_back());
        }
      }
    }

    if (CurLoop->contains(UI)) {
      if (IsFree) {
        FreeInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
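    //
    // (Illustrative IR, not from the source: the clone may need to change
    //   call void @f() [ "funclet"(token %pad.loop) ]
    // into
    //   call void @f() [ "funclet"(token %pad.exit) ]
    // where %pad.exit is the EH pad coloring the exit block; the original
    // funclet bundle is dropped below and re-added from BlockColors.)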
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU->insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
  // this is particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
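  //
  // (Illustrative IR, not from the source: if the clone placed in %exit uses
  // %inv defined in %loop.bb, the loop below materializes
  //   exit:
  //     %inv.lcssa = phi i32 [ %inv, %loop.bb ]
  // and rewrites the clone's operand to use %inv.lcssa.)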
  for (Use &Op : New->operands())
    if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
      auto *OInst = cast<Instruction>(Op.get());
      PHINode *OpPN =
          PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                          OInst->getName() + ".lcssa", &ExitBlock.front());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
      Op = OpPN;
    }
  return New;
}

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             MemorySSAUpdater *MSSAU) {
  if (MSSAU)
    MSSAU->removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU,
                                  ScalarEvolution *SE) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
  I.moveBefore(&Dest);
  if (MSSAU)
    if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
            MSSAU->getMemorySSA()->getMemoryAccess(&I)))
      MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
                         MemorySSA::BeforeTerminator);
  if (SE)
    SE->forgetValue(&I);
}

static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater *MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}

static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already
  // exist it requires updating BlockColors for all offspring blocks
  // accordingly. By skipping this corner case, we can keep updating
  // BlockColors after splitting predecessors fairly simple.
  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
    return false;
  for (BasicBlock *BBPred : predecessors(BB)) {
    if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
        isa<CallBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit so that instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping
  // the loop in the canonical form where each predecessor of each exit block
  // is contained within the loop. For example, this will convert the loop
  // below from
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // to
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1] <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2] <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
      BasicBlock *NewPred = SplitBlockPredecessors(
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-blocks with BlockColors in
      // canSplitPredecessors(), we can simply assign the predecessor's color
      // to the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting
        // the reference to the vector we are copying because inserting the
        // new element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, const Loop *CurLoop,
                 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
                 OptimizationRemarkEmitter *ORE) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");

  // Iterate over users to be ready for actual sinking. Replace users via
  // unreachable blocks with undef and make all user PHIs trivially
  // replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end();
       UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);

    // Should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  if (VisitedUsers.empty())
    return Changed;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  // First check if I is worth sinking for all uses. Sink only when it is
  // worthwhile across all uses.
  SmallSetVector<User *, 8> Users(I.user_begin(), I.user_end());
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");

    // The PHI must be trivially replaceable.
    Instruction *New = sinkThroughTriviallyReplaceablePHI(
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, nullptr);
    Changed = true;
  }
  return Changed;
}

/// When an instruction is found to use only loop-invariant operands and is
/// safe to hoist, this function does the dirty work.
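///
/// (Illustrative IR, not from the source: hoisting a loop-invariant
/// computation out of the body
///   loop:
///     %inv = add i32 %a, %b   ; both operands defined outside the loop
/// becomes, after hoist() to the preheader:
///   preheader:
///     %inv = add i32 %a, %b
///   loop:
///     ...)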
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
                    << I << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
                                                         << ore::NV("Inst", &I);
  });

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
  // Similarly, if I is a call and it is not guaranteed to execute in the loop,
  // then moving to the preheader means we should strip attributes on the call
  // that can cause UB since we may be hoisting above conditions that allowed
  // inferring those attributes. They may not be valid at the preheader.
  if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
      // The check on hasMetadataOtherThanDebugLoc is to prevent us from
      // burning time in isGuaranteedToExecute if we don't actually have
      // anything to drop. It is a compile time optimization, not required
      // for correctness.
      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
    I.dropUndefImplyingAttrsAndUnknownMetadata();

  if (isa<PHINode>(I))
    // Move the new node to the end of the phi list in the destination block.
    moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
  else
    // Move the new node to the destination block, before its terminator.
    moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);

  I.updateLocationAfterHoist();

  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumHoisted;
}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// or if the instruction is known not to trap when moved to the preheader,
/// or if it is a trapping instruction and is guaranteed to execute.
static bool isSafeToExecuteUnconditionally(
    Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
    const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
    bool AllowSpeculation) {
  if (AllowSpeculation && isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
    return true;

  bool GuaranteedToExecute =
      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);

  if (!GuaranteedToExecute) {
    auto *LI = dyn_cast<LoadInst>(&Inst);
    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
               << "failed to hoist load with loop-invariant address "
                  "because load is conditionally executed";
      });
  }

  return GuaranteedToExecute;
}

namespace {
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
  const SmallSetVector<Value *, 8> &PointerMustAliases;
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  SmallVectorImpl<Instruction *> &LoopInsertPts;
  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
  PredIteratorCache &PredCache;
  MemorySSAUpdater *MSSAU;
  LoopInfo &LI;
  DebugLoc DL;
  Align Alignment;
  bool UnorderedAtomic;
  AAMDNodes AATags;
  ICFLoopSafetyInfo &SafetyInfo;
  bool CanInsertStoresInExitBlocks;

  // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
  // (if legal) if doing so would add an out-of-loop use to an instruction
  // defined in-loop.
  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
      return V;

    Instruction *I = cast<Instruction>(V);
    // We need to create an LCSSA PHI node for the incoming value and
    // store that.
    PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                  I->getName() + ".lcssa", &BB->front());
    for (BasicBlock *Pred : PredCache.get(BB))
      PN->addIncoming(I, Pred);
    return PN;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               const SmallSetVector<Value *, 8> &PMA,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<Instruction *> &LIP,
               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
               MemorySSAUpdater *MSSAU, LoopInfo &li, DebugLoc dl,
               Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
               ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
        LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
        PredCache(PIC), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
        Alignment(Alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
        SafetyInfo(SafetyInfo),
        CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks) {}

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();
    return PointerMustAliases.count(Ptr);
  }

  void insertStoresInLoopExitBlocks() {
    // Insert stores in the loop exit blocks. Each exit block gets a store of
    // the live-out value that feeds it. Since we've already told the SSA
    // updater about the defs in the loop and the preheader definition, it is
    // all set and we can start using it.
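    //
    // (Illustrative IR, not from the source: for each exit block E we emit
    //   E:
    //     store i32 %live.out, i32* %ptr
    // where %live.out is the value the SSAUpdater computes for E.)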
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      Instruction *InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      if (UnorderedAtomic)
        NewSI->setOrdering(AtomicOrdering::Unordered);
      NewSI->setAlignment(Alignment);
      NewSI->setDebugLoc(DL);
      if (AATags)
        NewSI->setAAMetadata(AATags);

      MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
      MemoryAccess *NewMemAcc;
      if (!MSSAInsertPoint) {
        NewMemAcc = MSSAU->createMemoryAccessInBB(
            NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
      } else {
        NewMemAcc =
            MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
      }
      MSSAInsertPts[i] = NewMemAcc;
      MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
      // FIXME: true for safety, false may still be correct.
    }
  }

  void doExtraRewritesBeforeFinalDeletion() override {
    if (CanInsertStoresInExitBlocks)
      insertStoresInLoopExitBlocks();
  }

  void instructionDeleted(Instruction *I) const override {
    SafetyInfo.removeInstruction(I);
    MSSAU->removeMemoryAccess(I);
  }

  bool shouldDelete(Instruction *I) const override {
    if (isa<StoreInst>(I))
      return CanInsertStoresInExitBlocks;
    return true;
  }
};

bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
                                 DominatorTree *DT) {
  // We can perform the captured-before check against any instruction in the
  // loop header, as the loop header is reachable from any instruction inside
  // the loop.
  // TODO: ReturnCaptures=true shouldn't be necessary here.
  return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
                                     /* StoreCaptures */ true,
                                     L->getHeader()->getTerminator(), DT);
}

/// Return true if we can prove that a caller cannot inspect the object if an
/// unwind occurs inside the loop.
bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
                                DominatorTree *DT) {
  bool RequiresNoCaptureBeforeUnwind;
  if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
    return false;

  return !RequiresNoCaptureBeforeUnwind ||
         isNotCapturedBeforeOrInLoop(Object, L, DT);
}

} // namespace

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
bool llvm::promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<Instruction *> &InsertPts,
    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
    Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
    OptimizationRemarkEmitter *ORE, bool AllowSpeculation) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  Value *SomePtr = *PointerMustAliases.begin();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It is not safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // p1) The memory may not be dereferenceable on entry to the loop. In this
  //     case, we can't insert the required load in the preheader.
  // p2) The memory model does not allow us to insert a store along any dynamic
  //     path which did not originally have one.
  //
  // If at least one store is guaranteed to execute, both properties are
  // satisfied, and promotion is legal.
  //
  // This, however, is not a necessary condition. Even if no store/load is
  // guaranteed to execute, we can still establish these properties.
  // We can establish (p1) by proving that hoisting the load into the preheader
  // is safe (i.e. proving dereferenceability on all paths through the loop).
  // We can use any access within the alias set to prove dereferenceability,
  // since they're all must-alias.
  //
  // There are two ways to establish (p2):
  // a) Prove the location is thread-local. In this case the memory model
  //    requirement does not apply, and stores are safe to insert.
  // b) Prove a store dominates every exit block. In this case, if an exit
  //    block is reached, the original dynamic path would have taken us through
  //    the store, so inserting a store into the exit block is safe. Note that
  //    this is different from the store being guaranteed to execute. For
  //    instance, if an exception is thrown on the first iteration of the loop,
  //    the original store is never executed, but the exit blocks are not
  //    executed either.

  bool DereferenceableInPH = false;
  bool SafeToInsertStore = false;
  bool FoundLoadToPromote = false;

  SmallVector<Instruction *, 64> LoopUses;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  Align Alignment;
  // Keep track of which types of access we see.
  bool SawUnorderedAtomic = false;
  bool SawNotAtomic = false;
  AAMDNodes AATags;

  const DataLayout &MDL = Preheader->getModule()->getDataLayout();

  bool IsKnownThreadLocalObject = false;
  if (SafetyInfo->anyBlockMayThrow()) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge. We do
    // this by proving that the caller can't have a reference to the object
    // after return and thus can't possibly load from the object.
    Value *Object = getUnderlyingObject(SomePtr);
    if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
      return false;
    // Subtlety: Allocas aren't visible to callers, but *are* potentially
    // visible to other threads if captured and used during their lifetimes.
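    // (Illustrative: a noalias call result, e.g. from malloc, that has not
    // been captured before the loop is invisible to the caller on unwind, so
    // the promoted store is dead along the unwind edge; an object that may
    // have escaped is visible, and promotion was rejected just above.)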
    IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
  }

  // Check that all accesses to pointers in the alias set use the same type.
  // We cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and AA info.
  Type *AccessTy = nullptr;
  for (Value *ASIV : PointerMustAliases) {
    for (User *U : ASIV->users()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't
      // promote it.
      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        if (!Load->isUnordered())
          return false;

        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();
        FoundLoadToPromote = true;

        Align InstAlignment = Load->getAlign();

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location. Proving it guaranteed
        // to execute does as well. Thus we can increase our guaranteed
        // alignment as well.
        if (!DereferenceableInPH || (InstAlignment > Alignment))
          if (isSafeToExecuteUnconditionally(
                  *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
                  Preheader->getTerminator(), AllowSpeculation)) {
            DereferenceableInPH = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (UI->getOperand(1) != ASIV)
          continue;
        if (!Store->isUnordered())
          return false;

        SawUnorderedAtomic |= Store->isAtomic();
        SawNotAtomic |= !Store->isAtomic();

        // If the store is guaranteed to execute, both properties are
        // satisfied. We may want to check if a store is guaranteed to execute
        // even if we already know that promotion is safe, since it may have
        // higher alignment than any other guaranteed stores, in which case we
        // can raise the alignment on the promoted store.
        Align InstAlignment = Store->getAlign();

        if (!DereferenceableInPH || !SafeToInsertStore ||
            (InstAlignment > Alignment)) {
          if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
            DereferenceableInPH = true;
            SafeToInsertStore = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
        }

        // If a store dominates all exit blocks, it is safe to sink.
        // As explained above, if an exit block was executed, a dominating
        // store must have been executed at least once, so we are not
        // introducing stores on paths that did not have them.
        // Note that this only looks at explicit exit blocks. If we ever
        // start sinking stores into unwind edges (see above), this will break.
        if (!SafeToInsertStore)
          SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
            return DT->dominates(Store->getParent(), Exit);
          });

        // If the store is not guaranteed to execute, we may still get
        // deref info through it.
        if (!DereferenceableInPH) {
          DereferenceableInPH = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getValueOperand()->getType(),
              Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
        }
      } else
        return false; // Not a load or store.

      if (!AccessTy)
        AccessTy = getLoadStoreType(UI);
      else if (AccessTy != getLoadStoreType(UI))
        return false;

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        AATags = UI->getAAMetadata();
      } else if (AATags) {
        AATags = AATags.merge(UI->getAAMetadata());
      }

      LoopUses.push_back(UI);
    }
  }

  // If we found both an unordered atomic instruction and a non-atomic memory
  // access, bail. We can't blindly promote non-atomic to atomic since we
  // might not be able to lower the result. We can't downgrade since that
  // would violate the memory model. Also, align 0 is an error for atomics.
  if (SawUnorderedAtomic && SawNotAtomic)
    return false;

  // If we're inserting an atomic load in the preheader, we must be able to
  // lower it. We're only guaranteed to be able to lower naturally aligned
  // atomics.
  if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
    return false;

  // If we couldn't prove we can hoist the load, bail.
  if (!DereferenceableInPH)
    return false;

  // We know we can hoist the load, but don't have a guaranteed store.
  // Check whether the location is thread-local. If it is, then we can insert
  // stores along paths which originally didn't have them without violating the
  // memory model.
  if (!SafeToInsertStore) {
    if (IsKnownThreadLocalObject)
      SafeToInsertStore = true;
    else {
      Value *Object = getUnderlyingObject(SomePtr);
      SafeToInsertStore =
          (isNoAliasCall(Object) || isa<AllocaInst>(Object)) &&
          isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
    }
  }

  // If we've still failed to prove we can sink the store, hoist the load
  // only, if possible.
  if (!SafeToInsertStore && !FoundLoadToPromote)
    // If we cannot hoist the load either, give up.
    return false;

  // Let's do the promotion!
  if (SafeToInsertStore)
    LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
                      << '\n');
  else
    LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
                      << '\n');

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
                              LoopUses[0])
           << "Moving accesses to memory location out of the loop";
  });
  ++NumPromoted;

  // Look at all the loop uses, and try to merge their locations.
  std::vector<const DILocation *> LoopUsesLocs;
  for (auto U : LoopUses)
    LoopUsesLocs.push_back(U->getDebugLoc().get());
  auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode *, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        InsertPts, MSSAInsertPts, PIC, MSSAU, *LI, DL,
                        Alignment, SawUnorderedAtomic, AATags, *SafetyInfo,
                        SafeToInsertStore);

  // Set up the preheader to have a definition of the value. It is the
  // live-out value from the preheader that uses in the loop will use.
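  //
  // (Illustrative IR, not from the source:
  //   preheader:
  //     %p.promoted = load i32, i32* %p
  // The SSAUpdater then threads %p.promoted, and the values of any in-loop
  // stores, to every rewritten read of the location inside the loop.)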
  LoadInst *PreheaderLoad = new LoadInst(
      AccessTy, SomePtr, SomePtr->getName() + ".promoted",
      Preheader->getTerminator());
  if (SawUnorderedAtomic)
    PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DebugLoc());
  if (AATags)
    PreheaderLoad->setAAMetadata(AATags);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
      PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
  MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
  MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);

  if (VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  if (VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);

  return true;
}

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn) {
  for (const BasicBlock *BB : L->blocks())
    if (const auto *Accesses = MSSA->getBlockAccesses(BB))
      for (const auto &Access : *Accesses)
        if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
          Fn(MUD->getMemoryInst());
}

static SmallVector<SmallSetVector<Value *, 8>, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
  AliasSetTracker AST(*AA);

  auto IsPotentiallyPromotable = [L](const Instruction *I) {
    if (const auto *SI = dyn_cast<StoreInst>(I))
      return L->isLoopInvariant(SI->getPointerOperand());
    if (const auto *LI = dyn_cast<LoadInst>(I))
      return L->isLoopInvariant(LI->getPointerOperand());
    return false;
  };

  // Populate AST with potentially promotable accesses, and record them in
  // AttemptingPromotion so that the aliasing check below can skip them.
  SmallPtrSet<Value *, 16> AttemptingPromotion;
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (IsPotentiallyPromotable(I)) {
      AttemptingPromotion.insert(I);
      AST.add(I);
    }
  });

  // We're only interested in must-alias sets that contain a mod.
  SmallVector<const AliasSet *, 8> Sets;
  for (AliasSet &AS : AST)
    if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
      Sets.push_back(&AS);

  if (Sets.empty())
    return {}; // Nothing to promote...

  // Discard any sets for which there is an aliasing non-promotable access.
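  // (Illustrative: a call that may read or write the location, or a
  // load/store through a non-invariant pointer that may alias it, is not in
  // AttemptingPromotion and so discards the candidate set here rather than
  // letting it be mis-promoted.)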
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (AttemptingPromotion.contains(I))
      return;

    llvm::erase_if(Sets, [&](const AliasSet *AS) {
      return AS->aliasesUnknownInst(I, *AA);
    });
  });

  SmallVector<SmallSetVector<Value *, 8>, 0> Result;
  for (const AliasSet *Set : Sets) {
    SmallSetVector<Value *, 8> PointerMustAliases;
    for (const auto &ASI : *Set)
      PointerMustAliases.insert(ASI.getValue());
    Result.push_back(std::move(PointerMustAliases));
  }

  return Result;
}

static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA) {
  return CurAST->getAliasSetFor(MemLoc).isMod();
}

bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                      Loop *CurLoop, Instruction &I,
                                      SinkAndHoistLICMFlags &Flags) {
  // For hoisting, use the walker to determine safety.
  if (!Flags.getIsSink()) {
    MemoryAccess *Source;
    // See declaration of SetLicmMssaOptCap for usage details.
    if (Flags.tooManyClobberingCalls())
      Source = MU->getDefiningAccess();
    else {
      Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
      Flags.incrementClobberingCalls();
    }
    return !MSSA->isLiveOnEntryDef(Source) &&
           CurLoop->contains(Source->getBlock());
  }

  // For sinking, we'd need to check all Defs below this use. The getClobbering
  // call will look on the backedge of the loop, but will check aliasing with
  // the instructions on the previous iteration.
  // For example:
  //   for (i ... )
  //     load a[i]   ; MemoryUse (liveOnEntry)
  //     store a[i]  ; 1 = MemoryDef(2), with 2 = MemoryPhi for the loop
  //     i++;
  // The load sees no clobbering inside the loop, as the backedge alias check
  // does phi translation, and will check aliasing against store a[i-1].
  // However sinking the load outside the loop, below the store, is incorrect.

  // For now, only sink if there are no Defs in the loop, and the existing ones
  // precede the use and are in the same block.
  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
  // needs PostDominatorTreeAnalysis.
  // FIXME: More precise: no Defs that alias this Use.
  if (Flags.tooManyMemoryAccesses())
    return true;
  for (auto *BB : CurLoop->getBlocks())
    if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
      return true;
  // When sinking, the source block may not be part of the loop so check it.
  if (!CurLoop->contains(&I))
    return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);

  return false;
}

bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
                                       MemoryUse &MU) {
  if (const auto *Accesses = MSSA.getBlockDefs(&BB))
    for (const auto &MA : *Accesses)
      if (const auto *MD = dyn_cast<MemoryDef>(&MA))
        if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
          return true;
  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}