//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// Hoisting operations out of loops is a canonicalization transform. It
// enables and simplifies subsequent optimizations in the middle-end.
// Rematerialization of hoisted instructions to reduce register pressure is the
// responsibility of the back-end, which has more accurate information about
// register pressure and also handles optimizations other than LICM that
// increase live ranges.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
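//
// As an illustrative sketch of promotion (hypothetical IR, not taken from a
// test case): given a loop-invariant pointer %p that is only must-aliased
// inside the loop,
//
//   loop:
//     %v = load i32, i32* %p
//     %v2 = add i32 %v, 1
//     store i32 %v2, i32* %p
//     br i1 %cond, label %loop, label %exit
//
// the load and store can be rewritten to flow through an SSA value, leaving a
// single load in the preheader and a single store in the exit block.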
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
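// These counters feed the generic statistics machinery. As a usage sketch
// (on an assertions-enabled build), running e.g.
//   opt -passes=licm -stats input.ll -S
// prints one line per nonzero counter, such as
//   "<n> licm - Number of instructions hoisted out of loop".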

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<unsigned> HoistSinkColdnessThreshold(
    "licm-coldness-threshold", cl::Hidden, cl::init(4),
    cl::desc("Relative coldness threshold of hoisting/sinking destination "
             "block for LICM to be considered beneficial"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

// Default value of zero implies we use the regular alias set tracker mechanism
// instead of the cross product using AA to identify aliasing of the memory
// location we are interested in.
static cl::opt<int>
    LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
                   cl::desc("How many instructions to cross-product using AA"));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile times. This is to be removed if MemorySSA starts
// to address the same issue. This flag applies only when LICM uses MemorySSA
// instead of AliasSetTracker. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile times. Caps the MemorySSA clobbering calls."));
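
// As a usage sketch (flag names as defined in this file; pipeline spelling is
// the usual new-PM one), the caps can be tuned from the command line, e.g.:
//   opt -passes='loop-mssa(licm)' -licm-mssa-optimization-cap=200 \
//       -licm-mssa-max-acc-promotion=500 input.ll -S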

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, const Loop *CurLoop,
                 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
                 OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const TargetLibraryInfo *TLI,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop, Instruction &I,
                                             SinkAndHoistLICMFlags &Flags);
static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
                                              MemoryUse &MU);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn);
static SmallVector<SmallSetVector<Value *, 8>, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;

  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA);
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
                      << L->getHeader()->getNameOrAsOperand() << "\n");

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = EnableMSSALoopDependency
                          ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
                          : nullptr;
    bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
    BlockFrequencyInfo *BFI =
        hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                       : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent()),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent()),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information & requires that
  /// loop preheaders be inserted into the CFG...
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (EnableMSSALoopDependency) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    AU.addPreserved<LazyBlockFrequencyInfoPass>();
    AU.addPreserved<LazyBranchProbabilityInfoPass>();
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();

  return PA;
}

PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
                                 LoopStandardAnalysisResults &AR,
                                 LPMUpdater &) {
  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(LN.getParent());

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);

  Loop &OutermostLoop = LN.getOutermostLoop();
  bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, AR.BFI,
                                &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);

  if (!Changed)
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();

  return PA;
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
                                                   MemorySSA *MSSA)
    : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
                            IsSink, L, MSSA) {}

llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
    unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
    Loop *L, MemorySSA *MSSA)
    : LicmMssaOptCap(LicmMssaOptCap),
      LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
      IsSink(IsSink) {
  assert(((L != nullptr) == (MSSA != nullptr)) &&
         "Unexpected values for SinkAndHoistLICMFlags");
  if (!MSSA)
    return;

  unsigned AccessCapCount = 0;
  for (auto *BB : L->getBlocks())
    if (const auto *Accesses = MSSA->getBlockAccesses(BB))
      for (const auto &MA : *Accesses) {
        (void)MA;
        ++AccessCapCount;
        if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
          NoOfMemAccTooLarge = true;
          return;
        }
      }
}

/// Hoist expressions out of the specified loop. Note, alias info for the
/// inner loop is not preserved so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
    BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
    ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
    bool LoopNestMode) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  std::unique_ptr<AliasSetTracker> CurAST;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  std::unique_ptr<SinkAndHoistLICMFlags> Flags;

  // Don't sink stores from loops with coroutine suspend instructions.
  // LICM would sink instructions into the default destination of
  // the coroutine switch. The default destination of the switch is to
  // handle the case where the coroutine is suspended, by which point the
  // coroutine frame may have been destroyed. No instruction can be sunk there.
  // FIXME: This would unfortunately hurt the performance of coroutines, however
  // there is currently no general solution for this. Similar issues could also
  // potentially happen in other passes where instructions are being moved
  // across that edge.
  bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
    return llvm::any_of(*BB, [](Instruction &I) {
      IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
      return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
    });
  });

  if (!MSSA) {
    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
    CurAST = collectAliasInfoForLoop(L, LI, AA);
    Flags = std::make_unique<SinkAndHoistLICMFlags>(
        LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true);
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);
    Flags = std::make_unique<SinkAndHoistLICMFlags>(
        LicmMssaOptCap, LicmMssaNoAccForPromotionCap, /*IsSink=*/true, L, MSSA);
  }

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  if (L->hasDedicatedExits())
    Changed |=
        sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, TTI, L,
                   CurAST.get(), MSSAU.get(), &SafetyInfo, *Flags.get(), ORE);
  Flags->setIsSink(false);
  if (Preheader)
    Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
                           CurAST.get(), MSSAU.get(), SE, &SafetyInfo,
                           *Flags.get(), ORE, LoopNestMode);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !Flags->tooManyMemoryAccesses() && !HasCoroSuspendInst) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      if (MSSAU)
        MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        if (MSSAU)
          MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      bool Promoted = false;
      if (CurAST.get()) {
        // Loop over all of the alias sets in the tracker object.
        for (AliasSet &AS : *CurAST) {
          // We can promote this alias set if it has a store, if it is a "Must"
          // alias set, if the pointer is loop invariant, and if we are not
          // eliminating any volatile loads or stores.
          if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
              !L->isLoopInvariant(AS.begin()->getValue()))
            continue;

          assert(
              !AS.empty() &&
              "Must alias set should have at least one pointer element in it!");

          SmallSetVector<Value *, 8> PointerMustAliases;
          for (const auto &ASI : AS)
            PointerMustAliases.insert(ASI.getValue());

          Promoted |= promoteLoopAccessesToScalars(
              PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
              DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
        }
      } else {
        // Promoting one set of accesses may make the pointers for another set
        // loop invariant, so run this in a loop (with the MaybePromotable set
        // decreasing in size over time).
        bool LocalPromoted;
        do {
          LocalPromoted = false;
          for (const SmallSetVector<Value *, 8> &PointerMustAliases :
               collectPromotionCandidates(MSSA, AA, L)) {
            LocalPromoted |= promoteLoopAccessesToScalars(
                PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC,
                LI, DT, TLI, L, /*AST*/ nullptr, MSSAU.get(), &SafetyInfo, ORE);
          }
          Promoted |= LocalPromoted;
        } while (LocalPromoted);
      }

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (MSSAU.get() && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, BlockFrequencyInfo *BFI,
                      TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                      Loop *CurLoop, AliasSetTracker *CurAST,
                      MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // The instruction is not used in the loop if it is dead. In this case,
      // we just delete it instead of sinking it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FreeInLoop = false;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE)) {
        if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of this. When we then come to hoist an instruction that's
// conditional on such a branch we duplicate the branch and the relevant control
// flow, then hoist the instruction into the block corresponding to its original
// block in the duplicated control flow.
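// As an illustrative sketch (block names hypothetical): for a loop body
// containing an invariant diamond
//
//   guard:  br i1 %inv.cond, label %then, label %else
//   then:   ... br label %merge
//   else:   ... br label %merge
//   merge:  %phi = phi i32 [ %inv.a, %then ], [ %inv.b, %else ]
//
// the branch on %inv.cond is duplicated in the preheader along with empty
// then.licm/else.licm/merge.licm blocks, so that %phi (and instructions
// conditional on the branch) can be hoisted into the matching .licm block.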
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be
  // hoisted to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = llvm::find_if(*F, IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
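    // For example (a hypothetical snippet): a phi such as
    //   %p = phi i32 [ %inv.a, %then ], [ %inv.b, %else ]
    // qualifies when %then and %else are the destinations of a single
    // registered hoistable branch, so every predecessor is accounted for.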
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on if the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using "
                        << InitialPreheader->getNameOrAsOperand()
                        << " as hoist destination for "
                        << BB->getNameOrAsOperand() << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      if (MSSAU)
        MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
            HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

// Hoisting/sinking an instruction out of a loop isn't always beneficial. It's
// only worthwhile if the destination block is actually colder than the current
// block.
static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock,
                                 OptimizationRemarkEmitter *ORE,
                                 BlockFrequencyInfo *BFI) {
  // Check block frequency only when runtime profile is available
  // to avoid pathological cases. With static profile, lean towards
  // hoisting because it helps canonicalize the loop for vectorizer.
  if (!DstBlock->getParent()->hasProfileData())
    return true;

  if (!HoistSinkColdnessThreshold || !BFI)
    return true;

  BasicBlock *SrcBlock = I.getParent();
  if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold >
      BFI->getBlockFreq(SrcBlock).getFrequency()) {
    ORE->emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I)
             << "failed to sink or hoist instruction because containing block "
                "has lower frequency than destination block";
    });
    return false;
  }

  return true;
}
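
// As a worked example of the threshold above (numbers hypothetical): with the
// default HoistSinkColdnessThreshold of 4, a destination frequency of 1200 and
// a source frequency of 200, the move is rejected because 1200 / 4 = 300
// exceeds 200; the destination is not cold enough relative to the source.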

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, BlockFrequencyInfo *BFI,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                       ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE, bool LoopNestMode) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
      Instruction &I = *II++;
      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
                          << '\n');
        if (CurAST)
          CurAST->copyValue(&I, C);
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction. We also check block frequency
      // to make sure the instruction only gets hoisted into colder blocks.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) &&
          isSafeToExecuteUnconditionally(
              I, DT, TLI, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator())) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
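      // For instance (hypothetical IR), with a loop-invariant divisor %d,
      //   %q = fdiv arcp double %x, %d
      // becomes a reciprocal computed once outside the loop,
      //   preheader: %recip = fdiv arcp double 1.0, %d
      //   loop:      %q = fmul arcp double %x, %recip
      // trading a division per iteration for a single division overall.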
      if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
          CurLoop->isLoopInvariant(I.getOperand(1))) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to, to make
  // sure that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getNameOrAsOperand()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
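// For example (hypothetical IR): a load from %buf can be treated as invariant
// inside the loop when a dominating block contains something like
//   %i = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %buf)
// with %i unused, the size (4 bytes here) covering the loaded type, and the
// call's block properly dominating the loop header.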
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // It is not currently possible for clang to generate an invariant.start
  // intrinsic with scalable vector types because we don't support thread local
  // sizeless types and we don't permit sizeless types in structs or classes.
  // Furthermore, even if support is added for this in future the intrinsic
  // itself is defined to have a size of -1 for variable sized objects. This
  // makes it impossible to verify if the intrinsic envelops our region of
  // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
  // types would have a -1 parameter, but the former is clearly double the size
  // of the latter.
  if (LocSizeInBits.isScalable())
    return false;

  // If the type is i8 addrspace(x)*, we know this is the type of the
  // llvm.invariant.start operand.
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
                                     LI->getPointerAddressSpace());
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is the
  // invariant.start operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
    // The intrinsic supports having a -1 argument for variable sized objects
    // so we should check for that here.
    if (InvariantSize->isNegative())
      continue;
    uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
          isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
          isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I) || isa<FreezeInst>(I));
}
/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
                const Loop *L) {
  if (CurAST) {
    for (AliasSet &AS : *CurAST) {
      if (!AS.isForwardingAliasSet() && AS.isMod()) {
        return false;
      }
    }
    return true;
  } else { /*MSSAU*/
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater *MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, AliasSetTracker *CurAST,
                              MemorySSAUpdater *MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags *Flags,
                              OptimizationRemarkEmitter *ORE) {
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
  if (MSSA)
    assert(Flags != nullptr && "Flags cannot be null.");

  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    bool Invalidated;
    if (CurAST)
      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
                                             CurLoop, AA);
    else
      Invalidated = pointerInvalidatedByLoopWithMSSA(
          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    // The convergent attribute has been used on operations that involve
    // inter-thread communication, whose results are implicitly affected by the
    // enclosing control flow. It is not safe to hoist or sink such operations
    // across control flow.
    if (CI->isConvergent())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
      // Widenable conditions don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == FMRB_DoesNotAccessMemory)
      return true;
    if (AAResults::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
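      // For instance (an illustrative case, not exhaustive): a call such as
      //   %n = call i64 @strlen(i8* %s)    ; readonly argmemonly
      // only reads through %s, so if nothing in the loop can write to %s's
      // memory, the call computes the same value every iteration and can be
      // hoisted (or sunk, if its result is only used outside the loop).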
      if (AAResults::onlyAccessesArgPointees(Behavior)) {
        // TODO: expand to writeable arguments
        for (Value *Op : CI->arg_operands())
          if (Op->getType()->isPointerTy()) {
            bool Invalidated;
            if (CurAST)
              Invalidated = pointerInvalidatedByLoop(
                  MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
            else
              Invalidated = pointerInvalidatedByLoopWithMSSA(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
                  *Flags);
            if (Invalidated)
              return false;
          }
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(CurAST, MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    if (CurAST) {
      auto Begin = CurAST->begin();
      assert(Begin != CurAST->end() && "must contain FI");
      if (std::next(Begin) != CurAST->end())
        // constant memory for instance, TODO: handle better
        return false;
      auto *UniqueI = Begin->getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      (void)FI; // suppress unused variable warning
      assert(UniqueI == FI && "AS must contain FI");
      return true;
    } else // MSSAU
      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (CurAST) {
      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));

      if (AS.isRef() || !AS.isMustAlias())
        // Quick exit test, handled by the full path below as well.
        return false;
      auto *UniqueI = AS.getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      assert(UniqueI == SI && "AS must contain SI");
      return true;
    } else { // MSSAU
      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
        return true;
      // If there are more accesses than the Promotion cap or no "quota" to
      // check clobber, then give up as we're not walking a list that long.
      if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
        return false;
      // If there are interfering Uses (i.e. their defining access is in the
      // loop), or ordered loads (stored as Defs!), don't move this store.
      // Could do better here, but this is conservatively correct.
      // TODO: Cache set of Uses on the first walk in runOnLoop, update when
      // moving accesses. Can also extend to dominating uses.
      auto *SIMD = MSSA->getMemoryAccess(SI);
      for (auto *BB : CurLoop->getBlocks())
        if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
          for (const auto &MA : *Accesses)
            if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
              auto *MD = MU->getDefiningAccess();
              if (!MSSA->isLiveOnEntryDef(MD) &&
                  CurLoop->contains(MD->getBlock()))
                return false;
              // Disable hoisting past potentially interfering loads. Optimized
              // Uses may point to an access outside the loop, as getClobbering
              // checks the previous iteration when walking the backedge.
              // FIXME: More precise: no Uses that alias SI.
              if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
                return false;
            } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
              if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
                (void)LI; // Silence warning.
                assert(!LI->isUnordered() && "Expected ordered or volatile load");
                return false;
              }
              // Any call, while it may not be clobbering SI, may be a use.
              if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
                // Check if the call may read from the memory location written
                // to by SI. Check CI's attributes and arguments; the number of
                // such checks performed is limited by the access cap checked
                // above.
                ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
                if (isModOrRefSet(MRI))
                  return false;
              }
            }
        }
      auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
      Flags->incrementClobberingCalls();
      // If there are no clobbering Defs in the loop, the store is safe to
      // hoist.
      return MSSA->isLiveOnEntryDef(Source) ||
             !CurLoop->contains(Source->getBlock());
    }
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}

/// Return true if the instruction is free in the loop.
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {

  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
        TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getUserCost because currently it
    // optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  } else
    return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
           TargetTransformInfo::TCC_Free;
}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;
    }

    if (CurLoop->contains(UI)) {
      if (IsFree) {
        FreeInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU->insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
  // this is particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
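  //
  // For illustration (sketch): when sinking %inv through the LCSSA phi
  //   exit:
  //     %p = phi [%inv, %loopblock]
  // the clone %inv.le lands in the exit block, and any in-loop operand %op of
  // the clone gets its own %op.lcssa phi built from %p's incoming blocks.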
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (Use &Op : New->operands())
    if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
      auto *OInst = cast<Instruction>(Op.get());
      PHINode *OpPN =
          PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                          OInst->getName() + ".lcssa", &ExitBlock.front());
      for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
        OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
      Op = OpPN;
    }
  return New;
}

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
  if (AST)
    AST->deleteValue(&I);
  if (MSSAU)
    MSSAU->removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU,
                                  ScalarEvolution *SE) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
  I.moveBefore(&Dest);
  if (MSSAU)
    if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
            MSSAU->getMemorySSA()->getMemoryAccess(&I)))
      MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
                         MemorySSA::BeforeTerminator);
  if (SE)
    SE->forgetValue(&I);
}

static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater *MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}

static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already
  // exist, it requires updating BlockColors for all offspring blocks
  // accordingly. By skipping this corner case, we can make updating
  // BlockColors after splitting a predecessor fairly simple.
  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
    return false;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    BasicBlock *BBPred = *PI;
    if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
        isa<CallBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit so that instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping
  // the loop in the canonical form where each predecessor of each exit block
  // should be contained within the loop. For example, this will convert the
  // loop below from
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // to
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1] <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2] <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
      BasicBlock *NewPred = SplitBlockPredecessors(
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-blocks with BlockColors in
      // canSplitPredecessors(), we can simply assign the predecessor's color
      // to the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting
        // the reference to the vector we are copying because inserting the new
        // element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, const Loop *CurLoop,
                 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
                 OptimizationRemarkEmitter *ORE) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");

  // Iterate over users to be ready for actual sinking. Replace users via
  // unreachable blocks with undef and make all user PHIs trivially
  // replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);

    // We should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  if (VisitedUsers.empty())
    return Changed;

  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  // First check whether I is worth sinking for all uses; sink only when it is
  // worthwhile across all of them.
  SmallSetVector<User *, 8> Users(I.user_begin(), I.user_end());
  SmallVector<PHINode *, 8> ExitPNs;
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");
    if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) {
      return Changed;
    }

    ExitPNs.push_back(PN);
  }

  for (auto *PN : ExitPNs) {

    // The PHI must be trivially replaceable.
    Instruction *New = sinkThroughTriviallyReplaceablePHI(
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
    Changed = true;
  }
  return Changed;
}

/// When an instruction is found to use only loop-invariant operands and is
/// safe to hoist, this function is called to do the dirty work.
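///
/// For illustration (sketch): `%a = add i32 %x, %y` with loop-invariant %x
/// and %y simply moves from the loop body into the preheader (or another
/// dominating Dest block); its uses in the loop keep referring to %a.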
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
                    << I << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
                                                         << ore::NV("Inst", &I);
  });

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
  // Similarly, if I is a call and it is not guaranteed to execute in the loop,
  // then moving to the preheader means we should strip attributes on the call
  // that can cause UB since we may be hoisting above conditions that allowed
  // inferring those attributes. They may not be valid at the preheader.
  if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
      // The check on hasMetadataOtherThanDebugLoc is to prevent us from
      // burning time in isGuaranteedToExecute if we don't actually have
      // anything to drop. It is a compile-time optimization, not required for
      // correctness.
      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
    I.dropUndefImplyingAttrsAndUnknownMetadata();

  if (isa<PHINode>(I))
    // Move the new node to the end of the phi list in the destination block.
    moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
  else
    // Move the new node to the destination block, before its terminator.
    moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);

  I.updateLocationAfterHoist();

  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumHoisted;
}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// or if the instruction is known not to trap when moved to the preheader,
/// or if it is a trapping instruction and is guaranteed to execute.
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const TargetLibraryInfo *TLI,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI) {
  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
    return true;

  bool GuaranteedToExecute =
      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);

  if (!GuaranteedToExecute) {
    auto *LI = dyn_cast<LoadInst>(&Inst);
    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
               << "failed to hoist load with loop-invariant address "
                  "because load is conditionally executed";
      });
  }

  return GuaranteedToExecute;
}

namespace {
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
  const SmallSetVector<Value *, 8> &PointerMustAliases;
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  SmallVectorImpl<Instruction *> &LoopInsertPts;
  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
  PredIteratorCache &PredCache;
  AliasSetTracker *AST;
  MemorySSAUpdater *MSSAU;
  LoopInfo &LI;
  DebugLoc DL;
  int Alignment;
  bool UnorderedAtomic;
  AAMDNodes AATags;
  ICFLoopSafetyInfo &SafetyInfo;

  // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
  // (if legal) if doing so would add an out-of-loop use to an instruction
  // defined in-loop.
  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
      return V;

    Instruction *I = cast<Instruction>(V);
    // We need to create an LCSSA PHI node for the incoming value and
    // store that.
    PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                  I->getName() + ".lcssa", &BB->front());
    for (BasicBlock *Pred : PredCache.get(BB))
      PN->addIncoming(I, Pred);
    return PN;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               const SmallSetVector<Value *, 8> &PMA,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<Instruction *> &LIP,
               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
               AliasSetTracker *ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
               DebugLoc dl, int alignment, bool UnorderedAtomic,
               const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
        LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
        PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
        Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
        SafetyInfo(SafetyInfo) {}

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();
    return PointerMustAliases.count(Ptr);
  }

  void doExtraRewritesBeforeFinalDeletion() override {
    // Insert stores in the loop exit blocks. Each exit block gets a store of
    // the live-out value that feeds it. Since we've already told the SSA
    // updater about the defs in the loop and the preheader definition, it is
    // all set and we can start using it.
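    //
    // For illustration (sketch): with a single exit block and a promoted
    // pointer %ptr, the rewrite below emits roughly
    //   exit:
    //     store i32 %live.out, i32* %ptr
    // where %live.out is the value the SSA updater computes for that block.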
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      Instruction *InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      if (UnorderedAtomic)
        NewSI->setOrdering(AtomicOrdering::Unordered);
      NewSI->setAlignment(Align(Alignment));
      NewSI->setDebugLoc(DL);
      if (AATags)
        NewSI->setAAMetadata(AATags);

      if (MSSAU) {
        MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
        MemoryAccess *NewMemAcc;
        if (!MSSAInsertPoint) {
          NewMemAcc = MSSAU->createMemoryAccessInBB(
              NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
        } else {
          NewMemAcc =
              MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
        }
        MSSAInsertPts[i] = NewMemAcc;
        MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
        // FIXME: true for safety, false may still be correct.
      }
    }
  }

  void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
    // Update alias analysis.
    if (AST)
      AST->copyValue(LI, V);
  }
  void instructionDeleted(Instruction *I) const override {
    SafetyInfo.removeInstruction(I);
    if (AST)
      AST->deleteValue(I);
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);
  }
};

bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
                                 DominatorTree *DT) {
  // We can perform the captured-before check against any instruction in the
  // loop header, as the loop header is reachable from any instruction inside
  // the loop.
  // TODO: ReturnCaptures=true shouldn't be necessary here.
  return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
                                     /* StoreCaptures */ true,
                                     L->getHeader()->getTerminator(), DT);
}

/// Return true iff we can prove that a caller of this function cannot inspect
/// the contents of the provided object in a well-defined program.
bool isKnownNonEscaping(Value *Object, const Loop *L,
                        const TargetLibraryInfo *TLI, DominatorTree *DT) {
  if (isa<AllocaInst>(Object))
    // Since the alloca goes out of scope, we know the caller can't retain a
    // reference to it and be well defined. Thus, we don't need to check for
    // capture.
    return true;

  // For all other objects we need to know that the caller can't possibly
  // have gotten a reference to the object. There are two components of
  // that:
  //   1) Object can't be escaped by this function. This is what
  //      PointerMayBeCaptured checks.
  //   2) Object can't have been captured at definition site. For this, we
  //      need to know the return value is noalias. At the moment, we use a
  //      weaker condition and handle only AllocLikeFunctions (which are
  //      known to be noalias). TODO: generalize this.
  return isAllocLikeFn(Object, TLI) &&
         isNotCapturedBeforeOrInLoop(Object, L, DT);
}

} // namespace

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
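///
/// For example (sketch), when the conditions checked below hold,
///
///    for () { *P += 1; }
///
/// becomes
///
///    tmp = *P;  for () { tmp += 1; }  *P = tmp;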
///
bool llvm::promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<Instruction *> &InsertPts,
    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
    Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
    ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  Value *SomePtr = *PointerMustAliases.begin();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It is not safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp += 1; }  *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // p1) The memory may not be dereferenceable on entry to the loop. In this
  //     case, we can't insert the required load in the preheader.
  // p2) The memory model does not allow us to insert a store along any dynamic
  //     path which did not originally have one.
  //
  // If at least one store is guaranteed to execute, both properties are
  // satisfied, and promotion is legal.
  //
  // This, however, is not a necessary condition. Even if no store/load is
  // guaranteed to execute, we can still establish these properties.
  // We can establish (p1) by proving that hoisting the load into the preheader
  // is safe (i.e. proving dereferenceability on all paths through the loop).
  // We can use any access within the alias set to prove dereferenceability,
  // since they're all must alias.
  //
  // There are two ways to establish (p2):
  // a) Prove the location is thread-local. In this case the memory model
  //    requirement does not apply, and stores are safe to insert.
  // b) Prove a store dominates every exit block. In this case, if an exit
  //    block is reached, the original dynamic path would have taken us through
  //    the store, so inserting a store into the exit block is safe. Note that
  //    this is different from the store being guaranteed to execute. For
  //    instance, if an exception is thrown on the first iteration of the loop,
  //    the original store is never executed, but the exit blocks are not
  //    executed either.

  bool DereferenceableInPH = false;
  bool SafeToInsertStore = false;

  SmallVector<Instruction *, 64> LoopUses;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  Align Alignment;
  // Keep track of which types of access we see.
  bool SawUnorderedAtomic = false;
  bool SawNotAtomic = false;
  AAMDNodes AATags;

  const DataLayout &MDL = Preheader->getModule()->getDataLayout();

  bool IsKnownThreadLocalObject = false;
  if (SafetyInfo->anyBlockMayThrow()) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge. We do
    // this by proving that the caller can't have a reference to the object
    // after return and thus can't possibly load from the object.
    Value *Object = getUnderlyingObject(SomePtr);
    if (!isKnownNonEscaping(Object, CurLoop, TLI, DT))
      return false;
    // Subtlety: Alloca's aren't visible to callers, but *are* potentially
    // visible to other threads if captured and used during their lifetimes.
    IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
  }

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and AA info.
  for (Value *ASIV : PointerMustAliases) {
    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return false;

    for (User *U : ASIV->users()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't promote
      // it.
      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        if (!Load->isUnordered())
          return false;

        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();

        Align InstAlignment = Load->getAlign();

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location. Proving it guaranteed
        // to execute does as well. Thus we can increase our guaranteed
        // alignment as well.
        if (!DereferenceableInPH || (InstAlignment > Alignment))
          if (isSafeToExecuteUnconditionally(*Load, DT, TLI, CurLoop,
                                             SafetyInfo, ORE,
                                             Preheader->getTerminator())) {
            DereferenceableInPH = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (UI->getOperand(1) != ASIV)
          continue;
        if (!Store->isUnordered())
          return false;

        SawUnorderedAtomic |= Store->isAtomic();
        SawNotAtomic |= !Store->isAtomic();

        // If the store is guaranteed to execute, both properties are
        // satisfied. We may want to check if a store is guaranteed to execute
        // even if we already know that promotion is safe, since it may have
        // higher alignment than any other guaranteed stores, in which case we
        // can raise the alignment on the promoted store.
        Align InstAlignment = Store->getAlign();

        if (!DereferenceableInPH || !SafeToInsertStore ||
            (InstAlignment > Alignment)) {
          if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
            DereferenceableInPH = true;
            SafeToInsertStore = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
        }

        // If a store dominates all exit blocks, it is safe to sink.
        // As explained above, if an exit block was executed, a dominating
        // store must have been executed at least once, so we are not
        // introducing stores on paths that did not have them.
        // Note that this only looks at explicit exit blocks. If we ever
        // start sinking stores into unwind edges (see above), this will break.
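        //
        // For illustration (sketch): in
        //   for (...) { *P = v; if (c) break; }
        // the store dominates every exit block, so placing copies of it in
        // the exit blocks adds no store to any path that lacked one.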
        if (!SafeToInsertStore)
          SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
            return DT->dominates(Store->getParent(), Exit);
          });

        // If the store is not guaranteed to execute, we may still get
        // deref info through it.
        if (!DereferenceableInPH) {
          DereferenceableInPH = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getValueOperand()->getType(),
              Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
        }
      } else
        return false; // Not a load or store.

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        UI->getAAMetadata(AATags);
      } else if (AATags) {
        UI->getAAMetadata(AATags, /* Merge = */ true);
      }

      LoopUses.push_back(UI);
    }
  }

  // If we found both an unordered atomic instruction and a non-atomic memory
  // access, bail. We can't blindly promote non-atomic to atomic since we
  // might not be able to lower the result. We can't downgrade since that
  // would violate the memory model. Also, align 0 is an error for atomics.
  if (SawUnorderedAtomic && SawNotAtomic)
    return false;

  // If we're inserting an atomic load in the preheader, we must be able to
  // lower it. We're only guaranteed to be able to lower naturally aligned
  // atomics.
  auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
  if (SawUnorderedAtomic &&
      Alignment < MDL.getTypeStoreSize(SomePtrElemType))
    return false;

  // If we couldn't prove we can hoist the load, bail.
  if (!DereferenceableInPH)
    return false;

  // We know we can hoist the load, but don't have a guaranteed store.
  // Check whether the location is thread-local. If it is, then we can insert
  // stores along paths which originally didn't have them without violating the
  // memory model.
  if (!SafeToInsertStore) {
    if (IsKnownThreadLocalObject)
      SafeToInsertStore = true;
    else {
      Value *Object = getUnderlyingObject(SomePtr);
      SafeToInsertStore =
          (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
          isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
    }
  }

  // If we've still failed to prove we can sink the store, give up.
  if (!SafeToInsertStore)
    return false;

  // Otherwise, this is safe to promote, let's do it!
  LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
                    << '\n');
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
                              LoopUses[0])
           << "Moving accesses to memory location out of the loop";
  });
  ++NumPromoted;

  // Look at all the loop uses, and try to merge their locations.
  std::vector<const DILocation *> LoopUsesLocs;
  for (auto U : LoopUses)
    LoopUsesLocs.push_back(U->getDebugLoc().get());
  auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode *, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        InsertPts, MSSAInsertPts, PIC, CurAST, MSSAU, *LI, DL,
                        Alignment.value(), SawUnorderedAtomic, AATags,
                        *SafetyInfo);

  // Set up the preheader to have a definition of the value. It is the
  // live-out value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad = new LoadInst(
      SomePtr->getType()->getPointerElementType(), SomePtr,
      SomePtr->getName() + ".promoted", Preheader->getTerminator());
  if (SawUnorderedAtomic)
    PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DebugLoc());
  if (AATags)
    PreheaderLoad->setAAMetadata(AATags);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  if (MSSAU) {
    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
    MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);

  return true;
}

static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
                                function_ref<void(Instruction *)> Fn) {
  for (const BasicBlock *BB : L->blocks())
    if (const auto *Accesses = MSSA->getBlockAccesses(BB))
      for (const auto &Access : *Accesses)
        if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
          Fn(MUD->getMemoryInst());
}

static SmallVector<SmallSetVector<Value *, 8>, 0>
collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
  AliasSetTracker AST(*AA);

  auto IsPotentiallyPromotable = [L](const Instruction *I) {
    if (const auto *SI = dyn_cast<StoreInst>(I))
      return L->isLoopInvariant(SI->getPointerOperand());
    if (const auto *LI = dyn_cast<LoadInst>(I))
      return L->isLoopInvariant(LI->getPointerOperand());
    return false;
  };

  // Populate AST with potentially promotable accesses, and remember them so
  // that we can skip them when looking for aliasing non-promotable accesses
  // below.
  SmallPtrSet<Value *, 16> AttemptingPromotion;
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (IsPotentiallyPromotable(I)) {
      AttemptingPromotion.insert(I);
      AST.add(I);
    }
  });

  // We're only interested in must-alias sets that contain a mod.
  SmallVector<const AliasSet *, 8> Sets;
  for (AliasSet &AS : AST)
    if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
      Sets.push_back(&AS);

  if (Sets.empty())
    return {}; // Nothing to promote...

  // Discard any sets for which there is an aliasing non-promotable access.
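  // E.g. (sketch): a call that may read or write the promoted location, or a
  // load/store through a pointer that is not loop-invariant, knocks out every
  // candidate set it may alias.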
  foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
    if (AttemptingPromotion.contains(I))
      return;

    llvm::erase_if(Sets, [&](const AliasSet *AS) {
      return AS->aliasesUnknownInst(I, *AA);
    });
  });

  SmallVector<SmallSetVector<Value *, 8>, 0> Result;
  for (const AliasSet *Set : Sets) {
    SmallSetVector<Value *, 8> PointerMustAliases;
    for (const auto &ASI : *Set)
      PointerMustAliases.insert(ASI.getValue());
    Result.push_back(std::move(PointerMustAliases));
  }

  return Result;
}

/// Returns an owning pointer to an alias set which incorporates aliasing info
/// from L and all subloops of L.
std::unique_ptr<AliasSetTracker>
LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
                                                 AAResults *AA) {
  auto CurAST = std::make_unique<AliasSetTracker>(*AA);

  // Add everything from all the sub loops.
  for (Loop *InnerL : L->getSubLoops())
    for (BasicBlock *BB : InnerL->blocks())
      CurAST->add(*BB);

  // And merge in this loop (without anything from inner loops).
  for (BasicBlock *BB : L->blocks())
    if (LI->getLoopFor(BB) == L)
      CurAST->add(*BB);

  return CurAST;
}

static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA) {
  // First check to see if any of the basic blocks in CurLoop invalidate *V.
  bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();

  if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
    return isInvalidatedAccordingToAST;

  // Check with a diagnostic analysis if we can refine the information above.
  // This is to identify the limitations of using the AST.
  // The alias set mechanism used by LICM has a major weakness in that it
  // combines all things which may alias into a single set *before* asking
  // modref questions. As a result, a single readonly call within a loop will
  // collapse all loads and stores into a single alias set and report
  // invalidation if the loop contains any store. For example, readonly calls
  // with deopt states have this form and create a general alias set with all
  // loads and stores. In order to get any LICM in loops containing possible
  // deopt states, we need a more precise invalidation: checking the mod/ref
  // info of each instruction within the loop against the given memory
  // location. This has a complexity of O(N^2), so currently, it is used only
  // as a diagnostic tool since the default value of LICMN2Theshold is zero.

  // Don't look at nested loops.
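  // (Loop::begin()/end() iterate over subloops, so a non-empty range means
  //  CurLoop contains a nested loop and we conservatively report
  //  invalidation.)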
  if (CurLoop->begin() != CurLoop->end())
    return true;

  int N = 0;
  for (BasicBlock *BB : CurLoop->getBlocks())
    for (Instruction &I : *BB) {
      if (N >= LICMN2Theshold) {
        LLVM_DEBUG(dbgs() << "Aliasing N2 threshold exhausted for "
                          << *(MemLoc.Ptr) << "\n");
        return true;
      }
      N++;
      auto Res = AA->getModRefInfo(&I, MemLoc);
      if (isModSet(Res)) {
        LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
                          << *(MemLoc.Ptr) << "\n");
        return true;
      }
    }
  LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
  return false;
}

bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                      Loop *CurLoop, Instruction &I,
                                      SinkAndHoistLICMFlags &Flags) {
  // For hoisting, use the walker to determine safety.
  if (!Flags.getIsSink()) {
    MemoryAccess *Source;
    // See declaration of SetLicmMssaOptCap for usage details.
    if (Flags.tooManyClobberingCalls())
      Source = MU->getDefiningAccess();
    else {
      Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
      Flags.incrementClobberingCalls();
    }
    return !MSSA->isLiveOnEntryDef(Source) &&
           CurLoop->contains(Source->getBlock());
  }

  // For sinking, we'd need to check all Defs below this use. The getClobbering
  // call will look on the backedge of the loop, but will check aliasing with
  // the instructions on the previous iteration.
  // For example:
  //   for (i ... )
  //     load a[i]   ; MemoryUse, defined by LiveOnEntry
  //     store a[i]  ; 1 = MemoryDef(2), with 2 = MemoryPhi for the loop
  //     i++;
  // The load sees no clobbering inside the loop, as the backedge alias check
  // does phi translation, and will check aliasing against store a[i-1].
  // However, sinking the load outside the loop, below the store, is incorrect.

  // For now, only sink if there are no Defs in the loop, and the existing ones
  // precede the use and are in the same block.
  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
  // needs PostDominatorTreeAnalysis.
  // FIXME: More precise: no Defs that alias this Use.
  if (Flags.tooManyMemoryAccesses())
    return true;
  for (auto *BB : CurLoop->getBlocks())
    if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
      return true;
  // When sinking, the source block may not be part of the loop, so check it.
  if (!CurLoop->contains(&I))
    return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);

  return false;
}

bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
                                       MemoryUse &MU) {
  if (const auto *Accesses = MSSA.getBlockDefs(&BB))
    for (const auto &MA : *Accesses)
      if (const auto *MD = dyn_cast<MemoryDef>(&MA))
        if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
          return true;
  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}
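
// E.g. (sketch): for nested loops
//   for (i ...)      // CurLoop
//     for (j ...)    // subloop
// blocks of the j-loop make inSubLoop() return true when CurLoop is the
// i-loop; such blocks are skipped by the main hoisting/sinking walks and are
// handled when LICM visits the inner loop itself.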