//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it
// is safe. This pass also promotes must-aliased memory locations in the loop
// to live in registers, thus hoisting and sinking "invariant" loads and
// stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant.
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in
//     the loop of the pointer to use a temporary alloca'd variable. We then
//     use the SSAUpdater to construct the appropriate SSA form for the value.
//
//===----------------------------------------------------------------------===//
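
// Illustrative sketch (hypothetical IR, not from any test case): given a
// loop-invariant pointer %p, hoisting turns
//
//   loop:
//     %v = load i32, i32* %p    ; reloaded every iteration
//     ...
//
// into
//
//   preheader:
//     %v = load i32, i32* %p    ; loaded once before the loop
//   loop:
//     ...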
#include "llvm/Transforms/Utils/Local.h" 74 #include "llvm/Transforms/Utils/LoopUtils.h" 75 #include "llvm/Transforms/Utils/SSAUpdater.h" 76 #include <algorithm> 77 #include <utility> 78 using namespace llvm; 79 80 #define DEBUG_TYPE "licm" 81 82 STATISTIC(NumCreatedBlocks, "Number of blocks created"); 83 STATISTIC(NumClonedBranches, "Number of branches cloned"); 84 STATISTIC(NumSunk, "Number of instructions sunk out of loop"); 85 STATISTIC(NumHoisted, "Number of instructions hoisted out of loop"); 86 STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk"); 87 STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk"); 88 STATISTIC(NumPromoted, "Number of memory locations promoted to registers"); 89 90 /// Memory promotion is enabled by default. 91 static cl::opt<bool> 92 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), 93 cl::desc("Disable memory promotion in LICM pass")); 94 95 static cl::opt<bool> ControlFlowHoisting( 96 "licm-control-flow-hoisting", cl::Hidden, cl::init(false), 97 cl::desc("Enable control flow (and PHI) hoisting in LICM")); 98 99 static cl::opt<uint32_t> MaxNumUsesTraversed( 100 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8), 101 cl::desc("Max num uses visited for identifying load " 102 "invariance in loop using invariant start (default = 8)")); 103 104 // Default value of zero implies we use the regular alias set tracker mechanism 105 // instead of the cross product using AA to identify aliasing of the memory 106 // location we are interested in. 107 static cl::opt<int> 108 LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0), 109 cl::desc("How many instruction to cross product using AA")); 110 111 // Experimental option to allow imprecision in LICM in pathological cases, in 112 // exchange for faster compile. This is to be removed if MemorySSA starts to 113 // address the same issue. This flag applies only when LICM uses MemorySSA 114 // instead on AliasSetTracker. LICM calls MemorySSAWalker's 115 // getClobberingMemoryAccess, up to the value of the Cap, getting perfect 116 // accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess, 117 // which may not be precise, since optimizeUses is capped. The result is 118 // correct, but we may not get as "far up" as possible to get which access is 119 // clobbering the one queried. 120 cl::opt<unsigned> llvm::SetLicmMssaOptCap( 121 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden, 122 cl::desc("Enable imprecision in LICM in pathological cases, in exchange " 123 "for faster compile. Caps the MemorySSA clobbering calls.")); 124 125 // Experimentally, memory promotion carries less importance than sinking and 126 // hoisting. Limit when we do promotion when using MemorySSA, in order to save 127 // compile time. 128 cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap( 129 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden, 130 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no " 131 "effect. 

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AliasAnalysis *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop,
                                             SinkAndHoistLICMFlags &Flags);
static Instruction *CloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);

namespace {
struct LoopInvariantCodeMotion {
  using ASTrackerMapTy = DenseMap<Loop *, std::unique_ptr<AliasSetTracker>>;
  bool runOnLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
                 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                 ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool DeleteAST);

  ASTrackerMapTy &getLoopToAliasSetMap() { return LoopToAliasSetMap; }

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}

private:
  ASTrackerMapTy LoopToAliasSetMap;
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;

  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AliasAnalysis *AA);
  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoopWithMSSA(Loop *L, AliasAnalysis *AA,
                                  MemorySSAUpdater *MSSAU);
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L)) {
      // If we have run LICM on a previous loop but now we are skipping
      // (because we've hit the opt-bisect limit), we need to clear the
      // loop alias information.
      LICM.getLoopToAliasSetMap().clear();
      return false;
    }

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = EnableMSSALoopDependency
                          ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
                          : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(L,
                          &getAnalysis<AAResultsWrapperPass>().getAAResults(),
                          &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
                          &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
                          &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
                              *L->getHeader()->getParent()),
                          &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
                              *L->getHeader()->getParent()),
                          SE ? &SE->getSE() : nullptr, MSSA, &ORE, false);
  }

  /// This transformation requires natural loop information and requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (EnableMSSALoopDependency) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  using llvm::Pass::doFinalization;

  bool doFinalization() override {
    auto &AliasSetMap = LICM.getLoopToAliasSetMap();
    // All loops in the AliasSetMap should be cleaned up already. The only case
    // where we fail to do so is if an outer loop gets deleted before LICM
    // visits it.
    assert(all_of(AliasSetMap,
                  [](LoopInvariantCodeMotion::ASTrackerMapTy::value_type &KV) {
                    return !KV.first->getParentLoop();
                  }) &&
           "Didn't free loop alias sets");
    AliasSetMap.clear();
    return false;
  }

private:
  LoopInvariantCodeMotion LICM;

  /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
  void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
                               Loop *L) override;

  /// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
  /// set.
  void deleteAnalysisValue(Value *V, Loop *L) override;

  /// Simple Analysis hook. Delete loop L from alias set map.
  void deleteAnalysisLoop(Loop *L) override;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  const auto &FAM =
      AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
  Function *F = L.getHeader()->getParent();

  auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F);
  // FIXME: This should probably be optional rather than required.
  if (!ORE)
    report_fatal_error("LICM: OptimizationRemarkEmitterAnalysis not "
                       "cached at a higher level");

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.TTI, &AR.SE,
                      AR.MSSA, ORE, true))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();

  return PA;
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
}

/// Hoist expressions out of the specified loop. Note that alias info for the
/// inner loop is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
/// We should delete the AST for inner loops in the new pass manager to avoid
/// a memory leak.
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
    TargetLibraryInfo *TLI, TargetTransformInfo *TTI, ScalarEvolution *SE,
    MemorySSA *MSSA, OptimizationRemarkEmitter *ORE, bool DeleteAST) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed
  // then just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  std::unique_ptr<AliasSetTracker> CurAST;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  bool NoOfMemAccTooLarge = false;
  unsigned LicmMssaOptCounter = 0;

  if (!MSSA) {
    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
    CurAST = collectAliasInfoForLoop(L, LI, AA);
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

    unsigned AccessCapCount = 0;
    for (auto *BB : L->getBlocks()) {
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses) {
          (void)MA;
          AccessCapCount++;
          if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
            NoOfMemAccTooLarge = true;
            break;
          }
        }
      }
      if (NoOfMemAccTooLarge)
        break;
    }
  }

  // Get the preheader block to move instructions into.
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo(DT);
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not
  // part of our subloops (they have already had their invariants hoisted out
  // of their loop, into this loop, so there is no need to process the BODIES
  // of the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree
  // so that we are guaranteed to see definitions before we see uses.
  // This allows us to sink instructions in one pass, without iteration. After
  // sinking instructions, we perform another pass to hoist them out of the
  // loop.
  SinkAndHoistLICMFlags Flags = {NoOfMemAccTooLarge, LicmMssaOptCounter,
                                 LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                                 /*IsSink=*/true};
  if (L->hasDedicatedExits())
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE);
  Flags.IsSink = false;
  if (Preheader)
    Changed |=
        hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
                    CurAST.get(), MSSAU.get(), SE, &SafetyInfo, Flags, ORE);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !NoOfMemAccTooLarge) {
    // Figure out the loop exits and their insertion points.
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      if (MSSAU)
        MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        if (MSSAU)
          MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      bool Promoted = false;

      // Build an AST using MSSA.
      if (!CurAST.get())
        CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get());

      // Loop over all of the alias sets in the tracker object.
      for (AliasSet &AS : *CurAST) {
        // We can promote this alias set if it has a store, if it is a "Must"
        // alias set, if the pointer is loop invariant, and if we are not
        // eliminating any volatile loads or stores.
        if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
            !L->isLoopInvariant(AS.begin()->getValue()))
          continue;

        assert(
            !AS.empty() &&
            "Must alias set should have at least one pointer element in it!");

        SmallSetVector<Value *, 8> PointerMustAliases;
        for (const auto &ASI : AS)
          PointerMustAliases.insert(ASI.getValue());

        Promoted |= promoteLoopAccessesToScalars(
            PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
            DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
      }

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values
      // defined within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and
      // reformed it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM
  // is specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  // If this loop is nested inside of another one, save the alias information
  // for when we process the outer loop.
  if (!MSSAU.get() && CurAST.get() && L->getParentLoop() && !DeleteAST)
    LoopToAliasSetMap[L] = std::move(CurAST);

  if (MSSAU.get() && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                      ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // If the instruction is dead, we would try to sink it because it isn't
      // used in the loop; instead, just delete it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
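      // As a hedged illustration (hypothetical IR, not from a test case): a
      // value computed in the loop whose only user is an LCSSA phi in an exit
      // block can be sunk next to that user:
      //
      //   loop:
      //     %a = add i32 %x, %y              ; no users inside the loop
      //     ...
      //   exit:
      //     %a.lcssa = phi i32 [ %a, %loop ]
      //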
      bool FreeInLoop = false;
      if (isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          !I.mayHaveSideEffects()) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control
// flow in order to be able to hoist phis. The way this works is that we
// initially start hoisting to the loop preheader, and when we see a loop
// invariant branch we make note of this. When we then come to hoist an
// instruction that's conditional on such a branch we duplicate the branch and
// the relevant control flow, then hoist the instruction into the block
// corresponding to its original block in the duplicated control flow.
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from.
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be
  // hoisted to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as that's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the
    // other, or if both have a common successor, which we check by seeing if
    // the intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor, use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = std::find_if(F->begin(), F->end(), IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch, so any phi we hoist would be controlled by
    // the wrong condition. This also takes care of avoiding hoisting of loop
    // back edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether
        // the branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that.
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch.
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = std::find_if(HoistableBranches.begin(), HoistableBranches.end(),
                           HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader.
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using " << InitialPreheader->getName()
                        << " as hoist destination for " << BB->getName()
                        << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them.
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      if (MSSAU)
        MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
            HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Return true if we know how to rewrite all uses of the given alloca after
/// hoisting it out of the loop. The main concerns are a) potential captures
/// and b) invariant.start markers which don't capture, but are no longer
/// valid w/o a corresponding invariant.end.
static bool canRewriteUsesOfAlloca(AllocaInst &AI) {
  // TODO: This looks a lot like capture tracking, but we need to remove any
  // invariant starts if we extend the lifetime of the alloca by hoisting it.
  // We should probably refactor capture tracking into a form which allows us
  // to reuse the relevant bits and remove the duplicated logic here.

  SmallVector<Use *, 16> Worklist;
  for (Use &U : AI.uses())
    Worklist.push_back(&U);

  unsigned NumUsesExplored = 0;
  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());
    NumUsesExplored++;
    if (NumUsesExplored > DefaultMaxUsesToExplore)
      return false;
    // Non-capturing, terminating uses.
    if (isa<LoadInst>(I) ||
        (isa<StoreInst>(I) && U->getOperandNo() == 1))
      continue;
    // Non-capturing, non-terminating uses must be bitcasts or GEPs, which we
    // look through.
    if (!isa<BitCastInst>(I) && !isa<GetElementPtrInst>(I))
      return false;
    for (Use &U : I->uses())
      Worklist.push_back(&U);
  }
  return true;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                       DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                       ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
      Instruction &I = *II++;
      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
                          << '\n');
        if (CurAST)
          CurAST->copyValue(&I, C);
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction.
      // TODO: It may be safe to hoist if we are hoisting to a conditional
      // block and we have accurately duplicated the control flow from the
      // loop header to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator())) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
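      // As a hedged sketch of the rewrite below (hypothetical IR), with a
      // loop-invariant divisor %inv:
      //
      //   %d = fdiv arcp double %x, %inv
      //
      // becomes a multiply by a reciprocal that can then be hoisted:
      //
      //   %recip = fdiv arcp double 1.0, %inv   ; loop invariant
      //   %d = fmul arcp double %x, %recip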
      if (I.getOpcode() == Instruction::FDiv &&
          CurLoop->isLoopInvariant(I.getOperand(1)) &&
          I.hasAllowReciprocal()) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (isa<AllocaInst>(&I) &&
          SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
          canRewriteUsesOfAlloca(cast<AllocaInst>(I))) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so, make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its
  // operands, and also keep track of where in the block we are rehoisting to,
  // to make sure that we rehoist instructions before the instructions that
  // use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getName()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
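//
// For illustration (a hedged sketch, hypothetical IR), the pattern looked for
// is roughly:
//
//   %p8 = bitcast i32* %p to i8*
//   %inv = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p8)
//   ...                        ; %inv has no (escaping) uses
//   loop:
//     %v = load i32, i32* %p   ; covered by the 4-byte invariant region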
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const uint32_t LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // If the type is i8 addrspace(x)*, we know this is the type of the
  // llvm.invariant.start operand.
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
                                     LI->getPointerAddressSpace());
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is the
  // invariant.start operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    unsigned InvariantSizeInBits =
        cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand
    // size in bits. Also, the invariant.start should dominate the load, and
    // we should not hoist the load out of a loop that contains this
    // dominating invariant.start.
    if (LocSizeInBits <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop. Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) ||
          isa<UnaryOperator>(I) || isa<BinaryOperator>(I) ||
          isa<SelectInst>(I) || isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I));
}

/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
                const Loop *L) {
  if (CurAST) {
    for (AliasSet &AS : *CurAST) {
      if (!AS.isForwardingAliasSet() && AS.isMod()) {
        return false;
      }
    }
    return true;
  } else { /*MSSAU*/
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater *MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, AliasSetTracker *CurAST,
                              MemorySSAUpdater *MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags *Flags,
                              OptimizationRemarkEmitter *ORE) {
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
  if (MSSA)
    assert(Flags != nullptr && "Flags cannot be null.");

  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered loads.

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    bool Invalidated;
    if (CurAST)
      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
                                             CurLoop, AA);
    else
      Invalidated = pointerInvalidatedByLoopWithMSSA(
          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, *Flags);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw.
      return true;

    if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
      // Widenable conditions don't actually alias anything or throw.
      return true;

    // Handle simple cases by querying alias analysis.
    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == FMRB_DoesNotAccessMemory)
      return true;
    if (AliasAnalysis::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets. If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
      if (AliasAnalysis::onlyAccessesArgPointees(Behavior)) {
        // TODO: expand to writeable arguments.
        for (Value *Op : CI->arg_operands())
          if (Op->getType()->isPointerTy()) {
            bool Invalidated;
            if (CurAST)
              Invalidated = pointerInvalidatedByLoop(
                  MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()),
                  CurAST, CurLoop, AA);
            else
              Invalidated = pointerInvalidatedByLoopWithMSSA(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop,
                  *Flags);
            if (Invalidated)
              return false;
          }
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(CurAST, MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering. For the moment,
    // just give up if there are any other memory operations in the loop.
    if (CurAST) {
      auto Begin = CurAST->begin();
      assert(Begin != CurAST->end() && "must contain FI");
      if (std::next(Begin) != CurAST->end())
        // constant memory for instance, TODO: handle better
        return false;
      auto *UniqueI = Begin->getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      (void)FI; // suppress unused variable warning
      assert(UniqueI == FI && "AS must contain FI");
      return true;
    } else // MSSAU
      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop. For those cases, we fall back to
    // load/store promotion instead. TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
    if (CurAST) {
      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));

      if (AS.isRef() || !AS.isMustAlias())
        // Quick exit test, handled by the full path below as well.
        return false;
      auto *UniqueI = AS.getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      assert(UniqueI == SI && "AS must contain SI");
      return true;
    } else { // MSSAU
      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
        return true;
      // If there are more accesses than the Promotion cap, give up: we're not
      // walking a list that long.
      if (Flags->NoOfMemAccTooLarge)
        return false;
      // Check store only if there's still "quota" to check clobber.
      if (Flags->LicmMssaOptCounter >= Flags->LicmMssaOptCap)
        return false;
      // If there are interfering Uses (i.e. their defining access is in the
      // loop), or ordered loads (stored as Defs!), don't move this store.
      // Could do better here, but this is conservatively correct.
      // TODO: Cache set of Uses on the first walk in runOnLoop, update when
      // moving accesses. Can also extend to dominating uses.
      auto *SIMD = MSSA->getMemoryAccess(SI);
      for (auto *BB : CurLoop->getBlocks())
        if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
          for (const auto &MA : *Accesses)
            if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
              auto *MD = MU->getDefiningAccess();
              if (!MSSA->isLiveOnEntryDef(MD) &&
                  CurLoop->contains(MD->getBlock()))
                return false;
              // Disable hoisting past potentially interfering loads. Optimized
              // Uses may point to an access outside the loop, as getClobbering
              // checks the previous iteration when walking the backedge.
              // FIXME: More precise: no Uses that alias SI.
              if (!Flags->IsSink && !MSSA->dominates(SIMD, MU))
                return false;
            } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
              if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
                (void)LI; // Silence warning.
                assert(!LI->isUnordered() && "Expected unordered load");
                return false;
              }
              // Any call, while it may not be clobbering SI, may be a use.
              if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
                // Check if the call may read from the memory location written
                // to by SI. Check CI's attributes and arguments; the number of
                // such checks performed is limited above by NoOfMemAccTooLarge.
                ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
                if (isModOrRefSet(MRI))
                  return false;
              }
            }
        }

      auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
      Flags->LicmMssaOptCounter++;
      // If there are no clobbering Defs in the loop, the store is safe to
      // hoist.
      return MSSA->isLiveOnEntryDef(Source) ||
             !CurLoop->contains(Source->getBlock());
    }
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}

/// Return true if the instruction is free in the loop.
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {

  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (TTI->getUserCost(GEP) != TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getUserCost because currently it
    // optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  } else
    return TTI->getUserCost(&I) == TargetTransformInfo::TCC_Free;
}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
/// Return true if the instruction is free in the loop.
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {

  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (TTI->getUserCost(GEP) != TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getUserCost because currently it
    // optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  } else
    return TTI->getUserCost(&I) == TargetTransformInfo::TCC_Free;
}
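// Sketch of the GEP case above (illustrative IR): a GEP whose only in-loop
// users are loads/stores in its own block is treated as free, since it is
// expected to fold into their addressing modes:
//
//   body:
//     %addr = getelementptr i32, i32* %base, i64 %i
//     %v = load i32, i32* %addr    ; same block, load -> the GEP stays free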
/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet. Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;
    }

    if (CurLoop->contains(UI)) {
      if (IsFree) {
        FreeInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

static Instruction *CloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions. The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU->insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands. Note that this is
  // particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
       ++OI)
    if (Instruction *OInst = dyn_cast<Instruction>(*OI))
      if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
        if (!OLoop->contains(&PN)) {
          PHINode *OpPN =
              PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                              OInst->getName() + ".lcssa", &ExitBlock.front());
          for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
            OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
          *OI = OpPN;
        }
  return New;
}
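// For example (illustrative), sinking %v from the loop body into an exit block
// yields a clone named %v.le ("loop exit"), and an in-loop operand %op is
// rerouted through a fresh %op.lcssa PHI built from the PHI being replaced:
//
//   exit:
//     %op.lcssa = phi i32 [ %op, %latch ]
//     %v.le = add i32 %op.lcssa, 1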
static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
  if (AST)
    AST->deleteValue(&I);
  if (MSSAU)
    MSSAU->removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU,
                                  ScalarEvolution *SE) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
  I.moveBefore(&Dest);
  if (MSSAU)
    if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
            MSSAU->getMemorySSA()->getMemoryAccess(&I)))
      MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
                         MemorySSA::BeforeTerminator);
  if (SE)
    SE->forgetValue(&I);
}

static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater *MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = CloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}

static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already
  // exists, it requires updating BlockColors for all offspring blocks
  // accordingly. By skipping this corner case, we can keep the BlockColors
  // update after splitting predecessors fairly simple.
  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
    return false;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    BasicBlock *BBPred = *PI;
    if (isa<IndirectBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit so that instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping
  // the loop in the canonical form, where each predecessor of each exit block
  // should be contained within the loop. For example, this will convert the
  // loop below from
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // to
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1] <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2] <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
      BasicBlock *NewPred = SplitBlockPredecessors(
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-blocks with BlockColors in
      // canSplitPredecessors(), we can simply assign the predecessor's color
      // to the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting
        // the reference to the vector we are copying, because inserting the
        // new element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}
/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  bool Changed = false;
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

  // Iterate over users to be ready for actual sinking. Replace uses reached
  // via unreachable blocks with undef and make all user PHIs trivially
  // replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits. This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);

    // Should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  if (VisitedUsers.empty())
    return Changed;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  SmallSetVector<User *, 8> Users(I.user_begin(), I.user_end());
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");
    // The PHI must be trivially replaceable.
    Instruction *New = sinkThroughTriviallyReplaceablePHI(
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
    Changed = true;
  }
  return Changed;
}
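// The overall effect of sinking, in a simplified picture (illustrative IR; the
// now-dead original is erased by the caller):
//
//   loop:                                loop:
//     %v = add i32 %a, %b                  ...
//     ...                          =>
//   exit:                                exit:
//     %p = phi i32 [ %v, %loop ]           %v.le = add i32 %a, %b
//     use(%p)                              use(%v.le)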
/// When an instruction is found to only use loop-invariant operands and is
/// safe to hoist, this function is called to do the dirty work.
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I
                    << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I)
           << "hoisting " << ore::NV("Inst", &I);
  });

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
  if (I.hasMetadataOtherThanDebugLoc() &&
      // The check on hasMetadataOtherThanDebugLoc is to prevent us from
      // burning time in isGuaranteedToExecute if we don't actually have
      // anything to drop. It is a compile-time optimization, not required for
      // correctness.
      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
    I.dropUnknownNonDebugMetadata();

  if (isa<PHINode>(I))
    // Move the new node to the end of the phi list in the destination block.
    moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
  else
    // Move the new node to the destination block, before its terminator.
    moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);

  // Apply line 0 debug locations when we are moving instructions to different
  // basic blocks because we want to avoid jumpy line tables.
  if (const DebugLoc &DL = I.getDebugLoc())
    I.setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));

  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumHoisted;
}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// if it is known not to trap when moved to the preheader, or if it is a
/// trapping instruction that is guaranteed to execute.
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI) {
  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
    return true;

  bool GuaranteedToExecute =
      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);

  if (!GuaranteedToExecute) {
    auto *LI = dyn_cast<LoadInst>(&Inst);
    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
               << "failed to hoist load with loop-invariant address "
                  "because load is conditionally executed";
      });
  }

  return GuaranteedToExecute;
}
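// For instance (illustrative), in
//
//   for (...) { if (c) v = *p; }
//
// the load from p is neither safe to speculate (p may not be dereferenceable
// when c is false) nor guaranteed to execute, so it stays in the loop, and the
// missed-optimization remark above fires if p is loop invariant.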
namespace {
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
  const SmallSetVector<Value *, 8> &PointerMustAliases;
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  SmallVectorImpl<Instruction *> &LoopInsertPts;
  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
  PredIteratorCache &PredCache;
  AliasSetTracker &AST;
  MemorySSAUpdater *MSSAU;
  LoopInfo &LI;
  DebugLoc DL;
  int Alignment;
  bool UnorderedAtomic;
  AAMDNodes AATags;
  ICFLoopSafetyInfo &SafetyInfo;

  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (Instruction *I = dyn_cast<Instruction>(V))
      if (Loop *L = LI.getLoopFor(I->getParent()))
        if (!L->contains(BB)) {
          // We need to create an LCSSA PHI node for the incoming value and
          // store that.
          PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                        I->getName() + ".lcssa", &BB->front());
          for (BasicBlock *Pred : PredCache.get(BB))
            PN->addIncoming(I, Pred);
          return PN;
        }
    return V;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               const SmallSetVector<Value *, 8> &PMA,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<Instruction *> &LIP,
               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
               AliasSetTracker &ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
               DebugLoc dl, int alignment, bool UnorderedAtomic,
               const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
        LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
        PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
        Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
        SafetyInfo(SafetyInfo) {}

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();
    return PointerMustAliases.count(Ptr);
  }

  void doExtraRewritesBeforeFinalDeletion() override {
    // Insert stores in the loop exit blocks. Each exit block gets a store of
    // the live-out value that feeds it. Since we've already told the SSA
    // updater about the defs in the loop and the preheader definition, it is
    // all set and we can start using it.
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      Instruction *InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      if (UnorderedAtomic)
        NewSI->setOrdering(AtomicOrdering::Unordered);
      NewSI->setAlignment(MaybeAlign(Alignment));
      NewSI->setDebugLoc(DL);
      if (AATags)
        NewSI->setAAMetadata(AATags);

      if (MSSAU) {
        MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
        MemoryAccess *NewMemAcc;
        if (!MSSAInsertPoint) {
          NewMemAcc = MSSAU->createMemoryAccessInBB(
              NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
        } else {
          NewMemAcc =
              MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
        }
        MSSAInsertPts[i] = NewMemAcc;
        MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
        // FIXME: true for safety, false may still be correct.
      }
    }
  }

  void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
    // Update alias analysis.
    AST.copyValue(LI, V);
  }
  void instructionDeleted(Instruction *I) const override {
    SafetyInfo.removeInstruction(I);
    AST.deleteValue(I);
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);
  }
};

/// Return true iff we can prove that a caller of this function cannot inspect
/// the contents of the provided object in a well-defined program.
bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) {
  if (isa<AllocaInst>(Object))
    // Since the alloca goes out of scope, we know the caller can't retain a
    // reference to it and be well defined. Thus, we don't need to check for
    // capture.
    return true;

  // For all other objects we need to know that the caller can't possibly
  // have gotten a reference to the object. There are two components of
  // that:
  //   1) Object can't be escaped by this function. This is what
  //      PointerMayBeCaptured checks.
  //   2) Object can't have been captured at definition site. For this, we
  //      need to know the return value is noalias. At the moment, we use a
  //      weaker condition and handle only AllocLikeFunctions (which are
  //      known to be noalias). TODO
  return isAllocLikeFn(Object, TLI) &&
         !PointerMayBeCaptured(Object, true, true);
}
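// Two illustrative cases for the predicate above: a function-local alloca
// always qualifies, while a malloc-like allocation qualifies only if it is
// never captured:
//
//   %a = alloca i32                  ; known non-escaping
//   %m = call i8* @malloc(i64 4)     ; non-escaping iff never captured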
} // namespace

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop. We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
bool llvm::promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<Instruction *> &InsertPts,
    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
    Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
    ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         CurAST != nullptr && SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  Value *SomePtr = *PointerMustAliases.begin();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It is not safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp += 1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // p1) The memory may not be dereferenceable on entry to the loop. In this
  //     case, we can't insert the required load in the preheader.
  // p2) The memory model does not allow us to insert a store along any dynamic
  //     path which did not originally have one.
  //
  // If at least one store is guaranteed to execute, both properties are
  // satisfied, and promotion is legal.
  //
  // This, however, is not a necessary condition. Even if no store/load is
  // guaranteed to execute, we can still establish these properties.
  // We can establish (p1) by proving that hoisting the load into the preheader
  // is safe (i.e. proving dereferenceability on all paths through the loop).
  // We can use any access within the alias set to prove dereferenceability,
  // since they're all must alias.
  //
  // There are two ways to establish (p2):
  // a) Prove the location is thread-local. In this case the memory model
  //    requirement does not apply, and stores are safe to insert.
  // b) Prove a store dominates every exit block. In this case, if an exit
  //    block is reached, the original dynamic path would have taken us through
  //    the store, so inserting a store into the exit block is safe. Note that
  //    this is different from the store being guaranteed to execute. For
  //    instance, if an exception is thrown on the first iteration of the loop,
  //    the original store is never executed, but the exit blocks are not
  //    executed either.
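  // As an illustrative instance of (b): in
  //
  //    for () { *P = v; if (c) break; }
  //
  // the store dominates both the break exit and the normal exit, so on any
  // path that reaches an exit block the original program already wrote to *P,
  // and a store inserted into each exit block only repeats that write.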
  bool DereferenceableInPH = false;
  bool SafeToInsertStore = false;

  SmallVector<Instruction *, 64> LoopUses;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  unsigned Alignment = 1;
  // Keep track of which types of access we see.
  bool SawUnorderedAtomic = false;
  bool SawNotAtomic = false;
  AAMDNodes AATags;

  const DataLayout &MDL = Preheader->getModule()->getDataLayout();

  bool IsKnownThreadLocalObject = false;
  if (SafetyInfo->anyBlockMayThrow()) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge. We do
    // this by proving that the caller can't have a reference to the object
    // after return and thus can't possibly load from the object.
    Value *Object = GetUnderlyingObject(SomePtr, MDL);
    if (!isKnownNonEscaping(Object, TLI))
      return false;
    // Subtlety: Alloca's aren't visible to callers, but *are* potentially
    // visible to other threads if captured and used during their lifetimes.
    IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
  }

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and AA info.
  for (Value *ASIV : PointerMustAliases) {
    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return false;

    for (User *U : ASIV->users()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't promote
      // it.
      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        if (!Load->isUnordered())
          return false;

        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();

        unsigned InstAlignment = Load->getAlignment();
        if (!InstAlignment)
          InstAlignment = MDL.getABITypeAlignment(Load->getType());

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location. Proving it guaranteed
        // to execute does as well. Thus we can increase our guaranteed
        // alignment as well.
        if (!DereferenceableInPH || (InstAlignment > Alignment))
          if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo,
                                             ORE, Preheader->getTerminator())) {
            DereferenceableInPH = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (UI->getOperand(1) != ASIV)
          continue;
        if (!Store->isUnordered())
          return false;

        SawUnorderedAtomic |= Store->isAtomic();
        SawNotAtomic |= !Store->isAtomic();

        // If the store is guaranteed to execute, both properties are
        // satisfied. We may want to check if a store is guaranteed to execute
        // even if we already know that promotion is safe, since it may have
        // higher alignment than any other guaranteed store, in which case we
        // can raise the alignment on the promoted store.
        unsigned InstAlignment = Store->getAlignment();
        if (!InstAlignment)
          InstAlignment =
              MDL.getABITypeAlignment(Store->getValueOperand()->getType());

        if (!DereferenceableInPH || !SafeToInsertStore ||
            (InstAlignment > Alignment)) {
          if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
            DereferenceableInPH = true;
            SafeToInsertStore = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
        }

        // If a store dominates all exit blocks, it is safe to sink.
        // As explained above, if an exit block was executed, a dominating
        // store must have been executed at least once, so we are not
        // introducing stores on paths that did not have them.
        // Note that this only looks at explicit exit blocks. If we ever
        // start sinking stores into unwind edges (see above), this will break.
        if (!SafeToInsertStore)
          SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
            return DT->dominates(Store->getParent(), Exit);
          });

        // If the store is not guaranteed to execute, we may still get
        // deref info through it.
        if (!DereferenceableInPH) {
          DereferenceableInPH = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getValueOperand()->getType(),
              MaybeAlign(Store->getAlignment()), MDL,
              Preheader->getTerminator(), DT);
        }
      } else
        return false; // Not a load or store.

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        UI->getAAMetadata(AATags);
      } else if (AATags) {
        UI->getAAMetadata(AATags, /* Merge = */ true);
      }

      LoopUses.push_back(UI);
    }
  }

  // If we found both an unordered atomic instruction and a non-atomic memory
  // access, bail. We can't blindly promote non-atomic to atomic since we
  // might not be able to lower the result. We can't downgrade since that
  // would violate the memory model. Also, align 0 is an error for atomics.
  if (SawUnorderedAtomic && SawNotAtomic)
    return false;

  // If we're inserting an atomic load in the preheader, we must be able to
  // lower it. We're only guaranteed to be able to lower naturally aligned
  // atomics.
  auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
  if (SawUnorderedAtomic &&
      Alignment < MDL.getTypeStoreSize(SomePtrElemType))
    return false;

  // If we couldn't prove we can hoist the load, bail.
  if (!DereferenceableInPH)
    return false;

  // We know we can hoist the load, but don't have a guaranteed store.
  // Check whether the location is thread-local. If it is, then we can insert
  // stores along paths which originally didn't have them without violating the
  // memory model.
  if (!SafeToInsertStore) {
    if (IsKnownThreadLocalObject)
      SafeToInsertStore = true;
    else {
      Value *Object = GetUnderlyingObject(SomePtr, MDL);
      SafeToInsertStore =
          (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
          !PointerMayBeCaptured(Object, true, true);
    }
  }

  // If we've still failed to prove we can sink the store, give up.
  if (!SafeToInsertStore)
    return false;

  // Otherwise, this is safe to promote, let's do it!
  LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
                    << '\n');
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
                              LoopUses[0])
           << "Moving accesses to memory location out of the loop";
  });
  ++NumPromoted;

  // Grab a debug location for the inserted loads/stores; given that the
  // inserted loads/stores have little relation to the original loads/stores,
  // this code just arbitrarily picks a location from one, since any debug
  // location is better than none.
  DebugLoc DL = LoopUses[0]->getDebugLoc();

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode *, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        InsertPts, MSSAInsertPts, PIC, *CurAST, MSSAU, *LI, DL,
                        Alignment, SawUnorderedAtomic, AATags, *SafetyInfo);

  // Set up the preheader to have a definition of the value. It is the live-out
  // value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad = new LoadInst(
      SomePtr->getType()->getPointerElementType(), SomePtr,
      SomePtr->getName() + ".promoted", Preheader->getTerminator());
  if (SawUnorderedAtomic)
    PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
  PreheaderLoad->setAlignment(MaybeAlign(Alignment));
  PreheaderLoad->setDebugLoc(DL);
  if (AATags)
    PreheaderLoad->setAAMetadata(AATags);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  if (MSSAU) {
    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
    MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);

  return true;
}
/// Returns an owning pointer to an alias set which incorporates aliasing info
/// from L and all subloops of L.
/// FIXME: In the new pass manager, there is no helper function to handle loop
/// analysis such as cloneBasicBlockAnalysis, so the AST needs to be recomputed
/// from scratch for every loop. Hook up with the helper functions when
/// available in the new pass manager to avoid redundant computation.
std::unique_ptr<AliasSetTracker>
LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
                                                 AliasAnalysis *AA) {
  std::unique_ptr<AliasSetTracker> CurAST;
  SmallVector<Loop *, 4> RecomputeLoops;
  for (Loop *InnerL : L->getSubLoops()) {
    auto MapI = LoopToAliasSetMap.find(InnerL);
    // If the AST for this inner loop is missing, it may have been merged into
    // some other loop's AST and then that loop unrolled, and so we need to
    // recompute it.
    if (MapI == LoopToAliasSetMap.end()) {
      RecomputeLoops.push_back(InnerL);
      continue;
    }
    std::unique_ptr<AliasSetTracker> InnerAST = std::move(MapI->second);

    if (CurAST) {
      // What if InnerLoop was modified by other passes?
      // Once we've incorporated the inner loop's AST into ours, we don't need
      // the subloop's anymore.
      CurAST->add(*InnerAST);
    } else {
      CurAST = std::move(InnerAST);
    }
    LoopToAliasSetMap.erase(MapI);
  }
  if (!CurAST)
    CurAST = std::make_unique<AliasSetTracker>(*AA);

  // Add everything from the subloops that are no longer directly available.
  for (Loop *InnerL : RecomputeLoops)
    for (BasicBlock *BB : InnerL->blocks())
      CurAST->add(*BB);

  // And merge in this loop (without anything from inner loops).
  for (BasicBlock *BB : L->blocks())
    if (LI->getLoopFor(BB) == L)
      CurAST->add(*BB);

  return CurAST;
}
std::unique_ptr<AliasSetTracker>
LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA(
    Loop *L, AliasAnalysis *AA, MemorySSAUpdater *MSSAU) {
  auto *MSSA = MSSAU->getMemorySSA();
  auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L);
  CurAST->addAllInstructionsInLoopUsingMSSA();
  return CurAST;
}

/// Simple analysis hook. Clone alias set info.
///
void LegacyLICMPass::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
                                             Loop *L) {
  auto ASTIt = LICM.getLoopToAliasSetMap().find(L);
  if (ASTIt == LICM.getLoopToAliasSetMap().end())
    return;

  ASTIt->second->copyValue(From, To);
}

/// Simple analysis hook. Delete value V from the alias set.
///
void LegacyLICMPass::deleteAnalysisValue(Value *V, Loop *L) {
  auto ASTIt = LICM.getLoopToAliasSetMap().find(L);
  if (ASTIt == LICM.getLoopToAliasSetMap().end())
    return;

  ASTIt->second->deleteValue(V);
}

/// Simple analysis hook. Delete loop L from the alias set map.
///
void LegacyLICMPass::deleteAnalysisLoop(Loop *L) {
  if (!LICM.getLoopToAliasSetMap().count(L))
    return;

  LICM.getLoopToAliasSetMap().erase(L);
}
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AliasAnalysis *AA) {
  // First check to see if any of the basic blocks in CurLoop invalidate *V.
  bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();

  if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
    return isInvalidatedAccordingToAST;

  // Check with a diagnostic analysis if we can refine the information above.
  // This is to identify the limitations of using the AST.
  // The alias set mechanism used by LICM has a major weakness in that it
  // combines all things which may alias into a single set *before* asking
  // modref questions. As a result, a single readonly call within a loop will
  // collapse all loads and stores into a single alias set and report
  // invalidation if the loop contains any store. For example, readonly calls
  // with deopt states have this form and create a general alias set with all
  // loads and stores. In order to get any LICM in loops containing possible
  // deopt states we need a more precise invalidation that checks the mod/ref
  // info of each instruction within the loop and LI. This has a complexity of
  // O(N^2), so currently, it is used only as a diagnostic tool since the
  // default value of LICMN2Theshold is zero.

  // Don't look at nested loops.
  if (CurLoop->begin() != CurLoop->end())
    return true;

  int N = 0;
  for (BasicBlock *BB : CurLoop->getBlocks())
    for (Instruction &I : *BB) {
      if (N >= LICMN2Theshold) {
        LLVM_DEBUG(dbgs() << "Aliasing N2 threshold exhausted for "
                          << *(MemLoc.Ptr) << "\n");
        return true;
      }
      N++;
      auto Res = AA->getModRefInfo(&I, MemLoc);
      if (isModSet(Res)) {
        LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
                          << *(MemLoc.Ptr) << "\n");
        return true;
      }
    }
  LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
  return false;
}
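// Illustrative shape of the problem described above (hypothetical loop): a
// single readonly call such as
//
//    for () { v = *p; f(...); /* readonly, e.g. with a deopt state */ *q = v; }
//
// collapses the load of *p and the store to *q into one alias set, so the
// load is reported invalidated even when a per-instruction mod/ref walk
// would show that neither f nor the store to *q writes *p.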
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop,
                                             SinkAndHoistLICMFlags &Flags) {
  // For hoisting, use the walker to determine safety.
  if (!Flags.IsSink) {
    MemoryAccess *Source;
    // See declaration of SetLicmMssaOptCap for usage details.
    if (Flags.LicmMssaOptCounter >= Flags.LicmMssaOptCap)
      Source = MU->getDefiningAccess();
    else {
      Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
      Flags.LicmMssaOptCounter++;
    }
    return !MSSA->isLiveOnEntryDef(Source) &&
           CurLoop->contains(Source->getBlock());
  }

  // For sinking, we'd need to check all Defs below this use. The getClobbering
  // call will look on the backedge of the loop, but will check aliasing with
  // the instructions on the previous iteration.
  // For example:
  //   for (i ... )
  //     load a[i]   ; MemoryUse (liveOnEntry)
  //     store a[i]  ; 1 = MemoryDef(2), with 2 = MemoryPhi for the loop
  //     i++;
  // The load sees no clobbering inside the loop, as the backedge alias check
  // does phi translation, and will check aliasing against store a[i-1].
  // However sinking the load outside the loop, below the store, is incorrect.

  // For now, only sink if there are no Defs in the loop, and the existing ones
  // precede the use and are in the same block.
  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
  // needs PostDominatorTreeAnalysis.
  // FIXME: More precise: no Defs that alias this Use.
  if (Flags.NoOfMemAccTooLarge)
    return true;
  for (auto *BB : CurLoop->getBlocks())
    if (auto *Accesses = MSSA->getBlockDefs(BB))
      for (const auto &MA : *Accesses)
        if (const auto *MD = dyn_cast<MemoryDef>(&MA))
          if (MU->getBlock() != MD->getBlock() ||
              !MSSA->locallyDominates(MD, MU))
            return true;
  return false;
}
/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}