//===----------------- LoopRotationUtils.cpp -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to convert a loop into a loop with bottom test.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopRotationUtils.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

#define DEBUG_TYPE "loop-rotate"

STATISTIC(NumNotRotatedDueToHeaderSize,
          "Number of loops not rotated due to the header size");
STATISTIC(NumInstrsHoisted,
          "Number of instructions hoisted into loop preheader");
STATISTIC(NumInstrsDuplicated,
          "Number of instructions cloned into loop preheader");
STATISTIC(NumRotated, "Number of loops rotated");

// When set, rotateLoop() keeps rotating (see the do/while there) until the
// latch exit is no longer a deoptimizing one. Off by default.
static cl::opt<bool>
    MultiRotate("loop-rotate-multi", cl::init(false), cl::Hidden,
                cl::desc("Allow loop rotation multiple times in order to reach "
                         "a better latch exit"));

namespace {
/// A simple loop rotation transformation.
class LoopRotate {
  /// Upper bound (in CodeMetrics instruction cost) on the header that will be
  /// duplicated into the preheader; larger headers block rotation.
  const unsigned MaxHeaderSize;
  LoopInfo *LI;
  const TargetTransformInfo *TTI;
  AssumptionCache *AC;
  DominatorTree *DT;           // May be null; updated when present.
  ScalarEvolution *SE;         // May be null; invalidated when present.
  MemorySSAUpdater *MSSAU;     // May be null; kept in sync when present.
  const SimplifyQuery &SQ;     // Used to fold cloned header instructions.
  bool RotationOnly;           // If true, skip the latch-simplification step.
  bool IsUtilMode;             // Utility mode bypasses profitability checks.
  bool PrepareForLTO;          // Avoid duplicating inline candidates pre-LTO.

public:
  LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
             const TargetTransformInfo *TTI, AssumptionCache *AC,
             DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
             const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode,
             bool PrepareForLTO)
      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
        MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
        IsUtilMode(IsUtilMode), PrepareForLTO(PrepareForLTO) {}
  /// Run latch simplification (unless RotationOnly) followed by rotation on
  /// \p L. Returns true if the IR was changed in any way.
  bool processLoop(Loop *L);

private:
  bool rotateLoop(Loop *L, bool SimplifiedLatch);
  bool simplifyLoopLatch(Loop *L);
};
} // end anonymous namespace

/// Insert (K, V) pair into the ValueToValueMap, and verify the key did not
/// previously exist in the map, and the value was inserted.
static void InsertNewValueIntoMap(ValueToValueMapTy &VM, Value *K, Value *V) {
  bool Inserted = VM.insert({K, V}).second;
  assert(Inserted);
  (void)Inserted; // Silence unused-variable warnings in NDEBUG builds.
}
/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader. If there were uses of the values produced by
/// these instruction that were outside of the loop, we have to insert PHI nodes
/// to merge the two values. Do this now.
static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
                                            BasicBlock *OrigPreheader,
                                            ValueToValueMapTy &ValueMap,
                                            ScalarEvolution *SE,
                                SmallVectorImpl<PHINode*> *InsertedPHIs) {
  // Remove PHI node entries that are no longer live.
  // OrigPreheader no longer branches to OrigHeader, so its incoming values in
  // the header PHIs are stale.
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary. Any PHIs SSAUpdater creates are reported via InsertedPHIs
  // so the caller can attach debug values to them.
  SSAUpdater SSA(InsertedPHIs);
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = &*I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    // Force re-computation of OrigHeaderVal, as some users now need to use the
    // new PHI node.
    if (SE)
      SE->forgetValue(OrigHeaderVal);
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    // make_early_inc_range is required: RewriteUse/assignment below mutate the
    // use list while we iterate it.
    for (Use &U : llvm::make_early_inc_range(OrigHeaderVal->uses())) {
      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }

    // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
    // intrinsics.
    SmallVector<DbgValueInst *, 1> DbgValues;
    llvm::findDbgValues(DbgValues, OrigHeaderVal);
    for (auto &DbgValue : DbgValues) {
      // The original users in the OrigHeader are already using the original
      // definitions.
      BasicBlock *UserBB = DbgValue->getParent();
      if (UserBB == OrigHeader)
        continue;

      // Users in the OrigPreHeader need to use the value to which the
      // original definitions are mapped and anything else can be handled by
      // the SSAUpdater. To avoid adding PHINodes, check if the value is
      // available in UserBB, if not substitute undef.
      Value *NewVal;
      if (UserBB == OrigPreheader)
        NewVal = OrigPreHeaderVal;
      else if (SSA.HasValueForBlock(UserBB))
        NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
      else
        NewVal = UndefValue::get(OrigHeaderVal->getType());
      DbgValue->replaceVariableLocationOp(OrigHeaderVal, NewVal);
    }
  }
}

// Assuming both header and latch are exiting, look for a phi which is only
// used outside the loop (via a LCSSA phi) in the exit from the header.
// This means that rotating the loop can remove the phi.
static bool profitableToRotateLoopExitingLatch(Loop *L) {
  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator());
  assert(BI && BI->isConditional() && "need header with conditional exit");
  // Pick whichever successor of the header branch leaves the loop.
  BasicBlock *HeaderExit = BI->getSuccessor(0);
  if (L->contains(HeaderExit))
    HeaderExit = BI->getSuccessor(1);

  for (auto &Phi : Header->phis()) {
    // Look for uses of this phi in the loop/via exits other than the header.
    if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) {
          return cast<Instruction>(U)->getParent() != HeaderExit;
        }))
      continue;
    // Every user of this phi lives in the header's exit block, so rotation
    // can eliminate the phi entirely.
    return true;
  }
  return false;
}

// Check that latch exit is deoptimizing (which means - very unlikely to happen)
// and there is another exit from the loop which is non-deoptimizing.
// If we rotate latch to that exit our loop has a better chance of being fully
// canonical.
//
// It can give false positives in some rare cases.
static bool canRotateDeoptimizingLatchExit(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  assert(Latch && "need latch");
  BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator());
  // Need normal exiting latch.
  if (!BI || !BI->isConditional())
    return false;

  // Pick whichever successor of the latch branch leaves the loop.
  BasicBlock *Exit = BI->getSuccessor(1);
  if (L->contains(Exit))
    Exit = BI->getSuccessor(0);

  // Latch exit is non-deoptimizing, no need to rotate.
  if (!Exit->getPostdominatingDeoptimizeCall())
    return false;

  SmallVector<BasicBlock *, 4> Exits;
  L->getUniqueExitBlocks(Exits);
  if (!Exits.empty()) {
    // There is at least one non-deoptimizing exit.
    //
    // Note, that BasicBlock::getPostdominatingDeoptimizeCall is not exact,
    // as it can conservatively return false for deoptimizing exits with
    // complex enough control flow down to deoptimize call.
    //
    // That means here we can report success for a case where
    // all exits are deoptimizing but one of them has complex enough
    // control flow (e.g. with loops).
    //
    // That should be a very rare case and false positives for this function
    // have compile-time effect only.
    return any_of(Exits, [](const BasicBlock *BB) {
      return !BB->getPostdominatingDeoptimizeCall();
    });
  }
  return false;
}

/// Rotate loop LP. Return true if the loop is rotated.
///
/// \param SimplifiedLatch is true if the latch was just folded into the final
/// loop exit. In this case we may want to rotate even though the new latch is
/// now an exiting branch. This rotation would have happened had the latch not
/// been simplified. However, if SimplifiedLatch is false, then we avoid
/// rotating loops in which the latch exits to avoid excessive or endless
/// rotation. LoopRotate should be repeatable and converge to a canonical
/// form. This property is satisfied because simplifying the loop latch can only
/// happen once across multiple invocations of the LoopRotate pass.
///
/// If -loop-rotate-multi is enabled we can do multiple rotations in one go
/// so to reach a suitable (non-deoptimizing) exit.
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
  // If the loop has only one block then there is not much to rotate.
  if (L->getBlocks().size() == 1)
    return false;

  // Each iteration of this do/while performs one full rotation; it repeats
  // only under -loop-rotate-multi (see the loop condition at the bottom).
  bool Rotated = false;
  do {
    BasicBlock *OrigHeader = L->getHeader();
    BasicBlock *OrigLatch = L->getLoopLatch();

    BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
    if (!BI || BI->isUnconditional())
      return Rotated;

    // If the loop header is not one of the loop exiting blocks then
    // either this loop is already rotated or it is not
    // suitable for loop rotation transformations.
    if (!L->isLoopExiting(OrigHeader))
      return Rotated;

    // If the loop latch already contains a branch that leaves the loop then the
    // loop is already rotated.
    if (!OrigLatch)
      return Rotated;

    // Rotate if either the loop latch does *not* exit the loop, or if the loop
    // latch was just simplified. Or if we think it will be profitable.
    if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && IsUtilMode == false &&
        !profitableToRotateLoopExitingLatch(L) &&
        !canRotateDeoptimizingLatchExit(L))
      return Rotated;

    // Check size of original header and reject loop if it is very big or we can't
    // duplicate blocks inside it.
    {
      SmallPtrSet<const Value *, 32> EphValues;
      // Ephemeral values (only feeding llvm.assume) don't count toward size.
      CodeMetrics::collectEphemeralValues(L, AC, EphValues);

      CodeMetrics Metrics;
      Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues, PrepareForLTO);
      if (Metrics.notDuplicatable) {
        LLVM_DEBUG(
            dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
                   << " instructions: ";
            L->dump());
        return Rotated;
      }
      if (Metrics.convergent) {
        LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
                             "instructions: ";
                   L->dump());
        return Rotated;
      }
      if (!Metrics.NumInsts.isValid()) {
        LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains instructions"
                             " with invalid cost: ";
                   L->dump());
        return Rotated;
      }
      if (Metrics.NumInsts > MaxHeaderSize) {
        LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains "
                          << Metrics.NumInsts
                          << " instructions, which is more than the threshold ("
                          << MaxHeaderSize << " instructions): ";
                   L->dump());
        ++NumNotRotatedDueToHeaderSize;
        return Rotated;
      }

      // When preparing for LTO, avoid rotating loops with calls that could be
      // inlined during the LTO stage.
      if (PrepareForLTO && Metrics.NumInlineCandidates > 0)
        return Rotated;
    }

    // Now, this loop is suitable for rotation.
    BasicBlock *OrigPreheader = L->getLoopPreheader();

    // If the loop could not be converted to canonical form, it must have an
    // indirectbr in it, just give up.
    if (!OrigPreheader || !L->hasDedicatedExits())
      return Rotated;

    // Anything ScalarEvolution may know about this loop or the PHI nodes
    // in its header will soon be invalidated. We should also invalidate
    // all outer loops because insertion and deletion of blocks that happens
    // during the rotation may violate invariants related to backedge taken
    // infos in them.
    if (SE) {
      SE->forgetTopmostLoop(L);
      // We may hoist some instructions out of loop. In case if they were cached
      // as "loop variant" or "loop computable", these caches must be dropped.
      // We also may fold basic blocks, so cached block dispositions also need
      // to be dropped.
      SE->forgetBlockAndLoopDispositions();
    }

    LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    // Find new Loop header. NewHeader is a Header's one and only successor
    // that is inside loop. Header's other successor is outside the
    // loop. Otherwise loop is not suitable for rotation.
    BasicBlock *Exit = BI->getSuccessor(0);
    BasicBlock *NewHeader = BI->getSuccessor(1);
    if (L->contains(Exit))
      std::swap(Exit, NewHeader);
    assert(NewHeader && "Unable to determine new loop header");
    assert(L->contains(NewHeader) && !L->contains(Exit) &&
           "Unable to determine loop header and exit blocks");

    // This code assumes that the new header has exactly one predecessor.
    // Remove any single-entry PHI nodes in it.
    assert(NewHeader->getSinglePredecessor() &&
           "New header doesn't have one pred!");
    FoldSingleEntryPHINodes(NewHeader);

    // Begin by walking OrigHeader and populating ValueMap with an entry for
    // each Instruction.
    BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
    ValueToValueMapTy ValueMap, ValueMapMSSA;

    // For PHI nodes, the value available in OldPreHeader is just the
    // incoming value from OldPreHeader.
    for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
      InsertNewValueIntoMap(ValueMap, PN,
                            PN->getIncomingValueForBlock(OrigPreheader));

    // For the rest of the instructions, either hoist to the OrigPreheader if
    // possible or create a clone in the OldPreHeader if not.
    Instruction *LoopEntryBranch = OrigPreheader->getTerminator();

    // Record all debug intrinsics preceding LoopEntryBranch to avoid
    // duplication.
    using DbgIntrinsicHash =
        std::pair<std::pair<hash_code, DILocalVariable *>, DIExpression *>;
    auto makeHash = [](DbgVariableIntrinsic *D) -> DbgIntrinsicHash {
      auto VarLocOps = D->location_ops();
      return {{hash_combine_range(VarLocOps.begin(), VarLocOps.end()),
               D->getVariable()},
              D->getExpression()};
    };
    SmallDenseSet<DbgIntrinsicHash, 8> DbgIntrinsics;
    // Walk backwards from the terminator over the trailing run of debug
    // intrinsics already sitting in the preheader.
    for (Instruction &I : llvm::drop_begin(llvm::reverse(*OrigPreheader))) {
      if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
        DbgIntrinsics.insert(makeHash(DII));
      else
        break;
    }

    // Remember the local noalias scope declarations in the header. After the
    // rotation, they must be duplicated and the scope must be cloned. This
    // avoids unwanted interaction across iterations.
    SmallVector<NoAliasScopeDeclInst *, 6> NoAliasDeclInstructions;
    for (Instruction &I : *OrigHeader)
      if (auto *Decl = dyn_cast<NoAliasScopeDeclInst>(&I))
        NoAliasDeclInstructions.push_back(Decl);

    // I now points at the first non-PHI instruction of OrigHeader.
    while (I != E) {
      Instruction *Inst = &*I++;

      // If the instruction's operands are invariant and it doesn't read or write
      // memory, then it is safe to hoist. Doing this doesn't change the order of
      // execution in the preheader, but does prevent the instruction from
      // executing in each iteration of the loop. This means it is safe to hoist
      // something that might trap, but isn't safe to hoist something that reads
      // memory (without proving that the loop doesn't write).
      if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
          !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
          !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
        Inst->moveBefore(LoopEntryBranch);
        ++NumInstrsHoisted;
        continue;
      }

      // Otherwise, create a duplicate of the instruction.
      Instruction *C = Inst->clone();
      C->insertBefore(LoopEntryBranch);

      ++NumInstrsDuplicated;

      // Eagerly remap the operands of the instruction.
      RemapInstruction(C, ValueMap,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

      // Avoid inserting the same intrinsic twice.
      if (auto *DII = dyn_cast<DbgVariableIntrinsic>(C))
        if (DbgIntrinsics.count(makeHash(DII))) {
          C->eraseFromParent();
          continue;
        }

      // With the operands remapped, see if the instruction constant folds or is
      // otherwise simplifyable. This commonly occurs because the entry from PHI
      // nodes allows icmps and other instructions to fold.
      Value *V = simplifyInstruction(C, SQ);
      if (V && LI->replacementPreservesLCSSAForm(C, V)) {
        // If so, then delete the temporary instruction and stick the folded value
        // in the map.
        InsertNewValueIntoMap(ValueMap, Inst, V);
        if (!C->mayHaveSideEffects()) {
          C->eraseFromParent();
          C = nullptr;
        }
      } else {
        InsertNewValueIntoMap(ValueMap, Inst, C);
      }
      if (C) {
        // Otherwise, stick the new instruction into the new block!
        C->setName(Inst->getName());

        if (auto *II = dyn_cast<AssumeInst>(C))
          AC->registerAssumption(II);
        // MemorySSA cares whether the cloned instruction was inserted or not, and
        // not whether it can be remapped to a simplified value.
        if (MSSAU)
          InsertNewValueIntoMap(ValueMapMSSA, Inst, C);
      }
    }

    if (!NoAliasDeclInstructions.empty()) {
      // There are noalias scope declarations:
      // (general):
      // Original:    OrigPre              { OrigHeader NewHeader ... Latch }
      // after:      (OrigPre+OrigHeader') { NewHeader ... Latch OrigHeader }
      //
      // with D: llvm.experimental.noalias.scope.decl,
      //      U: !noalias or !alias.scope depending on D
      //       ... { D U1 U2 }   can transform into:
      // (0) : ... { D U1 U2 }        // no relevant rotation for this part
      // (1) : ... D' { U1 U2 D }     // D is part of OrigHeader
      // (2) : ... D' U1' { U2 D U1 } // D, U1 are part of OrigHeader
      //
      // We now want to transform:
      // (1) -> : ... D' { D U1 U2 D'' }
      // (2) -> : ... D' U1' { D U2 D'' U1'' }
      // D: original llvm.experimental.noalias.scope.decl
      // D', U1': duplicate with replaced scopes
      // D'', U1'': different duplicate with replaced scopes
      // This ensures a safe fallback to 'may_alias' introduced by the rotate,
      // as U1'' and U1' scopes will not be compatible wrt to the local restrict

      // Clone the llvm.experimental.noalias.decl again for the NewHeader.
      Instruction *NewHeaderInsertionPoint = &(*NewHeader->getFirstNonPHI());
      for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions) {
        LLVM_DEBUG(dbgs() << "  Cloning llvm.experimental.noalias.scope.decl:"
                          << *NAD << "\n");
        Instruction *NewNAD = NAD->clone();
        NewNAD->insertBefore(NewHeaderInsertionPoint);
      }

      // Scopes must now be duplicated, once for OrigHeader and once for
      // OrigPreHeader'.
      {
        auto &Context = NewHeader->getContext();

        SmallVector<MDNode *, 8> NoAliasDeclScopes;
        for (NoAliasScopeDeclInst *NAD : NoAliasDeclInstructions)
          NoAliasDeclScopes.push_back(NAD->getScopeList());

        LLVM_DEBUG(dbgs() << "  Updating OrigHeader scopes\n");
        cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, {OrigHeader}, Context,
                                   "h.rot");
        LLVM_DEBUG(OrigHeader->dump());

        // Keep the compile time impact low by only adapting the inserted block
        // of instructions in the OrigPreHeader. This might result in slightly
        // more aliasing between these instructions and those that were already
        // present, but it will be much faster when the original PreHeader is
        // large.
        LLVM_DEBUG(dbgs() << "  Updating part of OrigPreheader scopes\n");
        auto *FirstDecl =
            cast<Instruction>(ValueMap[*NoAliasDeclInstructions.begin()]);
        auto *LastInst = &OrigPreheader->back();
        cloneAndAdaptNoAliasScopes(NoAliasDeclScopes, FirstDecl, LastInst,
                                   Context, "pre.rot");
        LLVM_DEBUG(OrigPreheader->dump());

        LLVM_DEBUG(dbgs() << "  Updated NewHeader:\n");
        LLVM_DEBUG(NewHeader->dump());
      }
    }

    // Along with all the other instructions, we just cloned OrigHeader's
    // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
    // successors by duplicating their incoming values for OrigHeader.
    for (BasicBlock *SuccBB : successors(OrigHeader))
      for (BasicBlock::iterator BI = SuccBB->begin();
           PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
        PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);

    // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
    // OrigPreHeader's old terminator (the original branch into the loop), and
    // remove the corresponding incoming values from the PHI nodes in OrigHeader.
    LoopEntryBranch->eraseFromParent();

    // Update MemorySSA before the rewrite call below changes the 1:1
    // instruction:cloned_instruction_or_value mapping.
    if (MSSAU) {
      InsertNewValueIntoMap(ValueMapMSSA, OrigHeader, OrigPreheader);
      MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader,
                                          ValueMapMSSA);
    }

    SmallVector<PHINode*, 2> InsertedPHIs;
    // If there were any uses of instructions in the duplicated block outside the
    // loop, update them, inserting PHI nodes as required
    RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap, SE,
                                    &InsertedPHIs);

    // Attach dbg.value intrinsics to the new phis if that phi uses a value that
    // previously had debug metadata attached. This keeps the debug info
    // up-to-date in the loop body.
    if (!InsertedPHIs.empty())
      insertDebugValuesForPHIs(OrigHeader, InsertedPHIs);

    // NewHeader is now the header of the loop.
    L->moveToHeader(NewHeader);
    assert(L->getHeader() == NewHeader && "Latch block is our new header");

    // Inform DT about changes to the CFG.
    if (DT) {
      // The OrigPreheader branches to the NewHeader and Exit now. Then, inform
      // the DT about the removed edge to the OrigHeader (that got removed).
      SmallVector<DominatorTree::UpdateType, 3> Updates;
      Updates.push_back({DominatorTree::Insert, OrigPreheader, Exit});
      Updates.push_back({DominatorTree::Insert, OrigPreheader, NewHeader});
      Updates.push_back({DominatorTree::Delete, OrigPreheader, OrigHeader});

      if (MSSAU) {
        // Let the MemorySSAUpdater drive the DT update so both stay in sync.
        MSSAU->applyUpdates(Updates, *DT, /*UpdateDT=*/true);
        if (VerifyMemorySSA)
          MSSAU->getMemorySSA()->verifyMemorySSA();
      } else {
        DT->applyUpdates(Updates);
      }
    }

    // At this point, we've finished our major CFG changes. As part of cloning
    // the loop into the preheader we've simplified instructions and the
    // duplicated conditional branch may now be branching on a constant. If it is
    // branching on a constant and if that constant means that we enter the loop,
    // then we fold away the cond branch to an uncond branch. This simplifies the
    // loop in cases important for nested loops, and it also means we don't have
    // to split as many edges.
    BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
    assert(PHBI->isConditional() && "Should be clone of BI condbr!");
    if (!isa<ConstantInt>(PHBI->getCondition()) ||
        PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) !=
            NewHeader) {
      // The conditional branch can't be folded, handle the general case.
      // Split edges as necessary to preserve LoopSimplify form.

      // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
      // thus is not a preheader anymore.
      // Split the edge to form a real preheader.
      BasicBlock *NewPH = SplitCriticalEdge(
          OrigPreheader, NewHeader,
          CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
      NewPH->setName(NewHeader->getName() + ".lr.ph");

      // Preserve canonical loop form, which means that 'Exit' should have only
      // one predecessor. Note that Exit could be an exit block for multiple
      // nested loops, causing both of the edges to now be critical and need to
      // be split.
      SmallVector<BasicBlock *, 4> ExitPreds(predecessors(Exit));
      bool SplitLatchEdge = false;
      for (BasicBlock *ExitPred : ExitPreds) {
        // We only need to split loop exit edges.
        Loop *PredLoop = LI->getLoopFor(ExitPred);
        if (!PredLoop || PredLoop->contains(Exit) ||
            isa<IndirectBrInst>(ExitPred->getTerminator()))
          continue;
        SplitLatchEdge |= L->getLoopLatch() == ExitPred;
        BasicBlock *ExitSplit = SplitCriticalEdge(
            ExitPred, Exit,
            CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
        ExitSplit->moveBefore(Exit);
      }
      assert(SplitLatchEdge &&
             "Despite splitting all preds, failed to split latch exit?");
      (void)SplitLatchEdge;
    } else {
      // We can fold the conditional branch in the preheader, this makes things
      // simpler. The first step is to remove the extra edge to the Exit block.
      Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
      BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
      NewBI->setDebugLoc(PHBI->getDebugLoc());
      PHBI->eraseFromParent();

      // With our CFG finalized, update DomTree if it is available.
      if (DT) DT->deleteEdge(OrigPreheader, Exit);

      // Update MSSA too, if available.
      if (MSSAU)
        MSSAU->removeEdge(OrigPreheader, Exit);
    }

    assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
    assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");

    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    // Now that the CFG and DomTree are in a consistent state again, try to merge
    // the OrigHeader block into OrigLatch. This will succeed if they are
    // connected by an unconditional branch. This is just a cleanup so the
    // emitted code isn't too gross in this common case.
    DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
    BasicBlock *PredBB = OrigHeader->getUniquePredecessor();
    bool DidMerge = MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU);
    if (DidMerge)
      RemoveRedundantDbgInstrs(PredBB);

    if (MSSAU && VerifyMemorySSA)
      MSSAU->getMemorySSA()->verifyMemorySSA();

    LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());

    ++NumRotated;

    Rotated = true;
    SimplifiedLatch = false;

    // Check that new latch is a deoptimizing exit and then repeat rotation if possible.
    // Deoptimizing latch exit is not a generally typical case, so we just loop over.
    // TODO: if it becomes a performance bottleneck extend rotation algorithm
    // to handle multiple rotations in one go.
  } while (MultiRotate && canRotateDeoptimizingLatchExit(L));


  return true;
}

/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(&*I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
      [[fallthrough]];
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      // Pick the first non-constant operand as the candidate IV; if both
      // operands are constants there is nothing to treat as the IV.
      Value *IVOpnd =
          !isa<Constant>(I->getOperand(0))
              ? I->getOperand(0)
              : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
      if (!IVOpnd)
        return false;

      // If increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      // Only a single arithmetic "increment" is considered cheap enough.
      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}

/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  // Address-taken blocks can be targets of indirect control flow; leave them.
  if (!Latch || Latch->hasAddressTaken())
    return false;

  // The latch must end in an unconditional branch (back to the header).
  BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!Jmp || !Jmp->isUnconditional())
    return false;

  // The latch's sole predecessor must be the exiting block we fold into.
  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
  if (!BI)
    return false;

  // Everything in the latch (up to, not including, its terminator) must be
  // cheap and safe to speculate, since it will now run before the exit test.
  if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
    return false;

  LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
                    << LastExit->getName() << "\n");

  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  MergeBlockIntoPredecessor(Latch, &DTU, LI, MSSAU, nullptr,
                            /*PredecessorWithTwoSuccessors=*/true);

  if (SE) {
    // Merging blocks may remove blocks referenced in the block disposition
    // cache. Clear the cache.
    SE->forgetBlockAndLoopDispositions();
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  return true;
}

/// Rotate \c L, and return true if any modification was made.
bool LoopRotate::processLoop(Loop *L) {
  // Save the loop metadata.
  MDNode *LoopMD = L->getLoopID();

  bool SimplifiedLatch = false;

  // Simplify the loop latch before attempting to rotate the header
  // upward. Rotation may not be needed if the loop tail can be folded into the
  // loop exit.
  if (!RotationOnly)
    SimplifiedLatch = simplifyLoopLatch(L);

  bool MadeChange = rotateLoop(L, SimplifiedLatch);
  assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
         "Loop latch should be exiting after loop-rotate.");

  // Restore the loop metadata.
  // NB! We presume LoopRotation DOESN'T ADD its own metadata.
  // Re-attach only when the IR changed, since setLoopID mutates the latch's
  // terminator.
  if ((MadeChange || SimplifiedLatch) && LoopMD)
    L->setLoopID(LoopMD);

  return MadeChange || SimplifiedLatch;
}


/// The utility to convert a loop into a loop with bottom test.
/// Note: the default arguments here are legal because the declaration in
/// LoopRotationUtils.h supplies the default for the trailing PrepareForLTO
/// parameter; these defaults only apply within this translation unit.
bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
                        AssumptionCache *AC, DominatorTree *DT,
                        ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
                        const SimplifyQuery &SQ, bool RotationOnly = true,
                        unsigned Threshold = unsigned(-1),
                        bool IsUtilMode = true, bool PrepareForLTO) {
  LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
                IsUtilMode, PrepareForLTO);
  return LR.processLoop(L);
}