//===----------------- LoopRotationUtils.cpp -----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to convert a loop into a loop with bottom test.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/LoopRotationUtils.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

#define DEBUG_TYPE "loop-rotate"

STATISTIC(NumRotated, "Number of loops rotated");

namespace {
/// A simple loop rotation transformation.
class LoopRotate {
  const unsigned MaxHeaderSize;
  LoopInfo *LI;
  const TargetTransformInfo *TTI;
  AssumptionCache *AC;
  DominatorTree *DT;
  ScalarEvolution *SE;
  MemorySSAUpdater *MSSAU;
  const SimplifyQuery &SQ;
  bool RotationOnly;
  bool IsUtilMode;

public:
  LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
             const TargetTransformInfo *TTI, AssumptionCache *AC,
             DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
             const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode)
      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE),
        MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly),
        IsUtilMode(IsUtilMode) {}
  bool processLoop(Loop *L);

private:
  bool rotateLoop(Loop *L, bool SimplifiedLatch);
  bool simplifyLoopLatch(Loop *L);
};
} // end anonymous namespace

/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader. If there were uses of the values produced by
/// these instructions that were outside of the loop, we have to insert PHI
/// nodes to merge the two values. Do this now.
static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
                                            BasicBlock *OrigPreheader,
                                            ValueToValueMapTy &ValueMap,
                                SmallVectorImpl<PHINode*> *InsertedPHIs) {
  // Remove PHI node entries that are no longer live.
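  // (By this point OrigPreheader's terminator is a clone of OrigHeader's
  // branch and no longer targets OrigHeader, so the entries OrigPreheader
  // contributed to these PHIs are dead.)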
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary.
  SSAUpdater SSA(InsertedPHIs);
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = &*I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
                             UE = OrigHeaderVal->use_end();
         UI != UE;) {
      // Grab the use before incrementing the iterator.
      Use &U = *UI;

      // Increment the iterator before removing the use from the list.
      ++UI;

      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }

    // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
    // intrinsics.
    SmallVector<DbgValueInst *, 1> DbgValues;
    llvm::findDbgValues(DbgValues, OrigHeaderVal);
    for (auto &DbgValue : DbgValues) {
      // The original users in the OrigHeader are already using the original
      // definitions.
      BasicBlock *UserBB = DbgValue->getParent();
      if (UserBB == OrigHeader)
        continue;

      // Users in the OrigPreHeader need to use the value to which the
      // original definitions are mapped and anything else can be handled by
      // the SSAUpdater. To avoid adding PHINodes, check if the value is
      // available in UserBB; if not, substitute undef.
      Value *NewVal;
      if (UserBB == OrigPreheader)
        NewVal = OrigPreHeaderVal;
      else if (SSA.HasValueForBlock(UserBB))
        NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
      else
        NewVal = UndefValue::get(OrigHeaderVal->getType());
      DbgValue->setOperand(0,
                           MetadataAsValue::get(OrigHeaderVal->getContext(),
                                                ValueAsMetadata::get(NewVal)));
    }
  }
}

// Look for a phi which is only used outside the loop (via an LCSSA phi)
// in the exit from the header. This means that rotating the loop can
// remove the phi.
static bool shouldRotateLoopExitingLatch(Loop *L) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *HeaderExit = Header->getTerminator()->getSuccessor(0);
  if (L->contains(HeaderExit))
    HeaderExit = Header->getTerminator()->getSuccessor(1);

  for (auto &Phi : Header->phis()) {
    // Look for uses of this phi in the loop/via exits other than the header.
    if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) {
          return cast<Instruction>(U)->getParent() != HeaderExit;
        }))
      continue;
    return true;
  }

  return false;
}

/// Rotate loop L. Return true if the loop is rotated.
///
/// \param SimplifiedLatch is true if the latch was just folded into the final
/// loop exit. In this case we may want to rotate even though the new latch is
/// now an exiting branch. This rotation would have happened had the latch not
/// been simplified. However, if SimplifiedLatch is false, then we avoid
/// rotating loops in which the latch exits to avoid excessive or endless
/// rotation. LoopRotate should be repeatable and converge to a canonical
/// form. This property is satisfied because simplifying the loop latch can
/// only happen once across multiple invocations of the LoopRotate pass.
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
  // If the loop has only one block then there is not much to rotate.
  if (L->getBlocks().size() == 1)
    return false;

  BasicBlock *OrigHeader = L->getHeader();
  BasicBlock *OrigLatch = L->getLoopLatch();

  BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
  if (!BI || BI->isUnconditional())
    return false;

  // If the loop header is not one of the loop exiting blocks then
  // either this loop is already rotated or it is not
  // suitable for loop rotation transformations.
  if (!L->isLoopExiting(OrigHeader))
    return false;

  // If the loop does not have a single latch block (i.e. it has multiple
  // backedges), give up.
  if (!OrigLatch)
    return false;

  // Rotate if either the loop latch does *not* exit the loop, or if the loop
  // latch was just simplified. Or if we think it will be profitable.
  if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && IsUtilMode == false &&
      !shouldRotateLoopExitingLatch(L))
    return false;

  // Check size of original header and reject loop if it is very big or we can't
  // duplicate blocks inside it.
  {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, AC, EphValues);

    CodeMetrics Metrics;
    Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues);
    if (Metrics.notDuplicatable) {
      LLVM_DEBUG(
          dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
                 << " instructions: ";
          L->dump());
      return false;
    }
    if (Metrics.convergent) {
      LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
                           "instructions: ";
                 L->dump());
      return false;
    }
    if (Metrics.NumInsts > MaxHeaderSize)
      return false;
  }

  // Now, this loop is suitable for rotation.
  BasicBlock *OrigPreheader = L->getLoopPreheader();

  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
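  // (Edges leaving an indirectbr cannot be split, so loop-simplify may be
  // unable to create a preheader or dedicated exit blocks for such loops.)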
  if (!OrigPreheader || !L->hasDedicatedExits())
    return false;

  // Anything ScalarEvolution may know about this loop or the PHI nodes
  // in its header will soon be invalidated. We should also invalidate
  // all outer loops because insertion and deletion of blocks that happens
  // during the rotation may violate invariants related to backedge taken
  // infos in them.
  if (SE)
    SE->forgetTopmostLoop(L);

  LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Find the new loop header. NewHeader is the Header's one and only successor
  // that is inside the loop; the Header's other successor is outside the
  // loop. Otherwise the loop is not suitable for rotation.
  BasicBlock *Exit = BI->getSuccessor(0);
  BasicBlock *NewHeader = BI->getSuccessor(1);
  if (L->contains(Exit))
    std::swap(Exit, NewHeader);
  assert(NewHeader && "Unable to determine new loop header");
  assert(L->contains(NewHeader) && !L->contains(Exit) &&
         "Unable to determine loop header and exit blocks");

  // This code assumes that the new header has exactly one predecessor.
  // Remove any single-entry PHI nodes in it.
  assert(NewHeader->getSinglePredecessor() &&
         "New header doesn't have one pred!");
  FoldSingleEntryPHINodes(NewHeader);

  // Begin by walking OrigHeader and populating ValueMap with an entry for
  // each Instruction.
  BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
  ValueToValueMapTy ValueMap, ValueMapMSSA;

  // For PHI nodes, the value available in OldPreHeader is just the
  // incoming value from OldPreHeader.
  for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
    ValueMap[PN] = PN->getIncomingValueForBlock(OrigPreheader);

  // For the rest of the instructions, either hoist to the OrigPreheader if
  // possible or create a clone in the OldPreHeader if not.
  Instruction *LoopEntryBranch = OrigPreheader->getTerminator();

  // Record all debug intrinsics preceding LoopEntryBranch to avoid duplication.
  using DbgIntrinsicHash =
      std::pair<std::pair<Value *, DILocalVariable *>, DIExpression *>;
  auto makeHash = [](DbgVariableIntrinsic *D) -> DbgIntrinsicHash {
    return {{D->getVariableLocation(), D->getVariable()}, D->getExpression()};
  };
  SmallDenseSet<DbgIntrinsicHash, 8> DbgIntrinsics;
  for (auto I = std::next(OrigPreheader->rbegin()), E = OrigPreheader->rend();
       I != E; ++I) {
    if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&*I))
      DbgIntrinsics.insert(makeHash(DII));
    else
      break;
  }

  while (I != E) {
    Instruction *Inst = &*I++;

    // If the instruction's operands are invariant and it doesn't read or write
    // memory, then it is safe to hoist. Doing this doesn't change the order of
    // execution in the preheader, but does prevent the instruction from
    // executing in each iteration of the loop. This means it is safe to hoist
    // something that might trap, but isn't safe to hoist something that reads
    // memory (without proving that the loop doesn't write).
    if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
        !Inst->mayWriteToMemory() && !Inst->isTerminator() &&
        !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
      Inst->moveBefore(LoopEntryBranch);
      continue;
    }

    // Otherwise, create a duplicate of the instruction.
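    // (The original instruction stays behind in OrigHeader, which keeps
    // executing on the second and later iterations; the clone placed in
    // OrigPreheader covers the first iteration.)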
    Instruction *C = Inst->clone();

    // Eagerly remap the operands of the instruction.
    RemapInstruction(C, ValueMap,
                     RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

    // Avoid inserting the same intrinsic twice.
    if (auto *DII = dyn_cast<DbgVariableIntrinsic>(C))
      if (DbgIntrinsics.count(makeHash(DII))) {
        C->deleteValue();
        continue;
      }

    // With the operands remapped, see if the instruction constant folds or is
    // otherwise simplifiable. This commonly occurs because the entry from PHI
    // nodes allows icmps and other instructions to fold.
    Value *V = SimplifyInstruction(C, SQ);
    if (V && LI->replacementPreservesLCSSAForm(C, V)) {
      // If so, then delete the temporary instruction and stick the folded value
      // in the map.
      ValueMap[Inst] = V;
      if (!C->mayHaveSideEffects()) {
        C->deleteValue();
        C = nullptr;
      }
    } else {
      ValueMap[Inst] = C;
    }
    if (C) {
      // Otherwise, stick the new instruction into the new block!
      C->setName(Inst->getName());
      C->insertBefore(LoopEntryBranch);

      if (auto *II = dyn_cast<IntrinsicInst>(C))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);
      // MemorySSA cares whether the cloned instruction was inserted or not, and
      // not whether it can be remapped to a simplified value.
      ValueMapMSSA[Inst] = C;
    }
  }

  // Along with all the other instructions, we just cloned OrigHeader's
  // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
  // successors by duplicating their incoming values for OrigHeader.
  for (BasicBlock *SuccBB : successors(OrigHeader))
    for (BasicBlock::iterator BI = SuccBB->begin();
         PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
      PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);

  // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
  // OrigPreHeader's old terminator (the original branch into the loop), and
  // remove the corresponding incoming values from the PHI nodes in OrigHeader.
  LoopEntryBranch->eraseFromParent();

  // Update MemorySSA before the rewrite call below changes the 1:1
  // instruction:cloned_instruction_or_value mapping.
  if (MSSAU) {
    ValueMapMSSA[OrigHeader] = OrigPreheader;
    MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader,
                                        ValueMapMSSA);
  }

  SmallVector<PHINode*, 2> InsertedPHIs;
  // If there were any uses of instructions in the duplicated block outside the
  // loop, update them, inserting PHI nodes as required.
  RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap,
                                  &InsertedPHIs);

  // Attach dbg.value intrinsics to the new phis if that phi uses a value that
  // previously had debug metadata attached. This keeps the debug info
  // up-to-date in the loop body.
  if (!InsertedPHIs.empty())
    insertDebugValuesForPHIs(OrigHeader, InsertedPHIs);

  // NewHeader is now the header of the loop.
  L->moveToHeader(NewHeader);
  assert(L->getHeader() == NewHeader && "Latch block is our new header");

  // Inform DT about changes to the CFG.
  if (DT) {
    // The OrigPreheader branches to the NewHeader and Exit now. Also inform
    // the DT about the edge to the OrigHeader that got removed.
    SmallVector<DominatorTree::UpdateType, 3> Updates;
    Updates.push_back({DominatorTree::Insert, OrigPreheader, Exit});
    Updates.push_back({DominatorTree::Insert, OrigPreheader, NewHeader});
    Updates.push_back({DominatorTree::Delete, OrigPreheader, OrigHeader});
    DT->applyUpdates(Updates);

    if (MSSAU) {
      MSSAU->applyUpdates(Updates, *DT);
      if (VerifyMemorySSA)
        MSSAU->getMemorySSA()->verifyMemorySSA();
    }
  }

  // At this point, we've finished our major CFG changes. As part of cloning
  // the loop into the preheader we've simplified instructions and the
  // duplicated conditional branch may now be branching on a constant. If it is
  // branching on a constant and if that constant means that we enter the loop,
  // then we fold away the cond branch to an uncond branch. This simplifies the
  // loop in cases important for nested loops, and it also means we don't have
  // to split as many edges.
  BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
  assert(PHBI->isConditional() && "Should be clone of BI condbr!");
  if (!isa<ConstantInt>(PHBI->getCondition()) ||
      PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) !=
          NewHeader) {
    // The conditional branch can't be folded, handle the general case.
    // Split edges as necessary to preserve LoopSimplify form.

    // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
    // thus is not a preheader anymore.
    // Split the edge to form a real preheader.
    BasicBlock *NewPH = SplitCriticalEdge(
        OrigPreheader, NewHeader,
        CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
    NewPH->setName(NewHeader->getName() + ".lr.ph");

    // Preserve canonical loop form, which means that 'Exit' should have only
    // one predecessor. Note that Exit could be an exit block for multiple
    // nested loops, causing both of the edges to now be critical and need to
    // be split.
    SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit));
    bool SplitLatchEdge = false;
    for (BasicBlock *ExitPred : ExitPreds) {
      // We only need to split loop exit edges.
      Loop *PredLoop = LI->getLoopFor(ExitPred);
      if (!PredLoop || PredLoop->contains(Exit) ||
          ExitPred->getTerminator()->isIndirectTerminator())
        continue;
      SplitLatchEdge |= L->getLoopLatch() == ExitPred;
      BasicBlock *ExitSplit = SplitCriticalEdge(
          ExitPred, Exit,
          CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA());
      ExitSplit->moveBefore(Exit);
    }
    assert(SplitLatchEdge &&
           "Despite splitting all preds, failed to split latch exit?");
  } else {
    // We can fold the conditional branch in the preheader; this makes things
    // simpler. The first step is to remove the extra edge to the Exit block.
    Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
    BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
    NewBI->setDebugLoc(PHBI->getDebugLoc());
    PHBI->eraseFromParent();

    // With our CFG finalized, update DomTree if it is available.
    if (DT) DT->deleteEdge(OrigPreheader, Exit);

    // Update MSSA too, if available.
    if (MSSAU)
      MSSAU->removeEdge(OrigPreheader, Exit);
  }

  assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
  assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that the CFG and DomTree are in a consistent state again, try to merge
  // the OrigHeader block into OrigLatch. This will succeed if they are
  // connected by an unconditional branch. This is just a cleanup so the
  // emitted code isn't too gross in this common case.
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
  MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump());

  ++NumRotated;
  return true;
}

/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(&*I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
      LLVM_FALLTHROUGH;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd =
          !isa<Constant>(I->getOperand(0))
              ? I->getOperand(0)
              : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
      if (!IVOpnd)
        return false;

      // If increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}

/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || Latch->hasAddressTaken())
    return false;

  BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!Jmp || !Jmp->isUnconditional())
    return false;

  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
  if (!BI)
    return false;

  if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
    return false;

  LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
                    << LastExit->getName() << "\n");

  // Hoist the instructions from Latch into LastExit.
  Instruction *FirstLatchInst = &*(Latch->begin());
  LastExit->getInstList().splice(BI->getIterator(), Latch->getInstList(),
                                 Latch->begin(), Jmp->getIterator());

  // Update MemorySSA
  if (MSSAU)
    MSSAU->moveAllAfterMergeBlocks(Latch, LastExit, FirstLatchInst);

  unsigned FallThruPath = BI->getSuccessor(0) == Latch ? 0 : 1;
  BasicBlock *Header = Jmp->getSuccessor(0);
  assert(Header == L->getHeader() && "expected a backward branch");

  // Remove Latch from the CFG so that LastExit becomes the new Latch.
  BI->setSuccessor(FallThruPath, Header);
  Latch->replaceSuccessorsPhiUsesWith(LastExit);
  Jmp->eraseFromParent();

  // Nuke the Latch block.
  assert(Latch->empty() && "unable to evacuate Latch");
  LI->removeBlock(Latch);
  if (DT)
    DT->eraseNode(Latch);
  Latch->eraseFromParent();

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  return true;
}

/// Rotate \c L, and return true if any modification was made.
bool LoopRotate::processLoop(Loop *L) {
  // Save the loop metadata.
  MDNode *LoopMD = L->getLoopID();

  bool SimplifiedLatch = false;

  // Simplify the loop latch before attempting to rotate the header
  // upward. Rotation may not be needed if the loop tail can be folded into the
  // loop exit.
  if (!RotationOnly)
    SimplifiedLatch = simplifyLoopLatch(L);

  bool MadeChange = rotateLoop(L, SimplifiedLatch);
  assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
         "Loop latch should be exiting after loop-rotate.");

  // Restore the loop metadata.
  // NB! We presume LoopRotation DOESN'T ADD its own metadata.
  if ((MadeChange || SimplifiedLatch) && LoopMD)
    L->setLoopID(LoopMD);

  return MadeChange || SimplifiedLatch;
}

/// The utility to convert a loop into a loop with bottom test.
bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI,
                        AssumptionCache *AC, DominatorTree *DT,
                        ScalarEvolution *SE, MemorySSAUpdater *MSSAU,
                        const SimplifyQuery &SQ, bool RotationOnly = true,
                        unsigned Threshold = unsigned(-1),
                        bool IsUtilMode = true) {
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly,
                IsUtilMode);
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  return LR.processLoop(L);
}
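
// Illustrative sketch, not part of this utility's interface: a caller that
// already has LoopStandardAnalysisResults `AR` and the module's DataLayout
// `DL` in scope could drive the transform roughly as follows. The SimplifyQuery
// construction and the threshold value below are assumptions for the sketch,
// not requirements of the utility.
//
//   const SimplifyQuery SQ = getBestSimplifyQuery(AR, DL);
//   bool Changed =
//       LoopRotation(&L, &AR.LI, &AR.TTI, &AR.AC, &AR.DT, &AR.SE,
//                    /*MSSAU=*/nullptr, SQ, /*RotationOnly=*/false,
//                    /*Threshold=*/16, /*IsUtilMode=*/false);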