1 //===-- LoopUtils.cpp - Loop Utility functions -------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file defines common loop utility functions. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/Transforms/Utils/LoopUtils.h" 14 #include "llvm/ADT/DenseSet.h" 15 #include "llvm/ADT/Optional.h" 16 #include "llvm/ADT/PriorityWorklist.h" 17 #include "llvm/ADT/ScopeExit.h" 18 #include "llvm/ADT/SetVector.h" 19 #include "llvm/ADT/SmallPtrSet.h" 20 #include "llvm/ADT/SmallVector.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/BasicAliasAnalysis.h" 23 #include "llvm/Analysis/DomTreeUpdater.h" 24 #include "llvm/Analysis/GlobalsModRef.h" 25 #include "llvm/Analysis/InstSimplifyFolder.h" 26 #include "llvm/Analysis/InstructionSimplify.h" 27 #include "llvm/Analysis/LoopAccessAnalysis.h" 28 #include "llvm/Analysis/LoopInfo.h" 29 #include "llvm/Analysis/LoopPass.h" 30 #include "llvm/Analysis/MemorySSA.h" 31 #include "llvm/Analysis/MemorySSAUpdater.h" 32 #include "llvm/Analysis/MustExecute.h" 33 #include "llvm/Analysis/ScalarEvolution.h" 34 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" 35 #include "llvm/Analysis/ScalarEvolutionExpressions.h" 36 #include "llvm/Analysis/TargetTransformInfo.h" 37 #include "llvm/Analysis/ValueTracking.h" 38 #include "llvm/IR/DIBuilder.h" 39 #include "llvm/IR/Dominators.h" 40 #include "llvm/IR/Instructions.h" 41 #include "llvm/IR/IntrinsicInst.h" 42 #include "llvm/IR/MDBuilder.h" 43 #include "llvm/IR/Module.h" 44 #include "llvm/IR/Operator.h" 45 #include "llvm/IR/PatternMatch.h" 46 #include "llvm/IR/ValueHandle.h" 47 #include "llvm/InitializePasses.h" 48 #include "llvm/Pass.h" 49 #include "llvm/Support/Debug.h" 50 #include "llvm/Support/KnownBits.h" 51 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 52 #include "llvm/Transforms/Utils/Local.h" 53 #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" 54 55 using namespace llvm; 56 using namespace llvm::PatternMatch; 57 58 #define DEBUG_TYPE "loop-utils" 59 60 static const char *LLVMLoopDisableNonforced = "llvm.loop.disable_nonforced"; 61 static const char *LLVMLoopDisableLICM = "llvm.licm.disable"; 62 63 bool llvm::formDedicatedExitBlocks(Loop *L, DominatorTree *DT, LoopInfo *LI, 64 MemorySSAUpdater *MSSAU, 65 bool PreserveLCSSA) { 66 bool Changed = false; 67 68 // We re-use a vector for the in-loop predecesosrs. 69 SmallVector<BasicBlock *, 4> InLoopPredecessors; 70 71 auto RewriteExit = [&](BasicBlock *BB) { 72 assert(InLoopPredecessors.empty() && 73 "Must start with an empty predecessors list!"); 74 auto Cleanup = make_scope_exit([&] { InLoopPredecessors.clear(); }); 75 76 // See if there are any non-loop predecessors of this exit block and 77 // keep track of the in-loop predecessors. 78 bool IsDedicatedExit = true; 79 for (auto *PredBB : predecessors(BB)) 80 if (L->contains(PredBB)) { 81 if (isa<IndirectBrInst>(PredBB->getTerminator())) 82 // We cannot rewrite exiting edges from an indirectbr. 83 return false; 84 if (isa<CallBrInst>(PredBB->getTerminator())) 85 // We cannot rewrite exiting edges from a callbr. 
86 return false; 87 88 InLoopPredecessors.push_back(PredBB); 89 } else { 90 IsDedicatedExit = false; 91 } 92 93 assert(!InLoopPredecessors.empty() && "Must have *some* loop predecessor!"); 94 95 // Nothing to do if this is already a dedicated exit. 96 if (IsDedicatedExit) 97 return false; 98 99 auto *NewExitBB = SplitBlockPredecessors( 100 BB, InLoopPredecessors, ".loopexit", DT, LI, MSSAU, PreserveLCSSA); 101 102 if (!NewExitBB) 103 LLVM_DEBUG( 104 dbgs() << "WARNING: Can't create a dedicated exit block for loop: " 105 << *L << "\n"); 106 else 107 LLVM_DEBUG(dbgs() << "LoopSimplify: Creating dedicated exit block " 108 << NewExitBB->getName() << "\n"); 109 return true; 110 }; 111 112 // Walk the exit blocks directly rather than building up a data structure for 113 // them, but only visit each one once. 114 SmallPtrSet<BasicBlock *, 4> Visited; 115 for (auto *BB : L->blocks()) 116 for (auto *SuccBB : successors(BB)) { 117 // We're looking for exit blocks so skip in-loop successors. 118 if (L->contains(SuccBB)) 119 continue; 120 121 // Visit each exit block exactly once. 122 if (!Visited.insert(SuccBB).second) 123 continue; 124 125 Changed |= RewriteExit(SuccBB); 126 } 127 128 return Changed; 129 } 130 131 /// Returns the instructions that use values defined in the loop. 132 SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) { 133 SmallVector<Instruction *, 8> UsedOutside; 134 135 for (auto *Block : L->getBlocks()) 136 // FIXME: I believe that this could use copy_if if the Inst reference could 137 // be adapted into a pointer. 138 for (auto &Inst : *Block) { 139 auto Users = Inst.users(); 140 if (any_of(Users, [&](User *U) { 141 auto *Use = cast<Instruction>(U); 142 return !L->contains(Use->getParent()); 143 })) 144 UsedOutside.push_back(&Inst); 145 } 146 147 return UsedOutside; 148 } 149 150 void llvm::getLoopAnalysisUsage(AnalysisUsage &AU) { 151 // By definition, all loop passes need the LoopInfo analysis and the 152 // Dominator tree it depends on. Because they all participate in the loop 153 // pass manager, they must also preserve these. 154 AU.addRequired<DominatorTreeWrapperPass>(); 155 AU.addPreserved<DominatorTreeWrapperPass>(); 156 AU.addRequired<LoopInfoWrapperPass>(); 157 AU.addPreserved<LoopInfoWrapperPass>(); 158 159 // We must also preserve LoopSimplify and LCSSA. We locally access their IDs 160 // here because users shouldn't directly get them from this header. 161 extern char &LoopSimplifyID; 162 extern char &LCSSAID; 163 AU.addRequiredID(LoopSimplifyID); 164 AU.addPreservedID(LoopSimplifyID); 165 AU.addRequiredID(LCSSAID); 166 AU.addPreservedID(LCSSAID); 167 // This is used in the LPPassManager to perform LCSSA verification on passes 168 // which preserve lcssa form 169 AU.addRequired<LCSSAVerificationPass>(); 170 AU.addPreserved<LCSSAVerificationPass>(); 171 172 // Loop passes are designed to run inside of a loop pass manager which means 173 // that any function analyses they require must be required by the first loop 174 // pass in the manager (so that it is computed before the loop pass manager 175 // runs) and preserved by all loop pasess in the manager. To make this 176 // reasonably robust, the set needed for most loop passes is maintained here. 177 // If your loop pass requires an analysis not listed here, you will need to 178 // carefully audit the loop pass manager nesting structure that results. 
179 AU.addRequired<AAResultsWrapperPass>(); 180 AU.addPreserved<AAResultsWrapperPass>(); 181 AU.addPreserved<BasicAAWrapperPass>(); 182 AU.addPreserved<GlobalsAAWrapperPass>(); 183 AU.addPreserved<SCEVAAWrapperPass>(); 184 AU.addRequired<ScalarEvolutionWrapperPass>(); 185 AU.addPreserved<ScalarEvolutionWrapperPass>(); 186 // FIXME: When all loop passes preserve MemorySSA, it can be required and 187 // preserved here instead of the individual handling in each pass. 188 } 189 190 /// Manually defined generic "LoopPass" dependency initialization. This is used 191 /// to initialize the exact set of passes from above in \c 192 /// getLoopAnalysisUsage. It can be used within a loop pass's initialization 193 /// with: 194 /// 195 /// INITIALIZE_PASS_DEPENDENCY(LoopPass) 196 /// 197 /// As-if "LoopPass" were a pass. 198 void llvm::initializeLoopPassPass(PassRegistry &Registry) { 199 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 200 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass) 201 INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 202 INITIALIZE_PASS_DEPENDENCY(LCSSAWrapperPass) 203 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 204 INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass) 205 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass) 206 INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass) 207 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass) 208 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 209 } 210 211 /// Create MDNode for input string. 212 static MDNode *createStringMetadata(Loop *TheLoop, StringRef Name, unsigned V) { 213 LLVMContext &Context = TheLoop->getHeader()->getContext(); 214 Metadata *MDs[] = { 215 MDString::get(Context, Name), 216 ConstantAsMetadata::get(ConstantInt::get(Type::getInt32Ty(Context), V))}; 217 return MDNode::get(Context, MDs); 218 } 219 220 /// Set input string into loop metadata by keeping other values intact. 221 /// If the string is already in loop metadata update value if it is 222 /// different. 223 void llvm::addStringMetadataToLoop(Loop *TheLoop, const char *StringMD, 224 unsigned V) { 225 SmallVector<Metadata *, 4> MDs(1); 226 // If the loop already has metadata, retain it. 227 MDNode *LoopID = TheLoop->getLoopID(); 228 if (LoopID) { 229 for (unsigned i = 1, ie = LoopID->getNumOperands(); i < ie; ++i) { 230 MDNode *Node = cast<MDNode>(LoopID->getOperand(i)); 231 // If it is of form key = value, try to parse it. 232 if (Node->getNumOperands() == 2) { 233 MDString *S = dyn_cast<MDString>(Node->getOperand(0)); 234 if (S && S->getString().equals(StringMD)) { 235 ConstantInt *IntMD = 236 mdconst::extract_or_null<ConstantInt>(Node->getOperand(1)); 237 if (IntMD && IntMD->getSExtValue() == V) 238 // It is already in place. Do nothing. 239 return; 240 // We need to update the value, so just skip it here and it will 241 // be added after copying other existed nodes. 242 continue; 243 } 244 } 245 MDs.push_back(Node); 246 } 247 } 248 // Add new metadata. 249 MDs.push_back(createStringMetadata(TheLoop, StringMD, V)); 250 // Replace current metadata node with new one. 251 LLVMContext &Context = TheLoop->getHeader()->getContext(); 252 MDNode *NewLoopID = MDNode::get(Context, MDs); 253 // Set operand 0 to refer to the loop id itself. 
254 NewLoopID->replaceOperandWith(0, NewLoopID); 255 TheLoop->setLoopID(NewLoopID); 256 } 257 258 Optional<ElementCount> 259 llvm::getOptionalElementCountLoopAttribute(const Loop *TheLoop) { 260 Optional<int> Width = 261 getOptionalIntLoopAttribute(TheLoop, "llvm.loop.vectorize.width"); 262 263 if (Width.hasValue()) { 264 Optional<int> IsScalable = getOptionalIntLoopAttribute( 265 TheLoop, "llvm.loop.vectorize.scalable.enable"); 266 return ElementCount::get(*Width, IsScalable.getValueOr(false)); 267 } 268 269 return None; 270 } 271 272 Optional<MDNode *> llvm::makeFollowupLoopID( 273 MDNode *OrigLoopID, ArrayRef<StringRef> FollowupOptions, 274 const char *InheritOptionsExceptPrefix, bool AlwaysNew) { 275 if (!OrigLoopID) { 276 if (AlwaysNew) 277 return nullptr; 278 return None; 279 } 280 281 assert(OrigLoopID->getOperand(0) == OrigLoopID); 282 283 bool InheritAllAttrs = !InheritOptionsExceptPrefix; 284 bool InheritSomeAttrs = 285 InheritOptionsExceptPrefix && InheritOptionsExceptPrefix[0] != '\0'; 286 SmallVector<Metadata *, 8> MDs; 287 MDs.push_back(nullptr); 288 289 bool Changed = false; 290 if (InheritAllAttrs || InheritSomeAttrs) { 291 for (const MDOperand &Existing : drop_begin(OrigLoopID->operands())) { 292 MDNode *Op = cast<MDNode>(Existing.get()); 293 294 auto InheritThisAttribute = [InheritSomeAttrs, 295 InheritOptionsExceptPrefix](MDNode *Op) { 296 if (!InheritSomeAttrs) 297 return false; 298 299 // Skip malformatted attribute metadata nodes. 300 if (Op->getNumOperands() == 0) 301 return true; 302 Metadata *NameMD = Op->getOperand(0).get(); 303 if (!isa<MDString>(NameMD)) 304 return true; 305 StringRef AttrName = cast<MDString>(NameMD)->getString(); 306 307 // Do not inherit excluded attributes. 308 return !AttrName.startswith(InheritOptionsExceptPrefix); 309 }; 310 311 if (InheritThisAttribute(Op)) 312 MDs.push_back(Op); 313 else 314 Changed = true; 315 } 316 } else { 317 // Modified if we dropped at least one attribute. 318 Changed = OrigLoopID->getNumOperands() > 1; 319 } 320 321 bool HasAnyFollowup = false; 322 for (StringRef OptionName : FollowupOptions) { 323 MDNode *FollowupNode = findOptionMDForLoopID(OrigLoopID, OptionName); 324 if (!FollowupNode) 325 continue; 326 327 HasAnyFollowup = true; 328 for (const MDOperand &Option : drop_begin(FollowupNode->operands())) { 329 MDs.push_back(Option.get()); 330 Changed = true; 331 } 332 } 333 334 // Attributes of the followup loop not specified explicity, so signal to the 335 // transformation pass to add suitable attributes. 336 if (!AlwaysNew && !HasAnyFollowup) 337 return None; 338 339 // If no attributes were added or remove, the previous loop Id can be reused. 340 if (!AlwaysNew && !Changed) 341 return OrigLoopID; 342 343 // No attributes is equivalent to having no !llvm.loop metadata at all. 344 if (MDs.size() == 1) 345 return nullptr; 346 347 // Build the new loop ID. 
348 MDTuple *FollowupLoopID = MDNode::get(OrigLoopID->getContext(), MDs); 349 FollowupLoopID->replaceOperandWith(0, FollowupLoopID); 350 return FollowupLoopID; 351 } 352 353 bool llvm::hasDisableAllTransformsHint(const Loop *L) { 354 return getBooleanLoopAttribute(L, LLVMLoopDisableNonforced); 355 } 356 357 bool llvm::hasDisableLICMTransformsHint(const Loop *L) { 358 return getBooleanLoopAttribute(L, LLVMLoopDisableLICM); 359 } 360 361 TransformationMode llvm::hasUnrollTransformation(const Loop *L) { 362 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.disable")) 363 return TM_SuppressedByUser; 364 365 Optional<int> Count = 366 getOptionalIntLoopAttribute(L, "llvm.loop.unroll.count"); 367 if (Count.hasValue()) 368 return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser; 369 370 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.enable")) 371 return TM_ForcedByUser; 372 373 if (getBooleanLoopAttribute(L, "llvm.loop.unroll.full")) 374 return TM_ForcedByUser; 375 376 if (hasDisableAllTransformsHint(L)) 377 return TM_Disable; 378 379 return TM_Unspecified; 380 } 381 382 TransformationMode llvm::hasUnrollAndJamTransformation(const Loop *L) { 383 if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.disable")) 384 return TM_SuppressedByUser; 385 386 Optional<int> Count = 387 getOptionalIntLoopAttribute(L, "llvm.loop.unroll_and_jam.count"); 388 if (Count.hasValue()) 389 return Count.getValue() == 1 ? TM_SuppressedByUser : TM_ForcedByUser; 390 391 if (getBooleanLoopAttribute(L, "llvm.loop.unroll_and_jam.enable")) 392 return TM_ForcedByUser; 393 394 if (hasDisableAllTransformsHint(L)) 395 return TM_Disable; 396 397 return TM_Unspecified; 398 } 399 400 TransformationMode llvm::hasVectorizeTransformation(const Loop *L) { 401 Optional<bool> Enable = 402 getOptionalBoolLoopAttribute(L, "llvm.loop.vectorize.enable"); 403 404 if (Enable == false) 405 return TM_SuppressedByUser; 406 407 Optional<ElementCount> VectorizeWidth = 408 getOptionalElementCountLoopAttribute(L); 409 Optional<int> InterleaveCount = 410 getOptionalIntLoopAttribute(L, "llvm.loop.interleave.count"); 411 412 // 'Forcing' vector width and interleave count to one effectively disables 413 // this tranformation. 414 if (Enable == true && VectorizeWidth && VectorizeWidth->isScalar() && 415 InterleaveCount == 1) 416 return TM_SuppressedByUser; 417 418 if (getBooleanLoopAttribute(L, "llvm.loop.isvectorized")) 419 return TM_Disable; 420 421 if (Enable == true) 422 return TM_ForcedByUser; 423 424 if ((VectorizeWidth && VectorizeWidth->isScalar()) && InterleaveCount == 1) 425 return TM_Disable; 426 427 if ((VectorizeWidth && VectorizeWidth->isVector()) || InterleaveCount > 1) 428 return TM_Enable; 429 430 if (hasDisableAllTransformsHint(L)) 431 return TM_Disable; 432 433 return TM_Unspecified; 434 } 435 436 TransformationMode llvm::hasDistributeTransformation(const Loop *L) { 437 if (getBooleanLoopAttribute(L, "llvm.loop.distribute.enable")) 438 return TM_ForcedByUser; 439 440 if (hasDisableAllTransformsHint(L)) 441 return TM_Disable; 442 443 return TM_Unspecified; 444 } 445 446 TransformationMode llvm::hasLICMVersioningTransformation(const Loop *L) { 447 if (getBooleanLoopAttribute(L, "llvm.loop.licm_versioning.disable")) 448 return TM_SuppressedByUser; 449 450 if (hasDisableAllTransformsHint(L)) 451 return TM_Disable; 452 453 return TM_Unspecified; 454 } 455 456 /// Does a BFS from a given node to all of its children inside a given loop. 457 /// The returned vector of nodes includes the starting point. 
458 SmallVector<DomTreeNode *, 16> 459 llvm::collectChildrenInLoop(DomTreeNode *N, const Loop *CurLoop) { 460 SmallVector<DomTreeNode *, 16> Worklist; 461 auto AddRegionToWorklist = [&](DomTreeNode *DTN) { 462 // Only include subregions in the top level loop. 463 BasicBlock *BB = DTN->getBlock(); 464 if (CurLoop->contains(BB)) 465 Worklist.push_back(DTN); 466 }; 467 468 AddRegionToWorklist(N); 469 470 for (size_t I = 0; I < Worklist.size(); I++) { 471 for (DomTreeNode *Child : Worklist[I]->children()) 472 AddRegionToWorklist(Child); 473 } 474 475 return Worklist; 476 } 477 478 void llvm::deleteDeadLoop(Loop *L, DominatorTree *DT, ScalarEvolution *SE, 479 LoopInfo *LI, MemorySSA *MSSA) { 480 assert((!DT || L->isLCSSAForm(*DT)) && "Expected LCSSA!"); 481 auto *Preheader = L->getLoopPreheader(); 482 assert(Preheader && "Preheader should exist!"); 483 484 std::unique_ptr<MemorySSAUpdater> MSSAU; 485 if (MSSA) 486 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA); 487 488 // Now that we know the removal is safe, remove the loop by changing the 489 // branch from the preheader to go to the single exit block. 490 // 491 // Because we're deleting a large chunk of code at once, the sequence in which 492 // we remove things is very important to avoid invalidation issues. 493 494 // Tell ScalarEvolution that the loop is deleted. Do this before 495 // deleting the loop so that ScalarEvolution can look at the loop 496 // to determine what it needs to clean up. 497 if (SE) 498 SE->forgetLoop(L); 499 500 auto *OldBr = dyn_cast<BranchInst>(Preheader->getTerminator()); 501 assert(OldBr && "Preheader must end with a branch"); 502 assert(OldBr->isUnconditional() && "Preheader must have a single successor"); 503 // Connect the preheader to the exit block. Keep the old edge to the header 504 // around to perform the dominator tree update in two separate steps 505 // -- #1 insertion of the edge preheader -> exit and #2 deletion of the edge 506 // preheader -> header. 507 // 508 // 509 // 0. Preheader 1. Preheader 2. Preheader 510 // | | | | 511 // V | V | 512 // Header <--\ | Header <--\ | Header <--\ 513 // | | | | | | | | | | | 514 // | V | | | V | | | V | 515 // | Body --/ | | Body --/ | | Body --/ 516 // V V V V V 517 // Exit Exit Exit 518 // 519 // By doing this is two separate steps we can perform the dominator tree 520 // update without using the batch update API. 521 // 522 // Even when the loop is never executed, we cannot remove the edge from the 523 // source block to the exit block. Consider the case where the unexecuted loop 524 // branches back to an outer loop. If we deleted the loop and removed the edge 525 // coming to this inner loop, this will break the outer loop structure (by 526 // deleting the backedge of the outer loop). If the outer loop is indeed a 527 // non-loop, it will be deleted in a future iteration of loop deletion pass. 528 IRBuilder<> Builder(OldBr); 529 530 auto *ExitBlock = L->getUniqueExitBlock(); 531 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager); 532 if (ExitBlock) { 533 assert(ExitBlock && "Should have a unique exit block!"); 534 assert(L->hasDedicatedExits() && "Loop should have dedicated exits!"); 535 536 Builder.CreateCondBr(Builder.getFalse(), L->getHeader(), ExitBlock); 537 // Remove the old branch. The conditional branch becomes a new terminator. 538 OldBr->eraseFromParent(); 539 540 // Rewrite phis in the exit block to get their inputs from the Preheader 541 // instead of the exiting block. 
542 for (PHINode &P : ExitBlock->phis()) { 543 // Set the zero'th element of Phi to be from the preheader and remove all 544 // other incoming values. Given the loop has dedicated exits, all other 545 // incoming values must be from the exiting blocks. 546 int PredIndex = 0; 547 P.setIncomingBlock(PredIndex, Preheader); 548 // Removes all incoming values from all other exiting blocks (including 549 // duplicate values from an exiting block). 550 // Nuke all entries except the zero'th entry which is the preheader entry. 551 // NOTE! We need to remove Incoming Values in the reverse order as done 552 // below, to keep the indices valid for deletion (removeIncomingValues 553 // updates getNumIncomingValues and shifts all values down into the 554 // operand being deleted). 555 for (unsigned i = 0, e = P.getNumIncomingValues() - 1; i != e; ++i) 556 P.removeIncomingValue(e - i, false); 557 558 assert((P.getNumIncomingValues() == 1 && 559 P.getIncomingBlock(PredIndex) == Preheader) && 560 "Should have exactly one value and that's from the preheader!"); 561 } 562 563 if (DT) { 564 DTU.applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}}); 565 if (MSSA) { 566 MSSAU->applyUpdates({{DominatorTree::Insert, Preheader, ExitBlock}}, 567 *DT); 568 if (VerifyMemorySSA) 569 MSSA->verifyMemorySSA(); 570 } 571 } 572 573 // Disconnect the loop body by branching directly to its exit. 574 Builder.SetInsertPoint(Preheader->getTerminator()); 575 Builder.CreateBr(ExitBlock); 576 // Remove the old branch. 577 Preheader->getTerminator()->eraseFromParent(); 578 } else { 579 assert(L->hasNoExitBlocks() && 580 "Loop should have either zero or one exit blocks."); 581 582 Builder.SetInsertPoint(OldBr); 583 Builder.CreateUnreachable(); 584 Preheader->getTerminator()->eraseFromParent(); 585 } 586 587 if (DT) { 588 DTU.applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}}); 589 if (MSSA) { 590 MSSAU->applyUpdates({{DominatorTree::Delete, Preheader, L->getHeader()}}, 591 *DT); 592 SmallSetVector<BasicBlock *, 8> DeadBlockSet(L->block_begin(), 593 L->block_end()); 594 MSSAU->removeBlocks(DeadBlockSet); 595 if (VerifyMemorySSA) 596 MSSA->verifyMemorySSA(); 597 } 598 } 599 600 // Use a map to unique and a vector to guarantee deterministic ordering. 601 llvm::SmallDenseSet<std::pair<DIVariable *, DIExpression *>, 4> DeadDebugSet; 602 llvm::SmallVector<DbgVariableIntrinsic *, 4> DeadDebugInst; 603 604 if (ExitBlock) { 605 // Given LCSSA form is satisfied, we should not have users of instructions 606 // within the dead loop outside of the loop. However, LCSSA doesn't take 607 // unreachable uses into account. We handle them here. 608 // We could do it after drop all references (in this case all users in the 609 // loop will be already eliminated and we have less work to do but according 610 // to API doc of User::dropAllReferences only valid operation after dropping 611 // references, is deletion. So let's substitute all usages of 612 // instruction from the loop with undef value of corresponding type first. 613 for (auto *Block : L->blocks()) 614 for (Instruction &I : *Block) { 615 auto *Undef = UndefValue::get(I.getType()); 616 for (Use &U : llvm::make_early_inc_range(I.uses())) { 617 if (auto *Usr = dyn_cast<Instruction>(U.getUser())) 618 if (L->contains(Usr->getParent())) 619 continue; 620 // If we have a DT then we can check that uses outside a loop only in 621 // unreachable block. 
622 if (DT) 623 assert(!DT->isReachableFromEntry(U) && 624 "Unexpected user in reachable block"); 625 U.set(Undef); 626 } 627 auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I); 628 if (!DVI) 629 continue; 630 auto Key = 631 DeadDebugSet.find({DVI->getVariable(), DVI->getExpression()}); 632 if (Key != DeadDebugSet.end()) 633 continue; 634 DeadDebugSet.insert({DVI->getVariable(), DVI->getExpression()}); 635 DeadDebugInst.push_back(DVI); 636 } 637 638 // After the loop has been deleted all the values defined and modified 639 // inside the loop are going to be unavailable. 640 // Since debug values in the loop have been deleted, inserting an undef 641 // dbg.value truncates the range of any dbg.value before the loop where the 642 // loop used to be. This is particularly important for constant values. 643 DIBuilder DIB(*ExitBlock->getModule()); 644 Instruction *InsertDbgValueBefore = ExitBlock->getFirstNonPHI(); 645 assert(InsertDbgValueBefore && 646 "There should be a non-PHI instruction in exit block, else these " 647 "instructions will have no parent."); 648 for (auto *DVI : DeadDebugInst) 649 DIB.insertDbgValueIntrinsic(UndefValue::get(Builder.getInt32Ty()), 650 DVI->getVariable(), DVI->getExpression(), 651 DVI->getDebugLoc(), InsertDbgValueBefore); 652 } 653 654 // Remove the block from the reference counting scheme, so that we can 655 // delete it freely later. 656 for (auto *Block : L->blocks()) 657 Block->dropAllReferences(); 658 659 if (MSSA && VerifyMemorySSA) 660 MSSA->verifyMemorySSA(); 661 662 if (LI) { 663 // Erase the instructions and the blocks without having to worry 664 // about ordering because we already dropped the references. 665 // NOTE: This iteration is safe because erasing the block does not remove 666 // its entry from the loop's block list. We do that in the next section. 667 for (BasicBlock *BB : L->blocks()) 668 BB->eraseFromParent(); 669 670 // Finally, the blocks from loopinfo. This has to happen late because 671 // otherwise our loop iterators won't work. 672 673 SmallPtrSet<BasicBlock *, 8> blocks; 674 blocks.insert(L->block_begin(), L->block_end()); 675 for (BasicBlock *BB : blocks) 676 LI->removeBlock(BB); 677 678 // The last step is to update LoopInfo now that we've eliminated this loop. 679 // Note: LoopInfo::erase remove the given loop and relink its subloops with 680 // its parent. While removeLoop/removeChildLoop remove the given loop but 681 // not relink its subloops, which is what we want. 682 if (Loop *ParentLoop = L->getParentLoop()) { 683 Loop::iterator I = find(*ParentLoop, L); 684 assert(I != ParentLoop->end() && "Couldn't find loop"); 685 ParentLoop->removeChildLoop(I); 686 } else { 687 Loop::iterator I = find(*LI, L); 688 assert(I != LI->end() && "Couldn't find loop"); 689 LI->removeLoop(I); 690 } 691 LI->destroy(L); 692 } 693 } 694 695 static Loop *getOutermostLoop(Loop *L) { 696 while (Loop *Parent = L->getParentLoop()) 697 L = Parent; 698 return L; 699 } 700 701 void llvm::breakLoopBackedge(Loop *L, DominatorTree &DT, ScalarEvolution &SE, 702 LoopInfo &LI, MemorySSA *MSSA) { 703 auto *Latch = L->getLoopLatch(); 704 assert(Latch && "multiple latches not yet supported"); 705 auto *Header = L->getHeader(); 706 Loop *OutermostLoop = getOutermostLoop(L); 707 708 SE.forgetLoop(L); 709 710 std::unique_ptr<MemorySSAUpdater> MSSAU; 711 if (MSSA) 712 MSSAU = std::make_unique<MemorySSAUpdater>(MSSA); 713 714 // Update the CFG and domtree. We chose to special case a couple of 715 // of common cases for code quality and test readability reasons. 
716 [&]() -> void { 717 if (auto *BI = dyn_cast<BranchInst>(Latch->getTerminator())) { 718 if (!BI->isConditional()) { 719 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager); 720 (void)changeToUnreachable(BI, /*PreserveLCSSA*/ true, &DTU, 721 MSSAU.get()); 722 return; 723 } 724 725 // Conditional latch/exit - note that latch can be shared by inner 726 // and outer loop so the other target doesn't need to an exit 727 if (L->isLoopExiting(Latch)) { 728 // TODO: Generalize ConstantFoldTerminator so that it can be used 729 // here without invalidating LCSSA or MemorySSA. (Tricky case for 730 // LCSSA: header is an exit block of a preceeding sibling loop w/o 731 // dedicated exits.) 732 const unsigned ExitIdx = L->contains(BI->getSuccessor(0)) ? 1 : 0; 733 BasicBlock *ExitBB = BI->getSuccessor(ExitIdx); 734 735 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager); 736 Header->removePredecessor(Latch, true); 737 738 IRBuilder<> Builder(BI); 739 auto *NewBI = Builder.CreateBr(ExitBB); 740 // Transfer the metadata to the new branch instruction (minus the 741 // loop info since this is no longer a loop) 742 NewBI->copyMetadata(*BI, {LLVMContext::MD_dbg, 743 LLVMContext::MD_annotation}); 744 745 BI->eraseFromParent(); 746 DTU.applyUpdates({{DominatorTree::Delete, Latch, Header}}); 747 if (MSSA) 748 MSSAU->applyUpdates({{DominatorTree::Delete, Latch, Header}}, DT); 749 return; 750 } 751 } 752 753 // General case. By splitting the backedge, and then explicitly making it 754 // unreachable we gracefully handle corner cases such as switch and invoke 755 // termiantors. 756 auto *BackedgeBB = SplitEdge(Latch, Header, &DT, &LI, MSSAU.get()); 757 758 DomTreeUpdater DTU(&DT, DomTreeUpdater::UpdateStrategy::Eager); 759 (void)changeToUnreachable(BackedgeBB->getTerminator(), 760 /*PreserveLCSSA*/ true, &DTU, MSSAU.get()); 761 }(); 762 763 // Erase (and destroy) this loop instance. Handles relinking sub-loops 764 // and blocks within the loop as needed. 765 LI.erase(L); 766 767 // If the loop we broke had a parent, then changeToUnreachable might have 768 // caused a block to be removed from the parent loop (see loop_nest_lcssa 769 // test case in zero-btc.ll for an example), thus changing the parent's 770 // exit blocks. If that happened, we need to rebuild LCSSA on the outermost 771 // loop which might have a had a block removed. 772 if (OutermostLoop != L) 773 formLCSSARecursively(*OutermostLoop, DT, &LI, &SE); 774 } 775 776 777 /// Checks if \p L has an exiting latch branch. There may also be other 778 /// exiting blocks. Returns branch instruction terminating the loop 779 /// latch if above check is successful, nullptr otherwise. 780 static BranchInst *getExpectedExitLoopLatchBranch(Loop *L) { 781 BasicBlock *Latch = L->getLoopLatch(); 782 if (!Latch) 783 return nullptr; 784 785 BranchInst *LatchBR = dyn_cast<BranchInst>(Latch->getTerminator()); 786 if (!LatchBR || LatchBR->getNumSuccessors() != 2 || !L->isLoopExiting(Latch)) 787 return nullptr; 788 789 assert((LatchBR->getSuccessor(0) == L->getHeader() || 790 LatchBR->getSuccessor(1) == L->getHeader()) && 791 "At least one edge out of the latch must go to the header"); 792 793 return LatchBR; 794 } 795 796 /// Return the estimated trip count for any exiting branch which dominates 797 /// the loop latch. 
798 static Optional<uint64_t> 799 getEstimatedTripCount(BranchInst *ExitingBranch, Loop *L, 800 uint64_t &OrigExitWeight) { 801 // To estimate the number of times the loop body was executed, we want to 802 // know the number of times the backedge was taken, vs. the number of times 803 // we exited the loop. 804 uint64_t LoopWeight, ExitWeight; 805 if (!ExitingBranch->extractProfMetadata(LoopWeight, ExitWeight)) 806 return None; 807 808 if (L->contains(ExitingBranch->getSuccessor(1))) 809 std::swap(LoopWeight, ExitWeight); 810 811 if (!ExitWeight) 812 // Don't have a way to return predicated infinite 813 return None; 814 815 OrigExitWeight = ExitWeight; 816 817 // Estimated exit count is a ratio of the loop weight by the weight of the 818 // edge exiting the loop, rounded to nearest. 819 uint64_t ExitCount = llvm::divideNearest(LoopWeight, ExitWeight); 820 // Estimated trip count is one plus estimated exit count. 821 return ExitCount + 1; 822 } 823 824 Optional<unsigned> 825 llvm::getLoopEstimatedTripCount(Loop *L, 826 unsigned *EstimatedLoopInvocationWeight) { 827 // Currently we take the estimate exit count only from the loop latch, 828 // ignoring other exiting blocks. This can overestimate the trip count 829 // if we exit through another exit, but can never underestimate it. 830 // TODO: incorporate information from other exits 831 if (BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L)) { 832 uint64_t ExitWeight; 833 if (Optional<uint64_t> EstTripCount = 834 getEstimatedTripCount(LatchBranch, L, ExitWeight)) { 835 if (EstimatedLoopInvocationWeight) 836 *EstimatedLoopInvocationWeight = ExitWeight; 837 return *EstTripCount; 838 } 839 } 840 return None; 841 } 842 843 bool llvm::setLoopEstimatedTripCount(Loop *L, unsigned EstimatedTripCount, 844 unsigned EstimatedloopInvocationWeight) { 845 // At the moment, we currently support changing the estimate trip count of 846 // the latch branch only. We could extend this API to manipulate estimated 847 // trip counts for any exit. 848 BranchInst *LatchBranch = getExpectedExitLoopLatchBranch(L); 849 if (!LatchBranch) 850 return false; 851 852 // Calculate taken and exit weights. 853 unsigned LatchExitWeight = 0; 854 unsigned BackedgeTakenWeight = 0; 855 856 if (EstimatedTripCount > 0) { 857 LatchExitWeight = EstimatedloopInvocationWeight; 858 BackedgeTakenWeight = (EstimatedTripCount - 1) * LatchExitWeight; 859 } 860 861 // Make a swap if back edge is taken when condition is "false". 862 if (LatchBranch->getSuccessor(0) != L->getHeader()) 863 std::swap(BackedgeTakenWeight, LatchExitWeight); 864 865 MDBuilder MDB(LatchBranch->getContext()); 866 867 // Set/Update profile metadata. 
868 LatchBranch->setMetadata( 869 LLVMContext::MD_prof, 870 MDB.createBranchWeights(BackedgeTakenWeight, LatchExitWeight)); 871 872 return true; 873 } 874 875 bool llvm::hasIterationCountInvariantInParent(Loop *InnerLoop, 876 ScalarEvolution &SE) { 877 Loop *OuterL = InnerLoop->getParentLoop(); 878 if (!OuterL) 879 return true; 880 881 // Get the backedge taken count for the inner loop 882 BasicBlock *InnerLoopLatch = InnerLoop->getLoopLatch(); 883 const SCEV *InnerLoopBECountSC = SE.getExitCount(InnerLoop, InnerLoopLatch); 884 if (isa<SCEVCouldNotCompute>(InnerLoopBECountSC) || 885 !InnerLoopBECountSC->getType()->isIntegerTy()) 886 return false; 887 888 // Get whether count is invariant to the outer loop 889 ScalarEvolution::LoopDisposition LD = 890 SE.getLoopDisposition(InnerLoopBECountSC, OuterL); 891 if (LD != ScalarEvolution::LoopInvariant) 892 return false; 893 894 return true; 895 } 896 897 CmpInst::Predicate llvm::getMinMaxReductionPredicate(RecurKind RK) { 898 switch (RK) { 899 default: 900 llvm_unreachable("Unknown min/max recurrence kind"); 901 case RecurKind::UMin: 902 return CmpInst::ICMP_ULT; 903 case RecurKind::UMax: 904 return CmpInst::ICMP_UGT; 905 case RecurKind::SMin: 906 return CmpInst::ICMP_SLT; 907 case RecurKind::SMax: 908 return CmpInst::ICMP_SGT; 909 case RecurKind::FMin: 910 return CmpInst::FCMP_OLT; 911 case RecurKind::FMax: 912 return CmpInst::FCMP_OGT; 913 } 914 } 915 916 Value *llvm::createSelectCmpOp(IRBuilderBase &Builder, Value *StartVal, 917 RecurKind RK, Value *Left, Value *Right) { 918 if (auto VTy = dyn_cast<VectorType>(Left->getType())) 919 StartVal = Builder.CreateVectorSplat(VTy->getElementCount(), StartVal); 920 Value *Cmp = 921 Builder.CreateCmp(CmpInst::ICMP_NE, Left, StartVal, "rdx.select.cmp"); 922 return Builder.CreateSelect(Cmp, Left, Right, "rdx.select"); 923 } 924 925 Value *llvm::createMinMaxOp(IRBuilderBase &Builder, RecurKind RK, Value *Left, 926 Value *Right) { 927 CmpInst::Predicate Pred = getMinMaxReductionPredicate(RK); 928 Value *Cmp = Builder.CreateCmp(Pred, Left, Right, "rdx.minmax.cmp"); 929 Value *Select = Builder.CreateSelect(Cmp, Left, Right, "rdx.minmax.select"); 930 return Select; 931 } 932 933 // Helper to generate an ordered reduction. 934 Value *llvm::getOrderedReduction(IRBuilderBase &Builder, Value *Acc, Value *Src, 935 unsigned Op, RecurKind RdxKind) { 936 unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements(); 937 938 // Extract and apply reduction ops in ascending order: 939 // e.g. ((((Acc + Scl[0]) + Scl[1]) + Scl[2]) + ) ... + Scl[VF-1] 940 Value *Result = Acc; 941 for (unsigned ExtractIdx = 0; ExtractIdx != VF; ++ExtractIdx) { 942 Value *Ext = 943 Builder.CreateExtractElement(Src, Builder.getInt32(ExtractIdx)); 944 945 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 946 Result = Builder.CreateBinOp((Instruction::BinaryOps)Op, Result, Ext, 947 "bin.rdx"); 948 } else { 949 assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) && 950 "Invalid min/max"); 951 Result = createMinMaxOp(Builder, RdxKind, Result, Ext); 952 } 953 } 954 955 return Result; 956 } 957 958 // Helper to generate a log2 shuffle reduction. 959 Value *llvm::getShuffleReduction(IRBuilderBase &Builder, Value *Src, 960 unsigned Op, RecurKind RdxKind) { 961 unsigned VF = cast<FixedVectorType>(Src->getType())->getNumElements(); 962 // VF is a power of 2 so we can emit the reduction using log2(VF) shuffles 963 // and vector ops, reducing the set of values being computed by half each 964 // round. 
965 assert(isPowerOf2_32(VF) && 966 "Reduction emission only supported for pow2 vectors!"); 967 // Note: fast-math-flags flags are controlled by the builder configuration 968 // and are assumed to apply to all generated arithmetic instructions. Other 969 // poison generating flags (nsw/nuw/inbounds/inrange/exact) are not part 970 // of the builder configuration, and since they're not passed explicitly, 971 // will never be relevant here. Note that it would be generally unsound to 972 // propagate these from an intrinsic call to the expansion anyways as we/ 973 // change the order of operations. 974 Value *TmpVec = Src; 975 SmallVector<int, 32> ShuffleMask(VF); 976 for (unsigned i = VF; i != 1; i >>= 1) { 977 // Move the upper half of the vector to the lower half. 978 for (unsigned j = 0; j != i / 2; ++j) 979 ShuffleMask[j] = i / 2 + j; 980 981 // Fill the rest of the mask with undef. 982 std::fill(&ShuffleMask[i / 2], ShuffleMask.end(), -1); 983 984 Value *Shuf = Builder.CreateShuffleVector(TmpVec, ShuffleMask, "rdx.shuf"); 985 986 if (Op != Instruction::ICmp && Op != Instruction::FCmp) { 987 TmpVec = Builder.CreateBinOp((Instruction::BinaryOps)Op, TmpVec, Shuf, 988 "bin.rdx"); 989 } else { 990 assert(RecurrenceDescriptor::isMinMaxRecurrenceKind(RdxKind) && 991 "Invalid min/max"); 992 TmpVec = createMinMaxOp(Builder, RdxKind, TmpVec, Shuf); 993 } 994 } 995 // The result is in the first element of the vector. 996 return Builder.CreateExtractElement(TmpVec, Builder.getInt32(0)); 997 } 998 999 Value *llvm::createSelectCmpTargetReduction(IRBuilderBase &Builder, 1000 const TargetTransformInfo *TTI, 1001 Value *Src, 1002 const RecurrenceDescriptor &Desc, 1003 PHINode *OrigPhi) { 1004 assert(RecurrenceDescriptor::isSelectCmpRecurrenceKind( 1005 Desc.getRecurrenceKind()) && 1006 "Unexpected reduction kind"); 1007 Value *InitVal = Desc.getRecurrenceStartValue(); 1008 Value *NewVal = nullptr; 1009 1010 // First use the original phi to determine the new value we're trying to 1011 // select from in the loop. 1012 SelectInst *SI = nullptr; 1013 for (auto *U : OrigPhi->users()) { 1014 if ((SI = dyn_cast<SelectInst>(U))) 1015 break; 1016 } 1017 assert(SI && "One user of the original phi should be a select"); 1018 1019 if (SI->getTrueValue() == OrigPhi) 1020 NewVal = SI->getFalseValue(); 1021 else { 1022 assert(SI->getFalseValue() == OrigPhi && 1023 "At least one input to the select should be the original Phi"); 1024 NewVal = SI->getTrueValue(); 1025 } 1026 1027 // Create a splat vector with the new value and compare this to the vector 1028 // we want to reduce. 1029 ElementCount EC = cast<VectorType>(Src->getType())->getElementCount(); 1030 Value *Right = Builder.CreateVectorSplat(EC, InitVal); 1031 Value *Cmp = 1032 Builder.CreateCmp(CmpInst::ICMP_NE, Src, Right, "rdx.select.cmp"); 1033 1034 // If any predicate is true it means that we want to select the new value. 
1035 Cmp = Builder.CreateOrReduce(Cmp); 1036 return Builder.CreateSelect(Cmp, NewVal, InitVal, "rdx.select"); 1037 } 1038 1039 Value *llvm::createSimpleTargetReduction(IRBuilderBase &Builder, 1040 const TargetTransformInfo *TTI, 1041 Value *Src, RecurKind RdxKind) { 1042 auto *SrcVecEltTy = cast<VectorType>(Src->getType())->getElementType(); 1043 switch (RdxKind) { 1044 case RecurKind::Add: 1045 return Builder.CreateAddReduce(Src); 1046 case RecurKind::Mul: 1047 return Builder.CreateMulReduce(Src); 1048 case RecurKind::And: 1049 return Builder.CreateAndReduce(Src); 1050 case RecurKind::Or: 1051 return Builder.CreateOrReduce(Src); 1052 case RecurKind::Xor: 1053 return Builder.CreateXorReduce(Src); 1054 case RecurKind::FMulAdd: 1055 case RecurKind::FAdd: 1056 return Builder.CreateFAddReduce(ConstantFP::getNegativeZero(SrcVecEltTy), 1057 Src); 1058 case RecurKind::FMul: 1059 return Builder.CreateFMulReduce(ConstantFP::get(SrcVecEltTy, 1.0), Src); 1060 case RecurKind::SMax: 1061 return Builder.CreateIntMaxReduce(Src, true); 1062 case RecurKind::SMin: 1063 return Builder.CreateIntMinReduce(Src, true); 1064 case RecurKind::UMax: 1065 return Builder.CreateIntMaxReduce(Src, false); 1066 case RecurKind::UMin: 1067 return Builder.CreateIntMinReduce(Src, false); 1068 case RecurKind::FMax: 1069 return Builder.CreateFPMaxReduce(Src); 1070 case RecurKind::FMin: 1071 return Builder.CreateFPMinReduce(Src); 1072 default: 1073 llvm_unreachable("Unhandled opcode"); 1074 } 1075 } 1076 1077 Value *llvm::createTargetReduction(IRBuilderBase &B, 1078 const TargetTransformInfo *TTI, 1079 const RecurrenceDescriptor &Desc, Value *Src, 1080 PHINode *OrigPhi) { 1081 // TODO: Support in-order reductions based on the recurrence descriptor. 1082 // All ops in the reduction inherit fast-math-flags from the recurrence 1083 // descriptor. 1084 IRBuilderBase::FastMathFlagGuard FMFGuard(B); 1085 B.setFastMathFlags(Desc.getFastMathFlags()); 1086 1087 RecurKind RK = Desc.getRecurrenceKind(); 1088 if (RecurrenceDescriptor::isSelectCmpRecurrenceKind(RK)) 1089 return createSelectCmpTargetReduction(B, TTI, Src, Desc, OrigPhi); 1090 1091 return createSimpleTargetReduction(B, TTI, Src, RK); 1092 } 1093 1094 Value *llvm::createOrderedReduction(IRBuilderBase &B, 1095 const RecurrenceDescriptor &Desc, 1096 Value *Src, Value *Start) { 1097 assert((Desc.getRecurrenceKind() == RecurKind::FAdd || 1098 Desc.getRecurrenceKind() == RecurKind::FMulAdd) && 1099 "Unexpected reduction kind"); 1100 assert(Src->getType()->isVectorTy() && "Expected a vector type"); 1101 assert(!Start->getType()->isVectorTy() && "Expected a scalar type"); 1102 1103 return B.CreateFAddReduce(Start, Src); 1104 } 1105 1106 void llvm::propagateIRFlags(Value *I, ArrayRef<Value *> VL, Value *OpValue) { 1107 auto *VecOp = dyn_cast<Instruction>(I); 1108 if (!VecOp) 1109 return; 1110 auto *Intersection = (OpValue == nullptr) ? 
dyn_cast<Instruction>(VL[0]) 1111 : dyn_cast<Instruction>(OpValue); 1112 if (!Intersection) 1113 return; 1114 const unsigned Opcode = Intersection->getOpcode(); 1115 VecOp->copyIRFlags(Intersection); 1116 for (auto *V : VL) { 1117 auto *Instr = dyn_cast<Instruction>(V); 1118 if (!Instr) 1119 continue; 1120 if (OpValue == nullptr || Opcode == Instr->getOpcode()) 1121 VecOp->andIRFlags(V); 1122 } 1123 } 1124 1125 bool llvm::isKnownNegativeInLoop(const SCEV *S, const Loop *L, 1126 ScalarEvolution &SE) { 1127 const SCEV *Zero = SE.getZero(S->getType()); 1128 return SE.isAvailableAtLoopEntry(S, L) && 1129 SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, S, Zero); 1130 } 1131 1132 bool llvm::isKnownNonNegativeInLoop(const SCEV *S, const Loop *L, 1133 ScalarEvolution &SE) { 1134 const SCEV *Zero = SE.getZero(S->getType()); 1135 return SE.isAvailableAtLoopEntry(S, L) && 1136 SE.isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGE, S, Zero); 1137 } 1138 1139 bool llvm::cannotBeMinInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE, 1140 bool Signed) { 1141 unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth(); 1142 APInt Min = Signed ? APInt::getSignedMinValue(BitWidth) : 1143 APInt::getMinValue(BitWidth); 1144 auto Predicate = Signed ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; 1145 return SE.isAvailableAtLoopEntry(S, L) && 1146 SE.isLoopEntryGuardedByCond(L, Predicate, S, 1147 SE.getConstant(Min)); 1148 } 1149 1150 bool llvm::cannotBeMaxInLoop(const SCEV *S, const Loop *L, ScalarEvolution &SE, 1151 bool Signed) { 1152 unsigned BitWidth = cast<IntegerType>(S->getType())->getBitWidth(); 1153 APInt Max = Signed ? APInt::getSignedMaxValue(BitWidth) : 1154 APInt::getMaxValue(BitWidth); 1155 auto Predicate = Signed ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; 1156 return SE.isAvailableAtLoopEntry(S, L) && 1157 SE.isLoopEntryGuardedByCond(L, Predicate, S, 1158 SE.getConstant(Max)); 1159 } 1160 1161 //===----------------------------------------------------------------------===// 1162 // rewriteLoopExitValues - Optimize IV users outside the loop. 1163 // As a side effect, reduces the amount of IV processing within the loop. 1164 //===----------------------------------------------------------------------===// 1165 1166 static bool hasHardUserWithinLoop(const Loop *L, const Instruction *I) { 1167 SmallPtrSet<const Instruction *, 8> Visited; 1168 SmallVector<const Instruction *, 8> WorkList; 1169 Visited.insert(I); 1170 WorkList.push_back(I); 1171 while (!WorkList.empty()) { 1172 const Instruction *Curr = WorkList.pop_back_val(); 1173 // This use is outside the loop, nothing to do. 1174 if (!L->contains(Curr)) 1175 continue; 1176 // Do we assume it is a "hard" use which will not be eliminated easily? 1177 if (Curr->mayHaveSideEffects()) 1178 return true; 1179 // Otherwise, add all its users to worklist. 1180 for (auto U : Curr->users()) { 1181 auto *UI = cast<Instruction>(U); 1182 if (Visited.insert(UI).second) 1183 WorkList.push_back(UI); 1184 } 1185 } 1186 return false; 1187 } 1188 1189 // Collect information about PHI nodes which can be transformed in 1190 // rewriteLoopExitValues. 1191 struct RewritePhi { 1192 PHINode *PN; // For which PHI node is this replacement? 1193 unsigned Ith; // For which incoming value? 1194 const SCEV *ExpansionSCEV; // The SCEV of the incoming value we are rewriting. 1195 Instruction *ExpansionPoint; // Where we'd like to expand that SCEV? 1196 bool HighCost; // Is this expansion a high-cost? 
1197 1198 RewritePhi(PHINode *P, unsigned I, const SCEV *Val, Instruction *ExpansionPt, 1199 bool H) 1200 : PN(P), Ith(I), ExpansionSCEV(Val), ExpansionPoint(ExpansionPt), 1201 HighCost(H) {} 1202 }; 1203 1204 // Check whether it is possible to delete the loop after rewriting exit 1205 // value. If it is possible, ignore ReplaceExitValue and do rewriting 1206 // aggressively. 1207 static bool canLoopBeDeleted(Loop *L, SmallVector<RewritePhi, 8> &RewritePhiSet) { 1208 BasicBlock *Preheader = L->getLoopPreheader(); 1209 // If there is no preheader, the loop will not be deleted. 1210 if (!Preheader) 1211 return false; 1212 1213 // In LoopDeletion pass Loop can be deleted when ExitingBlocks.size() > 1. 1214 // We obviate multiple ExitingBlocks case for simplicity. 1215 // TODO: If we see testcase with multiple ExitingBlocks can be deleted 1216 // after exit value rewriting, we can enhance the logic here. 1217 SmallVector<BasicBlock *, 4> ExitingBlocks; 1218 L->getExitingBlocks(ExitingBlocks); 1219 SmallVector<BasicBlock *, 8> ExitBlocks; 1220 L->getUniqueExitBlocks(ExitBlocks); 1221 if (ExitBlocks.size() != 1 || ExitingBlocks.size() != 1) 1222 return false; 1223 1224 BasicBlock *ExitBlock = ExitBlocks[0]; 1225 BasicBlock::iterator BI = ExitBlock->begin(); 1226 while (PHINode *P = dyn_cast<PHINode>(BI)) { 1227 Value *Incoming = P->getIncomingValueForBlock(ExitingBlocks[0]); 1228 1229 // If the Incoming value of P is found in RewritePhiSet, we know it 1230 // could be rewritten to use a loop invariant value in transformation 1231 // phase later. Skip it in the loop invariant check below. 1232 bool found = false; 1233 for (const RewritePhi &Phi : RewritePhiSet) { 1234 unsigned i = Phi.Ith; 1235 if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) { 1236 found = true; 1237 break; 1238 } 1239 } 1240 1241 Instruction *I; 1242 if (!found && (I = dyn_cast<Instruction>(Incoming))) 1243 if (!L->hasLoopInvariantOperands(I)) 1244 return false; 1245 1246 ++BI; 1247 } 1248 1249 for (auto *BB : L->blocks()) 1250 if (llvm::any_of(*BB, [](Instruction &I) { 1251 return I.mayHaveSideEffects(); 1252 })) 1253 return false; 1254 1255 return true; 1256 } 1257 1258 int llvm::rewriteLoopExitValues(Loop *L, LoopInfo *LI, TargetLibraryInfo *TLI, 1259 ScalarEvolution *SE, 1260 const TargetTransformInfo *TTI, 1261 SCEVExpander &Rewriter, DominatorTree *DT, 1262 ReplaceExitVal ReplaceExitValue, 1263 SmallVector<WeakTrackingVH, 16> &DeadInsts) { 1264 // Check a pre-condition. 1265 assert(L->isRecursivelyLCSSAForm(*DT, *LI) && 1266 "Indvars did not preserve LCSSA!"); 1267 1268 SmallVector<BasicBlock*, 8> ExitBlocks; 1269 L->getUniqueExitBlocks(ExitBlocks); 1270 1271 SmallVector<RewritePhi, 8> RewritePhiSet; 1272 // Find all values that are computed inside the loop, but used outside of it. 1273 // Because of LCSSA, these values will only occur in LCSSA PHI Nodes. Scan 1274 // the exit blocks of the loop to find them. 1275 for (BasicBlock *ExitBB : ExitBlocks) { 1276 // If there are no PHI nodes in this exit block, then no values defined 1277 // inside the loop are used on this path, skip it. 1278 PHINode *PN = dyn_cast<PHINode>(ExitBB->begin()); 1279 if (!PN) continue; 1280 1281 unsigned NumPreds = PN->getNumIncomingValues(); 1282 1283 // Iterate over all of the PHI nodes. 
1284 BasicBlock::iterator BBI = ExitBB->begin(); 1285 while ((PN = dyn_cast<PHINode>(BBI++))) { 1286 if (PN->use_empty()) 1287 continue; // dead use, don't replace it 1288 1289 if (!SE->isSCEVable(PN->getType())) 1290 continue; 1291 1292 // Iterate over all of the values in all the PHI nodes. 1293 for (unsigned i = 0; i != NumPreds; ++i) { 1294 // If the value being merged in is not integer or is not defined 1295 // in the loop, skip it. 1296 Value *InVal = PN->getIncomingValue(i); 1297 if (!isa<Instruction>(InVal)) 1298 continue; 1299 1300 // If this pred is for a subloop, not L itself, skip it. 1301 if (LI->getLoopFor(PN->getIncomingBlock(i)) != L) 1302 continue; // The Block is in a subloop, skip it. 1303 1304 // Check that InVal is defined in the loop. 1305 Instruction *Inst = cast<Instruction>(InVal); 1306 if (!L->contains(Inst)) 1307 continue; 1308 1309 // Okay, this instruction has a user outside of the current loop 1310 // and varies predictably *inside* the loop. Evaluate the value it 1311 // contains when the loop exits, if possible. We prefer to start with 1312 // expressions which are true for all exits (so as to maximize 1313 // expression reuse by the SCEVExpander), but resort to per-exit 1314 // evaluation if that fails. 1315 const SCEV *ExitValue = SE->getSCEVAtScope(Inst, L->getParentLoop()); 1316 if (isa<SCEVCouldNotCompute>(ExitValue) || 1317 !SE->isLoopInvariant(ExitValue, L) || 1318 !isSafeToExpand(ExitValue, *SE)) { 1319 // TODO: This should probably be sunk into SCEV in some way; maybe a 1320 // getSCEVForExit(SCEV*, L, ExitingBB)? It can be generalized for 1321 // most SCEV expressions and other recurrence types (e.g. shift 1322 // recurrences). Is there existing code we can reuse? 1323 const SCEV *ExitCount = SE->getExitCount(L, PN->getIncomingBlock(i)); 1324 if (isa<SCEVCouldNotCompute>(ExitCount)) 1325 continue; 1326 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE->getSCEV(Inst))) 1327 if (AddRec->getLoop() == L) 1328 ExitValue = AddRec->evaluateAtIteration(ExitCount, *SE); 1329 if (isa<SCEVCouldNotCompute>(ExitValue) || 1330 !SE->isLoopInvariant(ExitValue, L) || 1331 !isSafeToExpand(ExitValue, *SE)) 1332 continue; 1333 } 1334 1335 // Computing the value outside of the loop brings no benefit if it is 1336 // definitely used inside the loop in a way which can not be optimized 1337 // away. Avoid doing so unless we know we have a value which computes 1338 // the ExitValue already. TODO: This should be merged into SCEV 1339 // expander to leverage its knowledge of existing expressions. 1340 if (ReplaceExitValue != AlwaysRepl && !isa<SCEVConstant>(ExitValue) && 1341 !isa<SCEVUnknown>(ExitValue) && hasHardUserWithinLoop(L, Inst)) 1342 continue; 1343 1344 // Check if expansions of this SCEV would count as being high cost. 1345 bool HighCost = Rewriter.isHighCostExpansion( 1346 ExitValue, L, SCEVCheapExpansionBudget, TTI, Inst); 1347 1348 // Note that we must not perform expansions until after 1349 // we query *all* the costs, because if we perform temporary expansion 1350 // inbetween, one that we might not intend to keep, said expansion 1351 // *may* affect cost calculation of the the next SCEV's we'll query, 1352 // and next SCEV may errneously get smaller cost. 1353 1354 // Collect all the candidate PHINodes to be rewritten. 
1355 RewritePhiSet.emplace_back(PN, i, ExitValue, Inst, HighCost); 1356 } 1357 } 1358 } 1359 1360 // TODO: evaluate whether it is beneficial to change how we calculate 1361 // high-cost: if we have SCEV 'A' which we know we will expand, should we 1362 // calculate the cost of other SCEV's after expanding SCEV 'A', thus 1363 // potentially giving cost bonus to those other SCEV's? 1364 1365 bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet); 1366 int NumReplaced = 0; 1367 1368 // Transformation. 1369 for (const RewritePhi &Phi : RewritePhiSet) { 1370 PHINode *PN = Phi.PN; 1371 1372 // Only do the rewrite when the ExitValue can be expanded cheaply. 1373 // If LoopCanBeDel is true, rewrite exit value aggressively. 1374 if (ReplaceExitValue == OnlyCheapRepl && !LoopCanBeDel && Phi.HighCost) 1375 continue; 1376 1377 Value *ExitVal = Rewriter.expandCodeFor( 1378 Phi.ExpansionSCEV, Phi.PN->getType(), Phi.ExpansionPoint); 1379 1380 LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: AfterLoopVal = " << *ExitVal 1381 << '\n' 1382 << " LoopVal = " << *(Phi.ExpansionPoint) << "\n"); 1383 1384 #ifndef NDEBUG 1385 // If we reuse an instruction from a loop which is neither L nor one of 1386 // its containing loops, we end up breaking LCSSA form for this loop by 1387 // creating a new use of its instruction. 1388 if (auto *ExitInsn = dyn_cast<Instruction>(ExitVal)) 1389 if (auto *EVL = LI->getLoopFor(ExitInsn->getParent())) 1390 if (EVL != L) 1391 assert(EVL->contains(L) && "LCSSA breach detected!"); 1392 #endif 1393 1394 NumReplaced++; 1395 Instruction *Inst = cast<Instruction>(PN->getIncomingValue(Phi.Ith)); 1396 PN->setIncomingValue(Phi.Ith, ExitVal); 1397 // It's necessary to tell ScalarEvolution about this explicitly so that 1398 // it can walk the def-use list and forget all SCEVs, as it may not be 1399 // watching the PHI itself. Once the new exit value is in place, there 1400 // may not be a def-use connection between the loop and every instruction 1401 // which got a SCEVAddRecExpr for that loop. 1402 SE->forgetValue(PN); 1403 1404 // If this instruction is dead now, delete it. Don't do it now to avoid 1405 // invalidating iterators. 1406 if (isInstructionTriviallyDead(Inst, TLI)) 1407 DeadInsts.push_back(Inst); 1408 1409 // Replace PN with ExitVal if that is legal and does not break LCSSA. 1410 if (PN->getNumIncomingValues() == 1 && 1411 LI->replacementPreservesLCSSAForm(PN, ExitVal)) { 1412 PN->replaceAllUsesWith(ExitVal); 1413 PN->eraseFromParent(); 1414 } 1415 } 1416 1417 // The insertion point instruction may have been deleted; clear it out 1418 // so that the rewriter doesn't trip over it later. 1419 Rewriter.clearInsertPoint(); 1420 return NumReplaced; 1421 } 1422 1423 /// Set weights for \p UnrolledLoop and \p RemainderLoop based on weights for 1424 /// \p OrigLoop. 1425 void llvm::setProfileInfoAfterUnrolling(Loop *OrigLoop, Loop *UnrolledLoop, 1426 Loop *RemainderLoop, uint64_t UF) { 1427 assert(UF > 0 && "Zero unrolled factor is not supported"); 1428 assert(UnrolledLoop != RemainderLoop && 1429 "Unrolled and Remainder loops are expected to distinct"); 1430 1431 // Get number of iterations in the original scalar loop. 1432 unsigned OrigLoopInvocationWeight = 0; 1433 Optional<unsigned> OrigAverageTripCount = 1434 getLoopEstimatedTripCount(OrigLoop, &OrigLoopInvocationWeight); 1435 if (!OrigAverageTripCount) 1436 return; 1437 1438 // Calculate number of iterations in unrolled loop. 
1439 unsigned UnrolledAverageTripCount = *OrigAverageTripCount / UF; 1440 // Calculate number of iterations for remainder loop. 1441 unsigned RemainderAverageTripCount = *OrigAverageTripCount % UF; 1442 1443 setLoopEstimatedTripCount(UnrolledLoop, UnrolledAverageTripCount, 1444 OrigLoopInvocationWeight); 1445 setLoopEstimatedTripCount(RemainderLoop, RemainderAverageTripCount, 1446 OrigLoopInvocationWeight); 1447 } 1448 1449 /// Utility that implements appending of loops onto a worklist. 1450 /// Loops are added in preorder (analogous for reverse postorder for trees), 1451 /// and the worklist is processed LIFO. 1452 template <typename RangeT> 1453 void llvm::appendReversedLoopsToWorklist( 1454 RangeT &&Loops, SmallPriorityWorklist<Loop *, 4> &Worklist) { 1455 // We use an internal worklist to build up the preorder traversal without 1456 // recursion. 1457 SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist; 1458 1459 // We walk the initial sequence of loops in reverse because we generally want 1460 // to visit defs before uses and the worklist is LIFO. 1461 for (Loop *RootL : Loops) { 1462 assert(PreOrderLoops.empty() && "Must start with an empty preorder walk."); 1463 assert(PreOrderWorklist.empty() && 1464 "Must start with an empty preorder walk worklist."); 1465 PreOrderWorklist.push_back(RootL); 1466 do { 1467 Loop *L = PreOrderWorklist.pop_back_val(); 1468 PreOrderWorklist.append(L->begin(), L->end()); 1469 PreOrderLoops.push_back(L); 1470 } while (!PreOrderWorklist.empty()); 1471 1472 Worklist.insert(std::move(PreOrderLoops)); 1473 PreOrderLoops.clear(); 1474 } 1475 } 1476 1477 template <typename RangeT> 1478 void llvm::appendLoopsToWorklist(RangeT &&Loops, 1479 SmallPriorityWorklist<Loop *, 4> &Worklist) { 1480 appendReversedLoopsToWorklist(reverse(Loops), Worklist); 1481 } 1482 1483 template void llvm::appendLoopsToWorklist<ArrayRef<Loop *> &>( 1484 ArrayRef<Loop *> &Loops, SmallPriorityWorklist<Loop *, 4> &Worklist); 1485 1486 template void 1487 llvm::appendLoopsToWorklist<Loop &>(Loop &L, 1488 SmallPriorityWorklist<Loop *, 4> &Worklist); 1489 1490 void llvm::appendLoopsToWorklist(LoopInfo &LI, 1491 SmallPriorityWorklist<Loop *, 4> &Worklist) { 1492 appendReversedLoopsToWorklist(LI, Worklist); 1493 } 1494 1495 Loop *llvm::cloneLoop(Loop *L, Loop *PL, ValueToValueMapTy &VM, 1496 LoopInfo *LI, LPPassManager *LPM) { 1497 Loop &New = *LI->AllocateLoop(); 1498 if (PL) 1499 PL->addChildLoop(&New); 1500 else 1501 LI->addTopLevelLoop(&New); 1502 1503 if (LPM) 1504 LPM->addLoop(New); 1505 1506 // Add all of the blocks in L to the new loop. 1507 for (BasicBlock *BB : L->blocks()) 1508 if (LI->getLoopFor(BB) == L) 1509 New.addBasicBlockToLoop(cast<BasicBlock>(VM[BB]), *LI); 1510 1511 // Add all of the subloops to the new loop. 1512 for (Loop *I : *L) 1513 cloneLoop(I, &New, VM, LI, LPM); 1514 1515 return &New; 1516 } 1517 1518 /// IR Values for the lower and upper bounds of a pointer evolution. We 1519 /// need to use value-handles because SCEV expansion can invalidate previously 1520 /// expanded values. Thus expansion of a pointer can invalidate the bounds for 1521 /// a previous one. 1522 struct PointerBounds { 1523 TrackingVH<Value> Start; 1524 TrackingVH<Value> End; 1525 }; 1526 1527 /// Expand code for the lower and upper bound of the pointer group \p CG 1528 /// in \p TheLoop. \return the values for the bounds. 
static PointerBounds expandBounds(const RuntimeCheckingPtrGroup *CG,
                                  Loop *TheLoop, Instruction *Loc,
                                  SCEVExpander &Exp) {
  LLVMContext &Ctx = Loc->getContext();
  Type *PtrArithTy = Type::getInt8PtrTy(Ctx, CG->AddressSpace);

  Value *Start = nullptr, *End = nullptr;
  LLVM_DEBUG(dbgs() << "LAA: Adding RT check for range:\n");
  Start = Exp.expandCodeFor(CG->Low, PtrArithTy, Loc);
  End = Exp.expandCodeFor(CG->High, PtrArithTy, Loc);
  LLVM_DEBUG(dbgs() << "Start: " << *CG->Low << " End: " << *CG->High << "\n");
  return {Start, End};
}

/// Turns a collection of checks into a collection of expanded upper and
/// lower bounds for both pointers in the check.
static SmallVector<std::pair<PointerBounds, PointerBounds>, 4>
expandBounds(const SmallVectorImpl<RuntimePointerCheck> &PointerChecks, Loop *L,
             Instruction *Loc, SCEVExpander &Exp) {
  SmallVector<std::pair<PointerBounds, PointerBounds>, 4> ChecksWithBounds;

  // Here we're relying on the SCEV Expander's cache to only emit code for the
  // same bounds once.
  transform(PointerChecks, std::back_inserter(ChecksWithBounds),
            [&](const RuntimePointerCheck &Check) {
              PointerBounds First = expandBounds(Check.first, L, Loc, Exp),
                            Second = expandBounds(Check.second, L, Loc, Exp);
              return std::make_pair(First, Second);
            });

  return ChecksWithBounds;
}

Value *llvm::addRuntimeChecks(
    Instruction *Loc, Loop *TheLoop,
    const SmallVectorImpl<RuntimePointerCheck> &PointerChecks,
    SCEVExpander &Exp) {
  // TODO: Move noalias annotation code from LoopVersioning here and share
  // with LV if possible.
  // TODO: Pass RtPtrChecking instead of PointerChecks and SE separately, if
  // possible.
  auto ExpandedChecks = expandBounds(PointerChecks, TheLoop, Loc, Exp);

  LLVMContext &Ctx = Loc->getContext();
  IRBuilder<InstSimplifyFolder> ChkBuilder(Ctx,
                                           Loc->getModule()->getDataLayout());
  ChkBuilder.SetInsertPoint(Loc);
  // Our instructions might fold to a constant.
  Value *MemoryRuntimeCheck = nullptr;

  for (const auto &Check : ExpandedChecks) {
    const PointerBounds &A = Check.first, &B = Check.second;
    // Check if two pointers (A and B) conflict where conflict is computed as:
    // start(A) <= end(B) && start(B) <= end(A)
    unsigned AS0 = A.Start->getType()->getPointerAddressSpace();
    unsigned AS1 = B.Start->getType()->getPointerAddressSpace();

    assert((AS0 == B.End->getType()->getPointerAddressSpace()) &&
           (AS1 == A.End->getType()->getPointerAddressSpace()) &&
           "Trying to bounds check pointers with different address spaces");

    Type *PtrArithTy0 = Type::getInt8PtrTy(Ctx, AS0);
    Type *PtrArithTy1 = Type::getInt8PtrTy(Ctx, AS1);

    Value *Start0 = ChkBuilder.CreateBitCast(A.Start, PtrArithTy0, "bc");
    Value *Start1 = ChkBuilder.CreateBitCast(B.Start, PtrArithTy1, "bc");
    Value *End0 = ChkBuilder.CreateBitCast(A.End, PtrArithTy1, "bc");
    Value *End1 = ChkBuilder.CreateBitCast(B.End, PtrArithTy0, "bc");

    // [A|B].Start points to the first accessed byte under base [A|B].
    // [A|B].End points to the last accessed byte, plus one.
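    // In other words, each range is the half-open interval [Start, End).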
    // There is no conflict when the intervals are disjoint:
    // NoConflict = (B.Start >= A.End) || (A.Start >= B.End)
    //
    // bound0 = (B.Start < A.End)
    // bound1 = (A.Start < B.End)
    // IsConflict = bound0 & bound1
    Value *Cmp0 = ChkBuilder.CreateICmpULT(Start0, End1, "bound0");
    Value *Cmp1 = ChkBuilder.CreateICmpULT(Start1, End0, "bound1");
    Value *IsConflict = ChkBuilder.CreateAnd(Cmp0, Cmp1, "found.conflict");
    if (MemoryRuntimeCheck) {
      IsConflict =
          ChkBuilder.CreateOr(MemoryRuntimeCheck, IsConflict, "conflict.rdx");
    }
    MemoryRuntimeCheck = IsConflict;
  }

  return MemoryRuntimeCheck;
}

Optional<IVConditionInfo> llvm::hasPartialIVCondition(Loop &L,
                                                      unsigned MSSAThreshold,
                                                      MemorySSA &MSSA,
                                                      AAResults &AA) {
  auto *TI = dyn_cast<BranchInst>(L.getHeader()->getTerminator());
  if (!TI || !TI->isConditional())
    return {};

  auto *CondI = dyn_cast<CmpInst>(TI->getCondition());
  // The case with the condition outside the loop should already be handled
  // earlier.
  if (!CondI || !L.contains(CondI))
    return {};

  SmallVector<Instruction *> InstToDuplicate;
  InstToDuplicate.push_back(CondI);

  SmallVector<Value *, 4> WorkList;
  WorkList.append(CondI->op_begin(), CondI->op_end());

  SmallVector<MemoryAccess *, 4> AccessesToCheck;
  SmallVector<MemoryLocation, 4> AccessedLocs;
  while (!WorkList.empty()) {
    Instruction *I = dyn_cast<Instruction>(WorkList.pop_back_val());
    if (!I || !L.contains(I))
      continue;

    // TODO: support additional instructions.
    if (!isa<LoadInst>(I) && !isa<GetElementPtrInst>(I))
      return {};

    // Do not duplicate volatile and atomic loads.
    if (auto *LI = dyn_cast<LoadInst>(I))
      if (LI->isVolatile() || LI->isAtomic())
        return {};

    InstToDuplicate.push_back(I);
    if (MemoryAccess *MA = MSSA.getMemoryAccess(I)) {
      if (auto *MemUse = dyn_cast_or_null<MemoryUse>(MA)) {
        // Queue the defining access for the alias checks below.
        AccessesToCheck.push_back(MemUse->getDefiningAccess());
        AccessedLocs.push_back(MemoryLocation::get(I));
      } else {
        // MemoryDefs may clobber the location or may be atomic memory
        // operations. Bail out.
        return {};
      }
    }
    WorkList.append(I->op_begin(), I->op_end());
  }

  if (InstToDuplicate.empty())
    return {};

  SmallVector<BasicBlock *, 4> ExitingBlocks;
  L.getExitingBlocks(ExitingBlocks);
  auto HasNoClobbersOnPath =
      [&L, &AA, &AccessedLocs, &ExitingBlocks, &InstToDuplicate,
       MSSAThreshold](BasicBlock *Succ, BasicBlock *Header,
                      SmallVector<MemoryAccess *, 4> AccessesToCheck)
      -> Optional<IVConditionInfo> {
    IVConditionInfo Info;
    // First, collect all blocks in the loop that are on a path from Succ
    // to the header.
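    // This is a plain worklist walk over successors: the header is pre-seeded
    // into Seen so the traversal stops there, and blocks outside the loop are
    // skipped.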
    SmallVector<BasicBlock *, 4> WorkList;
    WorkList.push_back(Succ);
    WorkList.push_back(Header);
    SmallPtrSet<BasicBlock *, 4> Seen;
    Seen.insert(Header);
    Info.PathIsNoop &=
        all_of(*Header, [](Instruction &I) { return !I.mayHaveSideEffects(); });

    while (!WorkList.empty()) {
      BasicBlock *Current = WorkList.pop_back_val();
      if (!L.contains(Current))
        continue;
      const auto &SeenIns = Seen.insert(Current);
      if (!SeenIns.second)
        continue;

      Info.PathIsNoop &= all_of(
          *Current, [](Instruction &I) { return !I.mayHaveSideEffects(); });
      WorkList.append(succ_begin(Current), succ_end(Current));
    }

    // Require at least 2 blocks on a path through the loop. This skips
    // paths that directly exit the loop.
    if (Seen.size() < 2)
      return {};

    // Next, check whether any MemoryDefs that are on the path through the
    // loop (i.e. in the Seen set) may alias any of the locations in
    // AccessedLocs. If that is the case, they may modify the condition and
    // partial unswitching is not possible.
    SmallPtrSet<MemoryAccess *, 4> SeenAccesses;
    while (!AccessesToCheck.empty()) {
      MemoryAccess *Current = AccessesToCheck.pop_back_val();
      auto SeenI = SeenAccesses.insert(Current);
      if (!SeenI.second || !Seen.contains(Current->getBlock()))
        continue;

      // Bail out if the threshold is exceeded.
      if (SeenAccesses.size() >= MSSAThreshold)
        return {};

      // MemoryUses are read-only accesses.
      if (isa<MemoryUse>(Current))
        continue;

      // For a MemoryDef, check if it aliases any of the locations feeding
      // the original condition.
      if (auto *CurrentDef = dyn_cast<MemoryDef>(Current)) {
        if (any_of(AccessedLocs, [&AA, CurrentDef](MemoryLocation &Loc) {
              return isModSet(
                  AA.getModRefInfo(CurrentDef->getMemoryInst(), Loc));
            }))
          return {};
      }

      for (Use &U : Current->uses())
        AccessesToCheck.push_back(cast<MemoryAccess>(U.getUser()));
    }

    // We could also allow loops with known trip counts without mustprogress,
    // but ScalarEvolution may not be available.
    Info.PathIsNoop &= isMustProgress(&L);

    // If the path is considered a no-op so far, check if it reaches a
    // single exit block without any phis. This ensures no values from the
    // loop are used outside of the loop.
    if (Info.PathIsNoop) {
      for (auto *Exiting : ExitingBlocks) {
        if (!Seen.contains(Exiting))
          continue;
        for (auto *Succ : successors(Exiting)) {
          if (L.contains(Succ))
            continue;

          Info.PathIsNoop &= llvm::empty(Succ->phis()) &&
                             (!Info.ExitForPath || Info.ExitForPath == Succ);
          if (!Info.PathIsNoop)
            break;
          assert((!Info.ExitForPath || Info.ExitForPath == Succ) &&
                 "cannot have multiple exit blocks");
          Info.ExitForPath = Succ;
        }
      }
    }
    if (!Info.ExitForPath)
      Info.PathIsNoop = false;

    Info.InstToDuplicate = InstToDuplicate;
    return Info;
  };

  // If we branch to the same successor, partial unswitching will not be
  // beneficial.
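  // Otherwise try each successor in turn: if the path through successor 0 is
  // free of clobbering writes, the condition can be treated as known-true on
  // that path, and analogously known-false for successor 1.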
  if (TI->getSuccessor(0) == TI->getSuccessor(1))
    return {};

  if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(0), L.getHeader(),
                                      AccessesToCheck)) {
    Info->KnownValue = ConstantInt::getTrue(TI->getContext());
    return Info;
  }
  if (auto Info = HasNoClobbersOnPath(TI->getSuccessor(1), L.getHeader(),
                                      AccessesToCheck)) {
    Info->KnownValue = ConstantInt::getFalse(TI->getContext());
    return Info;
  }

  return {};
}