//===- MustExecute.cpp - Printer for isGuaranteedToExecute ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MustExecute.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "must-execute"

const DenseMap<BasicBlock *, ColorVector> &
LoopSafetyInfo::getBlockColors() const {
  return BlockColors;
}

void LoopSafetyInfo::copyColors(BasicBlock *New, BasicBlock *Old) {
  ColorVector &ColorsForNewBlock = BlockColors[New];
  ColorVector &ColorsForOldBlock = BlockColors[Old];
  ColorsForNewBlock = ColorsForOldBlock;
}

bool SimpleLoopSafetyInfo::blockMayThrow(const BasicBlock *BB) const {
  (void)BB;
  return anyBlockMayThrow();
}

bool SimpleLoopSafetyInfo::anyBlockMayThrow() const {
  return MayThrow;
}

void SimpleLoopSafetyInfo::computeLoopSafetyInfo(const Loop *CurLoop) {
  assert(CurLoop != nullptr && "CurLoop can't be null");
  BasicBlock *Header = CurLoop->getHeader();
  // Iterate over header and compute safety info.
  HeaderMayThrow = !isGuaranteedToTransferExecutionToSuccessor(Header);
  MayThrow = HeaderMayThrow;
  // Iterate over loop instructions and compute safety info.
  // Skip header as it has been computed and stored in HeaderMayThrow.
  // The first block in loopinfo.Blocks is guaranteed to be the header.
  assert(Header == *CurLoop->getBlocks().begin() &&
         "First block must be header");
  for (const BasicBlock *BB : llvm::drop_begin(CurLoop->blocks())) {
    MayThrow |= !isGuaranteedToTransferExecutionToSuccessor(BB);
    if (MayThrow)
      break;
  }

  computeBlockColors(CurLoop);
}

bool ICFLoopSafetyInfo::blockMayThrow(const BasicBlock *BB) const {
  return ICF.hasICF(BB);
}

bool ICFLoopSafetyInfo::anyBlockMayThrow() const {
  return MayThrow;
}

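// Reset the cached implicit control flow (ICF) and memory write (MW)
// information for CurLoop and record whether any of its blocks contains
// implicit control flow, in which case the loop is treated as possibly
// throwing.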
void ICFLoopSafetyInfo::computeLoopSafetyInfo(const Loop *CurLoop) {
  assert(CurLoop != nullptr && "CurLoop can't be null");
  ICF.clear();
  MW.clear();
  MayThrow = false;
  // Figure out whether at least one block may throw.
  for (const auto &BB : CurLoop->blocks())
    if (ICF.hasICF(&*BB)) {
      MayThrow = true;
      break;
    }

  computeBlockColors(CurLoop);
}

void ICFLoopSafetyInfo::insertInstructionTo(const Instruction *Inst,
                                            const BasicBlock *BB) {
  ICF.insertInstructionTo(Inst, BB);
  MW.insertInstructionTo(Inst, BB);
}

void ICFLoopSafetyInfo::removeInstruction(const Instruction *Inst) {
  ICF.removeInstruction(Inst);
  MW.removeInstruction(Inst);
}

void LoopSafetyInfo::computeBlockColors(const Loop *CurLoop) {
  // Compute funclet colors if we might sink/hoist in a function with a funclet
  // personality routine.
  Function *Fn = CurLoop->getHeader()->getParent();
  if (Fn->hasPersonalityFn())
    if (Constant *PersonalityFn = Fn->getPersonalityFn())
      if (isScopedEHPersonality(classifyEHPersonality(PersonalityFn)))
        BlockColors = colorEHFunclets(*Fn);
}

/// Return true if we can prove that the given ExitBlock is not reached on the
/// first iteration of the given loop. That is, the backedge of the loop must
/// be executed before the ExitBlock is executed in any dynamic execution
/// trace.
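///
/// Illustrative example (names and constants are invented): if the exit is
/// guarded by "icmp eq i32 %iv, 100" where %iv is a header phi whose incoming
/// value from the preheader is 0, the compare simplifies to false on loop
/// entry, so an exit taken on the true edge cannot be reached on the first
/// iteration.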
static bool CanProveNotTakenFirstIteration(const BasicBlock *ExitBlock,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop) {
  auto *CondExitBlock = ExitBlock->getSinglePredecessor();
  if (!CondExitBlock)
    // Expect unique exits.
    return false;
  assert(CurLoop->contains(CondExitBlock) && "meaning of exit block");
  auto *BI = dyn_cast<BranchInst>(CondExitBlock->getTerminator());
  if (!BI || !BI->isConditional())
    return false;
  // If the condition is a constant, the branch always takes the same
  // successor; ExitBlock is provably not reached if it is the other one.
  if (auto *Cond = dyn_cast<ConstantInt>(BI->getCondition()))
    return BI->getSuccessor(Cond->getZExtValue() ? 1 : 0) == ExitBlock;
  auto *Cond = dyn_cast<CmpInst>(BI->getCondition());
  if (!Cond)
    return false;
  // TODO: This would be a lot more powerful if we used SCEV, but all the
  // plumbing is currently missing to pass a pointer in from the pass.
  // Check for cmp (phi [x, preheader] ...), y, where (pred x, y) is known.
  auto *LHS = dyn_cast<PHINode>(Cond->getOperand(0));
  auto *RHS = Cond->getOperand(1);
  if (!LHS || LHS->getParent() != CurLoop->getHeader())
    return false;
  auto DL = ExitBlock->getDataLayout();
  auto *IVStart = LHS->getIncomingValueForBlock(CurLoop->getLoopPreheader());
  auto *SimpleValOrNull = simplifyCmpInst(Cond->getPredicate(),
                                          IVStart, RHS,
                                          {DL, /*TLI*/ nullptr,
                                           DT, /*AC*/ nullptr, BI});
  auto *SimpleCst = dyn_cast_or_null<Constant>(SimpleValOrNull);
  if (!SimpleCst)
    return false;
  if (ExitBlock == BI->getSuccessor(0))
    return SimpleCst->isZeroValue();
  assert(ExitBlock == BI->getSuccessor(1) && "implied by above");
  return SimpleCst->isAllOnesValue();
}

/// Collect all blocks from \p CurLoop which lie on all possible paths from
/// the header of \p CurLoop (inclusive) to BB (exclusive) into the set
/// \p Predecessors. If \p BB is the header, \p Predecessors will be empty.
static void collectTransitivePredecessors(
    const Loop *CurLoop, const BasicBlock *BB,
    SmallPtrSetImpl<const BasicBlock *> &Predecessors) {
  assert(Predecessors.empty() && "Garbage in predecessors set?");
  assert(CurLoop->contains(BB) && "Should only be called for loop blocks!");
  if (BB == CurLoop->getHeader())
    return;
  SmallVector<const BasicBlock *, 4> WorkList;
  for (const auto *Pred : predecessors(BB)) {
    Predecessors.insert(Pred);
    WorkList.push_back(Pred);
  }
  while (!WorkList.empty()) {
    auto *Pred = WorkList.pop_back_val();
    assert(CurLoop->contains(Pred) && "Should only reach loop blocks!");
    // We are not interested in backedges and we don't want to leave the loop.
    if (Pred == CurLoop->getHeader())
      continue;
    // TODO: If BB lies in an inner loop of CurLoop, this will traverse over all
    // blocks of this inner loop, even those that are always executed AFTER BB.
    // It may make our analysis more conservative than it could be, see tests
    // @nested and @nested_no_throw in test/Analysis/MustExecute/loop-header.ll.
    // We can ignore the backedges of all loops containing BB to get a slightly
    // more optimistic result.
    for (const auto *PredPred : predecessors(Pred))
      if (Predecessors.insert(PredPred).second)
        WorkList.push_back(PredPred);
  }
}

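// Return true if, on the first iteration of CurLoop, every path that starts at
// the header and may actually execute must reach BB: collect the in-loop
// transitive predecessors of BB, bail out if any of them is a latch or may
// throw, and require every other successor edge leaving that set to target BB,
// another block of the set, or an exit block that is provably not taken on the
// first iteration.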
bool LoopSafetyInfo::allLoopPathsLeadToBlock(const Loop *CurLoop,
                                             const BasicBlock *BB,
                                             const DominatorTree *DT) const {
  assert(CurLoop->contains(BB) && "Should only be called for loop blocks!");

  // Fast path: the header is always reached once the loop is entered.
  if (BB == CurLoop->getHeader())
    return true;

  // Collect all transitive predecessors of BB in the same loop. This set will
  // be a subset of the blocks within the loop.
  SmallPtrSet<const BasicBlock *, 4> Predecessors;
  collectTransitivePredecessors(CurLoop, BB, Predecessors);

  // Bail out if a latch block is part of the predecessor set. In this case
  // we may take the backedge to the header and not execute other latch
  // successors.
  for (const BasicBlock *Pred : predecessors(CurLoop->getHeader()))
    // Predecessors only contains loop blocks, so we don't have to worry about
    // preheader predecessors here.
    if (Predecessors.contains(Pred))
      return false;

  // Make sure that all successors of all predecessors of BB which are not
  // dominated by BB are either:
  //   1) BB itself,
  //   2) also predecessors of BB, or
  //   3) exit blocks which are not taken on the 1st iteration.
  // Memoize blocks we've already checked.
  SmallPtrSet<const BasicBlock *, 4> CheckedSuccessors;
  for (const auto *Pred : Predecessors) {
    // The predecessor block may throw, so it has a side exit.
    if (blockMayThrow(Pred))
      return false;

    // BB dominates Pred, so if Pred runs, BB must run.
    // This is true when Pred is a loop latch.
    if (DT->dominates(BB, Pred))
      continue;

    for (const auto *Succ : successors(Pred))
      if (CheckedSuccessors.insert(Succ).second &&
          Succ != BB && !Predecessors.count(Succ))
        // By discharging conditions that are not executed on the 1st iteration,
        // we guarantee that *at least* on the first iteration all paths from
        // the header that *may* execute will lead us to the block of interest.
        // So if we had virtually peeled one iteration away, in this peeled
        // iteration the set of predecessors would contain only paths from the
        // header to BB without any exiting edges that may execute.
        //
        // TODO: We only do it for exiting edges currently. We could use the
        // same function to skip some of the edges within the loop if we know
        // that they will not be taken on the 1st iteration.
        //
        // TODO: If we somehow know the number of iterations in the loop, the
        // same check may be done for any arbitrary N-th iteration as long as N
        // is not greater than the minimum number of iterations in this loop.
        if (CurLoop->contains(Succ) ||
            !CanProveNotTakenFirstIteration(Succ, DT, CurLoop))
          return false;
  }

  // All predecessors can only lead us to BB.
  return true;
}

/// Returns true if the instruction in a loop is guaranteed to execute at least
/// once.
bool SimpleLoopSafetyInfo::isGuaranteedToExecute(const Instruction &Inst,
                                                 const DominatorTree *DT,
                                                 const Loop *CurLoop) const {
  // If the instruction is in the header block for the loop (which is very
  // common), it is always guaranteed to dominate the exit blocks. Since this
  // is a common case, and can save some work, check it now.
  if (Inst.getParent() == CurLoop->getHeader())
    // If there's a throw in the header block, we can't guarantee we'll reach
    // Inst unless we can prove that Inst comes before the potential implicit
    // exit. At the moment, we use a (cheap) hack for the common case where
    // the instruction of interest is the first one in the block.
    return !HeaderMayThrow ||
           Inst.getParent()->getFirstNonPHIOrDbg() == &Inst;

  // If there is a path from the header to an exit or latch that doesn't lead
  // to our instruction's block, return false.
  return allLoopPathsLeadToBlock(CurLoop, Inst.getParent(), DT);
}

bool ICFLoopSafetyInfo::isGuaranteedToExecute(const Instruction &Inst,
                                              const DominatorTree *DT,
                                              const Loop *CurLoop) const {
  return !ICF.isDominatedByICFIFromSameBlock(&Inst) &&
         allLoopPathsLeadToBlock(CurLoop, Inst.getParent(), DT);
}

bool ICFLoopSafetyInfo::doesNotWriteMemoryBefore(const BasicBlock *BB,
                                                 const Loop *CurLoop) const {
  assert(CurLoop->contains(BB) && "Should only be called for loop blocks!");

  // Fast path: there are no instructions before the header.
  if (BB == CurLoop->getHeader())
    return true;

  // Collect all transitive predecessors of BB in the same loop. This set will
  // be a subset of the blocks within the loop.
  SmallPtrSet<const BasicBlock *, 4> Predecessors;
  collectTransitivePredecessors(CurLoop, BB, Predecessors);
  // Find out if any instruction in a predecessor could write to memory.
  for (const auto *Pred : Predecessors)
    if (MW.mayWriteToMemory(Pred))
      return false;
  return true;
}

bool ICFLoopSafetyInfo::doesNotWriteMemoryBefore(const Instruction &I,
                                                 const Loop *CurLoop) const {
  auto *BB = I.getParent();
  assert(CurLoop->contains(BB) && "Should only be called for loop blocks!");
  return !MW.isDominatedByMemoryWriteFromSameBlock(&I) &&
         doesNotWriteMemoryBefore(BB, CurLoop);
}

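// Helper for the must-execute annotator below: I is treated as "must execute"
// in L if either the SimpleLoopSafetyInfo-based check or
// isGuaranteedToExecuteForEveryIteration can prove it.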
static bool isMustExecuteIn(const Instruction &I, Loop *L, DominatorTree *DT) {
  // TODO: merge these two routines. For the moment, we display the best
  // result obtained by *either* implementation. This is a bit unfair since no
  // caller actually gets the full power at the moment.
  SimpleLoopSafetyInfo LSI;
  LSI.computeLoopSafetyInfo(L);
  return LSI.isGuaranteedToExecute(I, DT, L) ||
         isGuaranteedToExecuteForEveryIteration(&I, L);
}

namespace {
/// An assembly annotator class to print must execute information in
/// comments.
class MustExecuteAnnotatedWriter : public AssemblyAnnotationWriter {
  DenseMap<const Value *, SmallVector<Loop *, 4>> MustExec;

public:
  MustExecuteAnnotatedWriter(const Function &F,
                             DominatorTree &DT, LoopInfo &LI) {
    for (const auto &I : instructions(F)) {
      Loop *L = LI.getLoopFor(I.getParent());
      while (L) {
        if (isMustExecuteIn(I, L, &DT)) {
          MustExec[&I].push_back(L);
        }
        L = L->getParentLoop();
      }
    }
  }

  MustExecuteAnnotatedWriter(const Module &M,
                             DominatorTree &DT, LoopInfo &LI) {
    for (const auto &F : M)
      for (const auto &I : instructions(F)) {
        Loop *L = LI.getLoopFor(I.getParent());
        while (L) {
          if (isMustExecuteIn(I, L, &DT)) {
            MustExec[&I].push_back(L);
          }
          L = L->getParentLoop();
        }
      }
  }

  void printInfoComment(const Value &V, formatted_raw_ostream &OS) override {
    if (!MustExec.count(&V))
      return;

    const auto &Loops = MustExec.lookup(&V);
    const auto NumLoops = Loops.size();
    if (NumLoops > 1)
      OS << " ; (mustexec in " << NumLoops << " loops: ";
    else
      OS << " ; (mustexec in: ";

    ListSeparator LS;
    for (const Loop *L : Loops)
      OS << LS << L->getHeader()->getName();
    OS << ")";
  }
};
} // namespace

/// Return true if \p L might be an endless loop.
static bool maybeEndlessLoop(const Loop &L) {
  if (L.getHeader()->getParent()->hasFnAttribute(Attribute::WillReturn))
    return false;
  // TODO: Actually try to prove it is not.
  // TODO: If maybeEndlessLoop is going to be expensive, cache it.
  return true;
}

bool llvm::mayContainIrreducibleControl(const Function &F, const LoopInfo *LI) {
  if (!LI)
    return false;
  using RPOTraversal = ReversePostOrderTraversal<const Function *>;
  RPOTraversal FuncRPOT(&F);
  return containsIrreducibleCFG<const BasicBlock *, const RPOTraversal,
                                const LoopInfo>(FuncRPOT, *LI);
}

/// Lookup \p Key in \p Map and return the result, potentially after
/// initializing the optional through \p Fn(\p args).
template <typename K, typename V, typename FnTy, typename... ArgsTy>
static V getOrCreateCachedOptional(K Key, DenseMap<K, std::optional<V>> &Map,
                                   FnTy &&Fn, ArgsTy &&...args) {
  std::optional<V> &OptVal = Map[Key];
  if (!OptVal)
    OptVal = Fn(std::forward<ArgsTy>(args)...);
  return *OptVal;
}

const BasicBlock *
MustBeExecutedContextExplorer::findForwardJoinPoint(const BasicBlock *InitBB) {
  const LoopInfo *LI = LIGetter(*InitBB->getParent());
  const PostDominatorTree *PDT = PDTGetter(*InitBB->getParent());

  LLVM_DEBUG(dbgs() << "\tFind forward join point for " << InitBB->getName()
                    << (LI ? " [LI]" : "") << (PDT ? " [PDT]" : ""));

  const Function &F = *InitBB->getParent();
  const Loop *L = LI ? LI->getLoopFor(InitBB) : nullptr;
  const BasicBlock *HeaderBB = L ? L->getHeader() : InitBB;
  bool WillReturnAndNoThrow = (F.hasFnAttribute(Attribute::WillReturn) ||
                               (L && !maybeEndlessLoop(*L))) &&
                              F.doesNotThrow();
  LLVM_DEBUG(dbgs() << (L ? " [in loop]" : "")
                    << (WillReturnAndNoThrow ? " [WillReturn] [NoUnwind]" : "")
                    << "\n");

  // Determine the adjacent blocks in the given direction but exclude (self)
  // loops under certain circumstances.
  SmallVector<const BasicBlock *, 8> Worklist;
  for (const BasicBlock *SuccBB : successors(InitBB)) {
    bool IsLatch = SuccBB == HeaderBB;
    // Loop latches are ignored in forward propagation if the loop cannot be
    // endless and may not throw: control has to go somewhere.
    if (!WillReturnAndNoThrow || !IsLatch)
      Worklist.push_back(SuccBB);
  }
  LLVM_DEBUG(dbgs() << "\t\t#Worklist: " << Worklist.size() << "\n");

  // If there are no other adjacent blocks, there is no join point.
  if (Worklist.empty())
    return nullptr;

  // If there is one adjacent block, it is the join point.
  if (Worklist.size() == 1)
    return Worklist[0];

  // Try to determine a join block through the help of the post-dominance
  // tree. If no tree was provided, we perform simple pattern matching for one
  // block conditionals and one block loops only.
  const BasicBlock *JoinBB = nullptr;
  if (PDT)
    if (const auto *InitNode = PDT->getNode(InitBB))
      if (const auto *IDomNode = InitNode->getIDom())
        JoinBB = IDomNode->getBlock();

  if (!JoinBB && Worklist.size() == 2) {
    const BasicBlock *Succ0 = Worklist[0];
    const BasicBlock *Succ1 = Worklist[1];
    const BasicBlock *Succ0UniqueSucc = Succ0->getUniqueSuccessor();
    const BasicBlock *Succ1UniqueSucc = Succ1->getUniqueSuccessor();
    if (Succ0UniqueSucc == InitBB) {
      // InitBB -> Succ0 -> InitBB
      // InitBB -> Succ1 = JoinBB
      JoinBB = Succ1;
    } else if (Succ1UniqueSucc == InitBB) {
      // InitBB -> Succ1 -> InitBB
      // InitBB -> Succ0 = JoinBB
      JoinBB = Succ0;
    } else if (Succ0 == Succ1UniqueSucc) {
      // InitBB -> Succ0 = JoinBB
      // InitBB -> Succ1 -> Succ0 = JoinBB
      JoinBB = Succ0;
    } else if (Succ1 == Succ0UniqueSucc) {
      // InitBB -> Succ0 -> Succ1 = JoinBB
      // InitBB -> Succ1 = JoinBB
      JoinBB = Succ1;
    } else if (Succ0UniqueSucc == Succ1UniqueSucc) {
      // InitBB -> Succ0 -> JoinBB
      // InitBB -> Succ1 -> JoinBB
      JoinBB = Succ0UniqueSucc;
    }
  }

  if (!JoinBB && L)
    JoinBB = L->getUniqueExitBlock();

  if (!JoinBB)
    return nullptr;

  LLVM_DEBUG(dbgs() << "\t\tJoin block candidate: " << JoinBB->getName()
                    << "\n");

  // In the forward direction we check if control will for sure reach JoinBB
  // from InitBB, i.e., that it cannot be "stopped" along the way. Ways to
  // "stop" control are: infinite loops and instructions that do not
  // necessarily transfer execution to their successor. To check for them we
  // traverse the CFG from the adjacent blocks to JoinBB, looking at all
  // intermediate blocks.

  // If we know the function is "will-return" and "no-throw" there is no need
  // for further checks.
  if (!F.hasFnAttribute(Attribute::WillReturn) || !F.doesNotThrow()) {

    auto BlockTransfersExecutionToSuccessor = [](const BasicBlock *BB) {
      return isGuaranteedToTransferExecutionToSuccessor(BB);
    };

    SmallPtrSet<const BasicBlock *, 16> Visited;
    while (!Worklist.empty()) {
      const BasicBlock *ToBB = Worklist.pop_back_val();
      if (ToBB == JoinBB)
        continue;

      // Make sure all loops in-between are finite.
      if (!Visited.insert(ToBB).second) {
        if (!F.hasFnAttribute(Attribute::WillReturn)) {
          if (!LI)
            return nullptr;

          bool MayContainIrreducibleControl = getOrCreateCachedOptional(
              &F, IrreducibleControlMap, mayContainIrreducibleControl, F, LI);
          if (MayContainIrreducibleControl)
            return nullptr;

          const Loop *L = LI->getLoopFor(ToBB);
          if (L && maybeEndlessLoop(*L))
            return nullptr;
        }

        continue;
      }

      // Make sure the block has no instructions that could stop control
      // transfer.
      bool TransfersExecution = getOrCreateCachedOptional(
          ToBB, BlockTransferMap, BlockTransfersExecutionToSuccessor, ToBB);
      if (!TransfersExecution)
        return nullptr;

      append_range(Worklist, successors(ToBB));
    }
  }

  LLVM_DEBUG(dbgs() << "\tJoin block: " << JoinBB->getName() << "\n");
  return JoinBB;
}

const BasicBlock *
MustBeExecutedContextExplorer::findBackwardJoinPoint(const BasicBlock *InitBB) {
  const LoopInfo *LI = LIGetter(*InitBB->getParent());
  const DominatorTree *DT = DTGetter(*InitBB->getParent());
  LLVM_DEBUG(dbgs() << "\tFind backward join point for " << InitBB->getName()
                    << (LI ? " [LI]" : "") << (DT ? " [DT]" : ""));

  // Try to determine a join block through the help of the dominance tree. If
  // no tree was provided, we perform simple pattern matching for one block
  // conditionals only.
  if (DT)
    if (const auto *InitNode = DT->getNode(InitBB))
      if (const auto *IDomNode = InitNode->getIDom())
        return IDomNode->getBlock();

  const Loop *L = LI ? LI->getLoopFor(InitBB) : nullptr;
  const BasicBlock *HeaderBB = L ? L->getHeader() : nullptr;

  // Determine the predecessor blocks but ignore backedges.
  SmallVector<const BasicBlock *, 8> Worklist;
  for (const BasicBlock *PredBB : predecessors(InitBB)) {
    bool IsBackedge =
        (PredBB == InitBB) || (HeaderBB == InitBB && L->contains(PredBB));
    // Loop backedges are ignored in backwards propagation: control has to come
    // from somewhere.
    if (!IsBackedge)
      Worklist.push_back(PredBB);
  }

  // If there are no other predecessor blocks, there is no join point.
  if (Worklist.empty())
    return nullptr;

  // If there is one predecessor block, it is the join point.
  if (Worklist.size() == 1)
    return Worklist[0];

  const BasicBlock *JoinBB = nullptr;
  if (Worklist.size() == 2) {
    const BasicBlock *Pred0 = Worklist[0];
    const BasicBlock *Pred1 = Worklist[1];
    const BasicBlock *Pred0UniquePred = Pred0->getUniquePredecessor();
    const BasicBlock *Pred1UniquePred = Pred1->getUniquePredecessor();
    if (Pred0 == Pred1UniquePred) {
      // InitBB <- Pred0 = JoinBB
      // InitBB <- Pred1 <- Pred0 = JoinBB
      JoinBB = Pred0;
    } else if (Pred1 == Pred0UniquePred) {
      // InitBB <- Pred0 <- Pred1 = JoinBB
      // InitBB <- Pred1 = JoinBB
      JoinBB = Pred1;
    } else if (Pred0UniquePred == Pred1UniquePred) {
      // InitBB <- Pred0 <- JoinBB
      // InitBB <- Pred1 <- JoinBB
      JoinBB = Pred0UniquePred;
    }
  }

  if (!JoinBB && L)
    JoinBB = L->getHeader();

  // In the backward direction there is no need to show termination of previous
  // instructions. If they do not terminate, the code afterward is dead, making
  // any information/transformation correct anyway.
  return JoinBB;
}

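// Return the instruction that must be executed next after PP, or null if none
// is known: the following instruction within the block, the front of the sole
// successor, or the front of the forward join point for terminators with
// multiple successors.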
const Instruction *
MustBeExecutedContextExplorer::getMustBeExecutedNextInstruction(
    MustBeExecutedIterator &It, const Instruction *PP) {
  if (!PP)
    return PP;
  LLVM_DEBUG(dbgs() << "Find next instruction for " << *PP << "\n");

  // If we explore only inside a given basic block we stop at terminators.
  if (!ExploreInterBlock && PP->isTerminator()) {
    LLVM_DEBUG(dbgs() << "\tReached terminator in intra-block mode, done\n");
    return nullptr;
  }

  // If we do not traverse the call graph we check if we can make progress in
  // the current function. First, check if the instruction is guaranteed to
  // transfer execution to the successor.
  bool TransfersExecution = isGuaranteedToTransferExecutionToSuccessor(PP);
  if (!TransfersExecution)
    return nullptr;

  // If this is not a terminator we know that there is a single instruction
  // after this one that is executed next if control is transferred. If not,
  // we can try to go back to a call site we entered earlier. If none exists,
  // we do not know any instruction that has to be executed next.
  if (!PP->isTerminator()) {
    const Instruction *NextPP = PP->getNextNode();
    LLVM_DEBUG(dbgs() << "\tIntermediate instruction does transfer control\n");
    return NextPP;
  }

  // Finally, we have to handle terminators, trivial ones first.
  assert(PP->isTerminator() && "Expected a terminator!");

  // A terminator without a successor is not handled yet.
  if (PP->getNumSuccessors() == 0) {
    LLVM_DEBUG(dbgs() << "\tUnhandled terminator\n");
    return nullptr;
  }

  // A terminator with a single successor, we will continue at the beginning of
  // that one.
  if (PP->getNumSuccessors() == 1) {
    LLVM_DEBUG(
        dbgs() << "\tUnconditional terminator, continue with successor\n");
    return &PP->getSuccessor(0)->front();
  }

  // Multiple successors mean we need to find the join point where control flow
  // converges again. We use the findForwardJoinPoint helper function with
  // information about the function and helper analyses, if available.
  if (const BasicBlock *JoinBB = findForwardJoinPoint(PP->getParent()))
    return &JoinBB->front();

  LLVM_DEBUG(dbgs() << "\tNo join point found\n");
  return nullptr;
}

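// Counterpart of getMustBeExecutedNextInstruction: return the instruction
// known to be executed before PP, or null if none is known. Inside a block
// this is simply the previous instruction; at a block front we fall back to
// the backward join point.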
const Instruction *
MustBeExecutedContextExplorer::getMustBeExecutedPrevInstruction(
    MustBeExecutedIterator &It, const Instruction *PP) {
  if (!PP)
    return PP;

  bool IsFirst = !(PP->getPrevNode());
  LLVM_DEBUG(dbgs() << "Find previous instruction for " << *PP
                    << (IsFirst ? " [IsFirst]" : "") << "\n");

  // If we explore only inside a given basic block we stop at the first
  // instruction.
  if (!ExploreInterBlock && IsFirst) {
    LLVM_DEBUG(dbgs() << "\tReached block front in intra-block mode, done\n");
    return nullptr;
  }

  // The block that contains the current position.
  const BasicBlock *PPBlock = PP->getParent();

  // If we are inside a block we know what instruction was executed before, the
  // previous one.
  if (!IsFirst) {
    const Instruction *PrevPP = PP->getPrevNode();
    LLVM_DEBUG(
        dbgs() << "\tIntermediate instruction, continue with previous\n");
    // We did not enter a callee so we simply return the previous instruction.
    return PrevPP;
  }

  // Finally, we have to handle the case where the program point is the first
  // in a block but not in the function. We use the findBackwardJoinPoint
  // helper function with information about the function and helper analyses,
  // if available.
  if (const BasicBlock *JoinBB = findBackwardJoinPoint(PPBlock))
    return &JoinBB->back();

  LLVM_DEBUG(dbgs() << "\tNo join point found\n");
  return nullptr;
}

MustBeExecutedIterator::MustBeExecutedIterator(
    MustBeExecutedContextExplorer &Explorer, const Instruction *I)
    : Explorer(Explorer), CurInst(I) {
  reset(I);
}

void MustBeExecutedIterator::reset(const Instruction *I) {
  Visited.clear();
  resetInstruction(I);
}

void MustBeExecutedIterator::resetInstruction(const Instruction *I) {
  CurInst = I;
  Head = Tail = nullptr;
  Visited.insert({I, ExplorationDirection::FORWARD});
  Visited.insert({I, ExplorationDirection::BACKWARD});
  if (Explorer.ExploreCFGForward)
    Head = I;
  if (Explorer.ExploreCFGBackward)
    Tail = I;
}

const Instruction *MustBeExecutedIterator::advance() {
  assert(CurInst && "Cannot advance an end iterator!");
  Head = Explorer.getMustBeExecutedNextInstruction(*this, Head);
  if (Head && Visited.insert({Head, ExplorationDirection::FORWARD}).second)
    return Head;
  Head = nullptr;

  Tail = Explorer.getMustBeExecutedPrevInstruction(*this, Tail);
  if (Tail && Visited.insert({Tail, ExplorationDirection::BACKWARD}).second)
    return Tail;
  Tail = nullptr;
  return nullptr;
}

PreservedAnalyses MustExecutePrinterPass::run(Function &F,
                                              FunctionAnalysisManager &AM) {
  auto &LI = AM.getResult<LoopAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);

  MustExecuteAnnotatedWriter Writer(F, DT, LI);
  F.print(OS, &Writer);
  return PreservedAnalyses::all();
}

PreservedAnalyses
MustBeExecutedContextPrinterPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  GetterTy<const LoopInfo> LIGetter = [&](const Function &F) {
    return &FAM.getResult<LoopAnalysis>(const_cast<Function &>(F));
  };
  GetterTy<const DominatorTree> DTGetter = [&](const Function &F) {
    return &FAM.getResult<DominatorTreeAnalysis>(const_cast<Function &>(F));
  };
  GetterTy<const PostDominatorTree> PDTGetter = [&](const Function &F) {
    return &FAM.getResult<PostDominatorTreeAnalysis>(const_cast<Function &>(F));
  };

  MustBeExecutedContextExplorer Explorer(
      /* ExploreInterBlock */ true,
      /* ExploreCFGForward */ true,
      /* ExploreCFGBackward */ true, LIGetter, DTGetter, PDTGetter);

  for (Function &F : M) {
    for (Instruction &I : instructions(F)) {
      OS << "-- Explore context of: " << I << "\n";
      for (const Instruction *CI : Explorer.range(&I))
        OS << "  [F: " << CI->getFunction()->getName() << "] " << *CI << "\n";
    }
  }
  return PreservedAnalyses::all();
}