//===- JumpThreading.cpp - Thread control through conditional blocks ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Jump Threading pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/JumpThreading.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/DomTreeUpdater.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;
using namespace jumpthreading;

#define DEBUG_TYPE "jump-threading"

STATISTIC(NumThreads, "Number of jumps threaded");
STATISTIC(NumFolds, "Number of terminators folded");
STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi");

static cl::opt<unsigned>
BBDuplicateThreshold("jump-threading-threshold",
          cl::desc("Max block size to duplicate for jump threading"),
          cl::init(6), cl::Hidden);

static cl::opt<unsigned>
ImplicationSearchThreshold(
"jump-threading-implication-search-threshold", 98 cl::desc("The number of predecessors to search for a stronger " 99 "condition to use to thread over a weaker condition"), 100 cl::init(3), cl::Hidden); 101 102 static cl::opt<bool> PrintLVIAfterJumpThreading( 103 "print-lvi-after-jump-threading", 104 cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false), 105 cl::Hidden); 106 107 static cl::opt<bool> ThreadAcrossLoopHeaders( 108 "jump-threading-across-loop-headers", 109 cl::desc("Allow JumpThreading to thread across loop headers, for testing"), 110 cl::init(false), cl::Hidden); 111 112 113 namespace { 114 115 /// This pass performs 'jump threading', which looks at blocks that have 116 /// multiple predecessors and multiple successors. If one or more of the 117 /// predecessors of the block can be proven to always jump to one of the 118 /// successors, we forward the edge from the predecessor to the successor by 119 /// duplicating the contents of this block. 120 /// 121 /// An example of when this can occur is code like this: 122 /// 123 /// if () { ... 124 /// X = 4; 125 /// } 126 /// if (X < 3) { 127 /// 128 /// In this case, the unconditional branch at the end of the first if can be 129 /// revectored to the false side of the second if. 130 class JumpThreading : public FunctionPass { 131 JumpThreadingPass Impl; 132 133 public: 134 static char ID; // Pass identification 135 136 JumpThreading(int T = -1) : FunctionPass(ID), Impl(T) { 137 initializeJumpThreadingPass(*PassRegistry::getPassRegistry()); 138 } 139 140 bool runOnFunction(Function &F) override; 141 142 void getAnalysisUsage(AnalysisUsage &AU) const override { 143 AU.addRequired<DominatorTreeWrapperPass>(); 144 AU.addPreserved<DominatorTreeWrapperPass>(); 145 AU.addRequired<AAResultsWrapperPass>(); 146 AU.addRequired<LazyValueInfoWrapperPass>(); 147 AU.addPreserved<LazyValueInfoWrapperPass>(); 148 AU.addPreserved<GlobalsAAWrapperPass>(); 149 AU.addRequired<TargetLibraryInfoWrapperPass>(); 150 } 151 152 void releaseMemory() override { Impl.releaseMemory(); } 153 }; 154 155 } // end anonymous namespace 156 157 char JumpThreading::ID = 0; 158 159 INITIALIZE_PASS_BEGIN(JumpThreading, "jump-threading", 160 "Jump Threading", false, false) 161 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 162 INITIALIZE_PASS_DEPENDENCY(LazyValueInfoWrapperPass) 163 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 164 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 165 INITIALIZE_PASS_END(JumpThreading, "jump-threading", 166 "Jump Threading", false, false) 167 168 // Public interface to the Jump Threading pass 169 FunctionPass *llvm::createJumpThreadingPass(int Threshold) { 170 return new JumpThreading(Threshold); 171 } 172 173 JumpThreadingPass::JumpThreadingPass(int T) { 174 DefaultBBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T); 175 } 176 177 // Update branch probability information according to conditional 178 // branch probability. This is usually made possible for cloned branches 179 // in inline instances by the context specific profile in the caller. 180 // For instance, 181 // 182 // [Block PredBB] 183 // [Branch PredBr] 184 // if (t) { 185 // Block A; 186 // } else { 187 // Block B; 188 // } 189 // 190 // [Block BB] 191 // cond = PN([true, %A], [..., %B]); // PHI node 192 // [Branch CondBr] 193 // if (cond) { 194 // ... 
//  }
//
// Here we know that when block A is taken, cond must be true, which means
//      P(cond == true | A) = 1
//
// Given that P(cond == true) = P(cond == true | A) * P(A) +
//                              P(cond == true | B) * P(B)
// we get:
//      P(cond == true) = P(A) + P(cond == true | B) * P(B)
//
// which gives us:
//      P(A) is less than P(cond == true), i.e.
//      P(t == true) <= P(cond == true)
//
// In other words, if we know P(cond == true) is unlikely, we know
// that P(t == true) is also unlikely.
//
static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) {
  BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
  if (!CondBr)
    return;

  uint64_t TrueWeight, FalseWeight;
  if (!CondBr->extractProfMetadata(TrueWeight, FalseWeight))
    return;

  if (TrueWeight + FalseWeight == 0)
    // Zero branch_weights do not give a hint for getting branch probabilities.
    // Technically it would result in a division by zero denominator, which is
    // TrueWeight + FalseWeight.
    return;

  // Returns the outgoing edge of the dominating predecessor block
  // that leads to the PhiNode's incoming block:
  auto GetPredOutEdge =
      [](BasicBlock *IncomingBB,
         BasicBlock *PhiBB) -> std::pair<BasicBlock *, BasicBlock *> {
    auto *PredBB = IncomingBB;
    auto *SuccBB = PhiBB;
    SmallPtrSet<BasicBlock *, 16> Visited;
    while (true) {
      BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator());
      if (PredBr && PredBr->isConditional())
        return {PredBB, SuccBB};
      Visited.insert(PredBB);
      auto *SinglePredBB = PredBB->getSinglePredecessor();
      if (!SinglePredBB)
        return {nullptr, nullptr};

      // Stop searching when SinglePredBB has been visited. It means we see
      // an unreachable loop.
      if (Visited.count(SinglePredBB))
        return {nullptr, nullptr};

      SuccBB = PredBB;
      PredBB = SinglePredBB;
    }
  };

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PhiOpnd = PN->getIncomingValue(i);
    ConstantInt *CI = dyn_cast<ConstantInt>(PhiOpnd);

    if (!CI || !CI->getType()->isIntegerTy(1))
      continue;

    BranchProbability BP =
        (CI->isOne() ? BranchProbability::getBranchProbability(
                           TrueWeight, TrueWeight + FalseWeight)
                     : BranchProbability::getBranchProbability(
                           FalseWeight, TrueWeight + FalseWeight));

    auto PredOutEdge = GetPredOutEdge(PN->getIncomingBlock(i), BB);
    if (!PredOutEdge.first)
      return;

    BasicBlock *PredBB = PredOutEdge.first;
    BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator());
    if (!PredBr)
      return;

    uint64_t PredTrueWeight, PredFalseWeight;
    // FIXME: We currently only set the profile data when it is missing.
    // With PGO, this can be used to refine even existing profile data with
    // context information. This needs to be done after more performance
    // testing.
    if (PredBr->extractProfMetadata(PredTrueWeight, PredFalseWeight))
      continue;

    // We cannot infer anything useful when BP >= 50%, because BP is the
    // upper bound probability value.
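    // Worked example (illustrative numbers, not from the source): with
    // !prof weights 1:99 on CondBr, P(cond == true) = 1%. For a PHI input of
    // 'true', the derivation above gives P(A) <= 1%, so the predecessor edge
    // into the incoming block can be annotated as taken at most 1% of the
    // time; a BP of 50% or more carries no information beyond that bound.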
    if (BP >= BranchProbability(50, 100))
      continue;

    SmallVector<uint32_t, 2> Weights;
    if (PredBr->getSuccessor(0) == PredOutEdge.second) {
      Weights.push_back(BP.getNumerator());
      Weights.push_back(BP.getCompl().getNumerator());
    } else {
      Weights.push_back(BP.getCompl().getNumerator());
      Weights.push_back(BP.getNumerator());
    }
    PredBr->setMetadata(LLVMContext::MD_prof,
                        MDBuilder(PredBr->getParent()->getContext())
                            .createBranchWeights(Weights));
  }
}

/// runOnFunction - Toplevel algorithm.
bool JumpThreading::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;
  auto TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto LVI = &getAnalysis<LazyValueInfoWrapperPass>().getLVI();
  auto AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  DomTreeUpdater DTU(*DT, DomTreeUpdater::UpdateStrategy::Lazy);
  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  if (F.hasProfileData()) {
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI, TLI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  bool Changed = Impl.runImpl(F, TLI, LVI, AA, &DTU, F.hasProfileData(),
                              std::move(BFI), std::move(BPI));
  if (PrintLVIAfterJumpThreading) {
    dbgs() << "LVI for function '" << F.getName() << "':\n";
    LVI->printLVI(F, DTU.getDomTree(), dbgs());
  }
  return Changed;
}

PreservedAnalyses JumpThreadingPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &LVI = AM.getResult<LazyValueAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  std::unique_ptr<BlockFrequencyInfo> BFI;
  std::unique_ptr<BranchProbabilityInfo> BPI;
  if (F.hasProfileData()) {
    LoopInfo LI{DominatorTree(F)};
    BPI.reset(new BranchProbabilityInfo(F, LI, &TLI));
    BFI.reset(new BlockFrequencyInfo(F, *BPI, LI));
  }

  bool Changed = runImpl(F, &TLI, &LVI, &AA, &DTU, F.hasProfileData(),
                         std::move(BFI), std::move(BPI));

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<GlobalsAA>();
  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LazyValueAnalysis>();
  return PA;
}

bool JumpThreadingPass::runImpl(Function &F, TargetLibraryInfo *TLI_,
                                LazyValueInfo *LVI_, AliasAnalysis *AA_,
                                DomTreeUpdater *DTU_, bool HasProfileData_,
                                std::unique_ptr<BlockFrequencyInfo> BFI_,
                                std::unique_ptr<BranchProbabilityInfo> BPI_) {
  LLVM_DEBUG(dbgs() << "Jump threading on function '" << F.getName() << "'\n");
  TLI = TLI_;
  LVI = LVI_;
  AA = AA_;
  DTU = DTU_;
  BFI.reset();
  BPI.reset();
  // When profile data is available, we need to update edge weights after
  // successful jump threading, which requires both BPI and BFI being
  // available.
  HasProfileData = HasProfileData_;
  auto *GuardDecl = F.getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();
  if (HasProfileData) {
    BPI = std::move(BPI_);
    BFI = std::move(BFI_);
  }

  // Reduce the number of instructions duplicated when optimizing strictly for
  // size.
  if (BBDuplicateThreshold.getNumOccurrences())
    BBDupThreshold = BBDuplicateThreshold;
  else if (F.hasFnAttribute(Attribute::MinSize))
    BBDupThreshold = 3;
  else
    BBDupThreshold = DefaultBBDupThreshold;

  // JumpThreading must not process blocks unreachable from entry. It's a
  // waste of compute time and can potentially lead to hangs.
  SmallPtrSet<BasicBlock *, 16> Unreachable;
  assert(DTU && "DTU isn't passed into JumpThreading before using it.");
  assert(DTU->hasDomTree() && "JumpThreading relies on DomTree to proceed.");
  DominatorTree &DT = DTU->getDomTree();
  for (auto &BB : F)
    if (!DT.isReachableFromEntry(&BB))
      Unreachable.insert(&BB);

  if (!ThreadAcrossLoopHeaders)
    FindLoopHeaders(F);

  bool EverChanged = false;
  bool Changed;
  do {
    Changed = false;
    for (auto &BB : F) {
      if (Unreachable.count(&BB))
        continue;
      while (ProcessBlock(&BB)) // Thread all of the branches we can over BB.
        Changed = true;

      // Jump threading may have introduced redundant debug values into BB
      // which should be removed.
      if (Changed)
        RemoveRedundantDbgInstrs(&BB);

      // Stop processing BB if it's the entry or is now deleted. The following
      // routines attempt to eliminate BB, and locating a suitable replacement
      // for the entry is non-trivial.
      if (&BB == &F.getEntryBlock() || DTU->isBBPendingDeletion(&BB))
        continue;

      if (pred_empty(&BB)) {
        // When ProcessBlock makes BB unreachable it doesn't bother to fix up
        // the instructions in it. We must remove BB to prevent invalid IR.
        LLVM_DEBUG(dbgs() << "  JT: Deleting dead block '" << BB.getName()
                          << "' with terminator: " << *BB.getTerminator()
                          << '\n');
        LoopHeaders.erase(&BB);
        LVI->eraseBlock(&BB);
        DeleteDeadBlock(&BB, DTU);
        Changed = true;
        continue;
      }

      // ProcessBlock doesn't thread BBs with unconditional TIs. However, if BB
      // is "almost empty", we attempt to merge BB with its sole successor.
      auto *BI = dyn_cast<BranchInst>(BB.getTerminator());
      if (BI && BI->isUnconditional()) {
        BasicBlock *Succ = BI->getSuccessor(0);
        if (
            // The terminator must be the only non-phi instruction in BB.
            BB.getFirstNonPHIOrDbg()->isTerminator() &&
            // Don't alter Loop headers and latches to ensure another pass can
            // detect and transform nested loops later.
            !LoopHeaders.count(&BB) && !LoopHeaders.count(Succ) &&
            TryToSimplifyUncondBranchFromEmptyBlock(&BB, DTU)) {
          RemoveRedundantDbgInstrs(Succ);
          // BB is valid for cleanup here because we passed in DTU. F remains
          // BB's parent until a DTU->getDomTree() event.
          LVI->eraseBlock(&BB);
          Changed = true;
        }
      }
    }
    EverChanged |= Changed;
  } while (Changed);

  LoopHeaders.clear();
  return EverChanged;
}

// Replace uses of Cond with ToVal when safe to do so. If all uses are
// replaced, we can remove Cond. We cannot blindly replace all uses of Cond
// because we may incorrectly replace uses when guards/assumes are uses of
// `Cond` and we used the guards/assumes to reason about the `Cond` value
// at the end of block. RAUW unconditionally replaces all uses
// including the guards/assumes themselves and the uses before the
// guard/assume.
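//
// For example (illustrative IR, not from a test): if BB contains
//   %cond = icmp ult i32 %x, 10
//   call void (i1, ...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
// and we used the guard to conclude that %cond is true at BB's terminator,
// RAUW'ing %cond would also rewrite the guard's own operand to 'true' and
// silently drop the check; this helper only rewrites the uses that are known
// to execute after the fact is established.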
static void ReplaceFoldableUses(Instruction *Cond, Value *ToVal) {
  assert(Cond->getType() == ToVal->getType());
  auto *BB = Cond->getParent();
  // We can unconditionally replace all uses in non-local blocks (i.e. uses
  // strictly dominated by BB), since LVI information is true from the
  // terminator of BB.
  replaceNonLocalUsesWith(Cond, ToVal);
  for (Instruction &I : reverse(*BB)) {
    // Reached the Cond whose uses we are trying to replace, so there are no
    // more uses.
    if (&I == Cond)
      break;
    // We only replace uses in instructions that are guaranteed to reach the
    // end of BB, where we know Cond is ToVal.
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
    I.replaceUsesOfWith(Cond, ToVal);
  }
  if (Cond->use_empty() && !Cond->mayHaveSideEffects())
    Cond->eraseFromParent();
}

/// Return the cost of duplicating this block, from the first non-phi up to
/// (but not including) the StopAt instruction, in order to thread across it.
/// Stop scanning the block when exceeding the threshold. If duplication is
/// impossible, returns ~0U.
static unsigned getJumpThreadDuplicationCost(BasicBlock *BB,
                                             Instruction *StopAt,
                                             unsigned Threshold) {
  assert(StopAt->getParent() == BB && "Not an instruction from proper BB?");
  // Ignore PHI nodes, these will be flattened when duplication happens.
  BasicBlock::const_iterator I(BB->getFirstNonPHI());

  // FIXME: THREADING will delete values that are just used to compute the
  // branch, so they shouldn't count against the duplication cost.

  unsigned Bonus = 0;
  if (BB->getTerminator() == StopAt) {
    // Threading through a switch statement is particularly profitable. If this
    // block ends in a switch, decrease its cost to make it more likely to
    // happen.
    if (isa<SwitchInst>(StopAt))
      Bonus = 6;

    // The same holds for indirect branches, but slightly more so.
    if (isa<IndirectBrInst>(StopAt))
      Bonus = 8;
  }

  // Bump the threshold up so the early exit from the loop doesn't skip the
  // terminator-based Size adjustment at the end.
  Threshold += Bonus;

  // Sum up the cost of each instruction until we get to the terminator. Don't
  // include the terminator because the copy won't include it.
  unsigned Size = 0;
  for (; &*I != StopAt; ++I) {

    // Stop scanning the block if we've reached the threshold.
    if (Size > Threshold)
      return Size;

    // Debugger intrinsics don't incur code size.
    if (isa<DbgInfoIntrinsic>(I)) continue;

    // If this is a pointer->pointer bitcast, it is free.
    if (isa<BitCastInst>(I) && I->getType()->isPointerTy())
      continue;

    // Bail out if this instruction gives back a token type, it is not possible
    // to duplicate it if it is used outside this BB.
    if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB))
      return ~0U;

    // All other instructions count for at least one unit.
    ++Size;

    // Calls are more expensive. If they are non-intrinsic calls, we model them
    // as having cost of 4. If they are a non-vector intrinsic, we model them
    // as having cost of 2 total, and if they are a vector intrinsic, we model
    // them as having cost 1.
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      if (CI->cannotDuplicate() || CI->isConvergent())
        // Blocks with NoDuplicate are modelled as having infinite cost, so
        // they are never duplicated.
        return ~0U;
      else if (!isa<IntrinsicInst>(CI))
        Size += 3;
      else if (!CI->getType()->isVectorTy())
        Size += 1;
    }
  }

  return Size > Bonus ? Size - Bonus : 0;
}

/// FindLoopHeaders - We do not want jump threading to turn proper loop
/// structures into irreducible loops. Doing this breaks up the loop nesting
/// hierarchy and pessimizes later transformations. To prevent this from
/// happening, we first have to find the loop headers. Here we approximate this
/// by finding targets of backedges in the CFG.
///
/// Note that there definitely are cases when we want to allow threading of
/// edges across a loop header. For example, threading a jump from outside the
/// loop (the preheader) to an exit block of the loop is definitely profitable.
/// It is also almost always profitable to thread backedges from within the
/// loop to exit blocks, and is often profitable to thread backedges to other
/// blocks within the loop (forming a nested loop). This simple analysis is
/// not rich enough to track all of these properties and keep it up-to-date as
/// the CFG mutates, so we don't allow any of these transformations.
void JumpThreadingPass::FindLoopHeaders(Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  for (const auto &Edge : Edges)
    LoopHeaders.insert(Edge.second);
}

/// getKnownConstant - Helper method to determine if we can thread over a
/// terminator with the given value as its condition, and if so what value to
/// use for that. What kind of value this is depends on whether we want an
/// integer or a block address, but an undef is always accepted.
/// Returns null if Val is null or not an appropriate constant.
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
  if (!Val)
    return nullptr;

  // Undef is "known" enough.
  if (UndefValue *U = dyn_cast<UndefValue>(Val))
    return U;

  if (Preference == WantBlockAddress)
    return dyn_cast<BlockAddress>(Val->stripPointerCasts());

  return dyn_cast<ConstantInt>(Val);
}

/// ComputeValueKnownInPredecessors - Given a basic block BB and a value V, see
/// if we can infer that the value is a known ConstantInt/BlockAddress or undef
/// in any of our predecessors. If so, return the known list of value and pred
/// BB in the result vector.
///
/// This returns true if there were any known values.
bool JumpThreadingPass::ComputeValueKnownInPredecessorsImpl(
    Value *V, BasicBlock *BB, PredValueInfo &Result,
    ConstantPreference Preference, DenseSet<Value *> &RecursionSet,
    Instruction *CxtI) {
  // This method walks up use-def chains recursively. Because of this, we could
  // get into an infinite loop going around loops in the use-def chain. To
  // prevent this, keep track of what (value, block) pairs we've already
  // visited and terminate the search if we loop back to them.
  if (!RecursionSet.insert(V).second)
    return false;

  // If V is a constant, then it is known in all predecessors.
  if (Constant *KC = getKnownConstant(V, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.emplace_back(KC, Pred);

    return !Result.empty();
  }

  // If V is a non-instruction value, or an instruction in a different block,
  // then it can't be derived from a PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || I->getParent() != BB) {

    // Okay, if this is a live-in value, see if it has a known value at the end
    // of any of our predecessors.
    //
    // FIXME: This should be an edge property, not a block end property.
    /// TODO: Per PR2563, we could infer value range information about a
    /// predecessor based on its terminator.
    //
    // FIXME: change this to use the more-rich 'getPredicateOnEdge' method if
    // "I" is a non-local compare-with-a-constant instruction. This would be
    // able to handle value inequalities better, for example if the compare is
    // "X < 4" and "X < 3" is known true but "X < 4" itself is not available.
    // Perhaps getConstantOnEdge should be smart enough to do this?
    for (BasicBlock *P : predecessors(BB)) {
      // If the value is known by LazyValueInfo to be a constant in a
      // predecessor, use that information to try to thread this block.
      Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
      if (Constant *KC = getKnownConstant(PredCst, Preference))
        Result.emplace_back(KC, P);
    }

    return !Result.empty();
  }

  // If I is a PHI node, then we know the incoming values for any constants.
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      if (Constant *KC = getKnownConstant(InVal, Preference)) {
        Result.emplace_back(KC, PN->getIncomingBlock(i));
      } else {
        Constant *CI = LVI->getConstantOnEdge(InVal,
                                              PN->getIncomingBlock(i),
                                              BB, CxtI);
        if (Constant *KC = getKnownConstant(CI, Preference))
          Result.emplace_back(KC, PN->getIncomingBlock(i));
      }
    }

    return !Result.empty();
  }

  // Handle Cast instructions. Only see through a Cast when the source operand
  // is a PHI or Cmp, to save compilation time.
  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Source = CI->getOperand(0);
    if (!isa<PHINode>(Source) && !isa<CmpInst>(Source))
      return false;
    ComputeValueKnownInPredecessorsImpl(Source, BB, Result, Preference,
                                        RecursionSet, CxtI);
    if (Result.empty())
      return false;

    // Convert the known values.
    for (auto &R : Result)
      R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType());

    return true;
  }

  // Handle some boolean conditions.
  if (I->getType()->getPrimitiveSizeInBits() == 1) {
    assert(Preference == WantInteger && "One-bit non-integer type?");
    // X | true -> true
    // X & false -> false
    if (I->getOpcode() == Instruction::Or ||
        I->getOpcode() == Instruction::And) {
      PredValueInfoTy LHSVals, RHSVals;

      ComputeValueKnownInPredecessorsImpl(I->getOperand(0), BB, LHSVals,
                                          WantInteger, RecursionSet, CxtI);
      ComputeValueKnownInPredecessorsImpl(I->getOperand(1), BB, RHSVals,
                                          WantInteger, RecursionSet, CxtI);

      if (LHSVals.empty() && RHSVals.empty())
        return false;

      ConstantInt *InterestingVal;
      if (I->getOpcode() == Instruction::Or)
        InterestingVal = ConstantInt::getTrue(I->getContext());
      else
        InterestingVal = ConstantInt::getFalse(I->getContext());

      SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;

      // Scan for the sentinel. If we find an undef, force it to the
      // interesting value: x|undef -> true and x&undef -> false.
      for (const auto &LHSVal : LHSVals)
        if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) {
          Result.emplace_back(InterestingVal, LHSVal.second);
          LHSKnownBBs.insert(LHSVal.second);
        }
      for (const auto &RHSVal : RHSVals)
        if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) {
          // If we already inferred a value for this block on the LHS, don't
          // re-add it.
          if (!LHSKnownBBs.count(RHSVal.second))
            Result.emplace_back(InterestingVal, RHSVal.second);
        }

      return !Result.empty();
    }

    // Handle the NOT form of XOR.
    if (I->getOpcode() == Instruction::Xor &&
        isa<ConstantInt>(I->getOperand(1)) &&
        cast<ConstantInt>(I->getOperand(1))->isOne()) {
      ComputeValueKnownInPredecessorsImpl(I->getOperand(0), BB, Result,
                                          WantInteger, RecursionSet, CxtI);
      if (Result.empty())
        return false;

      // Invert the known values.
      for (auto &R : Result)
        R.first = ConstantExpr::getNot(R.first);

      return true;
    }

  // Try to simplify some other binary operator values.
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    assert(Preference != WantBlockAddress
            && "A binary operator creating a block address?");
    if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
      PredValueInfoTy LHSVals;
      ComputeValueKnownInPredecessorsImpl(BO->getOperand(0), BB, LHSVals,
                                          WantInteger, RecursionSet, CxtI);

      // Try to use constant folding to simplify the binary operator.
      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded = ConstantExpr::get(BO->getOpcode(), V, CI);

        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.emplace_back(KC, LHSVal.second);
      }
    }

    return !Result.empty();
  }

  // Handle compare with phi operand, where the PHI is defined in this block.
  if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
    assert(Preference == WantInteger && "Compares only produce integers");
    Type *CmpType = Cmp->getType();
    Value *CmpLHS = Cmp->getOperand(0);
    Value *CmpRHS = Cmp->getOperand(1);
    CmpInst::Predicate Pred = Cmp->getPredicate();

    PHINode *PN = dyn_cast<PHINode>(CmpLHS);
    if (!PN)
      PN = dyn_cast<PHINode>(CmpRHS);
    if (PN && PN->getParent() == BB) {
      const DataLayout &DL = PN->getModule()->getDataLayout();
      // We can do this simplification if any comparisons fold to true or
      // false. See if any do.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *PredBB = PN->getIncomingBlock(i);
        Value *LHS, *RHS;
        if (PN == CmpLHS) {
          LHS = PN->getIncomingValue(i);
          RHS = CmpRHS->DoPHITranslation(BB, PredBB);
        } else {
          LHS = CmpLHS->DoPHITranslation(BB, PredBB);
          RHS = PN->getIncomingValue(i);
        }
        Value *Res = SimplifyCmpInst(Pred, LHS, RHS, {DL});
        if (!Res) {
          if (!isa<Constant>(RHS))
            continue;

          // A getPredicateOnEdge call will make no sense if LHS is defined
          // in BB.
          auto LHSInst = dyn_cast<Instruction>(LHS);
          if (LHSInst && LHSInst->getParent() == BB)
            continue;

          LazyValueInfo::Tristate
            ResT = LVI->getPredicateOnEdge(Pred, LHS,
                                           cast<Constant>(RHS), PredBB, BB,
                                           CxtI ? CxtI : Cmp);
          if (ResT == LazyValueInfo::Unknown)
            continue;
          Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
        }

        if (Constant *KC = getKnownConstant(Res, WantInteger))
          Result.emplace_back(KC, PredBB);
      }

      return !Result.empty();
    }

    // If comparing a live-in value against a constant, see if we know the
    // live-in value on any predecessors.
    if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) {
      Constant *CmpConst = cast<Constant>(CmpRHS);

      if (!isa<Instruction>(CmpLHS) ||
          cast<Instruction>(CmpLHS)->getParent() != BB) {
        for (BasicBlock *P : predecessors(BB)) {
          // If the value is known by LazyValueInfo to be a constant in a
          // predecessor, use that information to try to thread this block.
          LazyValueInfo::Tristate Res =
            LVI->getPredicateOnEdge(Pred, CmpLHS,
                                    CmpConst, P, BB, CxtI ? CxtI : Cmp);
          if (Res == LazyValueInfo::Unknown)
            continue;

          Constant *ResC = ConstantInt::get(CmpType, Res);
          Result.emplace_back(ResC, P);
        }

        return !Result.empty();
      }

      // InstCombine can fold some forms of constant range checks into
      // (icmp (add (x, C1)), C2). See if we have such a thing with
      // x as a live-in.
      {
        using namespace PatternMatch;

        Value *AddLHS;
        ConstantInt *AddConst;
        if (isa<ConstantInt>(CmpConst) &&
            match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) {
          if (!isa<Instruction>(AddLHS) ||
              cast<Instruction>(AddLHS)->getParent() != BB) {
            for (BasicBlock *P : predecessors(BB)) {
              // If the value is known by LazyValueInfo to be a ConstantRange
              // in a predecessor, use that information to try to thread this
              // block.
              ConstantRange CR = LVI->getConstantRangeOnEdge(
                  AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS));
              // Propagate the range through the addition.
              CR = CR.add(AddConst->getValue());

              // Get the range where the compare returns true.
              ConstantRange CmpRange = ConstantRange::makeExactICmpRegion(
                  Pred, cast<ConstantInt>(CmpConst)->getValue());

              Constant *ResC;
              if (CmpRange.contains(CR))
                ResC = ConstantInt::getTrue(CmpType);
              else if (CmpRange.inverse().contains(CR))
                ResC = ConstantInt::getFalse(CmpType);
              else
                continue;

              Result.emplace_back(ResC, P);
            }

            return !Result.empty();
          }
        }
      }

      // Try to find a constant value for the LHS of a comparison,
      // and evaluate it statically if we can.
      PredValueInfoTy LHSVals;
      ComputeValueKnownInPredecessorsImpl(I->getOperand(0), BB, LHSVals,
                                          WantInteger, RecursionSet, CxtI);

      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded = ConstantExpr::getCompare(Pred, V, CmpConst);
        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.emplace_back(KC, LHSVal.second);
      }

      return !Result.empty();
    }
  }

  if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
    // Handle select instructions where at least one operand is a known
    // constant and we can figure out the condition value for any predecessor
    // block.
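    //
    // Illustrative case (hypothetical IR):
    //   %flag = phi i1 [ true, %pred1 ], [ %c, %pred2 ]
    //   %val  = select i1 %flag, i32 7, i32 %other
    // On the edge from %pred1 the condition is known true, so %val is known
    // to be 7 there even though %other is opaque.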
    Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
    Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
    PredValueInfoTy Conds;
    if ((TrueVal || FalseVal) &&
        ComputeValueKnownInPredecessorsImpl(SI->getCondition(), BB, Conds,
                                            WantInteger, RecursionSet, CxtI)) {
      for (auto &C : Conds) {
        Constant *Cond = C.first;

        // Figure out what value to use for the condition.
        bool KnownCond;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
          // A known boolean.
          KnownCond = CI->isOne();
        } else {
          assert(isa<UndefValue>(Cond) && "Unexpected condition value");
          // Either operand will do, so be sure to pick the one that's a known
          // constant.
          // FIXME: Do this more cleverly if both values are known constants?
          KnownCond = (TrueVal != nullptr);
        }

        // See if the select has a known constant value for this predecessor.
        if (Constant *Val = KnownCond ? TrueVal : FalseVal)
          Result.emplace_back(Val, C.second);
      }

      return !Result.empty();
    }
  }

  // If all else fails, see if LVI can figure out a constant value for us.
  Constant *CI = LVI->getConstant(V, BB, CxtI);
  if (Constant *KC = getKnownConstant(CI, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.emplace_back(KC, Pred);
  }

  return !Result.empty();
}

/// GetBestDestForJumpOnUndef - If we determine that the specified block ends
/// in an undefined jump, decide which block is best to revector to.
///
/// Since we can pick an arbitrary destination, we pick the successor with the
/// fewest predecessors. This should reduce the in-degree of the others.
static unsigned GetBestDestForJumpOnUndef(BasicBlock *BB) {
  Instruction *BBTerm = BB->getTerminator();
  unsigned MinSucc = 0;
  BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
  // Compute the successor with the minimum number of predecessors.
  unsigned MinNumPreds = pred_size(TestBB);
  for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
    TestBB = BBTerm->getSuccessor(i);
    unsigned NumPreds = pred_size(TestBB);
    if (NumPreds < MinNumPreds) {
      MinSucc = i;
      MinNumPreds = NumPreds;
    }
  }

  return MinSucc;
}

static bool hasAddressTakenAndUsed(BasicBlock *BB) {
  if (!BB->hasAddressTaken()) return false;

  // If the block has its address taken, it may be a tree of dead constants
  // hanging off of it. These shouldn't keep the block alive.
  BlockAddress *BA = BlockAddress::get(BB);
  BA->removeDeadConstantUsers();
  return !BA->use_empty();
}

/// ProcessBlock - If there are any predecessors whose control can be threaded
/// through to a successor, transform them now.
bool JumpThreadingPass::ProcessBlock(BasicBlock *BB) {
  // If the block is trivially dead, just return and let the caller nuke it.
  // This simplifies other transformations.
  if (DTU->isBBPendingDeletion(BB) ||
      (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()))
    return false;

  // If this block has a single predecessor, and if that pred has a single
  // successor, merge the blocks. This encourages recursive jump threading
  // because now the condition in this block can be threaded through
  // predecessors of our predecessor block.
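  // E.g. (illustrative) if the sole predecessor ends in 'br label %bb',
  // merging %bb into it lets the next iteration evaluate %bb's condition
  // against the predecessors of the former predecessor.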
  if (MaybeMergeBasicBlockIntoOnlyPred(BB))
    return true;

  if (TryToUnfoldSelectInCurrBB(BB))
    return true;

  // Look if we can propagate guards to predecessors.
  if (HasGuards && ProcessGuards(BB))
    return true;

  // What kind of constant we're looking for.
  ConstantPreference Preference = WantInteger;

  // Look to see if the terminator is a conditional branch, switch or indirect
  // branch; if not, we can't thread it.
  Value *Condition;
  Instruction *Terminator = BB->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
    // Can't thread an unconditional jump.
    if (BI->isUnconditional()) return false;
    Condition = BI->getCondition();
  } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
    Condition = SI->getCondition();
  } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
    // Can't thread an indirect branch with no successors.
    if (IB->getNumSuccessors() == 0) return false;
    Condition = IB->getAddress()->stripPointerCasts();
    Preference = WantBlockAddress;
  } else {
    return false; // Must be an invoke or callbr.
  }

  // Run constant folding to see if we can reduce the condition to a simple
  // constant.
  if (Instruction *I = dyn_cast<Instruction>(Condition)) {
    Value *SimpleVal =
        ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
    if (SimpleVal) {
      I->replaceAllUsesWith(SimpleVal);
      if (isInstructionTriviallyDead(I, TLI))
        I->eraseFromParent();
      Condition = SimpleVal;
    }
  }

  // If the terminator is branching on an undef, we can pick any of the
  // successors to branch to. Let GetBestDestForJumpOnUndef decide.
  if (isa<UndefValue>(Condition)) {
    unsigned BestSucc = GetBestDestForJumpOnUndef(BB);
    std::vector<DominatorTree::UpdateType> Updates;

    // Fold the branch/switch.
    Instruction *BBTerm = BB->getTerminator();
    Updates.reserve(BBTerm->getNumSuccessors());
    for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
      if (i == BestSucc) continue;
      BasicBlock *Succ = BBTerm->getSuccessor(i);
      Succ->removePredecessor(BB, true);
      Updates.push_back({DominatorTree::Delete, BB, Succ});
    }

    LLVM_DEBUG(dbgs() << "  In block '" << BB->getName()
                      << "' folding undef terminator: " << *BBTerm << '\n');
    BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
    BBTerm->eraseFromParent();
    DTU->applyUpdatesPermissive(Updates);
    return true;
  }

  // If the terminator of this block is branching on a constant, simplify the
  // terminator to an unconditional branch. This can occur due to threading in
  // other blocks.
  if (getKnownConstant(Condition, Preference)) {
    LLVM_DEBUG(dbgs() << "  In block '" << BB->getName()
                      << "' folding terminator: " << *BB->getTerminator()
                      << '\n');
    ++NumFolds;
    ConstantFoldTerminator(BB, true, nullptr, DTU);
    return true;
  }

  Instruction *CondInst = dyn_cast<Instruction>(Condition);

  // All the rest of our checks depend on the condition being an instruction.
  if (!CondInst) {
    // FIXME: Unify this with code below.
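    // Even a non-instruction condition (e.g. a function argument) may have a
    // known constant value on each incoming edge via LVI, so the
    // threadable-edges analysis below still applies.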
    if (ProcessThreadableEdges(Condition, BB, Preference, Terminator))
      return true;
    return false;
  }

  if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondInst)) {
    // If we're branching on a conditional, LVI might be able to determine
    // its value at the branch instruction. We only handle comparisons
    // against a constant at this time.
    // TODO: This should be extended to handle switches as well.
    BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator());
    Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1));
    if (CondBr && CondConst) {
      // We should have returned as soon as we turned a conditional branch to
      // unconditional, because it's no longer interesting as far as jump
      // threading is concerned.
      assert(CondBr->isConditional() && "Threading on unconditional terminator");

      LazyValueInfo::Tristate Ret =
          LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
                              CondConst, CondBr);
      if (Ret != LazyValueInfo::Unknown) {
        unsigned ToRemove = Ret == LazyValueInfo::True ? 1 : 0;
        unsigned ToKeep = Ret == LazyValueInfo::True ? 0 : 1;
        BasicBlock *ToRemoveSucc = CondBr->getSuccessor(ToRemove);
        ToRemoveSucc->removePredecessor(BB, true);
        BranchInst *UncondBr =
          BranchInst::Create(CondBr->getSuccessor(ToKeep), CondBr);
        UncondBr->setDebugLoc(CondBr->getDebugLoc());
        CondBr->eraseFromParent();
        if (CondCmp->use_empty())
          CondCmp->eraseFromParent();
        // We can safely replace *some* uses of the CondInst if it has
        // exactly one value as returned by LVI. RAUW is incorrect in the
        // presence of guards and assumes that have `Cond` as a use. This
        // is because we use the guards/assumes to reason about the `Cond`
        // value at the end of block, but RAUW unconditionally replaces all
        // uses including the guards/assumes themselves and the uses before
        // the guard/assume.
        else if (CondCmp->getParent() == BB) {
          auto *CI = Ret == LazyValueInfo::True ?
            ConstantInt::getTrue(CondCmp->getType()) :
            ConstantInt::getFalse(CondCmp->getType());
          ReplaceFoldableUses(CondCmp, CI);
        }
        DTU->applyUpdatesPermissive(
            {{DominatorTree::Delete, BB, ToRemoveSucc}});
        return true;
      }

      // We did not manage to simplify this branch, try to see whether
      // CondCmp depends on a known phi-select pattern.
      if (TryToUnfoldSelect(CondCmp, BB))
        return true;
    }
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
    if (TryToUnfoldSelect(SI, BB))
      return true;

  // Check for some cases that are worth simplifying. Right now we want to
  // look for loads that are used by a switch or by the condition for the
  // branch. If we see one, check to see if it's partially redundant. If so,
  // insert a PHI which can then be used to thread the values.
  Value *SimplifyValue = CondInst;
  if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
    if (isa<Constant>(CondCmp->getOperand(1)))
      SimplifyValue = CondCmp->getOperand(0);

  // TODO: There are other places where load PRE would be profitable, such as
  // more complex comparisons.
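  //
  // Illustrative pattern (hypothetical IR) targeted here:
  //   %v = load i32, i32* %p
  //   %c = icmp eq i32 %v, 42
  //   br i1 %c, label %t, label %f
  // If %v is already available in some predecessors, the load is partially
  // redundant and may be replaced with a PHI over the available values.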
  if (LoadInst *LoadI = dyn_cast<LoadInst>(SimplifyValue))
    if (SimplifyPartiallyRedundantLoad(LoadI))
      return true;

  // Before threading, try to propagate profile data backwards:
  if (PHINode *PN = dyn_cast<PHINode>(CondInst))
    if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
      updatePredecessorProfileMetadata(PN, BB);

  // Handle a variety of cases where we are branching on something derived from
  // a PHI node in the current block. If we can prove that any predecessors
  // compute a predictable value based on a PHI node, thread those
  // predecessors.
  if (ProcessThreadableEdges(CondInst, BB, Preference, Terminator))
    return true;

  // If this is an otherwise-unfoldable branch on a phi node in the current
  // block, see if we can simplify.
  if (PHINode *PN = dyn_cast<PHINode>(CondInst))
    if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
      return ProcessBranchOnPHI(PN);

  // If this is an otherwise-unfoldable branch on a XOR, see if we can
  // simplify.
  if (CondInst->getOpcode() == Instruction::Xor &&
      CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
    return ProcessBranchOnXOR(cast<BinaryOperator>(CondInst));

  // Search for a stronger dominating condition that can be used to simplify a
  // conditional branch leaving BB.
  if (ProcessImpliedCondition(BB))
    return true;

  return false;
}

bool JumpThreadingPass::ProcessImpliedCondition(BasicBlock *BB) {
  auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  BasicBlock *CurrentBB = BB;
  BasicBlock *CurrentPred = BB->getSinglePredecessor();
  unsigned Iter = 0;

  auto &DL = BB->getModule()->getDataLayout();

  while (CurrentPred && Iter++ < ImplicationSearchThreshold) {
    auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator());
    if (!PBI || !PBI->isConditional())
      return false;
    if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB)
      return false;

    bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB;
    Optional<bool> Implication =
        isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue);
    if (Implication) {
      BasicBlock *KeepSucc = BI->getSuccessor(*Implication ? 0 : 1);
      BasicBlock *RemoveSucc = BI->getSuccessor(*Implication ? 1 : 0);
      RemoveSucc->removePredecessor(BB);
      BranchInst *UncondBI = BranchInst::Create(KeepSucc, BI);
      UncondBI->setDebugLoc(BI->getDebugLoc());
      BI->eraseFromParent();
      DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, RemoveSucc}});
      return true;
    }
    CurrentBB = CurrentPred;
    CurrentPred = CurrentBB->getSinglePredecessor();
  }

  return false;
}

/// Return true if Op is an instruction defined in the given block.
static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) {
  if (Instruction *OpInst = dyn_cast<Instruction>(Op))
    if (OpInst->getParent() == BB)
      return true;
  return false;
}

/// SimplifyPartiallyRedundantLoad - If LoadI is an obviously partially
/// redundant load instruction, eliminate it by replacing it with a PHI node.
/// This is an important optimization that encourages jump threading, and needs
/// to be run interlaced with other jump threading tasks.
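///
/// A minimal sketch of the situation (hypothetical IR, not from a test):
///
///   pred1:                      ; a store makes the value available here
///     store i32 0, i32* %p
///     br label %loadbb
///   pred2:                      ; nothing establishes %p's value here
///     br label %loadbb
///   loadbb:
///     %v = load i32, i32* %p
///
/// The load is available in pred1 but not pred2; we can insert a reload at
/// the end of pred2 and replace %v with a PHI over both incoming values.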
bool JumpThreadingPass::SimplifyPartiallyRedundantLoad(LoadInst *LoadI) {
  // Don't hack volatile and ordered loads.
  if (!LoadI->isUnordered()) return false;

  // If the load is defined in a block with exactly one predecessor, it can't
  // be partially redundant.
  BasicBlock *LoadBB = LoadI->getParent();
  if (LoadBB->getSinglePredecessor())
    return false;

  // If the load is defined in an EH pad, it can't be partially redundant,
  // because the edges between the invoke and the EH pad cannot have other
  // instructions between them.
  if (LoadBB->isEHPad())
    return false;

  Value *LoadedPtr = LoadI->getOperand(0);

  // If the loaded operand is defined in the LoadBB and it's not a phi,
  // it can't be available in predecessors.
  if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr))
    return false;

  // Scan a few instructions up from the load, to see if it is obviously live
  // at the entry to its block.
  BasicBlock::iterator BBIt(LoadI);
  bool IsLoadCSE;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          LoadI, LoadBB, BBIt, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    // If the value of the load is locally available within the block, just use
    // it. This frequently occurs for reg2mem'd allocas.

    if (IsLoadCSE) {
      LoadInst *NLoadI = cast<LoadInst>(AvailableVal);
      combineMetadataForCSE(NLoadI, LoadI, false);
    }

    // If the returned value is the load itself, replace it with an undef. This
    // can only happen in dead loops.
    if (AvailableVal == LoadI)
      AvailableVal = UndefValue::get(LoadI->getType());
    if (AvailableVal->getType() != LoadI->getType())
      AvailableVal = CastInst::CreateBitOrPointerCast(
          AvailableVal, LoadI->getType(), "", LoadI);
    LoadI->replaceAllUsesWith(AvailableVal);
    LoadI->eraseFromParent();
    return true;
  }

  // Otherwise, if we scanned the whole block and got to the top of the block,
  // we know the block is locally transparent to the load. If not, something
  // might clobber its value.
  if (BBIt != LoadBB->begin())
    return false;

  // If all of the loads and stores that feed the value have the same AA tags,
  // then we can propagate them onto any newly inserted loads.
  AAMDNodes AATags;
  LoadI->getAAMetadata(AATags);

  SmallPtrSet<BasicBlock*, 8> PredsScanned;

  using AvailablePredsTy = SmallVector<std::pair<BasicBlock *, Value *>, 8>;

  AvailablePredsTy AvailablePreds;
  BasicBlock *OneUnavailablePred = nullptr;
  SmallVector<LoadInst*, 8> CSELoads;

  // If we got here, the loaded value is transparent through to the start of
  // the block. Check to see if it is available in any of the predecessor
  // blocks.
  for (BasicBlock *PredBB : predecessors(LoadBB)) {
    // If we already scanned this predecessor, skip it.
    if (!PredsScanned.insert(PredBB).second)
      continue;

    BBIt = PredBB->end();
    unsigned NumScanedInst = 0;
    Value *PredAvailable = nullptr;
    // NOTE: We don't CSE loads that are volatile or anything stronger than
    // unordered; that should have been checked when we entered the function.
    assert(LoadI->isUnordered() &&
           "Attempting to CSE volatile or atomic loads");
    // If this is a load on a phi pointer, phi-translate it and search
    // for available load/store to the pointer in predecessors.
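    // E.g. (illustrative): if LoadedPtr is
    //   %p = phi i32* [ %a, %pred1 ], [ %b, %pred2 ]
    // then when scanning %pred1 we look for an available load or store of %a.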
    Value *Ptr = LoadedPtr->DoPHITranslation(LoadBB, PredBB);
    PredAvailable = FindAvailablePtrLoadStore(
        Ptr, LoadI->getType(), LoadI->isAtomic(), PredBB, BBIt,
        DefMaxInstsToScan, AA, &IsLoadCSE, &NumScanedInst);

    // If PredBB has a single predecessor, continue scanning through the
    // single predecessor.
    BasicBlock *SinglePredBB = PredBB;
    while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() &&
           NumScanedInst < DefMaxInstsToScan) {
      SinglePredBB = SinglePredBB->getSinglePredecessor();
      if (SinglePredBB) {
        BBIt = SinglePredBB->end();
        PredAvailable = FindAvailablePtrLoadStore(
            Ptr, LoadI->getType(), LoadI->isAtomic(), SinglePredBB, BBIt,
            (DefMaxInstsToScan - NumScanedInst), AA, &IsLoadCSE,
            &NumScanedInst);
      }
    }

    if (!PredAvailable) {
      OneUnavailablePred = PredBB;
      continue;
    }

    if (IsLoadCSE)
      CSELoads.push_back(cast<LoadInst>(PredAvailable));

    // If so, this load is partially redundant. Remember this info so that we
    // can create a PHI node.
    AvailablePreds.emplace_back(PredBB, PredAvailable);
  }

  // If the loaded value isn't available in any predecessor, it isn't partially
  // redundant.
  if (AvailablePreds.empty()) return false;

  // Okay, the loaded value is available in at least one (and maybe all!)
  // predecessors. If the value is unavailable in more than one unique
  // predecessor, we want to insert a merge block for those common
  // predecessors. This ensures that we only have to insert one reload, thus
  // not increasing code size.
  BasicBlock *UnavailablePred = nullptr;

  // If the value is unavailable in one of the predecessors, we will end up
  // inserting a new instruction into them. It is only valid if all the
  // instructions before LoadI are guaranteed to pass execution to its
  // successor, or if LoadI is safe to speculate.
  // TODO: If this logic becomes more complex, and we will perform PRE
  // insertion farther than to a predecessor, we need to reuse the code from
  // GVN's PRE. It requires dominator tree analysis, so for this simple case
  // it is overkill.
  if (PredsScanned.size() != AvailablePreds.size() &&
      !isSafeToSpeculativelyExecute(LoadI))
    for (auto I = LoadBB->begin(); &*I != LoadI; ++I)
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
        return false;

  // If there is exactly one predecessor where the value is unavailable, the
  // already computed 'OneUnavailablePred' block is it. If it ends in an
  // unconditional branch, we know that it isn't a critical edge.
  if (PredsScanned.size() == AvailablePreds.size()+1 &&
      OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
    UnavailablePred = OneUnavailablePred;
  } else if (PredsScanned.size() != AvailablePreds.size()) {
    // Otherwise, we had multiple unavailable predecessors or we had a critical
    // edge from the one.
    SmallVector<BasicBlock*, 8> PredsToSplit;
    SmallPtrSet<BasicBlock*, 8> AvailablePredSet;

    for (const auto &AvailablePred : AvailablePreds)
      AvailablePredSet.insert(AvailablePred.first);

    // Add all the unavailable predecessors to the PredsToSplit list.
    for (BasicBlock *P : predecessors(LoadBB)) {
      // If the predecessor is an indirect goto, we can't split the edge.
      // Same for CallBr.
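      // (Their successors are fixed by blockaddress operands or inline-asm
      // label constraints, so the edge cannot be redirected through a new
      // block.)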
      if (isa<IndirectBrInst>(P->getTerminator()) ||
          isa<CallBrInst>(P->getTerminator()))
        return false;

      if (!AvailablePredSet.count(P))
        PredsToSplit.push_back(P);
    }

    // Split them out to their own block.
    UnavailablePred = SplitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split");
  }

  // If the value isn't available in all predecessors, then there will be
  // exactly one where it isn't available. Insert a load on that edge and add
  // it to the AvailablePreds list.
  if (UnavailablePred) {
    assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
           "Can't handle critical edge here!");
    LoadInst *NewVal = new LoadInst(
        LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
        LoadI->getName() + ".pr", false, LoadI->getAlign(),
        LoadI->getOrdering(), LoadI->getSyncScopeID(),
        UnavailablePred->getTerminator());
    NewVal->setDebugLoc(LoadI->getDebugLoc());
    if (AATags)
      NewVal->setAAMetadata(AATags);

    AvailablePreds.emplace_back(UnavailablePred, NewVal);
  }

  // Now we know that each predecessor of this block has a value in
  // AvailablePreds, sort them for efficient access as we're walking the preds.
  array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());

  // Create a PHI node at the start of the block for the PRE'd load value.
  pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB);
  PHINode *PN = PHINode::Create(LoadI->getType(), std::distance(PB, PE), "",
                                &LoadBB->front());
  PN->takeName(LoadI);
  PN->setDebugLoc(LoadI->getDebugLoc());

  // Insert new entries into the PHI for each predecessor. A single block may
  // have multiple entries here.
  for (pred_iterator PI = PB; PI != PE; ++PI) {
    BasicBlock *P = *PI;
    AvailablePredsTy::iterator I =
        llvm::lower_bound(AvailablePreds, std::make_pair(P, (Value *)nullptr));

    assert(I != AvailablePreds.end() && I->first == P &&
           "Didn't find entry for predecessor!");

    // If we have an available predecessor but it requires casting, insert the
    // cast in the predecessor and use the cast. Note that we have to update
    // the AvailablePreds vector as we go so that all of the PHI entries for
    // this predecessor use the same bitcast.
    Value *&PredV = I->second;
    if (PredV->getType() != LoadI->getType())
      PredV = CastInst::CreateBitOrPointerCast(PredV, LoadI->getType(), "",
                                               P->getTerminator());

    PN->addIncoming(PredV, I->first);
  }

  for (LoadInst *PredLoadI : CSELoads) {
    combineMetadataForCSE(PredLoadI, LoadI, true);
  }

  LoadI->replaceAllUsesWith(PN);
  LoadI->eraseFromParent();

  return true;
}

/// FindMostPopularDest - The specified list contains multiple possible
/// threadable destinations. Pick the one that occurs the most frequently in
/// the list.
static BasicBlock *
FindMostPopularDest(BasicBlock *BB,
                    const SmallVectorImpl<std::pair<BasicBlock *,
                                  BasicBlock *>> &PredToDestList) {
  assert(!PredToDestList.empty());

  // Determine popularity. If there are multiple possible destinations, we
  // explicitly choose to ignore 'undef' destinations. We prefer to thread
  // blocks with known and real destinations to threading undef. We'll handle
  // them later if interesting.
1488 MapVector<BasicBlock *, unsigned> DestPopularity; 1489 1490 // Populate DestPopularity with the successors in the order they appear in the 1491 // successor list. This way, we ensure determinism by iterating it in the 1492 // same order in std::max_element below. We map nullptr to 0 so that we can 1493 // return nullptr when PredToDestList contains nullptr only. 1494 DestPopularity[nullptr] = 0; 1495 for (auto *SuccBB : successors(BB)) 1496 DestPopularity[SuccBB] = 0; 1497 1498 for (const auto &PredToDest : PredToDestList) 1499 if (PredToDest.second) 1500 DestPopularity[PredToDest.second]++; 1501 1502 // Find the most popular dest. 1503 using VT = decltype(DestPopularity)::value_type; 1504 auto MostPopular = std::max_element( 1505 DestPopularity.begin(), DestPopularity.end(), 1506 [](const VT &L, const VT &R) { return L.second < R.second; }); 1507 1508 // Okay, we have finally picked the most popular destination. 1509 return MostPopular->first; 1510 } 1511 1512 // Try to evaluate the value of V when the control flows from PredPredBB to 1513 // BB->getSinglePredecessor() and then on to BB. 1514 Constant *JumpThreadingPass::EvaluateOnPredecessorEdge(BasicBlock *BB, 1515 BasicBlock *PredPredBB, 1516 Value *V) { 1517 BasicBlock *PredBB = BB->getSinglePredecessor(); 1518 assert(PredBB && "Expected a single predecessor"); 1519 1520 if (Constant *Cst = dyn_cast<Constant>(V)) { 1521 return Cst; 1522 } 1523 1524 // Consult LVI if V is not an instruction in BB or PredBB. 1525 Instruction *I = dyn_cast<Instruction>(V); 1526 if (!I || (I->getParent() != BB && I->getParent() != PredBB)) { 1527 return LVI->getConstantOnEdge(V, PredPredBB, PredBB, nullptr); 1528 } 1529 1530 // Look into a PHI argument. 1531 if (PHINode *PHI = dyn_cast<PHINode>(V)) { 1532 if (PHI->getParent() == PredBB) 1533 return dyn_cast<Constant>(PHI->getIncomingValueForBlock(PredPredBB)); 1534 return nullptr; 1535 } 1536 1537 // If we have a CmpInst, try to fold it for each incoming edge into PredBB. 1538 if (CmpInst *CondCmp = dyn_cast<CmpInst>(V)) { 1539 if (CondCmp->getParent() == BB) { 1540 Constant *Op0 = 1541 EvaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(0)); 1542 Constant *Op1 = 1543 EvaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(1)); 1544 if (Op0 && Op1) { 1545 return ConstantExpr::getCompare(CondCmp->getPredicate(), Op0, Op1); 1546 } 1547 } 1548 return nullptr; 1549 } 1550 1551 return nullptr; 1552 } 1553 1554 bool JumpThreadingPass::ProcessThreadableEdges(Value *Cond, BasicBlock *BB, 1555 ConstantPreference Preference, 1556 Instruction *CxtI) { 1557 // If threading this would thread across a loop header, don't even try to 1558 // thread the edge. 1559 if (LoopHeaders.count(BB)) 1560 return false; 1561 1562 PredValueInfoTy PredValues; 1563 if (!ComputeValueKnownInPredecessors(Cond, BB, PredValues, Preference, 1564 CxtI)) { 1565 // We don't have known values in predecessors. See if we can thread through 1566 // BB and its sole predecessor. 1567 return MaybeThreadThroughTwoBasicBlocks(BB, Cond); 1568 } 1569 1570 assert(!PredValues.empty() && 1571 "ComputeValueKnownInPredecessors returned true with no values"); 1572 1573 LLVM_DEBUG(dbgs() << "IN BB: " << *BB; 1574 for (const auto &PredValue : PredValues) { 1575 dbgs() << " BB '" << BB->getName() 1576 << "': FOUND condition = " << *PredValue.first 1577 << " for pred '" << PredValue.second->getName() << "'.\n"; 1578 }); 1579 1580 // Decide what we want to thread through. 
Convert our list of known values to
1581 // a list of known destinations for each pred. This also discards duplicate
1582 // predecessors and keeps track of the undefined inputs (which are represented
1583 // as a null dest in the PredToDestList).
1584 SmallPtrSet<BasicBlock*, 16> SeenPreds;
1585 SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
1586
1587 BasicBlock *OnlyDest = nullptr;
1588 BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
1589 Constant *OnlyVal = nullptr;
1590 Constant *MultipleVal = (Constant *)(intptr_t)~0ULL;
1591
1592 for (const auto &PredValue : PredValues) {
1593 BasicBlock *Pred = PredValue.second;
1594 if (!SeenPreds.insert(Pred).second)
1595 continue; // Duplicate predecessor entry.
1596
1597 Constant *Val = PredValue.first;
1598
1599 BasicBlock *DestBB;
1600 if (isa<UndefValue>(Val))
1601 DestBB = nullptr;
1602 else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
1603 assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1604 DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
1605 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
1606 assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1607 DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor();
1608 } else {
1609 assert(isa<IndirectBrInst>(BB->getTerminator())
1610 && "Unexpected terminator");
1611 assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress");
1612 DestBB = cast<BlockAddress>(Val)->getBasicBlock();
1613 }
1614
1615 // If we have exactly one destination, remember it for efficiency below.
1616 if (PredToDestList.empty()) {
1617 OnlyDest = DestBB;
1618 OnlyVal = Val;
1619 } else {
1620 if (OnlyDest != DestBB)
1621 OnlyDest = MultipleDestSentinel;
1622 // It is possible we have the same destination but a different value,
1623 // e.g. the default case in a switch.
1624 if (Val != OnlyVal)
1625 OnlyVal = MultipleVal;
1626 }
1627
1628 // If the predecessor ends with an indirect goto, we can't change its
1629 // destination. Same for CallBr.
1630 if (isa<IndirectBrInst>(Pred->getTerminator()) ||
1631 isa<CallBrInst>(Pred->getTerminator()))
1632 continue;
1633
1634 PredToDestList.emplace_back(Pred, DestBB);
1635 }
1636
1637 // If all edges were unthreadable, we fail.
1638 if (PredToDestList.empty())
1639 return false;
1640
1641 // If all the predecessors go to a single known successor, we want to fold,
1642 // not thread. By doing so, we do not need to duplicate the current block, and
1643 // we do not miss opportunities in case we don't/can't duplicate.
1644 if (OnlyDest && OnlyDest != MultipleDestSentinel) {
1645 if (BB->hasNPredecessors(PredToDestList.size())) {
1646 bool SeenFirstBranchToOnlyDest = false;
1647 std::vector<DominatorTree::UpdateType> Updates;
1648 Updates.reserve(BB->getTerminator()->getNumSuccessors() - 1);
1649 for (BasicBlock *SuccBB : successors(BB)) {
1650 if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest) {
1651 SeenFirstBranchToOnlyDest = true; // Don't modify the first branch.
1652 } else {
1653 SuccBB->removePredecessor(BB, true); // This is an unreachable successor.
1654 Updates.push_back({DominatorTree::Delete, BB, SuccBB});
1655 }
1656 }
1657
1658 // Finally update the terminator.
1659 Instruction *Term = BB->getTerminator();
1660 BranchInst::Create(OnlyDest, Term);
1661 Term->eraseFromParent();
1662 DTU->applyUpdatesPermissive(Updates);
1663
1664 // If the condition is now dead due to the removal of the old terminator,
1665 // erase it.
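// For instance (illustrative IR), the terminator has just been
// rewritten from
//   br i1 %cond, label %onlydest, label %dead
// to
//   br label %onlydest
// so %cond may have lost its last use.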
1666 if (auto *CondInst = dyn_cast<Instruction>(Cond)) {
1667 if (CondInst->use_empty() && !CondInst->mayHaveSideEffects())
1668 CondInst->eraseFromParent();
1669 // We can safely replace *some* uses of the CondInst if it has
1670 // exactly one value as returned by LVI. RAUW is incorrect in the
1671 // presence of guards and assumes that use `Cond`. This is because we
1672 // use the guards/assumes to reason about the `Cond` value at the end
1673 // of the block, but RAUW unconditionally replaces all uses, including
1674 // the guards/assumes themselves and the uses before the
1675 // guard/assume.
1676 else if (OnlyVal && OnlyVal != MultipleVal &&
1677 CondInst->getParent() == BB)
1678 ReplaceFoldableUses(CondInst, OnlyVal);
1679 }
1680 return true;
1681 }
1682 }
1683
1684 // Determine which is the most common successor. If we have many inputs and
1685 // this block is a switch, we want to start by threading the batch that goes
1686 // to the most popular destination first. If we only know about one
1687 // threadable destination (the common case) we can avoid this.
1688 BasicBlock *MostPopularDest = OnlyDest;
1689
1690 if (MostPopularDest == MultipleDestSentinel) {
1691 // Remove any loop headers from the Dest list. ThreadEdge conservatively
1692 // won't process them, but we might have other destinations that are
1693 // eligible and we still want to process.
1694 erase_if(PredToDestList,
1695 [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) {
1696 return LoopHeaders.count(PredToDest.second) != 0;
1697 });
1698
1699 if (PredToDestList.empty())
1700 return false;
1701
1702 MostPopularDest = FindMostPopularDest(BB, PredToDestList);
1703 }
1704
1705 // Now that we know what the most popular destination is, factor all
1706 // predecessors that will jump to it into a single predecessor.
1707 SmallVector<BasicBlock*, 16> PredsToFactor;
1708 for (const auto &PredToDest : PredToDestList)
1709 if (PredToDest.second == MostPopularDest) {
1710 BasicBlock *Pred = PredToDest.first;
1711
1712 // This predecessor may be a switch or something else that has multiple
1713 // edges to the block. Factor each of these edges by listing them
1714 // according to # occurrences in PredsToFactor.
1715 for (BasicBlock *Succ : successors(Pred))
1716 if (Succ == BB)
1717 PredsToFactor.push_back(Pred);
1718 }
1719
1720 // If the threadable edges are branching on an undefined value, we get to pick
1721 // the destination that these predecessors should get to.
1722 if (!MostPopularDest)
1723 MostPopularDest = BB->getTerminator()->
1724 getSuccessor(GetBestDestForJumpOnUndef(BB));
1725
1726 // Ok, try to thread it!
1727 return TryThreadEdge(BB, PredsToFactor, MostPopularDest);
1728 }
1729
1730 /// ProcessBranchOnPHI - We have an otherwise unthreadable conditional branch on
1731 /// a PHI node in the current block. See if there are any simplifications we
1732 /// can do based on inputs to the phi node.
1733 bool JumpThreadingPass::ProcessBranchOnPHI(PHINode *PN) {
1734 BasicBlock *BB = PN->getParent();
1735
1736 // TODO: We could make use of this to do it once for blocks with common PHI
1737 // values.
1738 SmallVector<BasicBlock*, 1> PredBBs;
1739 PredBBs.resize(1);
1740
1741 // If any of the predecessor blocks end in an unconditional branch, we can
1742 // *duplicate* the conditional branch into that block in order to further
1743 // encourage jump threading and to eliminate cases where we branch on a
1744 // phi of an icmp (a branch on an icmp is much better).
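// For example (illustrative IR):
//   pred:
//     br label %bb
//   bb:
//     %p = phi i1 [ true, %pred ], [ %x, %other ]
//     br i1 %p, label %t, label %f
// Duplicating bb into pred makes pred branch on the constant true,
// which folds into an unconditional branch to %t.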
1745 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1746 BasicBlock *PredBB = PN->getIncomingBlock(i); 1747 if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator())) 1748 if (PredBr->isUnconditional()) { 1749 PredBBs[0] = PredBB; 1750 // Try to duplicate BB into PredBB. 1751 if (DuplicateCondBranchOnPHIIntoPred(BB, PredBBs)) 1752 return true; 1753 } 1754 } 1755 1756 return false; 1757 } 1758 1759 /// ProcessBranchOnXOR - We have an otherwise unthreadable conditional branch on 1760 /// a xor instruction in the current block. See if there are any 1761 /// simplifications we can do based on inputs to the xor. 1762 bool JumpThreadingPass::ProcessBranchOnXOR(BinaryOperator *BO) { 1763 BasicBlock *BB = BO->getParent(); 1764 1765 // If either the LHS or RHS of the xor is a constant, don't do this 1766 // optimization. 1767 if (isa<ConstantInt>(BO->getOperand(0)) || 1768 isa<ConstantInt>(BO->getOperand(1))) 1769 return false; 1770 1771 // If the first instruction in BB isn't a phi, we won't be able to infer 1772 // anything special about any particular predecessor. 1773 if (!isa<PHINode>(BB->front())) 1774 return false; 1775 1776 // If this BB is a landing pad, we won't be able to split the edge into it. 1777 if (BB->isEHPad()) 1778 return false; 1779 1780 // If we have a xor as the branch input to this block, and we know that the 1781 // LHS or RHS of the xor in any predecessor is true/false, then we can clone 1782 // the condition into the predecessor and fix that value to true, saving some 1783 // logical ops on that path and encouraging other paths to simplify. 1784 // 1785 // This copies something like this: 1786 // 1787 // BB: 1788 // %X = phi i1 [1], [%X'] 1789 // %Y = icmp eq i32 %A, %B 1790 // %Z = xor i1 %X, %Y 1791 // br i1 %Z, ... 1792 // 1793 // Into: 1794 // BB': 1795 // %Y = icmp ne i32 %A, %B 1796 // br i1 %Y, ... 1797 1798 PredValueInfoTy XorOpValues; 1799 bool isLHS = true; 1800 if (!ComputeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues, 1801 WantInteger, BO)) { 1802 assert(XorOpValues.empty()); 1803 if (!ComputeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues, 1804 WantInteger, BO)) 1805 return false; 1806 isLHS = false; 1807 } 1808 1809 assert(!XorOpValues.empty() && 1810 "ComputeValueKnownInPredecessors returned true with no values"); 1811 1812 // Scan the information to see which is most popular: true or false. The 1813 // predecessors can be of the set true, false, or undef. 1814 unsigned NumTrue = 0, NumFalse = 0; 1815 for (const auto &XorOpValue : XorOpValues) { 1816 if (isa<UndefValue>(XorOpValue.first)) 1817 // Ignore undefs for the count. 1818 continue; 1819 if (cast<ConstantInt>(XorOpValue.first)->isZero()) 1820 ++NumFalse; 1821 else 1822 ++NumTrue; 1823 } 1824 1825 // Determine which value to split on, true, false, or undef if neither. 1826 ConstantInt *SplitVal = nullptr; 1827 if (NumTrue > NumFalse) 1828 SplitVal = ConstantInt::getTrue(BB->getContext()); 1829 else if (NumTrue != 0 || NumFalse != 0) 1830 SplitVal = ConstantInt::getFalse(BB->getContext()); 1831 1832 // Collect all of the blocks that this can be folded into so that we can 1833 // factor this once and clone it once. 
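// E.g. (hypothetical counts), with known operand values
// {true, true, false, undef} we get NumTrue = 2 and NumFalse = 1, so
// SplitVal is true and the undef predecessor is grouped with the true
// ones below.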
1834 SmallVector<BasicBlock*, 8> BlocksToFoldInto; 1835 for (const auto &XorOpValue : XorOpValues) { 1836 if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first)) 1837 continue; 1838 1839 BlocksToFoldInto.push_back(XorOpValue.second); 1840 } 1841 1842 // If we inferred a value for all of the predecessors, then duplication won't 1843 // help us. However, we can just replace the LHS or RHS with the constant. 1844 if (BlocksToFoldInto.size() == 1845 cast<PHINode>(BB->front()).getNumIncomingValues()) { 1846 if (!SplitVal) { 1847 // If all preds provide undef, just nuke the xor, because it is undef too. 1848 BO->replaceAllUsesWith(UndefValue::get(BO->getType())); 1849 BO->eraseFromParent(); 1850 } else if (SplitVal->isZero()) { 1851 // If all preds provide 0, replace the xor with the other input. 1852 BO->replaceAllUsesWith(BO->getOperand(isLHS)); 1853 BO->eraseFromParent(); 1854 } else { 1855 // If all preds provide 1, set the computed value to 1. 1856 BO->setOperand(!isLHS, SplitVal); 1857 } 1858 1859 return true; 1860 } 1861 1862 // If any of predecessors end with an indirect goto, we can't change its 1863 // destination. Same for CallBr. 1864 if (any_of(BlocksToFoldInto, [](BasicBlock *Pred) { 1865 return isa<IndirectBrInst>(Pred->getTerminator()) || 1866 isa<CallBrInst>(Pred->getTerminator()); 1867 })) 1868 return false; 1869 1870 // Try to duplicate BB into PredBB. 1871 return DuplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto); 1872 } 1873 1874 /// AddPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new 1875 /// predecessor to the PHIBB block. If it has PHI nodes, add entries for 1876 /// NewPred using the entries from OldPred (suitably mapped). 1877 static void AddPHINodeEntriesForMappedBlock(BasicBlock *PHIBB, 1878 BasicBlock *OldPred, 1879 BasicBlock *NewPred, 1880 DenseMap<Instruction*, Value*> &ValueMap) { 1881 for (PHINode &PN : PHIBB->phis()) { 1882 // Ok, we have a PHI node. Figure out what the incoming value was for the 1883 // DestBlock. 1884 Value *IV = PN.getIncomingValueForBlock(OldPred); 1885 1886 // Remap the value if necessary. 1887 if (Instruction *Inst = dyn_cast<Instruction>(IV)) { 1888 DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst); 1889 if (I != ValueMap.end()) 1890 IV = I->second; 1891 } 1892 1893 PN.addIncoming(IV, NewPred); 1894 } 1895 } 1896 1897 /// Merge basic block BB into its sole predecessor if possible. 1898 bool JumpThreadingPass::MaybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB) { 1899 BasicBlock *SinglePred = BB->getSinglePredecessor(); 1900 if (!SinglePred) 1901 return false; 1902 1903 const Instruction *TI = SinglePred->getTerminator(); 1904 if (TI->isExceptionalTerminator() || TI->getNumSuccessors() != 1 || 1905 SinglePred == BB || hasAddressTakenAndUsed(BB)) 1906 return false; 1907 1908 // If SinglePred was a loop header, BB becomes one. 1909 if (LoopHeaders.erase(SinglePred)) 1910 LoopHeaders.insert(BB); 1911 1912 LVI->eraseBlock(SinglePred); 1913 MergeBasicBlockIntoOnlyPred(BB, DTU); 1914 1915 // Now that BB is merged into SinglePred (i.e. SinglePred code followed by 1916 // BB code within one basic block `BB`), we need to invalidate the LVI 1917 // information associated with BB, because the LVI information need not be 1918 // true for all of BB after the merge. For example, 1919 // Before the merge, LVI info and code is as follows: 1920 // SinglePred: <LVI info1 for %p val> 1921 // %y = use of %p 1922 // call @exit() // need not transfer execution to successor. 
1923 // assume(%p) // from this point on %p is true
1924 // br label %BB
1925 // BB: <LVI info2 for %p val, i.e. %p is true>
1926 // %x = use of %p
1927 // br label exit
1928 //
1929 // Note that this LVI info for blocks BB and SinglePred is correct for %p
1930 // (info2 and info1 respectively). After the merge and the deletion of the
1931 // LVI info1 for SinglePred, we have the following code:
1932 // BB: <LVI info2 for %p val>
1933 // %y = use of %p
1934 // call @exit()
1935 // assume(%p)
1936 // %x = use of %p <-- LVI info2 is correct from here onwards.
1937 // br label exit
1938 // LVI info2 for BB is incorrect at the beginning of BB.
1939
1940 // Invalidate LVI information for BB if the LVI is not provably true for
1941 // all of BB.
1942 if (!isGuaranteedToTransferExecutionToSuccessor(BB))
1943 LVI->eraseBlock(BB);
1944 return true;
1945 }
1946
1947 /// Update the SSA form. NewBB contains instructions that are copied from BB.
1948 /// ValueMapping maps old values in BB to new ones in NewBB.
1949 void JumpThreadingPass::UpdateSSA(
1950 BasicBlock *BB, BasicBlock *NewBB,
1951 DenseMap<Instruction *, Value *> &ValueMapping) {
1952 // If there were values defined in BB that are used outside the block, then we
1953 // now have to update all uses of the value to use either the original value,
1954 // the cloned value, or some PHI derived value. This can require arbitrary
1955 // PHI insertion, which we are prepared to do; clean these up now.
1956 SSAUpdater SSAUpdate;
1957 SmallVector<Use *, 16> UsesToRename;
1958
1959 for (Instruction &I : *BB) {
1960 // Scan all uses of this instruction to see if it is used outside of its
1961 // block, and if so, record them in UsesToRename.
1962 for (Use &U : I.uses()) {
1963 Instruction *User = cast<Instruction>(U.getUser());
1964 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
1965 if (UserPN->getIncomingBlock(U) == BB)
1966 continue;
1967 } else if (User->getParent() == BB)
1968 continue;
1969
1970 UsesToRename.push_back(&U);
1971 }
1972
1973 // If there are no uses outside the block, we're done with this instruction.
1974 if (UsesToRename.empty())
1975 continue;
1976 LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
1977
1978 // We found a use of I outside of BB. Rename all uses of I that are outside
1979 // its block to be uses of the appropriate PHI node etc. Seed the SSAUpdater
1980 // with the two values we know.
1981 SSAUpdate.Initialize(I.getType(), I.getName());
1982 SSAUpdate.AddAvailableValue(BB, &I);
1983 SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);
1984
1985 while (!UsesToRename.empty())
1986 SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
1987 LLVM_DEBUG(dbgs() << "\n");
1988 }
1989 }
1990
1991 /// Clone instructions in range [BI, BE) to NewBB. For PHI nodes, we only clone
1992 /// arguments that come from PredBB. Return the map from the variables in the
1993 /// source basic block to the variables in the newly created basic block.
1994 DenseMap<Instruction *, Value *>
1995 JumpThreadingPass::CloneInstructions(BasicBlock::iterator BI,
1996 BasicBlock::iterator BE, BasicBlock *NewBB,
1997 BasicBlock *PredBB) {
1998 // We are going to have to map operands from the source basic block to the new
1999 // copy of the block 'NewBB'. If there are PHI nodes in the source basic
2000 // block, evaluate them to account for entry from PredBB.
2001 DenseMap<Instruction *, Value *> ValueMapping;
2002
2003 // Clone the phi nodes of the source basic block into NewBB.
The resulting 2004 // phi nodes are trivial since NewBB only has one predecessor, but SSAUpdater 2005 // might need to rewrite the operand of the cloned phi. 2006 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) { 2007 PHINode *NewPN = PHINode::Create(PN->getType(), 1, PN->getName(), NewBB); 2008 NewPN->addIncoming(PN->getIncomingValueForBlock(PredBB), PredBB); 2009 ValueMapping[PN] = NewPN; 2010 } 2011 2012 // Clone the non-phi instructions of the source basic block into NewBB, 2013 // keeping track of the mapping and using it to remap operands in the cloned 2014 // instructions. 2015 for (; BI != BE; ++BI) { 2016 Instruction *New = BI->clone(); 2017 New->setName(BI->getName()); 2018 NewBB->getInstList().push_back(New); 2019 ValueMapping[&*BI] = New; 2020 2021 // Remap operands to patch up intra-block references. 2022 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2023 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 2024 DenseMap<Instruction *, Value *>::iterator I = ValueMapping.find(Inst); 2025 if (I != ValueMapping.end()) 2026 New->setOperand(i, I->second); 2027 } 2028 } 2029 2030 return ValueMapping; 2031 } 2032 2033 /// Attempt to thread through two successive basic blocks. 2034 bool JumpThreadingPass::MaybeThreadThroughTwoBasicBlocks(BasicBlock *BB, 2035 Value *Cond) { 2036 // Consider: 2037 // 2038 // PredBB: 2039 // %var = phi i32* [ null, %bb1 ], [ @a, %bb2 ] 2040 // %tobool = icmp eq i32 %cond, 0 2041 // br i1 %tobool, label %BB, label ... 2042 // 2043 // BB: 2044 // %cmp = icmp eq i32* %var, null 2045 // br i1 %cmp, label ..., label ... 2046 // 2047 // We don't know the value of %var at BB even if we know which incoming edge 2048 // we take to BB. However, once we duplicate PredBB for each of its incoming 2049 // edges (say, PredBB1 and PredBB2), we know the value of %var in each copy of 2050 // PredBB. Then we can thread edges PredBB1->BB and PredBB2->BB through BB. 2051 2052 // Require that BB end with a Branch for simplicity. 2053 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 2054 if (!CondBr) 2055 return false; 2056 2057 // BB must have exactly one predecessor. 2058 BasicBlock *PredBB = BB->getSinglePredecessor(); 2059 if (!PredBB) 2060 return false; 2061 2062 // Require that PredBB end with a conditional Branch. If PredBB ends with an 2063 // unconditional branch, we should be merging PredBB and BB instead. For 2064 // simplicity, we don't deal with a switch. 2065 BranchInst *PredBBBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); 2066 if (!PredBBBranch || PredBBBranch->isUnconditional()) 2067 return false; 2068 2069 // If PredBB has exactly one incoming edge, we don't gain anything by copying 2070 // PredBB. 2071 if (PredBB->getSinglePredecessor()) 2072 return false; 2073 2074 // Don't thread through PredBB if it contains a successor edge to itself, in 2075 // which case we would infinite loop. Suppose we are threading an edge from 2076 // PredPredBB through PredBB and BB to SuccBB with PredBB containing a 2077 // successor edge to itself. If we allowed jump threading in this case, we 2078 // could duplicate PredBB and BB as, say, PredBB.thread and BB.thread. Since 2079 // PredBB.thread has a successor edge to PredBB, we would immediately come up 2080 // with another jump threading opportunity from PredBB.thread through PredBB 2081 // and BB to SuccBB. This jump threading would repeatedly occur. That is, we 2082 // would keep peeling one iteration from PredBB. 
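// For instance (illustrative CFG), if PredBB ends in
//   br i1 %c, label %PredBB, label %BB
// then the cloned PredBB.thread still has an edge to PredBB, recreating
// the same threading opportunity on every pass over the function.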
2083 if (llvm::is_contained(successors(PredBB), PredBB))
2084 return false;
2085
2086 // Don't thread across a loop header.
2087 if (LoopHeaders.count(PredBB))
2088 return false;
2089
2090 // Avoid complication with duplicating EH pads.
2091 if (PredBB->isEHPad())
2092 return false;
2093
2094 // Find a predecessor that we can thread. For simplicity, we only thread a
2095 // successor edge out of BB when exactly one incoming edge into PredBB
2096 // selects it.
2097 unsigned ZeroCount = 0;
2098 unsigned OneCount = 0;
2099 BasicBlock *ZeroPred = nullptr;
2100 BasicBlock *OnePred = nullptr;
2101 for (BasicBlock *P : predecessors(PredBB)) {
2102 if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>(
2103 EvaluateOnPredecessorEdge(BB, P, Cond))) {
2104 if (CI->isZero()) {
2105 ZeroCount++;
2106 ZeroPred = P;
2107 } else if (CI->isOne()) {
2108 OneCount++;
2109 OnePred = P;
2110 }
2111 }
2112 }
2113
2114 // Disregard complicated cases where we have to thread multiple edges.
2115 BasicBlock *PredPredBB;
2116 if (ZeroCount == 1) {
2117 PredPredBB = ZeroPred;
2118 } else if (OneCount == 1) {
2119 PredPredBB = OnePred;
2120 } else {
2121 return false;
2122 }
2123
2124 BasicBlock *SuccBB = CondBr->getSuccessor(PredPredBB == ZeroPred);
2125
2126 // If threading to the same block as we come from, we would infinite loop.
2127 if (SuccBB == BB) {
2128 LLVM_DEBUG(dbgs() << "  Not threading across BB '" << BB->getName()
2129 << "' - would thread to self!\n");
2130 return false;
2131 }
2132
2133 // If threading this would thread across a loop header, don't thread the edge.
2134 // See the comments above FindLoopHeaders for justifications and caveats.
2135 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
2136 LLVM_DEBUG({
2137 bool BBIsHeader = LoopHeaders.count(BB);
2138 bool SuccIsHeader = LoopHeaders.count(SuccBB);
2139 dbgs() << "  Not threading across "
2140 << (BBIsHeader ? "loop header BB '" : "block BB '")
2141 << BB->getName() << "' to dest "
2142 << (SuccIsHeader ? "loop header BB '" : "block BB '")
2143 << SuccBB->getName()
2144 << "' - it might create an irreducible loop!\n";
2145 });
2146 return false;
2147 }
2148
2149 // Compute the cost of duplicating BB and PredBB.
2150 unsigned BBCost =
2151 getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold);
2152 unsigned PredBBCost = getJumpThreadDuplicationCost(
2153 PredBB, PredBB->getTerminator(), BBDupThreshold);
2154
2155 // Give up if costs are too high. We need to check BBCost and PredBBCost
2156 // individually before checking their sum because getJumpThreadDuplicationCost
2157 // returns (unsigned)~0 for those basic blocks that cannot be duplicated.
2158 if (BBCost > BBDupThreshold || PredBBCost > BBDupThreshold ||
2159 BBCost + PredBBCost > BBDupThreshold) {
2160 LLVM_DEBUG(dbgs() << "  Not threading BB '" << BB->getName()
2161 << "' - Cost is too high: " << PredBBCost
2162 << " for PredBB, " << BBCost << " for BB\n");
2163 return false;
2164 }
2165
2166 // Now we are ready to duplicate PredBB.
2167 ThreadThroughTwoBasicBlocks(PredPredBB, PredBB, BB, SuccBB); 2168 return true; 2169 } 2170 2171 void JumpThreadingPass::ThreadThroughTwoBasicBlocks(BasicBlock *PredPredBB, 2172 BasicBlock *PredBB, 2173 BasicBlock *BB, 2174 BasicBlock *SuccBB) { 2175 LLVM_DEBUG(dbgs() << " Threading through '" << PredBB->getName() << "' and '" 2176 << BB->getName() << "'\n"); 2177 2178 BranchInst *CondBr = cast<BranchInst>(BB->getTerminator()); 2179 BranchInst *PredBBBranch = cast<BranchInst>(PredBB->getTerminator()); 2180 2181 BasicBlock *NewBB = 2182 BasicBlock::Create(PredBB->getContext(), PredBB->getName() + ".thread", 2183 PredBB->getParent(), PredBB); 2184 NewBB->moveAfter(PredBB); 2185 2186 // Set the block frequency of NewBB. 2187 if (HasProfileData) { 2188 auto NewBBFreq = BFI->getBlockFreq(PredPredBB) * 2189 BPI->getEdgeProbability(PredPredBB, PredBB); 2190 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency()); 2191 } 2192 2193 // We are going to have to map operands from the original BB block to the new 2194 // copy of the block 'NewBB'. If there are PHI nodes in PredBB, evaluate them 2195 // to account for entry from PredPredBB. 2196 DenseMap<Instruction *, Value *> ValueMapping = 2197 CloneInstructions(PredBB->begin(), PredBB->end(), NewBB, PredPredBB); 2198 2199 // Update the terminator of PredPredBB to jump to NewBB instead of PredBB. 2200 // This eliminates predecessors from PredPredBB, which requires us to simplify 2201 // any PHI nodes in PredBB. 2202 Instruction *PredPredTerm = PredPredBB->getTerminator(); 2203 for (unsigned i = 0, e = PredPredTerm->getNumSuccessors(); i != e; ++i) 2204 if (PredPredTerm->getSuccessor(i) == PredBB) { 2205 PredBB->removePredecessor(PredPredBB, true); 2206 PredPredTerm->setSuccessor(i, NewBB); 2207 } 2208 2209 AddPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(0), PredBB, NewBB, 2210 ValueMapping); 2211 AddPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(1), PredBB, NewBB, 2212 ValueMapping); 2213 2214 DTU->applyUpdatesPermissive( 2215 {{DominatorTree::Insert, NewBB, CondBr->getSuccessor(0)}, 2216 {DominatorTree::Insert, NewBB, CondBr->getSuccessor(1)}, 2217 {DominatorTree::Insert, PredPredBB, NewBB}, 2218 {DominatorTree::Delete, PredPredBB, PredBB}}); 2219 2220 UpdateSSA(PredBB, NewBB, ValueMapping); 2221 2222 // Clean up things like PHI nodes with single operands, dead instructions, 2223 // etc. 2224 SimplifyInstructionsInBlock(NewBB, TLI); 2225 SimplifyInstructionsInBlock(PredBB, TLI); 2226 2227 SmallVector<BasicBlock *, 1> PredsToFactor; 2228 PredsToFactor.push_back(NewBB); 2229 ThreadEdge(BB, PredsToFactor, SuccBB); 2230 } 2231 2232 /// TryThreadEdge - Thread an edge if it's safe and profitable to do so. 2233 bool JumpThreadingPass::TryThreadEdge( 2234 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs, 2235 BasicBlock *SuccBB) { 2236 // If threading to the same block as we come from, we would infinite loop. 2237 if (SuccBB == BB) { 2238 LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName() 2239 << "' - would thread to self!\n"); 2240 return false; 2241 } 2242 2243 // If threading this would thread across a loop header, don't thread the edge. 2244 // See the comments above FindLoopHeaders for justifications and caveats. 2245 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) { 2246 LLVM_DEBUG({ 2247 bool BBIsHeader = LoopHeaders.count(BB); 2248 bool SuccIsHeader = LoopHeaders.count(SuccBB); 2249 dbgs() << " Not threading across " 2250 << (BBIsHeader ? 
"loop header BB '" : "block BB '") << BB->getName() 2251 << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '") 2252 << SuccBB->getName() << "' - it might create an irreducible loop!\n"; 2253 }); 2254 return false; 2255 } 2256 2257 unsigned JumpThreadCost = 2258 getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold); 2259 if (JumpThreadCost > BBDupThreshold) { 2260 LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName() 2261 << "' - Cost is too high: " << JumpThreadCost << "\n"); 2262 return false; 2263 } 2264 2265 ThreadEdge(BB, PredBBs, SuccBB); 2266 return true; 2267 } 2268 2269 /// ThreadEdge - We have decided that it is safe and profitable to factor the 2270 /// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB 2271 /// across BB. Transform the IR to reflect this change. 2272 void JumpThreadingPass::ThreadEdge(BasicBlock *BB, 2273 const SmallVectorImpl<BasicBlock *> &PredBBs, 2274 BasicBlock *SuccBB) { 2275 assert(SuccBB != BB && "Don't create an infinite loop"); 2276 2277 assert(!LoopHeaders.count(BB) && !LoopHeaders.count(SuccBB) && 2278 "Don't thread across loop headers"); 2279 2280 // And finally, do it! Start by factoring the predecessors if needed. 2281 BasicBlock *PredBB; 2282 if (PredBBs.size() == 1) 2283 PredBB = PredBBs[0]; 2284 else { 2285 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() 2286 << " common predecessors.\n"); 2287 PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm"); 2288 } 2289 2290 // And finally, do it! 2291 LLVM_DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() 2292 << "' to '" << SuccBB->getName() 2293 << ", across block:\n " << *BB << "\n"); 2294 2295 LVI->threadEdge(PredBB, BB, SuccBB); 2296 2297 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), 2298 BB->getName()+".thread", 2299 BB->getParent(), BB); 2300 NewBB->moveAfter(PredBB); 2301 2302 // Set the block frequency of NewBB. 2303 if (HasProfileData) { 2304 auto NewBBFreq = 2305 BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB); 2306 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency()); 2307 } 2308 2309 // Copy all the instructions from BB to NewBB except the terminator. 2310 DenseMap<Instruction *, Value *> ValueMapping = 2311 CloneInstructions(BB->begin(), std::prev(BB->end()), NewBB, PredBB); 2312 2313 // We didn't copy the terminator from BB over to NewBB, because there is now 2314 // an unconditional jump to SuccBB. Insert the unconditional jump. 2315 BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB); 2316 NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc()); 2317 2318 // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the 2319 // PHI nodes for NewBB now. 2320 AddPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping); 2321 2322 // Update the terminator of PredBB to jump to NewBB instead of BB. This 2323 // eliminates predecessors from BB, which requires us to simplify any PHI 2324 // nodes in BB. 2325 Instruction *PredTerm = PredBB->getTerminator(); 2326 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) 2327 if (PredTerm->getSuccessor(i) == BB) { 2328 BB->removePredecessor(PredBB, true); 2329 PredTerm->setSuccessor(i, NewBB); 2330 } 2331 2332 // Enqueue required DT updates. 2333 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, SuccBB}, 2334 {DominatorTree::Insert, PredBB, NewBB}, 2335 {DominatorTree::Delete, PredBB, BB}}); 2336 2337 UpdateSSA(BB, NewBB, ValueMapping); 2338 2339 // At this point, the IR is fully up to date and consistent. 
Do a quick scan 2340 // over the new instructions and zap any that are constants or dead. This 2341 // frequently happens because of phi translation. 2342 SimplifyInstructionsInBlock(NewBB, TLI); 2343 2344 // Update the edge weight from BB to SuccBB, which should be less than before. 2345 UpdateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB); 2346 2347 // Threaded an edge! 2348 ++NumThreads; 2349 } 2350 2351 /// Create a new basic block that will be the predecessor of BB and successor of 2352 /// all blocks in Preds. When profile data is available, update the frequency of 2353 /// this new block. 2354 BasicBlock *JumpThreadingPass::SplitBlockPreds(BasicBlock *BB, 2355 ArrayRef<BasicBlock *> Preds, 2356 const char *Suffix) { 2357 SmallVector<BasicBlock *, 2> NewBBs; 2358 2359 // Collect the frequencies of all predecessors of BB, which will be used to 2360 // update the edge weight of the result of splitting predecessors. 2361 DenseMap<BasicBlock *, BlockFrequency> FreqMap; 2362 if (HasProfileData) 2363 for (auto Pred : Preds) 2364 FreqMap.insert(std::make_pair( 2365 Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB))); 2366 2367 // In the case when BB is a LandingPad block we create 2 new predecessors 2368 // instead of just one. 2369 if (BB->isLandingPad()) { 2370 std::string NewName = std::string(Suffix) + ".split-lp"; 2371 SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs); 2372 } else { 2373 NewBBs.push_back(SplitBlockPredecessors(BB, Preds, Suffix)); 2374 } 2375 2376 std::vector<DominatorTree::UpdateType> Updates; 2377 Updates.reserve((2 * Preds.size()) + NewBBs.size()); 2378 for (auto NewBB : NewBBs) { 2379 BlockFrequency NewBBFreq(0); 2380 Updates.push_back({DominatorTree::Insert, NewBB, BB}); 2381 for (auto Pred : predecessors(NewBB)) { 2382 Updates.push_back({DominatorTree::Delete, Pred, BB}); 2383 Updates.push_back({DominatorTree::Insert, Pred, NewBB}); 2384 if (HasProfileData) // Update frequencies between Pred -> NewBB. 2385 NewBBFreq += FreqMap.lookup(Pred); 2386 } 2387 if (HasProfileData) // Apply the summed frequency to NewBB. 2388 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency()); 2389 } 2390 2391 DTU->applyUpdatesPermissive(Updates); 2392 return NewBBs[0]; 2393 } 2394 2395 bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) { 2396 const Instruction *TI = BB->getTerminator(); 2397 assert(TI->getNumSuccessors() > 1 && "not a split"); 2398 2399 MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof); 2400 if (!WeightsNode) 2401 return false; 2402 2403 MDString *MDName = cast<MDString>(WeightsNode->getOperand(0)); 2404 if (MDName->getString() != "branch_weights") 2405 return false; 2406 2407 // Ensure there are weights for all of the successors. Note that the first 2408 // operand to the metadata node is a name, not a weight. 2409 return WeightsNode->getNumOperands() == TI->getNumSuccessors() + 1; 2410 } 2411 2412 /// Update the block frequency of BB and branch weight and the metadata on the 2413 /// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by 1 - 2414 /// Freq(PredBB->BB) / Freq(BB->SuccBB). 2415 void JumpThreadingPass::UpdateBlockFreqAndEdgeWeight(BasicBlock *PredBB, 2416 BasicBlock *BB, 2417 BasicBlock *NewBB, 2418 BasicBlock *SuccBB) { 2419 if (!HasProfileData) 2420 return; 2421 2422 assert(BFI && BPI && "BFI & BPI should have been created here"); 2423 2424 // As the edge from PredBB to BB is deleted, we have to update the block 2425 // frequency of BB. 
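// Concretely (made-up numbers): if Freq(BB) was 100 and the threaded
// path now carries Freq(NewBB) = 30, then BB's frequency drops to 70
// and the BB->SuccBB edge frequency shrinks by the same 30 below.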
2426 auto BBOrigFreq = BFI->getBlockFreq(BB); 2427 auto NewBBFreq = BFI->getBlockFreq(NewBB); 2428 auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB); 2429 auto BBNewFreq = BBOrigFreq - NewBBFreq; 2430 BFI->setBlockFreq(BB, BBNewFreq.getFrequency()); 2431 2432 // Collect updated outgoing edges' frequencies from BB and use them to update 2433 // edge probabilities. 2434 SmallVector<uint64_t, 4> BBSuccFreq; 2435 for (BasicBlock *Succ : successors(BB)) { 2436 auto SuccFreq = (Succ == SuccBB) 2437 ? BB2SuccBBFreq - NewBBFreq 2438 : BBOrigFreq * BPI->getEdgeProbability(BB, Succ); 2439 BBSuccFreq.push_back(SuccFreq.getFrequency()); 2440 } 2441 2442 uint64_t MaxBBSuccFreq = 2443 *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end()); 2444 2445 SmallVector<BranchProbability, 4> BBSuccProbs; 2446 if (MaxBBSuccFreq == 0) 2447 BBSuccProbs.assign(BBSuccFreq.size(), 2448 {1, static_cast<unsigned>(BBSuccFreq.size())}); 2449 else { 2450 for (uint64_t Freq : BBSuccFreq) 2451 BBSuccProbs.push_back( 2452 BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq)); 2453 // Normalize edge probabilities so that they sum up to one. 2454 BranchProbability::normalizeProbabilities(BBSuccProbs.begin(), 2455 BBSuccProbs.end()); 2456 } 2457 2458 // Update edge probabilities in BPI. 2459 BPI->setEdgeProbability(BB, BBSuccProbs); 2460 2461 // Update the profile metadata as well. 2462 // 2463 // Don't do this if the profile of the transformed blocks was statically 2464 // estimated. (This could occur despite the function having an entry 2465 // frequency in completely cold parts of the CFG.) 2466 // 2467 // In this case we don't want to suggest to subsequent passes that the 2468 // calculated weights are fully consistent. Consider this graph: 2469 // 2470 // check_1 2471 // 50% / | 2472 // eq_1 | 50% 2473 // \ | 2474 // check_2 2475 // 50% / | 2476 // eq_2 | 50% 2477 // \ | 2478 // check_3 2479 // 50% / | 2480 // eq_3 | 50% 2481 // \ | 2482 // 2483 // Assuming the blocks check_* all compare the same value against 1, 2 and 3, 2484 // the overall probabilities are inconsistent; the total probability that the 2485 // value is either 1, 2 or 3 is 150%. 2486 // 2487 // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3 2488 // becomes 0%. This is even worse if the edge whose probability becomes 0% is 2489 // the loop exit edge. Then based solely on static estimation we would assume 2490 // the loop was extremely hot. 2491 // 2492 // FIXME this locally as well so that BPI and BFI are consistent as well. We 2493 // shouldn't make edges extremely likely or unlikely based solely on static 2494 // estimation. 2495 if (BBSuccProbs.size() >= 2 && doesBlockHaveProfileData(BB)) { 2496 SmallVector<uint32_t, 4> Weights; 2497 for (auto Prob : BBSuccProbs) 2498 Weights.push_back(Prob.getNumerator()); 2499 2500 auto TI = BB->getTerminator(); 2501 TI->setMetadata( 2502 LLVMContext::MD_prof, 2503 MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights)); 2504 } 2505 } 2506 2507 /// DuplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch 2508 /// to BB which contains an i1 PHI node and a conditional branch on that PHI. 2509 /// If we can duplicate the contents of BB up into PredBB do so now, this 2510 /// improves the odds that the branch will be on an analyzable instruction like 2511 /// a compare. 
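/// For example (illustrative IR):
///   pred:
///     br label %bb
///   bb:
///     %p = phi i1 [ %cmp, %pred ], [ %q, %other ]
///     br i1 %p, label %t, label %f
/// Duplicating bb into pred leaves pred branching on %cmp directly,
/// a compare that the threading logic above knows how to analyze.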
2512 bool JumpThreadingPass::DuplicateCondBranchOnPHIIntoPred( 2513 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) { 2514 assert(!PredBBs.empty() && "Can't handle an empty set"); 2515 2516 // If BB is a loop header, then duplicating this block outside the loop would 2517 // cause us to transform this into an irreducible loop, don't do this. 2518 // See the comments above FindLoopHeaders for justifications and caveats. 2519 if (LoopHeaders.count(BB)) { 2520 LLVM_DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName() 2521 << "' into predecessor block '" << PredBBs[0]->getName() 2522 << "' - it might create an irreducible loop!\n"); 2523 return false; 2524 } 2525 2526 unsigned DuplicationCost = 2527 getJumpThreadDuplicationCost(BB, BB->getTerminator(), BBDupThreshold); 2528 if (DuplicationCost > BBDupThreshold) { 2529 LLVM_DEBUG(dbgs() << " Not duplicating BB '" << BB->getName() 2530 << "' - Cost is too high: " << DuplicationCost << "\n"); 2531 return false; 2532 } 2533 2534 // And finally, do it! Start by factoring the predecessors if needed. 2535 std::vector<DominatorTree::UpdateType> Updates; 2536 BasicBlock *PredBB; 2537 if (PredBBs.size() == 1) 2538 PredBB = PredBBs[0]; 2539 else { 2540 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() 2541 << " common predecessors.\n"); 2542 PredBB = SplitBlockPreds(BB, PredBBs, ".thr_comm"); 2543 } 2544 Updates.push_back({DominatorTree::Delete, PredBB, BB}); 2545 2546 // Okay, we decided to do this! Clone all the instructions in BB onto the end 2547 // of PredBB. 2548 LLVM_DEBUG(dbgs() << " Duplicating block '" << BB->getName() 2549 << "' into end of '" << PredBB->getName() 2550 << "' to eliminate branch on phi. Cost: " 2551 << DuplicationCost << " block is:" << *BB << "\n"); 2552 2553 // Unless PredBB ends with an unconditional branch, split the edge so that we 2554 // can just clone the bits from BB into the end of the new PredBB. 2555 BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); 2556 2557 if (!OldPredBranch || !OldPredBranch->isUnconditional()) { 2558 BasicBlock *OldPredBB = PredBB; 2559 PredBB = SplitEdge(OldPredBB, BB); 2560 Updates.push_back({DominatorTree::Insert, OldPredBB, PredBB}); 2561 Updates.push_back({DominatorTree::Insert, PredBB, BB}); 2562 Updates.push_back({DominatorTree::Delete, OldPredBB, BB}); 2563 OldPredBranch = cast<BranchInst>(PredBB->getTerminator()); 2564 } 2565 2566 // We are going to have to map operands from the original BB block into the 2567 // PredBB block. Evaluate PHI nodes in BB. 2568 DenseMap<Instruction*, Value*> ValueMapping; 2569 2570 BasicBlock::iterator BI = BB->begin(); 2571 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) 2572 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB); 2573 // Clone the non-phi instructions of BB into PredBB, keeping track of the 2574 // mapping and using it to remap operands in the cloned instructions. 2575 for (; BI != BB->end(); ++BI) { 2576 Instruction *New = BI->clone(); 2577 2578 // Remap operands to patch up intra-block references. 2579 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2580 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 2581 DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst); 2582 if (I != ValueMapping.end()) 2583 New->setOperand(i, I->second); 2584 } 2585 2586 // If this instruction can be simplified after the operands are updated, 2587 // just use the simplified value instead. This frequently happens due to 2588 // phi translation. 
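// E.g. (illustrative): if a PHI in BB contributed the constant true on
// the edge from PredBB, a cloned "and i1 %p, %q" whose %p operand
// remaps to true simplifies to %q and need not be inserted at all.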
2589 if (Value *IV = SimplifyInstruction( 2590 New, 2591 {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) { 2592 ValueMapping[&*BI] = IV; 2593 if (!New->mayHaveSideEffects()) { 2594 New->deleteValue(); 2595 New = nullptr; 2596 } 2597 } else { 2598 ValueMapping[&*BI] = New; 2599 } 2600 if (New) { 2601 // Otherwise, insert the new instruction into the block. 2602 New->setName(BI->getName()); 2603 PredBB->getInstList().insert(OldPredBranch->getIterator(), New); 2604 // Update Dominance from simplified New instruction operands. 2605 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2606 if (BasicBlock *SuccBB = dyn_cast<BasicBlock>(New->getOperand(i))) 2607 Updates.push_back({DominatorTree::Insert, PredBB, SuccBB}); 2608 } 2609 } 2610 2611 // Check to see if the targets of the branch had PHI nodes. If so, we need to 2612 // add entries to the PHI nodes for branch from PredBB now. 2613 BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator()); 2614 AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB, 2615 ValueMapping); 2616 AddPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB, 2617 ValueMapping); 2618 2619 UpdateSSA(BB, PredBB, ValueMapping); 2620 2621 // PredBB no longer jumps to BB, remove entries in the PHI node for the edge 2622 // that we nuked. 2623 BB->removePredecessor(PredBB, true); 2624 2625 // Remove the unconditional branch at the end of the PredBB block. 2626 OldPredBranch->eraseFromParent(); 2627 DTU->applyUpdatesPermissive(Updates); 2628 2629 ++NumDupes; 2630 return true; 2631 } 2632 2633 // Pred is a predecessor of BB with an unconditional branch to BB. SI is 2634 // a Select instruction in Pred. BB has other predecessors and SI is used in 2635 // a PHI node in BB. SI has no other use. 2636 // A new basic block, NewBB, is created and SI is converted to compare and 2637 // conditional branch. SI is erased from parent. 2638 void JumpThreadingPass::UnfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, 2639 SelectInst *SI, PHINode *SIUse, 2640 unsigned Idx) { 2641 // Expand the select. 2642 // 2643 // Pred -- 2644 // | v 2645 // | NewBB 2646 // | | 2647 // |----- 2648 // v 2649 // BB 2650 BranchInst *PredTerm = cast<BranchInst>(Pred->getTerminator()); 2651 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold", 2652 BB->getParent(), BB); 2653 // Move the unconditional branch to NewBB. 2654 PredTerm->removeFromParent(); 2655 NewBB->getInstList().insert(NewBB->end(), PredTerm); 2656 // Create a conditional branch and update PHI nodes. 2657 BranchInst::Create(NewBB, BB, SI->getCondition(), Pred); 2658 SIUse->setIncomingValue(Idx, SI->getFalseValue()); 2659 SIUse->addIncoming(SI->getTrueValue(), NewBB); 2660 2661 // The select is now dead. 2662 SI->eraseFromParent(); 2663 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, BB}, 2664 {DominatorTree::Insert, Pred, NewBB}}); 2665 2666 // Update any other PHI nodes in BB. 
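// NewBB is now a predecessor of BB, so every PHI in BB needs an entry
// for it. For PHIs other than SIUse, the value arriving via NewBB is
// the same one that used to arrive from Pred, since NewBB merely
// forwards Pred's path.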
2667 for (BasicBlock::iterator BI = BB->begin(); 2668 PHINode *Phi = dyn_cast<PHINode>(BI); ++BI) 2669 if (Phi != SIUse) 2670 Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB); 2671 } 2672 2673 bool JumpThreadingPass::TryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) { 2674 PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition()); 2675 2676 if (!CondPHI || CondPHI->getParent() != BB) 2677 return false; 2678 2679 for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) { 2680 BasicBlock *Pred = CondPHI->getIncomingBlock(I); 2681 SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I)); 2682 2683 // The second and third condition can be potentially relaxed. Currently 2684 // the conditions help to simplify the code and allow us to reuse existing 2685 // code, developed for TryToUnfoldSelect(CmpInst *, BasicBlock *) 2686 if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse()) 2687 continue; 2688 2689 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); 2690 if (!PredTerm || !PredTerm->isUnconditional()) 2691 continue; 2692 2693 UnfoldSelectInstr(Pred, BB, PredSI, CondPHI, I); 2694 return true; 2695 } 2696 return false; 2697 } 2698 2699 /// TryToUnfoldSelect - Look for blocks of the form 2700 /// bb1: 2701 /// %a = select 2702 /// br bb2 2703 /// 2704 /// bb2: 2705 /// %p = phi [%a, %bb1] ... 2706 /// %c = icmp %p 2707 /// br i1 %c 2708 /// 2709 /// And expand the select into a branch structure if one of its arms allows %c 2710 /// to be folded. This later enables threading from bb1 over bb2. 2711 bool JumpThreadingPass::TryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) { 2712 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 2713 PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0)); 2714 Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1)); 2715 2716 if (!CondBr || !CondBr->isConditional() || !CondLHS || 2717 CondLHS->getParent() != BB) 2718 return false; 2719 2720 for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) { 2721 BasicBlock *Pred = CondLHS->getIncomingBlock(I); 2722 SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I)); 2723 2724 // Look if one of the incoming values is a select in the corresponding 2725 // predecessor. 2726 if (!SI || SI->getParent() != Pred || !SI->hasOneUse()) 2727 continue; 2728 2729 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); 2730 if (!PredTerm || !PredTerm->isUnconditional()) 2731 continue; 2732 2733 // Now check if one of the select values would allow us to constant fold the 2734 // terminator in BB. We don't do the transform if both sides fold, those 2735 // cases will be threaded in any case. 2736 LazyValueInfo::Tristate LHSFolds = 2737 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1), 2738 CondRHS, Pred, BB, CondCmp); 2739 LazyValueInfo::Tristate RHSFolds = 2740 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2), 2741 CondRHS, Pred, BB, CondCmp); 2742 if ((LHSFolds != LazyValueInfo::Unknown || 2743 RHSFolds != LazyValueInfo::Unknown) && 2744 LHSFolds != RHSFolds) { 2745 UnfoldSelectInstr(Pred, BB, SI, CondLHS, I); 2746 return true; 2747 } 2748 } 2749 return false; 2750 } 2751 2752 /// TryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the 2753 /// same BB in the form 2754 /// bb: 2755 /// %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ... 
2756 /// %s = select %p, trueval, falseval
2757 ///
2758 /// or
2759 ///
2760 /// bb:
2761 /// %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ...
2762 /// %c = cmp %p, 0
2763 /// %s = select %c, trueval, falseval
2764 ///
2765 /// And expand the select into a branch structure. This later enables
2766 /// jump-threading over bb in this pass.
2767 ///
2768 /// Using an approach similar to SimplifyCFG::FoldCondBranchOnPHI(), unfold
2769 /// the select if the associated PHI has at least one constant. If the
2770 /// unfolded select is not jump-threaded, it will be folded again in later
2771 /// optimizations.
2772 bool JumpThreadingPass::TryToUnfoldSelectInCurrBB(BasicBlock *BB) {
2773 // This transform can introduce UB (a conditional branch that depends on a
2774 // poison value) that was not present in the original program. See
2775 // @TryToUnfoldSelectInCurrBB test in test/Transforms/JumpThreading/select.ll.
2776 // Disable this transform under MemorySanitizer.
2777 // FIXME: either delete it or replace with a valid transform. This issue is
2778 // not limited to MemorySanitizer (but has only been observed as an MSan false
2779 // positive in practice so far).
2780 if (BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory))
2781 return false;
2782
2783 // If threading this would thread across a loop header, don't thread the edge.
2784 // See the comments above FindLoopHeaders for justifications and caveats.
2785 if (LoopHeaders.count(BB))
2786 return false;
2787
2788 for (BasicBlock::iterator BI = BB->begin();
2789 PHINode *PN = dyn_cast<PHINode>(BI); ++BI) {
2790 // Look for a PHI having at least one constant incoming value.
2791 if (llvm::all_of(PN->incoming_values(),
2792 [](Value *V) { return !isa<ConstantInt>(V); }))
2793 continue;
2794
2795 auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) {
2796 // Check if SI is in BB and uses V as its condition.
2797 if (SI->getParent() != BB)
2798 return false;
2799 Value *Cond = SI->getCondition();
2800 return (Cond && Cond == V && Cond->getType()->isIntegerTy(1));
2801 };
2802
2803 SelectInst *SI = nullptr;
2804 for (Use &U : PN->uses()) {
2805 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) {
2806 // Look for an ICmp in BB that compares PN with a constant and is the
2807 // condition of a Select.
2808 if (Cmp->getParent() == BB && Cmp->hasOneUse() &&
2809 isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo())))
2810 if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back()))
2811 if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) {
2812 SI = SelectI;
2813 break;
2814 }
2815 } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) {
2816 // Look for a Select in BB that uses PN as its condition.
2817 if (isUnfoldCandidate(SelectI, U.get())) {
2818 SI = SelectI;
2819 break;
2820 }
2821 }
2822 }
2823
2824 if (!SI)
2825 continue;
2826 // Expand the select.
2827 Instruction *Term =
2828 SplitBlockAndInsertIfThen(SI->getCondition(), SI, false);
2829 BasicBlock *SplitBB = SI->getParent();
2830 BasicBlock *NewBB = Term->getParent();
2831 PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI);
2832 NewPN->addIncoming(SI->getTrueValue(), Term->getParent());
2833 NewPN->addIncoming(SI->getFalseValue(), BB);
2834 SI->replaceAllUsesWith(NewPN);
2835 SI->eraseFromParent();
2836 // NewBB and SplitBB are newly created blocks which require insertion.
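// After the expansion above, the select (illustrative IR; the block
// names stand for NewBB and SplitBB) has become:
//   bb:
//     br i1 %cond, label %select.then, label %select.end
//   select.then:                                        ; NewBB
//     br label %select.end
//   select.end:                                         ; SplitBB
//     %phi = phi [ trueval, %select.then ], [ falseval, %bb ]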
2837 std::vector<DominatorTree::UpdateType> Updates;
2838 Updates.reserve((2 * SplitBB->getTerminator()->getNumSuccessors()) + 3);
2839 Updates.push_back({DominatorTree::Insert, BB, SplitBB});
2840 Updates.push_back({DominatorTree::Insert, BB, NewBB});
2841 Updates.push_back({DominatorTree::Insert, NewBB, SplitBB});
2842 // BB's successors were moved to SplitBB, update DTU accordingly.
2843 for (auto *Succ : successors(SplitBB)) {
2844 Updates.push_back({DominatorTree::Delete, BB, Succ});
2845 Updates.push_back({DominatorTree::Insert, SplitBB, Succ});
2846 }
2847 DTU->applyUpdatesPermissive(Updates);
2848 return true;
2849 }
2850 return false;
2851 }
2852
2853 /// Try to propagate a guard from the current BB into one of its predecessors
2854 /// in case another branch of execution implies that the condition of this
2855 /// guard is always true. Currently we only process the simplest case that
2856 /// looks like:
2857 ///
2858 /// Start:
2859 /// %cond = ...
2860 /// br i1 %cond, label %T1, label %F1
2861 /// T1:
2862 /// br label %Merge
2863 /// F1:
2864 /// br label %Merge
2865 /// Merge:
2866 /// %condGuard = ...
2867 /// call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ]
2868 ///
2869 /// And cond either implies condGuard or !condGuard. In this case all the
2870 /// instructions before the guard can be duplicated in both branches, and the
2871 /// guard is then threaded to one of them.
2872 bool JumpThreadingPass::ProcessGuards(BasicBlock *BB) {
2873 using namespace PatternMatch;
2874
2875 // We only want to deal with two predecessors.
2876 BasicBlock *Pred1, *Pred2;
2877 auto PI = pred_begin(BB), PE = pred_end(BB);
2878 if (PI == PE)
2879 return false;
2880 Pred1 = *PI++;
2881 if (PI == PE)
2882 return false;
2883 Pred2 = *PI++;
2884 if (PI != PE)
2885 return false;
2886 if (Pred1 == Pred2)
2887 return false;
2888
2889 // Try to thread one of the guards of the block.
2890 // TODO: Look deeper than the immediate predecessor?
2891 auto *Parent = Pred1->getSinglePredecessor();
2892 if (!Parent || Parent != Pred2->getSinglePredecessor())
2893 return false;
2894
2895 if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator()))
2896 for (auto &I : *BB)
2897 if (isGuard(&I) && ThreadGuard(BB, cast<IntrinsicInst>(&I), BI))
2898 return true;
2899
2900 return false;
2901 }
2902
2903 /// Try to propagate the guard from BB, which is the lower block of a diamond,
2904 /// to one of its branches, in case the diamond's condition implies the
2905 /// guard's condition.
2906 bool JumpThreadingPass::ThreadGuard(BasicBlock *BB, IntrinsicInst *Guard,
2907 BranchInst *BI) {
2908 assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?");
2909 assert(BI->isConditional() && "Unconditional branch has 2 successors?");
2910 Value *GuardCond = Guard->getArgOperand(0);
2911 Value *BranchCond = BI->getCondition();
2912 BasicBlock *TrueDest = BI->getSuccessor(0);
2913 BasicBlock *FalseDest = BI->getSuccessor(1);
2914
2915 auto &DL = BB->getModule()->getDataLayout();
2916 bool TrueDestIsSafe = false;
2917 bool FalseDestIsSafe = false;
2918
2919 // True dest is safe if BranchCond => GuardCond.
2920 auto Impl = isImpliedCondition(BranchCond, GuardCond, DL);
2921 if (Impl && *Impl)
2922 TrueDestIsSafe = true;
2923 else {
2924 // False dest is safe if !BranchCond => GuardCond.
2925 Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false); 2926 if (Impl && *Impl) 2927 FalseDestIsSafe = true; 2928 } 2929 2930 if (!TrueDestIsSafe && !FalseDestIsSafe) 2931 return false; 2932 2933 BasicBlock *PredUnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest; 2934 BasicBlock *PredGuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest; 2935 2936 ValueToValueMapTy UnguardedMapping, GuardedMapping; 2937 Instruction *AfterGuard = Guard->getNextNode(); 2938 unsigned Cost = getJumpThreadDuplicationCost(BB, AfterGuard, BBDupThreshold); 2939 if (Cost > BBDupThreshold) 2940 return false; 2941 // Duplicate all instructions before the guard and the guard itself to the 2942 // branch where implication is not proved. 2943 BasicBlock *GuardedBlock = DuplicateInstructionsInSplitBetween( 2944 BB, PredGuardedBlock, AfterGuard, GuardedMapping, *DTU); 2945 assert(GuardedBlock && "Could not create the guarded block?"); 2946 // Duplicate all instructions before the guard in the unguarded branch. 2947 // Since we have successfully duplicated the guarded block and this block 2948 // has fewer instructions, we expect it to succeed. 2949 BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween( 2950 BB, PredUnguardedBlock, Guard, UnguardedMapping, *DTU); 2951 assert(UnguardedBlock && "Could not create the unguarded block?"); 2952 LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block " 2953 << GuardedBlock->getName() << "\n"); 2954 // Some instructions before the guard may still have uses. For them, we need 2955 // to create Phi nodes merging their copies in both guarded and unguarded 2956 // branches. Those instructions that have no uses can be just removed. 2957 SmallVector<Instruction *, 4> ToRemove; 2958 for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI) 2959 if (!isa<PHINode>(&*BI)) 2960 ToRemove.push_back(&*BI); 2961 2962 Instruction *InsertionPoint = &*BB->getFirstInsertionPt(); 2963 assert(InsertionPoint && "Empty block?"); 2964 // Substitute with Phis & remove. 2965 for (auto *Inst : reverse(ToRemove)) { 2966 if (!Inst->use_empty()) { 2967 PHINode *NewPN = PHINode::Create(Inst->getType(), 2); 2968 NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock); 2969 NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock); 2970 NewPN->insertBefore(InsertionPoint); 2971 Inst->replaceAllUsesWith(NewPN); 2972 } 2973 Inst->eraseFromParent(); 2974 } 2975 return true; 2976 } 2977