//===- JumpThreading.cpp - Thread control through conditional blocks ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the Jump Threading pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/JumpThreading.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ProfDataUtils.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/BlockFrequency.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;
using namespace jumpthreading;

#define DEBUG_TYPE "jump-threading"

STATISTIC(NumThreads, "Number of jumps threaded");
STATISTIC(NumFolds, "Number of terminators folded");
STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi");

static cl::opt<unsigned>
BBDuplicateThreshold("jump-threading-threshold",
          cl::desc("Max block size to duplicate for jump threading"),
          cl::init(6), cl::Hidden);

static cl::opt<unsigned>
ImplicationSearchThreshold(
"jump-threading-implication-search-threshold", 96 cl::desc("The number of predecessors to search for a stronger " 97 "condition to use to thread over a weaker condition"), 98 cl::init(3), cl::Hidden); 99 100 static cl::opt<unsigned> PhiDuplicateThreshold( 101 "jump-threading-phi-threshold", 102 cl::desc("Max PHIs in BB to duplicate for jump threading"), cl::init(76), 103 cl::Hidden); 104 105 static cl::opt<bool> PrintLVIAfterJumpThreading( 106 "print-lvi-after-jump-threading", 107 cl::desc("Print the LazyValueInfo cache after JumpThreading"), cl::init(false), 108 cl::Hidden); 109 110 static cl::opt<bool> ThreadAcrossLoopHeaders( 111 "jump-threading-across-loop-headers", 112 cl::desc("Allow JumpThreading to thread across loop headers, for testing"), 113 cl::init(false), cl::Hidden); 114 115 JumpThreadingPass::JumpThreadingPass(int T) { 116 DefaultBBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T); 117 } 118 119 // Update branch probability information according to conditional 120 // branch probability. This is usually made possible for cloned branches 121 // in inline instances by the context specific profile in the caller. 122 // For instance, 123 // 124 // [Block PredBB] 125 // [Branch PredBr] 126 // if (t) { 127 // Block A; 128 // } else { 129 // Block B; 130 // } 131 // 132 // [Block BB] 133 // cond = PN([true, %A], [..., %B]); // PHI node 134 // [Branch CondBr] 135 // if (cond) { 136 // ... // P(cond == true) = 1% 137 // } 138 // 139 // Here we know that when block A is taken, cond must be true, which means 140 // P(cond == true | A) = 1 141 // 142 // Given that P(cond == true) = P(cond == true | A) * P(A) + 143 // P(cond == true | B) * P(B) 144 // we get: 145 // P(cond == true ) = P(A) + P(cond == true | B) * P(B) 146 // 147 // which gives us: 148 // P(A) is less than P(cond == true), i.e. 149 // P(t == true) <= P(cond == true) 150 // 151 // In other words, if we know P(cond == true) is unlikely, we know 152 // that P(t == true) is also unlikely. 153 // 154 static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) { 155 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 156 if (!CondBr) 157 return; 158 159 uint64_t TrueWeight, FalseWeight; 160 if (!extractBranchWeights(*CondBr, TrueWeight, FalseWeight)) 161 return; 162 163 if (TrueWeight + FalseWeight == 0) 164 // Zero branch_weights do not give a hint for getting branch probabilities. 165 // Technically it would result in division by zero denominator, which is 166 // TrueWeight + FalseWeight. 167 return; 168 169 // Returns the outgoing edge of the dominating predecessor block 170 // that leads to the PhiNode's incoming block: 171 auto GetPredOutEdge = 172 [](BasicBlock *IncomingBB, 173 BasicBlock *PhiBB) -> std::pair<BasicBlock *, BasicBlock *> { 174 auto *PredBB = IncomingBB; 175 auto *SuccBB = PhiBB; 176 SmallPtrSet<BasicBlock *, 16> Visited; 177 while (true) { 178 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()); 179 if (PredBr && PredBr->isConditional()) 180 return {PredBB, SuccBB}; 181 Visited.insert(PredBB); 182 auto *SinglePredBB = PredBB->getSinglePredecessor(); 183 if (!SinglePredBB) 184 return {nullptr, nullptr}; 185 186 // Stop searching when SinglePredBB has been visited. It means we see 187 // an unreachable loop. 
      if (Visited.count(SinglePredBB))
        return {nullptr, nullptr};

      SuccBB = PredBB;
      PredBB = SinglePredBB;
    }
  };

  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *PhiOpnd = PN->getIncomingValue(i);
    ConstantInt *CI = dyn_cast<ConstantInt>(PhiOpnd);

    if (!CI || !CI->getType()->isIntegerTy(1))
      continue;

    BranchProbability BP =
        (CI->isOne() ? BranchProbability::getBranchProbability(
                           TrueWeight, TrueWeight + FalseWeight)
                     : BranchProbability::getBranchProbability(
                           FalseWeight, TrueWeight + FalseWeight));

    auto PredOutEdge = GetPredOutEdge(PN->getIncomingBlock(i), BB);
    if (!PredOutEdge.first)
      return;

    BasicBlock *PredBB = PredOutEdge.first;
    BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator());
    if (!PredBr)
      return;

    uint64_t PredTrueWeight, PredFalseWeight;
    // FIXME: We currently only set the profile data when it is missing.
    // With PGO, this can be used to refine even existing profile data with
    // context information. This needs to be done after more performance
    // testing.
    if (extractBranchWeights(*PredBr, PredTrueWeight, PredFalseWeight))
      continue;

    // We cannot infer anything useful when BP >= 50%, because BP is the
    // upper bound probability value.
    if (BP >= BranchProbability(50, 100))
      continue;

    SmallVector<uint32_t, 2> Weights;
    if (PredBr->getSuccessor(0) == PredOutEdge.second) {
      Weights.push_back(BP.getNumerator());
      Weights.push_back(BP.getCompl().getNumerator());
    } else {
      Weights.push_back(BP.getCompl().getNumerator());
      Weights.push_back(BP.getNumerator());
    }
    PredBr->setMetadata(LLVMContext::MD_prof,
                        MDBuilder(PredBr->getParent()->getContext())
                            .createBranchWeights(Weights));
  }
}

PreservedAnalyses JumpThreadingPass::run(Function &F,
                                         FunctionAnalysisManager &AM) {
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  // Jump threading makes no sense for targets with divergent control flow.
  if (TTI.hasBranchDivergence(&F))
    return PreservedAnalyses::all();
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &LVI = AM.getResult<LazyValueAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);

  bool Changed =
      runImpl(F, &AM, &TLI, &TTI, &LVI, &AA,
              std::make_unique<DomTreeUpdater>(
                  &DT, nullptr, DomTreeUpdater::UpdateStrategy::Lazy),
              std::nullopt, std::nullopt);

  if (PrintLVIAfterJumpThreading) {
    dbgs() << "LVI for function '" << F.getName() << "':\n";
    LVI.printLVI(F, getDomTreeUpdater()->getDomTree(), dbgs());
  }

  if (!Changed)
    return PreservedAnalyses::all();

  getDomTreeUpdater()->flush();

#if defined(EXPENSIVE_CHECKS)
  assert(getDomTreeUpdater()->getDomTree().verify(
             DominatorTree::VerificationLevel::Full) &&
         "DT broken after JumpThreading");
  assert((!getDomTreeUpdater()->hasPostDomTree() ||
          getDomTreeUpdater()->getPostDomTree().verify(
              PostDominatorTree::VerificationLevel::Full)) &&
         "PDT broken after JumpThreading");
#else
  assert(getDomTreeUpdater()->getDomTree().verify(
             DominatorTree::VerificationLevel::Fast) &&
         "DT broken after JumpThreading");
  assert((!getDomTreeUpdater()->hasPostDomTree() ||
          getDomTreeUpdater()->getPostDomTree().verify(
              PostDominatorTree::VerificationLevel::Fast)) &&
         "PDT broken after JumpThreading");
#endif

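  // Note (added): all CFG changes went through DTU and erased blocks were
  // reported to LVI above, so the analyses this pass keeps up to date can be
  // reported as preserved here.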
  return getPreservedAnalysis();
}

bool JumpThreadingPass::runImpl(Function &F_, FunctionAnalysisManager *FAM_,
                                TargetLibraryInfo *TLI_,
                                TargetTransformInfo *TTI_, LazyValueInfo *LVI_,
                                AliasAnalysis *AA_,
                                std::unique_ptr<DomTreeUpdater> DTU_,
                                std::optional<BlockFrequencyInfo *> BFI_,
                                std::optional<BranchProbabilityInfo *> BPI_) {
  LLVM_DEBUG(dbgs() << "Jump threading on function '" << F_.getName() << "'\n");
  F = &F_;
  FAM = FAM_;
  TLI = TLI_;
  TTI = TTI_;
  LVI = LVI_;
  AA = AA_;
  DTU = std::move(DTU_);
  BFI = BFI_;
  BPI = BPI_;
  auto *GuardDecl = F->getParent()->getFunction(
      Intrinsic::getName(Intrinsic::experimental_guard));
  HasGuards = GuardDecl && !GuardDecl->use_empty();

  // Reduce the number of instructions duplicated when optimizing strictly for
  // size.
  if (BBDuplicateThreshold.getNumOccurrences())
    BBDupThreshold = BBDuplicateThreshold;
  else if (F->hasFnAttribute(Attribute::MinSize))
    BBDupThreshold = 3;
  else
    BBDupThreshold = DefaultBBDupThreshold;

  // JumpThreading must not process blocks unreachable from entry. It's a
  // waste of compute time and can potentially lead to hangs.
  SmallPtrSet<BasicBlock *, 16> Unreachable;
  assert(DTU && "DTU isn't passed into JumpThreading before using it.");
  assert(DTU->hasDomTree() && "JumpThreading relies on DomTree to proceed.");
  DominatorTree &DT = DTU->getDomTree();
  for (auto &BB : *F)
    if (!DT.isReachableFromEntry(&BB))
      Unreachable.insert(&BB);

  if (!ThreadAcrossLoopHeaders)
    findLoopHeaders(*F);

  bool EverChanged = false;
  bool Changed;
  do {
    Changed = false;
    for (auto &BB : *F) {
      if (Unreachable.count(&BB))
        continue;
      while (processBlock(&BB)) // Thread all of the branches we can over BB.
        Changed = ChangedSinceLastAnalysisUpdate = true;

      // Jump threading may have introduced redundant debug values into BB
      // which should be removed.
      if (Changed)
        RemoveRedundantDbgInstrs(&BB);

      // Stop processing BB if it's the entry or is now deleted. The following
      // routines attempt to eliminate BB, and locating a suitable replacement
      // for the entry is non-trivial.
      if (&BB == &F->getEntryBlock() || DTU->isBBPendingDeletion(&BB))
        continue;

      if (pred_empty(&BB)) {
        // When processBlock makes BB unreachable it doesn't bother to fix up
        // the instructions in it. We must remove BB to prevent invalid IR.
        LLVM_DEBUG(dbgs() << "  JT: Deleting dead block '" << BB.getName()
                          << "' with terminator: " << *BB.getTerminator()
                          << '\n');
        LoopHeaders.erase(&BB);
        LVI->eraseBlock(&BB);
        DeleteDeadBlock(&BB, DTU.get());
        Changed = ChangedSinceLastAnalysisUpdate = true;
        continue;
      }

      // processBlock doesn't thread BBs with unconditional TIs. However, if BB
      // is "almost empty", we attempt to merge BB with its sole successor.
      auto *BI = dyn_cast<BranchInst>(BB.getTerminator());
      if (BI && BI->isUnconditional()) {
        BasicBlock *Succ = BI->getSuccessor(0);
        if (
            // The terminator must be the only non-phi instruction in BB.
            BB.getFirstNonPHIOrDbg(true)->isTerminator() &&
            // Don't alter Loop headers and latches to ensure another pass can
            // detect and transform nested loops later.
            !LoopHeaders.count(&BB) && !LoopHeaders.count(Succ) &&
            TryToSimplifyUncondBranchFromEmptyBlock(&BB, DTU.get())) {
          RemoveRedundantDbgInstrs(Succ);
          // BB is valid for cleanup here because we passed in DTU. F remains
          // BB's parent until a DTU->getDomTree() event.
          LVI->eraseBlock(&BB);
          Changed = ChangedSinceLastAnalysisUpdate = true;
        }
      }
    }
    EverChanged |= Changed;
  } while (Changed);

  LoopHeaders.clear();
  return EverChanged;
}

// Replace uses of Cond with ToVal when safe to do so. If all uses are
// replaced, we can remove Cond. We cannot blindly replace all uses of Cond
// because we may incorrectly replace uses when guards/assumes are uses of
// `Cond` and we used the guards/assume to reason about the `Cond` value
// at the end of block. RAUW unconditionally replaces all uses
// including the guards/assumes themselves and the uses before the
// guard/assume.
static bool replaceFoldableUses(Instruction *Cond, Value *ToVal,
                                BasicBlock *KnownAtEndOfBB) {
  bool Changed = false;
  assert(Cond->getType() == ToVal->getType());
  // We can unconditionally replace all uses in non-local blocks (i.e. uses
  // strictly dominated by BB), since LVI information is true from the
  // terminator of BB.
  if (Cond->getParent() == KnownAtEndOfBB)
    Changed |= replaceNonLocalUsesWith(Cond, ToVal);
  for (Instruction &I : reverse(*KnownAtEndOfBB)) {
    // Reached the Cond whose uses we are trying to replace, so there are no
    // more uses.
    if (&I == Cond)
      break;
    // We only replace uses in instructions that are guaranteed to reach the end
    // of BB, where we know Cond is ToVal.
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
    Changed |= I.replaceUsesOfWith(Cond, ToVal);
  }
  if (Cond->use_empty() && !Cond->mayHaveSideEffects()) {
    Cond->eraseFromParent();
    Changed = true;
  }
  return Changed;
}

/// Return the cost of duplicating a piece of this block, from the first non-phi
/// up to (but not including) the StopAt instruction, in order to thread across
/// it. Stop scanning the block when exceeding the threshold. If duplication is
/// impossible, returns ~0U.
static unsigned getJumpThreadDuplicationCost(const TargetTransformInfo *TTI,
                                             BasicBlock *BB,
                                             Instruction *StopAt,
                                             unsigned Threshold) {
  assert(StopAt->getParent() == BB && "Not an instruction from proper BB?");

  // Do not duplicate the BB if it has a lot of PHI nodes.
  // If a threadable chain is too long then the number of PHI nodes can add up,
  // leading to a substantial increase in compile time when rewriting the SSA.
  unsigned PhiCount = 0;
  Instruction *FirstNonPHI = nullptr;
  for (Instruction &I : *BB) {
    if (!isa<PHINode>(&I)) {
      FirstNonPHI = &I;
      break;
    }
    if (++PhiCount > PhiDuplicateThreshold)
      return ~0U;
  }

  /// Ignore PHI nodes, these will be flattened when duplication happens.
  BasicBlock::const_iterator I(FirstNonPHI);

  // FIXME: THREADING will delete values that are just used to compute the
  // branch, so they shouldn't count against the duplication cost.

  unsigned Bonus = 0;
  if (BB->getTerminator() == StopAt) {
    // Threading through a switch statement is particularly profitable. If this
    // block ends in a switch, decrease its cost to make it more likely to
    // happen.
    if (isa<SwitchInst>(StopAt))
      Bonus = 6;

    // The same holds for indirect branches, but slightly more so.
    if (isa<IndirectBrInst>(StopAt))
      Bonus = 8;
  }

  // Bump the threshold up so the early exit from the loop doesn't skip the
  // terminator-based Size adjustment at the end.
  Threshold += Bonus;

  // Sum up the cost of each instruction until we get to the terminator. Don't
  // include the terminator because the copy won't include it.
  unsigned Size = 0;
  for (; &*I != StopAt; ++I) {

    // Stop scanning the block if we've reached the threshold.
    if (Size > Threshold)
      return Size;

    // Bail out if this instruction gives back a token type, it is not possible
    // to duplicate it if it is used outside this BB.
    if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB))
      return ~0U;

    // Blocks with NoDuplicate are modelled as having infinite cost, so they
    // are never duplicated.
    if (const CallInst *CI = dyn_cast<CallInst>(I))
      if (CI->cannotDuplicate() || CI->isConvergent())
        return ~0U;

    if (TTI->getInstructionCost(&*I, TargetTransformInfo::TCK_SizeAndLatency) ==
        TargetTransformInfo::TCC_Free)
      continue;

    // All other instructions count for at least one unit.
    ++Size;

    // Calls are more expensive. If they are non-intrinsic calls, we model them
    // as having cost of 4. If they are a non-vector intrinsic, we model them
    // as having cost of 2 total, and if they are a vector intrinsic, we model
    // them as having cost 1.
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      if (!isa<IntrinsicInst>(CI))
        Size += 3;
      else if (!CI->getType()->isVectorTy())
        Size += 1;
    }
  }

  return Size > Bonus ? Size - Bonus : 0;
}

/// findLoopHeaders - We do not want jump threading to turn proper loop
/// structures into irreducible loops. Doing this breaks up the loop nesting
/// hierarchy and pessimizes later transformations. To prevent this from
/// happening, we first have to find the loop headers. Here we approximate this
/// by finding targets of backedges in the CFG.
///
/// Note that there definitely are cases when we want to allow threading of
/// edges across a loop header. For example, threading a jump from outside the
/// loop (the preheader) to an exit block of the loop is definitely profitable.
/// It is also almost always profitable to thread backedges from within the loop
/// to exit blocks, and is often profitable to thread backedges to other blocks
/// within the loop (forming a nested loop). This simple analysis is not rich
/// enough to track all of these properties and keep it up-to-date as the CFG
/// mutates, so we don't allow any of these transformations.
void JumpThreadingPass::findLoopHeaders(Function &F) {
  SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges;
  FindFunctionBackedges(F, Edges);

  for (const auto &Edge : Edges)
    LoopHeaders.insert(Edge.second);
}

/// getKnownConstant - Helper method to determine if we can thread over a
/// terminator with the given value as its condition, and if so what value to
/// use for that. What kind of value this is depends on whether we want an
/// integer or a block address, but an undef is always accepted.
/// Returns null if Val is null or not an appropriate constant.
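/// For example, processBlock asks for WantInteger when threading a conditional
/// branch or switch, and for WantBlockAddress when threading an indirectbr.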
static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) {
  if (!Val)
    return nullptr;

  // Undef is "known" enough.
  if (UndefValue *U = dyn_cast<UndefValue>(Val))
    return U;

  if (Preference == WantBlockAddress)
    return dyn_cast<BlockAddress>(Val->stripPointerCasts());

  return dyn_cast<ConstantInt>(Val);
}

/// computeValueKnownInPredecessors - Given a basic block BB and a value V, see
/// if we can infer that the value is a known ConstantInt/BlockAddress or undef
/// in any of our predecessors. If so, return the known list of value and pred
/// BB in the result vector.
///
/// This returns true if there were any known values.
bool JumpThreadingPass::computeValueKnownInPredecessorsImpl(
    Value *V, BasicBlock *BB, PredValueInfo &Result,
    ConstantPreference Preference, DenseSet<Value *> &RecursionSet,
    Instruction *CxtI) {
  // This method walks up use-def chains recursively. Because of this, we could
  // get into an infinite loop going around loops in the use-def chain. To
  // prevent this, keep track of what (value, block) pairs we've already
  // visited and terminate the search if we loop back to them.
  if (!RecursionSet.insert(V).second)
    return false;

  // If V is a constant, then it is known in all predecessors.
  if (Constant *KC = getKnownConstant(V, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.emplace_back(KC, Pred);

    return !Result.empty();
  }

  // If V is a non-instruction value, or an instruction in a different block,
  // then it can't be derived from a PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || I->getParent() != BB) {

    // Okay, if this is a live-in value, see if it has a known value on any
    // edge from our predecessors.
    for (BasicBlock *P : predecessors(BB)) {
      using namespace PatternMatch;
      // If the value is known by LazyValueInfo to be a constant in a
      // predecessor, use that information to try to thread this block.
      Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI);
      // If I is a non-local compare-with-constant instruction, use the richer
      // 'getPredicateOnEdge' method. This would be able to handle value
      // inequalities better, for example if the compare is "X < 4" and "X < 3"
      // is known true but "X < 4" itself is not available.
      CmpInst::Predicate Pred;
      Value *Val;
      Constant *Cst;
      if (!PredCst && match(V, m_Cmp(Pred, m_Value(Val), m_Constant(Cst)))) {
        auto Res = LVI->getPredicateOnEdge(Pred, Val, Cst, P, BB, CxtI);
        if (Res != LazyValueInfo::Unknown)
          PredCst = ConstantInt::getBool(V->getContext(), Res);
      }
      if (Constant *KC = getKnownConstant(PredCst, Preference))
        Result.emplace_back(KC, P);
    }

    return !Result.empty();
  }

  /// If I is a PHI node, then we know the incoming values for any constants.
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      if (Constant *KC = getKnownConstant(InVal, Preference)) {
        Result.emplace_back(KC, PN->getIncomingBlock(i));
      } else {
        Constant *CI = LVI->getConstantOnEdge(InVal,
                                              PN->getIncomingBlock(i),
                                              BB, CxtI);
        if (Constant *KC = getKnownConstant(CI, Preference))
          Result.emplace_back(KC, PN->getIncomingBlock(i));
      }
    }

    return !Result.empty();
  }

  // Handle Cast instructions.
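  // For example, if the source of a 'zext i8 %x to i32' is known to be i8 5 on
  // some incoming edge, the cast is folded through below and the zext is known
  // to be i32 5 on that edge.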
  if (CastInst *CI = dyn_cast<CastInst>(I)) {
    Value *Source = CI->getOperand(0);
    computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference,
                                        RecursionSet, CxtI);
    if (Result.empty())
      return false;

    // Convert the known values.
    for (auto &R : Result)
      R.first = ConstantExpr::getCast(CI->getOpcode(), R.first, CI->getType());

    return true;
  }

  if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) {
    Value *Source = FI->getOperand(0);
    computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference,
                                        RecursionSet, CxtI);

    erase_if(Result, [](auto &Pair) {
      return !isGuaranteedNotToBeUndefOrPoison(Pair.first);
    });

    return !Result.empty();
  }

  // Handle some boolean conditions.
  if (I->getType()->getPrimitiveSizeInBits() == 1) {
    using namespace PatternMatch;
    if (Preference != WantInteger)
      return false;
    // X | true -> true
    // X & false -> false
    Value *Op0, *Op1;
    if (match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))) ||
        match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) {
      PredValueInfoTy LHSVals, RHSVals;

      computeValueKnownInPredecessorsImpl(Op0, BB, LHSVals, WantInteger,
                                          RecursionSet, CxtI);
      computeValueKnownInPredecessorsImpl(Op1, BB, RHSVals, WantInteger,
                                          RecursionSet, CxtI);

      if (LHSVals.empty() && RHSVals.empty())
        return false;

      ConstantInt *InterestingVal;
      if (match(I, m_LogicalOr()))
        InterestingVal = ConstantInt::getTrue(I->getContext());
      else
        InterestingVal = ConstantInt::getFalse(I->getContext());

      SmallPtrSet<BasicBlock*, 4> LHSKnownBBs;

      // Scan for the sentinel. If we find an undef, force it to the
      // interesting value: x|undef -> true and x&undef -> false.
      for (const auto &LHSVal : LHSVals)
        if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) {
          Result.emplace_back(InterestingVal, LHSVal.second);
          LHSKnownBBs.insert(LHSVal.second);
        }
      for (const auto &RHSVal : RHSVals)
        if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) {
          // If we already inferred a value for this block on the LHS, don't
          // re-add it.
          if (!LHSKnownBBs.count(RHSVal.second))
            Result.emplace_back(InterestingVal, RHSVal.second);
        }

      return !Result.empty();
    }

    // Handle the NOT form of XOR.
    if (I->getOpcode() == Instruction::Xor &&
        isa<ConstantInt>(I->getOperand(1)) &&
        cast<ConstantInt>(I->getOperand(1))->isOne()) {
      computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, Result,
                                          WantInteger, RecursionSet, CxtI);
      if (Result.empty())
        return false;

      // Invert the known values.
      for (auto &R : Result)
        R.first = ConstantExpr::getNot(R.first);

      return true;
    }

  // Try to simplify some other binary operator values.
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) {
    if (Preference != WantInteger)
      return false;
    if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
      const DataLayout &DL = BO->getModule()->getDataLayout();
      PredValueInfoTy LHSVals;
      computeValueKnownInPredecessorsImpl(BO->getOperand(0), BB, LHSVals,
                                          WantInteger, RecursionSet, CxtI);

      // Try to use constant folding to simplify the binary operator.
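      // For example, if the LHS is known to be 3 in some predecessor and the
      // operator is 'add %x, 4', ConstantFoldBinaryOpOperands yields 7 for
      // that predecessor.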
      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded =
            ConstantFoldBinaryOpOperands(BO->getOpcode(), V, CI, DL);

        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.emplace_back(KC, LHSVal.second);
      }
    }

    return !Result.empty();
  }

  // Handle compare with phi operand, where the PHI is defined in this block.
  if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) {
    if (Preference != WantInteger)
      return false;
    Type *CmpType = Cmp->getType();
    Value *CmpLHS = Cmp->getOperand(0);
    Value *CmpRHS = Cmp->getOperand(1);
    CmpInst::Predicate Pred = Cmp->getPredicate();

    PHINode *PN = dyn_cast<PHINode>(CmpLHS);
    if (!PN)
      PN = dyn_cast<PHINode>(CmpRHS);
    if (PN && PN->getParent() == BB) {
      const DataLayout &DL = PN->getModule()->getDataLayout();
      // We can do this simplification if any comparisons fold to true or
      // false. See if any do.
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        BasicBlock *PredBB = PN->getIncomingBlock(i);
        Value *LHS, *RHS;
        if (PN == CmpLHS) {
          LHS = PN->getIncomingValue(i);
          RHS = CmpRHS->DoPHITranslation(BB, PredBB);
        } else {
          LHS = CmpLHS->DoPHITranslation(BB, PredBB);
          RHS = PN->getIncomingValue(i);
        }
        Value *Res = simplifyCmpInst(Pred, LHS, RHS, {DL});
        if (!Res) {
          if (!isa<Constant>(RHS))
            continue;

          // getPredicateOnEdge call will make no sense if LHS is defined in BB.
          auto LHSInst = dyn_cast<Instruction>(LHS);
          if (LHSInst && LHSInst->getParent() == BB)
            continue;

          LazyValueInfo::Tristate
            ResT = LVI->getPredicateOnEdge(Pred, LHS,
                                           cast<Constant>(RHS), PredBB, BB,
                                           CxtI ? CxtI : Cmp);
          if (ResT == LazyValueInfo::Unknown)
            continue;
          Res = ConstantInt::get(Type::getInt1Ty(LHS->getContext()), ResT);
        }

        if (Constant *KC = getKnownConstant(Res, WantInteger))
          Result.emplace_back(KC, PredBB);
      }

      return !Result.empty();
    }

    // If comparing a live-in value against a constant, see if we know the
    // live-in value on any predecessors.
    if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) {
      Constant *CmpConst = cast<Constant>(CmpRHS);

      if (!isa<Instruction>(CmpLHS) ||
          cast<Instruction>(CmpLHS)->getParent() != BB) {
        for (BasicBlock *P : predecessors(BB)) {
          // If the value is known by LazyValueInfo to be a constant in a
          // predecessor, use that information to try to thread this block.
          LazyValueInfo::Tristate Res =
            LVI->getPredicateOnEdge(Pred, CmpLHS,
                                    CmpConst, P, BB, CxtI ? CxtI : Cmp);
          if (Res == LazyValueInfo::Unknown)
            continue;

          Constant *ResC = ConstantInt::get(CmpType, Res);
          Result.emplace_back(ResC, P);
        }

        return !Result.empty();
      }

      // InstCombine can fold some forms of constant range checks into
      // (icmp (add (x, C1)), C2). See if we have such a thing with
      // x as a live-in.
      {
        using namespace PatternMatch;

        Value *AddLHS;
        ConstantInt *AddConst;
        if (isa<ConstantInt>(CmpConst) &&
            match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) {
          if (!isa<Instruction>(AddLHS) ||
              cast<Instruction>(AddLHS)->getParent() != BB) {
            for (BasicBlock *P : predecessors(BB)) {
              // If the value is known by LazyValueInfo to be a ConstantRange in
              // a predecessor, use that information to try to thread this
              // block.
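              // For example, if %x is known to be in [0, 2] on this edge and
              // the compare is 'icmp ult (add %x, 1), 4', the shifted range
              // [1, 3] lies entirely inside the true region [0, 4), so the
              // compare is known true for this predecessor.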
              ConstantRange CR = LVI->getConstantRangeOnEdge(
                  AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS));
              // Propagate the range through the addition.
              CR = CR.add(AddConst->getValue());

              // Get the range where the compare returns true.
              ConstantRange CmpRange = ConstantRange::makeExactICmpRegion(
                  Pred, cast<ConstantInt>(CmpConst)->getValue());

              Constant *ResC;
              if (CmpRange.contains(CR))
                ResC = ConstantInt::getTrue(CmpType);
              else if (CmpRange.inverse().contains(CR))
                ResC = ConstantInt::getFalse(CmpType);
              else
                continue;

              Result.emplace_back(ResC, P);
            }

            return !Result.empty();
          }
        }
      }

      // Try to find a constant value for the LHS of a comparison,
      // and evaluate it statically if we can.
      PredValueInfoTy LHSVals;
      computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, LHSVals,
                                          WantInteger, RecursionSet, CxtI);

      for (const auto &LHSVal : LHSVals) {
        Constant *V = LHSVal.first;
        Constant *Folded = ConstantExpr::getCompare(Pred, V, CmpConst);
        if (Constant *KC = getKnownConstant(Folded, WantInteger))
          Result.emplace_back(KC, LHSVal.second);
      }

      return !Result.empty();
    }
  }

  if (SelectInst *SI = dyn_cast<SelectInst>(I)) {
    // Handle select instructions where at least one operand is a known constant
    // and we can figure out the condition value for any predecessor block.
    Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference);
    Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference);
    PredValueInfoTy Conds;
    if ((TrueVal || FalseVal) &&
        computeValueKnownInPredecessorsImpl(SI->getCondition(), BB, Conds,
                                            WantInteger, RecursionSet, CxtI)) {
      for (auto &C : Conds) {
        Constant *Cond = C.first;

        // Figure out what value to use for the condition.
        bool KnownCond;
        if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) {
          // A known boolean.
          KnownCond = CI->isOne();
        } else {
          assert(isa<UndefValue>(Cond) && "Unexpected condition value");
          // Either operand will do, so be sure to pick the one that's a known
          // constant.
          // FIXME: Do this more cleverly if both values are known constants?
          KnownCond = (TrueVal != nullptr);
        }

        // See if the select has a known constant value for this predecessor.
        if (Constant *Val = KnownCond ? TrueVal : FalseVal)
          Result.emplace_back(Val, C.second);
      }

      return !Result.empty();
    }
  }

  // If all else fails, see if LVI can figure out a constant value for us.
  assert(CxtI->getParent() == BB && "CxtI should be in BB");
  Constant *CI = LVI->getConstant(V, CxtI);
  if (Constant *KC = getKnownConstant(CI, Preference)) {
    for (BasicBlock *Pred : predecessors(BB))
      Result.emplace_back(KC, Pred);
  }

  return !Result.empty();
}

/// getBestDestForJumpOnUndef - If we determine that the specified block ends
/// in an undefined jump, decide which block is best to revector to.
///
/// Since we can pick an arbitrary destination, we pick the successor with the
/// fewest predecessors. This should reduce the in-degree of the others.
static unsigned getBestDestForJumpOnUndef(BasicBlock *BB) {
  Instruction *BBTerm = BB->getTerminator();
  unsigned MinSucc = 0;
  BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc);
  // Compute the successor with the minimum number of predecessors.
  unsigned MinNumPreds = pred_size(TestBB);
  for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) {
    TestBB = BBTerm->getSuccessor(i);
    unsigned NumPreds = pred_size(TestBB);
    if (NumPreds < MinNumPreds) {
      MinSucc = i;
      MinNumPreds = NumPreds;
    }
  }

  return MinSucc;
}

static bool hasAddressTakenAndUsed(BasicBlock *BB) {
  if (!BB->hasAddressTaken()) return false;

  // If the block has its address taken, it may be a tree of dead constants
  // hanging off of it. These shouldn't keep the block alive.
  BlockAddress *BA = BlockAddress::get(BB);
  BA->removeDeadConstantUsers();
  return !BA->use_empty();
}

/// processBlock - If there are any predecessors whose control can be threaded
/// through to a successor, transform them now.
bool JumpThreadingPass::processBlock(BasicBlock *BB) {
  // If the block is trivially dead, just return and let the caller nuke it.
  // This simplifies other transformations.
  if (DTU->isBBPendingDeletion(BB) ||
      (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock()))
    return false;

  // If this block has a single predecessor, and if that pred has a single
  // successor, merge the blocks. This encourages recursive jump threading
  // because now the condition in this block can be threaded through
  // predecessors of our predecessor block.
  if (maybeMergeBasicBlockIntoOnlyPred(BB))
    return true;

  if (tryToUnfoldSelectInCurrBB(BB))
    return true;

  // See if we can propagate guards to predecessors.
  if (HasGuards && processGuards(BB))
    return true;

  // What kind of constant we're looking for.
  ConstantPreference Preference = WantInteger;

  // Look to see if the terminator is a conditional branch, switch or indirect
  // branch; if not, we can't thread it.
  Value *Condition;
  Instruction *Terminator = BB->getTerminator();
  if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) {
    // Can't thread an unconditional jump.
    if (BI->isUnconditional()) return false;
    Condition = BI->getCondition();
  } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) {
    Condition = SI->getCondition();
  } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) {
    // Can't thread indirect branch with no successors.
    if (IB->getNumSuccessors() == 0) return false;
    Condition = IB->getAddress()->stripPointerCasts();
    Preference = WantBlockAddress;
  } else {
    return false; // Must be an invoke or callbr.
  }

  // Keep track if we constant folded the condition in this invocation.
  bool ConstantFolded = false;

  // Run constant folding to see if we can reduce the condition to a simple
  // constant.
  if (Instruction *I = dyn_cast<Instruction>(Condition)) {
    Value *SimpleVal =
        ConstantFoldInstruction(I, BB->getModule()->getDataLayout(), TLI);
    if (SimpleVal) {
      I->replaceAllUsesWith(SimpleVal);
      if (isInstructionTriviallyDead(I, TLI))
        I->eraseFromParent();
      Condition = SimpleVal;
      ConstantFolded = true;
    }
  }

  // If the terminator is branching on an undef or freeze undef, we can pick any
  // of the successors to branch to. Let getBestDestForJumpOnUndef decide.
  auto *FI = dyn_cast<FreezeInst>(Condition);
  if (isa<UndefValue>(Condition) ||
      (FI && isa<UndefValue>(FI->getOperand(0)) && FI->hasOneUse())) {
    unsigned BestSucc = getBestDestForJumpOnUndef(BB);
    std::vector<DominatorTree::UpdateType> Updates;

    // Fold the branch/switch.
    Instruction *BBTerm = BB->getTerminator();
    Updates.reserve(BBTerm->getNumSuccessors());
    for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) {
      if (i == BestSucc) continue;
      BasicBlock *Succ = BBTerm->getSuccessor(i);
      Succ->removePredecessor(BB, true);
      Updates.push_back({DominatorTree::Delete, BB, Succ});
    }

    LLVM_DEBUG(dbgs() << "  In block '" << BB->getName()
                      << "' folding undef terminator: " << *BBTerm << '\n');
    BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm);
    ++NumFolds;
    BBTerm->eraseFromParent();
    DTU->applyUpdatesPermissive(Updates);
    if (FI)
      FI->eraseFromParent();
    return true;
  }

  // If the terminator of this block is branching on a constant, simplify the
  // terminator to an unconditional branch. This can occur due to threading in
  // other blocks.
  if (getKnownConstant(Condition, Preference)) {
    LLVM_DEBUG(dbgs() << "  In block '" << BB->getName()
                      << "' folding terminator: " << *BB->getTerminator()
                      << '\n');
    ++NumFolds;
    ConstantFoldTerminator(BB, true, nullptr, DTU.get());
    if (auto *BPI = getBPI())
      BPI->eraseBlock(BB);
    return true;
  }

  Instruction *CondInst = dyn_cast<Instruction>(Condition);

  // All the rest of our checks depend on the condition being an instruction.
  if (!CondInst) {
    // FIXME: Unify this with code below.
    if (processThreadableEdges(Condition, BB, Preference, Terminator))
      return true;
    return ConstantFolded;
  }

  // Some of the following optimizations can safely work on the unfrozen cond.
  Value *CondWithoutFreeze = CondInst;
  if (auto *FI = dyn_cast<FreezeInst>(CondInst))
    CondWithoutFreeze = FI->getOperand(0);

  if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondWithoutFreeze)) {
    // If we're branching on a conditional, LVI might be able to determine
    // its value at the branch instruction. We only handle comparisons
    // against a constant at this time.
    if (Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1))) {
      LazyValueInfo::Tristate Ret =
          LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0),
                              CondConst, BB->getTerminator(),
                              /*UseBlockValue=*/false);
      if (Ret != LazyValueInfo::Unknown) {
        // We can safely replace *some* uses of the CondInst if it has
        // exactly one value as returned by LVI. RAUW is incorrect in the
        // presence of guards and assumes that have `Cond` as a use. This
        // is because we use the guards/assume to reason about the `Cond` value
        // at the end of block, but RAUW unconditionally replaces all uses
        // including the guards/assumes themselves and the uses before the
        // guard/assume.
        auto *CI = Ret == LazyValueInfo::True ?
          ConstantInt::getTrue(CondCmp->getType()) :
          ConstantInt::getFalse(CondCmp->getType());
        if (replaceFoldableUses(CondCmp, CI, BB))
          return true;
      }

      // We did not manage to simplify this branch, try to see whether
      // CondCmp depends on a known phi-select pattern.
      if (tryToUnfoldSelect(CondCmp, BB))
        return true;
    }
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator()))
    if (tryToUnfoldSelect(SI, BB))
      return true;

  // Check for some cases that are worth simplifying. Right now we want to look
  // for loads that are used by a switch or by the condition for the branch. If
  // we see one, check to see if it's partially redundant. If so, insert a PHI
  // which can then be used to thread the values.
  Value *SimplifyValue = CondWithoutFreeze;

  if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue))
    if (isa<Constant>(CondCmp->getOperand(1)))
      SimplifyValue = CondCmp->getOperand(0);

  // TODO: There are other places where load PRE would be profitable, such as
  // more complex comparisons.
  if (LoadInst *LoadI = dyn_cast<LoadInst>(SimplifyValue))
    if (simplifyPartiallyRedundantLoad(LoadI))
      return true;

  // Before threading, try to propagate profile data backwards:
  if (PHINode *PN = dyn_cast<PHINode>(CondInst))
    if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
      updatePredecessorProfileMetadata(PN, BB);

  // Handle a variety of cases where we are branching on something derived from
  // a PHI node in the current block. If we can prove that any predecessors
  // compute a predictable value based on a PHI node, thread those predecessors.
  if (processThreadableEdges(CondInst, BB, Preference, Terminator))
    return true;

  // If this is an otherwise-unfoldable branch on a phi node or freeze(phi) in
  // the current block, see if we can simplify.
  PHINode *PN = dyn_cast<PHINode>(CondWithoutFreeze);
  if (PN && PN->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
    return processBranchOnPHI(PN);

  // If this is an otherwise-unfoldable branch on an XOR, see if we can
  // simplify.
  if (CondInst->getOpcode() == Instruction::Xor &&
      CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator()))
    return processBranchOnXOR(cast<BinaryOperator>(CondInst));

  // Search for a stronger dominating condition that can be used to simplify a
  // conditional branch leaving BB.
  if (processImpliedCondition(BB))
    return true;

  return false;
}

bool JumpThreadingPass::processImpliedCondition(BasicBlock *BB) {
  auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  // Assuming that predecessor's branch was taken, if pred's branch condition
  // (V) implies Cond, Cond can be either true, undef, or poison. In this case,
  // freeze(Cond) is either true or a nondeterministic value.
  // If freeze(Cond) has only one use, we can freely fold freeze(Cond) to true
  // without affecting other instructions.
  auto *FICond = dyn_cast<FreezeInst>(Cond);
  if (FICond && FICond->hasOneUse())
    Cond = FICond->getOperand(0);
  else
    FICond = nullptr;

  BasicBlock *CurrentBB = BB;
  BasicBlock *CurrentPred = BB->getSinglePredecessor();
  unsigned Iter = 0;

  auto &DL = BB->getModule()->getDataLayout();

  while (CurrentPred && Iter++ < ImplicationSearchThreshold) {
    auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator());
    if (!PBI || !PBI->isConditional())
      return false;
    if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB)
      return false;

    bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB;
    std::optional<bool> Implication =
        isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue);

    // If the branch condition of BB (which is Cond) and CurrentPred are
    // exactly the same freeze instruction, Cond can be folded into CondIsTrue.
    if (!Implication && FICond && isa<FreezeInst>(PBI->getCondition())) {
      if (cast<FreezeInst>(PBI->getCondition())->getOperand(0) ==
          FICond->getOperand(0))
        Implication = CondIsTrue;
    }

    if (Implication) {
      BasicBlock *KeepSucc = BI->getSuccessor(*Implication ? 0 : 1);
      BasicBlock *RemoveSucc = BI->getSuccessor(*Implication ? 1 : 0);
      RemoveSucc->removePredecessor(BB);
      BranchInst *UncondBI = BranchInst::Create(KeepSucc, BI);
      UncondBI->setDebugLoc(BI->getDebugLoc());
      ++NumFolds;
      BI->eraseFromParent();
      if (FICond)
        FICond->eraseFromParent();

      DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, RemoveSucc}});
      if (auto *BPI = getBPI())
        BPI->eraseBlock(BB);
      return true;
    }
    CurrentBB = CurrentPred;
    CurrentPred = CurrentBB->getSinglePredecessor();
  }

  return false;
}

/// Return true if Op is an instruction defined in the given block.
static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) {
  if (Instruction *OpInst = dyn_cast<Instruction>(Op))
    if (OpInst->getParent() == BB)
      return true;
  return false;
}

/// simplifyPartiallyRedundantLoad - If LoadI is an obviously partially
/// redundant load instruction, eliminate it by replacing it with a PHI node.
/// This is an important optimization that encourages jump threading, and needs
/// to be run interlaced with other jump threading tasks.
bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) {
  // Don't hack volatile and ordered loads.
  if (!LoadI->isUnordered()) return false;

  // If the load is defined in a block with exactly one predecessor, it can't be
  // partially redundant.
  BasicBlock *LoadBB = LoadI->getParent();
  if (LoadBB->getSinglePredecessor())
    return false;

  // If the load is defined in an EH pad, it can't be partially redundant,
  // because the edges between the invoke and the EH pad cannot have other
  // instructions between them.
  if (LoadBB->isEHPad())
    return false;

  Value *LoadedPtr = LoadI->getOperand(0);

  // If the loaded operand is defined in the LoadBB and it's not a phi,
  // it can't be available in predecessors.
  if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr))
    return false;

  // Scan a few instructions up from the load, to see if it is obviously live at
  // the entry to its block.
  BasicBlock::iterator BBIt(LoadI);
  bool IsLoadCSE;
  if (Value *AvailableVal = FindAvailableLoadedValue(
          LoadI, LoadBB, BBIt, DefMaxInstsToScan, AA, &IsLoadCSE)) {
    // If the value of the load is locally available within the block, just use
    // it. This frequently occurs for reg2mem'd allocas.

    if (IsLoadCSE) {
      LoadInst *NLoadI = cast<LoadInst>(AvailableVal);
      combineMetadataForCSE(NLoadI, LoadI, false);
    }

    // If the returned value is the load itself, replace with poison. This can
    // only happen in dead loops.
    if (AvailableVal == LoadI)
      AvailableVal = PoisonValue::get(LoadI->getType());
    if (AvailableVal->getType() != LoadI->getType())
      AvailableVal = CastInst::CreateBitOrPointerCast(
          AvailableVal, LoadI->getType(), "", LoadI);
    LoadI->replaceAllUsesWith(AvailableVal);
    LoadI->eraseFromParent();
    return true;
  }

  // Otherwise, if we scanned the whole block and got to the top of the block,
  // we know the block is locally transparent to the load. If not, something
  // might clobber its value.
  if (BBIt != LoadBB->begin())
    return false;

  // If all of the loads and stores that feed the value have the same AA tags,
  // then we can propagate them onto any newly inserted loads.
  AAMDNodes AATags = LoadI->getAAMetadata();

  SmallPtrSet<BasicBlock*, 8> PredsScanned;

  using AvailablePredsTy = SmallVector<std::pair<BasicBlock *, Value *>, 8>;

  AvailablePredsTy AvailablePreds;
  BasicBlock *OneUnavailablePred = nullptr;
  SmallVector<LoadInst*, 8> CSELoads;

  // If we got here, the loaded value is transparent through to the start of the
  // block. Check to see if it is available in any of the predecessor blocks.
  for (BasicBlock *PredBB : predecessors(LoadBB)) {
    // If we already scanned this predecessor, skip it.
    if (!PredsScanned.insert(PredBB).second)
      continue;

    BBIt = PredBB->end();
    unsigned NumScanedInst = 0;
    Value *PredAvailable = nullptr;
    // NOTE: We don't CSE loads that are volatile or anything stronger than
    // unordered; that should have been checked when we entered the function.
    assert(LoadI->isUnordered() &&
           "Attempting to CSE volatile or atomic loads");
    // If this is a load on a phi pointer, phi-translate it and search
    // for available load/store to the pointer in predecessors.
    Type *AccessTy = LoadI->getType();
    const auto &DL = LoadI->getModule()->getDataLayout();
    MemoryLocation Loc(LoadedPtr->DoPHITranslation(LoadBB, PredBB),
                       LocationSize::precise(DL.getTypeStoreSize(AccessTy)),
                       AATags);
    PredAvailable = findAvailablePtrLoadStore(Loc, AccessTy, LoadI->isAtomic(),
                                              PredBB, BBIt, DefMaxInstsToScan,
                                              AA, &IsLoadCSE, &NumScanedInst);

    // If PredBB has a single predecessor, continue scanning through the
    // single predecessor.
    BasicBlock *SinglePredBB = PredBB;
    while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() &&
           NumScanedInst < DefMaxInstsToScan) {
      SinglePredBB = SinglePredBB->getSinglePredecessor();
      if (SinglePredBB) {
        BBIt = SinglePredBB->end();
        PredAvailable = findAvailablePtrLoadStore(
            Loc, AccessTy, LoadI->isAtomic(), SinglePredBB, BBIt,
            (DefMaxInstsToScan - NumScanedInst), AA, &IsLoadCSE,
            &NumScanedInst);
      }
    }

    if (!PredAvailable) {
      OneUnavailablePred = PredBB;
      continue;
    }

    if (IsLoadCSE)
      CSELoads.push_back(cast<LoadInst>(PredAvailable));

    // If so, this load is partially redundant. Remember this info so that we
    // can create a PHI node.
    AvailablePreds.emplace_back(PredBB, PredAvailable);
  }

  // If the loaded value isn't available in any predecessor, it isn't partially
  // redundant.
  if (AvailablePreds.empty()) return false;

  // Okay, the loaded value is available in at least one (and maybe all!)
  // predecessors. If the value is unavailable in more than one unique
  // predecessor, we want to insert a merge block for those common predecessors.
  // This ensures that we only have to insert one reload, thus not increasing
  // code size.
  BasicBlock *UnavailablePred = nullptr;

  // If the value is unavailable in one of predecessors, we will end up
  // inserting a new instruction into them. It is only valid if all the
  // instructions before LoadI are guaranteed to pass execution to its
  // successor, or if LoadI is safe to speculate.
  // TODO: If this logic becomes more complex, and we will perform PRE insertion
  // farther than to a predecessor, we need to reuse the code from GVN's PRE.
  // It requires domination tree analysis, so for this simple case it is
  // overkill.
  if (PredsScanned.size() != AvailablePreds.size() &&
      !isSafeToSpeculativelyExecute(LoadI))
    for (auto I = LoadBB->begin(); &*I != LoadI; ++I)
      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
        return false;

  // If there is exactly one predecessor where the value is unavailable, the
  // already computed 'OneUnavailablePred' block is it. If it ends in an
  // unconditional branch, we know that it isn't a critical edge.
  if (PredsScanned.size() == AvailablePreds.size()+1 &&
      OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) {
    UnavailablePred = OneUnavailablePred;
  } else if (PredsScanned.size() != AvailablePreds.size()) {
    // Otherwise, we had multiple unavailable predecessors or we had a critical
    // edge from the one.
    SmallVector<BasicBlock*, 8> PredsToSplit;
    SmallPtrSet<BasicBlock*, 8> AvailablePredSet;

    for (const auto &AvailablePred : AvailablePreds)
      AvailablePredSet.insert(AvailablePred.first);

    // Add all the unavailable predecessors to the PredsToSplit list.
    for (BasicBlock *P : predecessors(LoadBB)) {
      // If the predecessor is an indirect goto, we can't split the edge.
      if (isa<IndirectBrInst>(P->getTerminator()))
        return false;

      if (!AvailablePredSet.count(P))
        PredsToSplit.push_back(P);
    }

    // Split them out to their own block.
    UnavailablePred = splitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split");
  }

  // If the value isn't available in all predecessors, then there will be
  // exactly one where it isn't available. Insert a load on that edge and add
  // it to the AvailablePreds list.
  if (UnavailablePred) {
    assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 &&
           "Can't handle critical edge here!");
    LoadInst *NewVal = new LoadInst(
        LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred),
        LoadI->getName() + ".pr", false, LoadI->getAlign(),
        LoadI->getOrdering(), LoadI->getSyncScopeID(),
        UnavailablePred->getTerminator());
    NewVal->setDebugLoc(LoadI->getDebugLoc());
    if (AATags)
      NewVal->setAAMetadata(AATags);

    AvailablePreds.emplace_back(UnavailablePred, NewVal);
  }

  // Now we know that each predecessor of this block has a value in
  // AvailablePreds, sort them for efficient access as we're walking the preds.
  array_pod_sort(AvailablePreds.begin(), AvailablePreds.end());

  // Create a PHI node at the start of the block for the PRE'd load value.
  pred_iterator PB = pred_begin(LoadBB), PE = pred_end(LoadBB);
  PHINode *PN = PHINode::Create(LoadI->getType(), std::distance(PB, PE), "",
                                &LoadBB->front());
  PN->takeName(LoadI);
  PN->setDebugLoc(LoadI->getDebugLoc());

  // Insert new entries into the PHI for each predecessor. A single block may
  // have multiple entries here.
  for (pred_iterator PI = PB; PI != PE; ++PI) {
    BasicBlock *P = *PI;
    AvailablePredsTy::iterator I =
        llvm::lower_bound(AvailablePreds, std::make_pair(P, (Value *)nullptr));

    assert(I != AvailablePreds.end() && I->first == P &&
           "Didn't find entry for predecessor!");

    // If we have an available predecessor but it requires casting, insert the
    // cast in the predecessor and use the cast. Note that we have to update the
    // AvailablePreds vector as we go so that all of the PHI entries for this
    // predecessor use the same bitcast.
    Value *&PredV = I->second;
    if (PredV->getType() != LoadI->getType())
      PredV = CastInst::CreateBitOrPointerCast(PredV, LoadI->getType(), "",
                                               P->getTerminator());

    PN->addIncoming(PredV, I->first);
  }

  for (LoadInst *PredLoadI : CSELoads) {
    combineMetadataForCSE(PredLoadI, LoadI, true);
  }

  LoadI->replaceAllUsesWith(PN);
  LoadI->eraseFromParent();

  return true;
}

/// findMostPopularDest - The specified list contains multiple possible
/// threadable destinations. Pick the one that occurs the most frequently in
/// the list.
static BasicBlock *
findMostPopularDest(BasicBlock *BB,
                    const SmallVectorImpl<std::pair<BasicBlock *,
                                          BasicBlock *>> &PredToDestList) {
  assert(!PredToDestList.empty());

  // Determine popularity. If there are multiple possible destinations, we
  // explicitly choose to ignore 'undef' destinations. We prefer to thread
  // blocks with known and real destinations to threading undef. We'll handle
  // them later if interesting.
  MapVector<BasicBlock *, unsigned> DestPopularity;

  // Populate DestPopularity with the successors in the order they appear in the
  // successor list. This way, we ensure determinism by iterating it in the
  // same order in std::max_element below. We map nullptr to 0 so that we can
  // return nullptr when PredToDestList contains nullptr only.
1491 DestPopularity[nullptr] = 0; 1492 for (auto *SuccBB : successors(BB)) 1493 DestPopularity[SuccBB] = 0; 1494 1495 for (const auto &PredToDest : PredToDestList) 1496 if (PredToDest.second) 1497 DestPopularity[PredToDest.second]++; 1498 1499 // Find the most popular dest. 1500 auto MostPopular = std::max_element( 1501 DestPopularity.begin(), DestPopularity.end(), llvm::less_second()); 1502 1503 // Okay, we have finally picked the most popular destination. 1504 return MostPopular->first; 1505 } 1506 1507 // Try to evaluate the value of V when the control flows from PredPredBB to 1508 // BB->getSinglePredecessor() and then on to BB. 1509 Constant *JumpThreadingPass::evaluateOnPredecessorEdge(BasicBlock *BB, 1510 BasicBlock *PredPredBB, 1511 Value *V) { 1512 BasicBlock *PredBB = BB->getSinglePredecessor(); 1513 assert(PredBB && "Expected a single predecessor"); 1514 1515 if (Constant *Cst = dyn_cast<Constant>(V)) { 1516 return Cst; 1517 } 1518 1519 // Consult LVI if V is not an instruction in BB or PredBB. 1520 Instruction *I = dyn_cast<Instruction>(V); 1521 if (!I || (I->getParent() != BB && I->getParent() != PredBB)) { 1522 return LVI->getConstantOnEdge(V, PredPredBB, PredBB, nullptr); 1523 } 1524 1525 // Look into a PHI argument. 1526 if (PHINode *PHI = dyn_cast<PHINode>(V)) { 1527 if (PHI->getParent() == PredBB) 1528 return dyn_cast<Constant>(PHI->getIncomingValueForBlock(PredPredBB)); 1529 return nullptr; 1530 } 1531 1532 // If we have a CmpInst, try to fold it for each incoming edge into PredBB. 1533 if (CmpInst *CondCmp = dyn_cast<CmpInst>(V)) { 1534 if (CondCmp->getParent() == BB) { 1535 Constant *Op0 = 1536 evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(0)); 1537 Constant *Op1 = 1538 evaluateOnPredecessorEdge(BB, PredPredBB, CondCmp->getOperand(1)); 1539 if (Op0 && Op1) { 1540 return ConstantExpr::getCompare(CondCmp->getPredicate(), Op0, Op1); 1541 } 1542 } 1543 return nullptr; 1544 } 1545 1546 return nullptr; 1547 } 1548 1549 bool JumpThreadingPass::processThreadableEdges(Value *Cond, BasicBlock *BB, 1550 ConstantPreference Preference, 1551 Instruction *CxtI) { 1552 // If threading this would thread across a loop header, don't even try to 1553 // thread the edge. 1554 if (LoopHeaders.count(BB)) 1555 return false; 1556 1557 PredValueInfoTy PredValues; 1558 if (!computeValueKnownInPredecessors(Cond, BB, PredValues, Preference, 1559 CxtI)) { 1560 // We don't have known values in predecessors. See if we can thread through 1561 // BB and its sole predecessor. 1562 return maybethreadThroughTwoBasicBlocks(BB, Cond); 1563 } 1564 1565 assert(!PredValues.empty() && 1566 "computeValueKnownInPredecessors returned true with no values"); 1567 1568 LLVM_DEBUG(dbgs() << "IN BB: " << *BB; 1569 for (const auto &PredValue : PredValues) { 1570 dbgs() << " BB '" << BB->getName() 1571 << "': FOUND condition = " << *PredValue.first 1572 << " for pred '" << PredValue.second->getName() << "'.\n"; 1573 }); 1574 1575 // Decide what we want to thread through. Convert our list of known values to 1576 // a list of known destinations for each pred. This also discards duplicate 1577 // predecessors and keeps track of the undefined inputs (which are represented 1578 // as a null dest in the PredToDestList). 
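// For illustration (a hypothetical example): if BB ends in
//   br i1 %cond, label %T, label %F
// and the known predecessor values are { (i1 true, %P1), (i1 false, %P2),
// (undef, %P3) }, then the loop below records
//   PredToDestList = { (%P1, %T), (%P2, %F), (%P3, nullptr) }.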
1579 SmallPtrSet<BasicBlock*, 16> SeenPreds;
1580 SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList;
1581
1582 BasicBlock *OnlyDest = nullptr;
1583 BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL;
1584 Constant *OnlyVal = nullptr;
1585 Constant *MultipleVal = (Constant *)(intptr_t)~0ULL;
1586
1587 for (const auto &PredValue : PredValues) {
1588 BasicBlock *Pred = PredValue.second;
1589 if (!SeenPreds.insert(Pred).second)
1590 continue; // Duplicate predecessor entry.
1591
1592 Constant *Val = PredValue.first;
1593
1594 BasicBlock *DestBB;
1595 if (isa<UndefValue>(Val))
1596 DestBB = nullptr;
1597 else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) {
1598 assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1599 DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero());
1600 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) {
1601 assert(isa<ConstantInt>(Val) && "Expecting a constant integer");
1602 DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor();
1603 } else {
1604 assert(isa<IndirectBrInst>(BB->getTerminator())
1605 && "Unexpected terminator");
1606 assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress");
1607 DestBB = cast<BlockAddress>(Val)->getBasicBlock();
1608 }
1609
1610 // If we have exactly one destination, remember it for efficiency below.
1611 if (PredToDestList.empty()) {
1612 OnlyDest = DestBB;
1613 OnlyVal = Val;
1614 } else {
1615 if (OnlyDest != DestBB)
1616 OnlyDest = MultipleDestSentinel;
1617 // It is possible that we have the same destination but a different value,
1618 // e.g. the default case of a SwitchInst.
1619 if (Val != OnlyVal)
1620 OnlyVal = MultipleVal;
1621 }
1622
1623 // If the predecessor ends with an indirect goto, we can't change its
1624 // destination.
1625 if (isa<IndirectBrInst>(Pred->getTerminator()))
1626 continue;
1627
1628 PredToDestList.emplace_back(Pred, DestBB);
1629 }
1630
1631 // If all edges were unthreadable, we fail.
1632 if (PredToDestList.empty())
1633 return false;
1634
1635 // If all the predecessors go to a single known successor, we want to fold,
1636 // not thread. By doing so, we avoid duplicating the current block, and we do
1637 // not miss potential opportunities in case we don't/can't duplicate.
1638 if (OnlyDest && OnlyDest != MultipleDestSentinel) {
1639 if (BB->hasNPredecessors(PredToDestList.size())) {
1640 bool SeenFirstBranchToOnlyDest = false;
1641 std::vector <DominatorTree::UpdateType> Updates;
1642 Updates.reserve(BB->getTerminator()->getNumSuccessors() - 1);
1643 for (BasicBlock *SuccBB : successors(BB)) {
1644 if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest) {
1645 SeenFirstBranchToOnlyDest = true; // Don't modify the first branch.
1646 } else {
1647 SuccBB->removePredecessor(BB, true); // This successor is unreachable.
1648 Updates.push_back({DominatorTree::Delete, BB, SuccBB});
1649 }
1650 }
1651
1652 // Finally update the terminator.
1653 Instruction *Term = BB->getTerminator();
1654 BranchInst::Create(OnlyDest, Term);
1655 ++NumFolds;
1656 Term->eraseFromParent();
1657 DTU->applyUpdatesPermissive(Updates);
1658 if (auto *BPI = getBPI())
1659 BPI->eraseBlock(BB);
1660
1661 // If the condition is now dead due to the removal of the old terminator,
1662 // erase it.
1663 if (auto *CondInst = dyn_cast<Instruction>(Cond)) { 1664 if (CondInst->use_empty() && !CondInst->mayHaveSideEffects()) 1665 CondInst->eraseFromParent(); 1666 // We can safely replace *some* uses of the CondInst if it has 1667 // exactly one value as returned by LVI. RAUW is incorrect in the 1668 // presence of guards and assumes, that have the `Cond` as the use. This 1669 // is because we use the guards/assume to reason about the `Cond` value 1670 // at the end of block, but RAUW unconditionally replaces all uses 1671 // including the guards/assumes themselves and the uses before the 1672 // guard/assume. 1673 else if (OnlyVal && OnlyVal != MultipleVal) 1674 replaceFoldableUses(CondInst, OnlyVal, BB); 1675 } 1676 return true; 1677 } 1678 } 1679 1680 // Determine which is the most common successor. If we have many inputs and 1681 // this block is a switch, we want to start by threading the batch that goes 1682 // to the most popular destination first. If we only know about one 1683 // threadable destination (the common case) we can avoid this. 1684 BasicBlock *MostPopularDest = OnlyDest; 1685 1686 if (MostPopularDest == MultipleDestSentinel) { 1687 // Remove any loop headers from the Dest list, threadEdge conservatively 1688 // won't process them, but we might have other destination that are eligible 1689 // and we still want to process. 1690 erase_if(PredToDestList, 1691 [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) { 1692 return LoopHeaders.contains(PredToDest.second); 1693 }); 1694 1695 if (PredToDestList.empty()) 1696 return false; 1697 1698 MostPopularDest = findMostPopularDest(BB, PredToDestList); 1699 } 1700 1701 // Now that we know what the most popular destination is, factor all 1702 // predecessors that will jump to it into a single predecessor. 1703 SmallVector<BasicBlock*, 16> PredsToFactor; 1704 for (const auto &PredToDest : PredToDestList) 1705 if (PredToDest.second == MostPopularDest) { 1706 BasicBlock *Pred = PredToDest.first; 1707 1708 // This predecessor may be a switch or something else that has multiple 1709 // edges to the block. Factor each of these edges by listing them 1710 // according to # occurrences in PredsToFactor. 1711 for (BasicBlock *Succ : successors(Pred)) 1712 if (Succ == BB) 1713 PredsToFactor.push_back(Pred); 1714 } 1715 1716 // If the threadable edges are branching on an undefined value, we get to pick 1717 // the destination that these predecessors should get to. 1718 if (!MostPopularDest) 1719 MostPopularDest = BB->getTerminator()-> 1720 getSuccessor(getBestDestForJumpOnUndef(BB)); 1721 1722 // Ok, try to thread it! 1723 return tryThreadEdge(BB, PredsToFactor, MostPopularDest); 1724 } 1725 1726 /// processBranchOnPHI - We have an otherwise unthreadable conditional branch on 1727 /// a PHI node (or freeze PHI) in the current block. See if there are any 1728 /// simplifications we can do based on inputs to the phi node. 1729 bool JumpThreadingPass::processBranchOnPHI(PHINode *PN) { 1730 BasicBlock *BB = PN->getParent(); 1731 1732 // TODO: We could make use of this to do it once for blocks with common PHI 1733 // values. 1734 SmallVector<BasicBlock*, 1> PredBBs; 1735 PredBBs.resize(1); 1736 1737 // If any of the predecessor blocks end in an unconditional branch, we can 1738 // *duplicate* the conditional branch into that block in order to further 1739 // encourage jump threading and to eliminate cases where we have branch on a 1740 // phi of an icmp (branch on icmp is much better). 
1741 // This is still beneficial when a frozen phi is used as the branch condition 1742 // because it allows CodeGenPrepare to further canonicalize br(freeze(icmp)) 1743 // to br(icmp(freeze ...)). 1744 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1745 BasicBlock *PredBB = PN->getIncomingBlock(i); 1746 if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator())) 1747 if (PredBr->isUnconditional()) { 1748 PredBBs[0] = PredBB; 1749 // Try to duplicate BB into PredBB. 1750 if (duplicateCondBranchOnPHIIntoPred(BB, PredBBs)) 1751 return true; 1752 } 1753 } 1754 1755 return false; 1756 } 1757 1758 /// processBranchOnXOR - We have an otherwise unthreadable conditional branch on 1759 /// a xor instruction in the current block. See if there are any 1760 /// simplifications we can do based on inputs to the xor. 1761 bool JumpThreadingPass::processBranchOnXOR(BinaryOperator *BO) { 1762 BasicBlock *BB = BO->getParent(); 1763 1764 // If either the LHS or RHS of the xor is a constant, don't do this 1765 // optimization. 1766 if (isa<ConstantInt>(BO->getOperand(0)) || 1767 isa<ConstantInt>(BO->getOperand(1))) 1768 return false; 1769 1770 // If the first instruction in BB isn't a phi, we won't be able to infer 1771 // anything special about any particular predecessor. 1772 if (!isa<PHINode>(BB->front())) 1773 return false; 1774 1775 // If this BB is a landing pad, we won't be able to split the edge into it. 1776 if (BB->isEHPad()) 1777 return false; 1778 1779 // If we have a xor as the branch input to this block, and we know that the 1780 // LHS or RHS of the xor in any predecessor is true/false, then we can clone 1781 // the condition into the predecessor and fix that value to true, saving some 1782 // logical ops on that path and encouraging other paths to simplify. 1783 // 1784 // This copies something like this: 1785 // 1786 // BB: 1787 // %X = phi i1 [1], [%X'] 1788 // %Y = icmp eq i32 %A, %B 1789 // %Z = xor i1 %X, %Y 1790 // br i1 %Z, ... 1791 // 1792 // Into: 1793 // BB': 1794 // %Y = icmp ne i32 %A, %B 1795 // br i1 %Y, ... 1796 1797 PredValueInfoTy XorOpValues; 1798 bool isLHS = true; 1799 if (!computeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues, 1800 WantInteger, BO)) { 1801 assert(XorOpValues.empty()); 1802 if (!computeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues, 1803 WantInteger, BO)) 1804 return false; 1805 isLHS = false; 1806 } 1807 1808 assert(!XorOpValues.empty() && 1809 "computeValueKnownInPredecessors returned true with no values"); 1810 1811 // Scan the information to see which is most popular: true or false. The 1812 // predecessors can be of the set true, false, or undef. 1813 unsigned NumTrue = 0, NumFalse = 0; 1814 for (const auto &XorOpValue : XorOpValues) { 1815 if (isa<UndefValue>(XorOpValue.first)) 1816 // Ignore undefs for the count. 1817 continue; 1818 if (cast<ConstantInt>(XorOpValue.first)->isZero()) 1819 ++NumFalse; 1820 else 1821 ++NumTrue; 1822 } 1823 1824 // Determine which value to split on, true, false, or undef if neither. 1825 ConstantInt *SplitVal = nullptr; 1826 if (NumTrue > NumFalse) 1827 SplitVal = ConstantInt::getTrue(BB->getContext()); 1828 else if (NumTrue != 0 || NumFalse != 0) 1829 SplitVal = ConstantInt::getFalse(BB->getContext()); 1830 1831 // Collect all of the blocks that this can be folded into so that we can 1832 // factor this once and clone it once. 
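// As an illustrative sketch (hypothetical predecessors %P1..%P4): if
// XorOpValues is { (true, %P1), (true, %P2), (false, %P3), (undef, %P4) },
// then NumTrue=2 > NumFalse=1, SplitVal is true, and the loop below collects
// BlocksToFoldInto = { %P1, %P2, %P4 } (undef entries go with SplitVal).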
1833 SmallVector<BasicBlock*, 8> BlocksToFoldInto; 1834 for (const auto &XorOpValue : XorOpValues) { 1835 if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first)) 1836 continue; 1837 1838 BlocksToFoldInto.push_back(XorOpValue.second); 1839 } 1840 1841 // If we inferred a value for all of the predecessors, then duplication won't 1842 // help us. However, we can just replace the LHS or RHS with the constant. 1843 if (BlocksToFoldInto.size() == 1844 cast<PHINode>(BB->front()).getNumIncomingValues()) { 1845 if (!SplitVal) { 1846 // If all preds provide undef, just nuke the xor, because it is undef too. 1847 BO->replaceAllUsesWith(UndefValue::get(BO->getType())); 1848 BO->eraseFromParent(); 1849 } else if (SplitVal->isZero() && BO != BO->getOperand(isLHS)) { 1850 // If all preds provide 0, replace the xor with the other input. 1851 BO->replaceAllUsesWith(BO->getOperand(isLHS)); 1852 BO->eraseFromParent(); 1853 } else { 1854 // If all preds provide 1, set the computed value to 1. 1855 BO->setOperand(!isLHS, SplitVal); 1856 } 1857 1858 return true; 1859 } 1860 1861 // If any of predecessors end with an indirect goto, we can't change its 1862 // destination. 1863 if (any_of(BlocksToFoldInto, [](BasicBlock *Pred) { 1864 return isa<IndirectBrInst>(Pred->getTerminator()); 1865 })) 1866 return false; 1867 1868 // Try to duplicate BB into PredBB. 1869 return duplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto); 1870 } 1871 1872 /// addPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new 1873 /// predecessor to the PHIBB block. If it has PHI nodes, add entries for 1874 /// NewPred using the entries from OldPred (suitably mapped). 1875 static void addPHINodeEntriesForMappedBlock(BasicBlock *PHIBB, 1876 BasicBlock *OldPred, 1877 BasicBlock *NewPred, 1878 DenseMap<Instruction*, Value*> &ValueMap) { 1879 for (PHINode &PN : PHIBB->phis()) { 1880 // Ok, we have a PHI node. Figure out what the incoming value was for the 1881 // DestBlock. 1882 Value *IV = PN.getIncomingValueForBlock(OldPred); 1883 1884 // Remap the value if necessary. 1885 if (Instruction *Inst = dyn_cast<Instruction>(IV)) { 1886 DenseMap<Instruction*, Value*>::iterator I = ValueMap.find(Inst); 1887 if (I != ValueMap.end()) 1888 IV = I->second; 1889 } 1890 1891 PN.addIncoming(IV, NewPred); 1892 } 1893 } 1894 1895 /// Merge basic block BB into its sole predecessor if possible. 1896 bool JumpThreadingPass::maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB) { 1897 BasicBlock *SinglePred = BB->getSinglePredecessor(); 1898 if (!SinglePred) 1899 return false; 1900 1901 const Instruction *TI = SinglePred->getTerminator(); 1902 if (TI->isExceptionalTerminator() || TI->getNumSuccessors() != 1 || 1903 SinglePred == BB || hasAddressTakenAndUsed(BB)) 1904 return false; 1905 1906 // If SinglePred was a loop header, BB becomes one. 1907 if (LoopHeaders.erase(SinglePred)) 1908 LoopHeaders.insert(BB); 1909 1910 LVI->eraseBlock(SinglePred); 1911 MergeBasicBlockIntoOnlyPred(BB, DTU.get()); 1912 1913 // Now that BB is merged into SinglePred (i.e. SinglePred code followed by 1914 // BB code within one basic block `BB`), we need to invalidate the LVI 1915 // information associated with BB, because the LVI information need not be 1916 // true for all of BB after the merge. For example, 1917 // Before the merge, LVI info and code is as follows: 1918 // SinglePred: <LVI info1 for %p val> 1919 // %y = use of %p 1920 // call @exit() // need not transfer execution to successor. 
1921 // assume(%p) // from this point on %p is true
1922 // br label %BB
1923 // BB: <LVI info2 for %p val, i.e. %p is true>
1924 // %x = use of %p
1925 // br label exit
1926 //
1927 // Note that this LVI info for blocks BB and SinglePred is correct for %p
1928 // (info2 and info1 respectively). After the merge and the deletion of the
1929 // LVI info1 for SinglePred, we have the following code:
1930 // BB: <LVI info2 for %p val>
1931 // %y = use of %p
1932 // call @exit()
1933 // assume(%p)
1934 // %x = use of %p <-- LVI info2 is correct from here onwards.
1935 // br label exit
1936 // LVI info2 for BB is incorrect at the beginning of BB.
1937
1938 // Invalidate LVI information for BB if the LVI is not provably true for
1939 // all of BB.
1940 if (!isGuaranteedToTransferExecutionToSuccessor(BB))
1941 LVI->eraseBlock(BB);
1942 return true;
1943 }
1944
1945 /// Update the SSA form. NewBB contains instructions that are copied from BB.
1946 /// ValueMapping maps old values in BB to new ones in NewBB.
1947 void JumpThreadingPass::updateSSA(
1948 BasicBlock *BB, BasicBlock *NewBB,
1949 DenseMap<Instruction *, Value *> &ValueMapping) {
1950 // If there were values defined in BB that are used outside the block, then we
1951 // now have to update all uses of the value to use either the original value,
1952 // the cloned value, or some PHI derived value. This can require arbitrary
1953 // PHI insertion, which we are prepared to do; clean these up now.
1954 SSAUpdater SSAUpdate;
1955 SmallVector<Use *, 16> UsesToRename;
1956 SmallVector<DbgValueInst *, 4> DbgValues;
1957
1958 for (Instruction &I : *BB) {
1959 // Scan all uses of this instruction to see if it is used outside of its
1960 // block, and if so, record them in UsesToRename.
1961 for (Use &U : I.uses()) {
1962 Instruction *User = cast<Instruction>(U.getUser());
1963 if (PHINode *UserPN = dyn_cast<PHINode>(User)) {
1964 if (UserPN->getIncomingBlock(U) == BB)
1965 continue;
1966 } else if (User->getParent() == BB)
1967 continue;
1968
1969 UsesToRename.push_back(&U);
1970 }
1971
1972 // Find debug values outside of the block.
1973 findDbgValues(DbgValues, &I);
1974 DbgValues.erase(remove_if(DbgValues,
1975 [&](const DbgValueInst *DbgVal) {
1976 return DbgVal->getParent() == BB;
1977 }),
1978 DbgValues.end());
1979
1980 // If there are no uses outside the block, we're done with this instruction.
1981 if (UsesToRename.empty() && DbgValues.empty())
1982 continue;
1983 LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n");
1984
1985 // We found a use of I outside of BB. Rename all uses of I that are outside
1986 // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks
1987 // with the two values we know.
1988 SSAUpdate.Initialize(I.getType(), I.getName());
1989 SSAUpdate.AddAvailableValue(BB, &I);
1990 SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]);
1991
1992 while (!UsesToRename.empty())
1993 SSAUpdate.RewriteUse(*UsesToRename.pop_back_val());
1994 if (!DbgValues.empty()) {
1995 SSAUpdate.UpdateDebugValues(&I, DbgValues);
1996 DbgValues.clear();
1997 }
1998
1999 LLVM_DEBUG(dbgs() << "\n");
2000 }
2001 }
2002
2003 /// Clone instructions in range [BI, BE) to NewBB. For PHI nodes, we only clone
2004 /// arguments that come from PredBB. Return the map from the variables in the
2005 /// source basic block to the variables in the newly created basic block.
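/// For example (an illustrative sketch, names hypothetical): if the source
/// block starts with
///   %p = phi i32 [ %a, %PredBB ], [ %b, %OtherBB ]
/// then the clone placed in NewBB is the single-entry phi
///   %p = phi i32 [ %a, %PredBB ]
/// and ValueMapping maps the original %p to the cloned one.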
2006 DenseMap<Instruction *, Value *> 2007 JumpThreadingPass::cloneInstructions(BasicBlock::iterator BI, 2008 BasicBlock::iterator BE, BasicBlock *NewBB, 2009 BasicBlock *PredBB) { 2010 // We are going to have to map operands from the source basic block to the new 2011 // copy of the block 'NewBB'. If there are PHI nodes in the source basic 2012 // block, evaluate them to account for entry from PredBB. 2013 DenseMap<Instruction *, Value *> ValueMapping; 2014 2015 // Retargets llvm.dbg.value to any renamed variables. 2016 auto RetargetDbgValueIfPossible = [&](Instruction *NewInst) -> bool { 2017 auto DbgInstruction = dyn_cast<DbgValueInst>(NewInst); 2018 if (!DbgInstruction) 2019 return false; 2020 2021 SmallSet<std::pair<Value *, Value *>, 16> OperandsToRemap; 2022 for (auto DbgOperand : DbgInstruction->location_ops()) { 2023 auto DbgOperandInstruction = dyn_cast<Instruction>(DbgOperand); 2024 if (!DbgOperandInstruction) 2025 continue; 2026 2027 auto I = ValueMapping.find(DbgOperandInstruction); 2028 if (I != ValueMapping.end()) { 2029 OperandsToRemap.insert( 2030 std::pair<Value *, Value *>(DbgOperand, I->second)); 2031 } 2032 } 2033 2034 for (auto &[OldOp, MappedOp] : OperandsToRemap) 2035 DbgInstruction->replaceVariableLocationOp(OldOp, MappedOp); 2036 return true; 2037 }; 2038 2039 // Clone the phi nodes of the source basic block into NewBB. The resulting 2040 // phi nodes are trivial since NewBB only has one predecessor, but SSAUpdater 2041 // might need to rewrite the operand of the cloned phi. 2042 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) { 2043 PHINode *NewPN = PHINode::Create(PN->getType(), 1, PN->getName(), NewBB); 2044 NewPN->addIncoming(PN->getIncomingValueForBlock(PredBB), PredBB); 2045 ValueMapping[PN] = NewPN; 2046 } 2047 2048 // Clone noalias scope declarations in the threaded block. When threading a 2049 // loop exit, we would otherwise end up with two idential scope declarations 2050 // visible at the same time. 2051 SmallVector<MDNode *> NoAliasScopes; 2052 DenseMap<MDNode *, MDNode *> ClonedScopes; 2053 LLVMContext &Context = PredBB->getContext(); 2054 identifyNoAliasScopesToClone(BI, BE, NoAliasScopes); 2055 cloneNoAliasScopes(NoAliasScopes, ClonedScopes, "thread", Context); 2056 2057 // Clone the non-phi instructions of the source basic block into NewBB, 2058 // keeping track of the mapping and using it to remap operands in the cloned 2059 // instructions. 2060 for (; BI != BE; ++BI) { 2061 Instruction *New = BI->clone(); 2062 New->setName(BI->getName()); 2063 New->insertInto(NewBB, NewBB->end()); 2064 ValueMapping[&*BI] = New; 2065 adaptNoAliasScopes(New, ClonedScopes, Context); 2066 2067 if (RetargetDbgValueIfPossible(New)) 2068 continue; 2069 2070 // Remap operands to patch up intra-block references. 2071 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2072 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 2073 DenseMap<Instruction *, Value *>::iterator I = ValueMapping.find(Inst); 2074 if (I != ValueMapping.end()) 2075 New->setOperand(i, I->second); 2076 } 2077 } 2078 2079 return ValueMapping; 2080 } 2081 2082 /// Attempt to thread through two successive basic blocks. 2083 bool JumpThreadingPass::maybethreadThroughTwoBasicBlocks(BasicBlock *BB, 2084 Value *Cond) { 2085 // Consider: 2086 // 2087 // PredBB: 2088 // %var = phi i32* [ null, %bb1 ], [ @a, %bb2 ] 2089 // %tobool = icmp eq i32 %cond, 0 2090 // br i1 %tobool, label %BB, label ... 
2091 // 2092 // BB: 2093 // %cmp = icmp eq i32* %var, null 2094 // br i1 %cmp, label ..., label ... 2095 // 2096 // We don't know the value of %var at BB even if we know which incoming edge 2097 // we take to BB. However, once we duplicate PredBB for each of its incoming 2098 // edges (say, PredBB1 and PredBB2), we know the value of %var in each copy of 2099 // PredBB. Then we can thread edges PredBB1->BB and PredBB2->BB through BB. 2100 2101 // Require that BB end with a Branch for simplicity. 2102 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 2103 if (!CondBr) 2104 return false; 2105 2106 // BB must have exactly one predecessor. 2107 BasicBlock *PredBB = BB->getSinglePredecessor(); 2108 if (!PredBB) 2109 return false; 2110 2111 // Require that PredBB end with a conditional Branch. If PredBB ends with an 2112 // unconditional branch, we should be merging PredBB and BB instead. For 2113 // simplicity, we don't deal with a switch. 2114 BranchInst *PredBBBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); 2115 if (!PredBBBranch || PredBBBranch->isUnconditional()) 2116 return false; 2117 2118 // If PredBB has exactly one incoming edge, we don't gain anything by copying 2119 // PredBB. 2120 if (PredBB->getSinglePredecessor()) 2121 return false; 2122 2123 // Don't thread through PredBB if it contains a successor edge to itself, in 2124 // which case we would infinite loop. Suppose we are threading an edge from 2125 // PredPredBB through PredBB and BB to SuccBB with PredBB containing a 2126 // successor edge to itself. If we allowed jump threading in this case, we 2127 // could duplicate PredBB and BB as, say, PredBB.thread and BB.thread. Since 2128 // PredBB.thread has a successor edge to PredBB, we would immediately come up 2129 // with another jump threading opportunity from PredBB.thread through PredBB 2130 // and BB to SuccBB. This jump threading would repeatedly occur. That is, we 2131 // would keep peeling one iteration from PredBB. 2132 if (llvm::is_contained(successors(PredBB), PredBB)) 2133 return false; 2134 2135 // Don't thread across a loop header. 2136 if (LoopHeaders.count(PredBB)) 2137 return false; 2138 2139 // Avoid complication with duplicating EH pads. 2140 if (PredBB->isEHPad()) 2141 return false; 2142 2143 // Find a predecessor that we can thread. For simplicity, we only consider a 2144 // successor edge out of BB to which we thread exactly one incoming edge into 2145 // PredBB. 2146 unsigned ZeroCount = 0; 2147 unsigned OneCount = 0; 2148 BasicBlock *ZeroPred = nullptr; 2149 BasicBlock *OnePred = nullptr; 2150 for (BasicBlock *P : predecessors(PredBB)) { 2151 // If PredPred ends with IndirectBrInst, we can't handle it. 2152 if (isa<IndirectBrInst>(P->getTerminator())) 2153 continue; 2154 if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>( 2155 evaluateOnPredecessorEdge(BB, P, Cond))) { 2156 if (CI->isZero()) { 2157 ZeroCount++; 2158 ZeroPred = P; 2159 } else if (CI->isOne()) { 2160 OneCount++; 2161 OnePred = P; 2162 } 2163 } 2164 } 2165 2166 // Disregard complicated cases where we have to thread multiple edges. 2167 BasicBlock *PredPredBB; 2168 if (ZeroCount == 1) { 2169 PredPredBB = ZeroPred; 2170 } else if (OneCount == 1) { 2171 PredPredBB = OnePred; 2172 } else { 2173 return false; 2174 } 2175 2176 BasicBlock *SuccBB = CondBr->getSuccessor(PredPredBB == ZeroPred); 2177 2178 // If threading to the same block as we come from, we would infinite loop. 
2179 if (SuccBB == BB) {
2180 LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName()
2181 << "' - would thread to self!\n");
2182 return false;
2183 }
2184
2185 // If threading this would thread across a loop header, don't thread the edge.
2186 // See the comments above findLoopHeaders for justifications and caveats.
2187 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) {
2188 LLVM_DEBUG({
2189 bool BBIsHeader = LoopHeaders.count(BB);
2190 bool SuccIsHeader = LoopHeaders.count(SuccBB);
2191 dbgs() << " Not threading across "
2192 << (BBIsHeader ? "loop header BB '" : "block BB '")
2193 << BB->getName() << "' to dest "
2194 << (SuccIsHeader ? "loop header BB '" : "block BB '")
2195 << SuccBB->getName()
2196 << "' - it might create an irreducible loop!\n";
2197 });
2198 return false;
2199 }
2200
2201 // Compute the cost of duplicating BB and PredBB.
2202 unsigned BBCost = getJumpThreadDuplicationCost(
2203 TTI, BB, BB->getTerminator(), BBDupThreshold);
2204 unsigned PredBBCost = getJumpThreadDuplicationCost(
2205 TTI, PredBB, PredBB->getTerminator(), BBDupThreshold);
2206
2207 // Give up if costs are too high. We need to check BBCost and PredBBCost
2208 // individually before checking their sum because getJumpThreadDuplicationCost
2209 // returns (unsigned)~0 for those basic blocks that cannot be duplicated.
2210 if (BBCost > BBDupThreshold || PredBBCost > BBDupThreshold ||
2211 BBCost + PredBBCost > BBDupThreshold) {
2212 LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName()
2213 << "' - Cost is too high: " << PredBBCost
2214 << " for PredBB, " << BBCost << " for BB\n");
2215 return false;
2216 }
2217
2218 // Now we are ready to duplicate PredBB.
2219 threadThroughTwoBasicBlocks(PredPredBB, PredBB, BB, SuccBB);
2220 return true;
2221 }
2222
2223 void JumpThreadingPass::threadThroughTwoBasicBlocks(BasicBlock *PredPredBB,
2224 BasicBlock *PredBB,
2225 BasicBlock *BB,
2226 BasicBlock *SuccBB) {
2227 LLVM_DEBUG(dbgs() << " Threading through '" << PredBB->getName() << "' and '"
2228 << BB->getName() << "'\n");
2229
2230 // Build BPI/BFI before any changes are made to IR.
2231 bool HasProfile = doesBlockHaveProfileData(BB);
2232 auto *BFI = getOrCreateBFI(HasProfile);
2233 auto *BPI = getOrCreateBPI(BFI != nullptr);
2234
2235 BranchInst *CondBr = cast<BranchInst>(BB->getTerminator());
2236 BranchInst *PredBBBranch = cast<BranchInst>(PredBB->getTerminator());
2237
2238 BasicBlock *NewBB =
2239 BasicBlock::Create(PredBB->getContext(), PredBB->getName() + ".thread",
2240 PredBB->getParent(), PredBB);
2241 NewBB->moveAfter(PredBB);
2242
2243 // Set the block frequency of NewBB.
2244 if (BFI) {
2245 assert(BPI && "It's expected BPI to exist along with BFI");
2246 auto NewBBFreq = BFI->getBlockFreq(PredPredBB) *
2247 BPI->getEdgeProbability(PredPredBB, PredBB);
2248 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2249 }
2250
2251 // We are going to have to map operands from the original PredBB block to the
2252 // new copy of the block 'NewBB'. If there are PHI nodes in PredBB, evaluate
2253 // them to account for entry from PredPredBB.
2254 DenseMap<Instruction *, Value *> ValueMapping =
2255 cloneInstructions(PredBB->begin(), PredBB->end(), NewBB, PredPredBB);
2256
2257 // Copy the edge probabilities from PredBB to NewBB.
2258 if (BPI)
2259 BPI->copyEdgeProbabilities(PredBB, NewBB);
2260
2261 // Update the terminator of PredPredBB to jump to NewBB instead of PredBB.
2262 // This eliminates predecessors from PredPredBB, which requires us to simplify 2263 // any PHI nodes in PredBB. 2264 Instruction *PredPredTerm = PredPredBB->getTerminator(); 2265 for (unsigned i = 0, e = PredPredTerm->getNumSuccessors(); i != e; ++i) 2266 if (PredPredTerm->getSuccessor(i) == PredBB) { 2267 PredBB->removePredecessor(PredPredBB, true); 2268 PredPredTerm->setSuccessor(i, NewBB); 2269 } 2270 2271 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(0), PredBB, NewBB, 2272 ValueMapping); 2273 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(1), PredBB, NewBB, 2274 ValueMapping); 2275 2276 DTU->applyUpdatesPermissive( 2277 {{DominatorTree::Insert, NewBB, CondBr->getSuccessor(0)}, 2278 {DominatorTree::Insert, NewBB, CondBr->getSuccessor(1)}, 2279 {DominatorTree::Insert, PredPredBB, NewBB}, 2280 {DominatorTree::Delete, PredPredBB, PredBB}}); 2281 2282 updateSSA(PredBB, NewBB, ValueMapping); 2283 2284 // Clean up things like PHI nodes with single operands, dead instructions, 2285 // etc. 2286 SimplifyInstructionsInBlock(NewBB, TLI); 2287 SimplifyInstructionsInBlock(PredBB, TLI); 2288 2289 SmallVector<BasicBlock *, 1> PredsToFactor; 2290 PredsToFactor.push_back(NewBB); 2291 threadEdge(BB, PredsToFactor, SuccBB); 2292 } 2293 2294 /// tryThreadEdge - Thread an edge if it's safe and profitable to do so. 2295 bool JumpThreadingPass::tryThreadEdge( 2296 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs, 2297 BasicBlock *SuccBB) { 2298 // If threading to the same block as we come from, we would infinite loop. 2299 if (SuccBB == BB) { 2300 LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName() 2301 << "' - would thread to self!\n"); 2302 return false; 2303 } 2304 2305 // If threading this would thread across a loop header, don't thread the edge. 2306 // See the comments above findLoopHeaders for justifications and caveats. 2307 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) { 2308 LLVM_DEBUG({ 2309 bool BBIsHeader = LoopHeaders.count(BB); 2310 bool SuccIsHeader = LoopHeaders.count(SuccBB); 2311 dbgs() << " Not threading across " 2312 << (BBIsHeader ? "loop header BB '" : "block BB '") << BB->getName() 2313 << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '") 2314 << SuccBB->getName() << "' - it might create an irreducible loop!\n"; 2315 }); 2316 return false; 2317 } 2318 2319 unsigned JumpThreadCost = getJumpThreadDuplicationCost( 2320 TTI, BB, BB->getTerminator(), BBDupThreshold); 2321 if (JumpThreadCost > BBDupThreshold) { 2322 LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName() 2323 << "' - Cost is too high: " << JumpThreadCost << "\n"); 2324 return false; 2325 } 2326 2327 threadEdge(BB, PredBBs, SuccBB); 2328 return true; 2329 } 2330 2331 /// threadEdge - We have decided that it is safe and profitable to factor the 2332 /// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB 2333 /// across BB. Transform the IR to reflect this change. 2334 void JumpThreadingPass::threadEdge(BasicBlock *BB, 2335 const SmallVectorImpl<BasicBlock *> &PredBBs, 2336 BasicBlock *SuccBB) { 2337 assert(SuccBB != BB && "Don't create an infinite loop"); 2338 2339 assert(!LoopHeaders.count(BB) && !LoopHeaders.count(SuccBB) && 2340 "Don't thread across loop headers"); 2341 2342 // Build BPI/BFI before any changes are made to IR. 2343 bool HasProfile = doesBlockHaveProfileData(BB); 2344 auto *BFI = getOrCreateBFI(HasProfile); 2345 auto *BPI = getOrCreateBPI(BFI != nullptr); 2346 2347 // And finally, do it! 
Start by factoring the predecessors if needed. 2348 BasicBlock *PredBB; 2349 if (PredBBs.size() == 1) 2350 PredBB = PredBBs[0]; 2351 else { 2352 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() 2353 << " common predecessors.\n"); 2354 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm"); 2355 } 2356 2357 // And finally, do it! 2358 LLVM_DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() 2359 << "' to '" << SuccBB->getName() 2360 << ", across block:\n " << *BB << "\n"); 2361 2362 LVI->threadEdge(PredBB, BB, SuccBB); 2363 2364 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), 2365 BB->getName()+".thread", 2366 BB->getParent(), BB); 2367 NewBB->moveAfter(PredBB); 2368 2369 // Set the block frequency of NewBB. 2370 if (BFI) { 2371 assert(BPI && "It's expected BPI to exist along with BFI"); 2372 auto NewBBFreq = 2373 BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB); 2374 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency()); 2375 } 2376 2377 // Copy all the instructions from BB to NewBB except the terminator. 2378 DenseMap<Instruction *, Value *> ValueMapping = 2379 cloneInstructions(BB->begin(), std::prev(BB->end()), NewBB, PredBB); 2380 2381 // We didn't copy the terminator from BB over to NewBB, because there is now 2382 // an unconditional jump to SuccBB. Insert the unconditional jump. 2383 BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB); 2384 NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc()); 2385 2386 // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the 2387 // PHI nodes for NewBB now. 2388 addPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping); 2389 2390 // Update the terminator of PredBB to jump to NewBB instead of BB. This 2391 // eliminates predecessors from BB, which requires us to simplify any PHI 2392 // nodes in BB. 2393 Instruction *PredTerm = PredBB->getTerminator(); 2394 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) 2395 if (PredTerm->getSuccessor(i) == BB) { 2396 BB->removePredecessor(PredBB, true); 2397 PredTerm->setSuccessor(i, NewBB); 2398 } 2399 2400 // Enqueue required DT updates. 2401 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, SuccBB}, 2402 {DominatorTree::Insert, PredBB, NewBB}, 2403 {DominatorTree::Delete, PredBB, BB}}); 2404 2405 updateSSA(BB, NewBB, ValueMapping); 2406 2407 // At this point, the IR is fully up to date and consistent. Do a quick scan 2408 // over the new instructions and zap any that are constants or dead. This 2409 // frequently happens because of phi translation. 2410 SimplifyInstructionsInBlock(NewBB, TLI); 2411 2412 // Update the edge weight from BB to SuccBB, which should be less than before. 2413 updateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB, BFI, BPI, HasProfile); 2414 2415 // Threaded an edge! 2416 ++NumThreads; 2417 } 2418 2419 /// Create a new basic block that will be the predecessor of BB and successor of 2420 /// all blocks in Preds. When profile data is available, update the frequency of 2421 /// this new block. 2422 BasicBlock *JumpThreadingPass::splitBlockPreds(BasicBlock *BB, 2423 ArrayRef<BasicBlock *> Preds, 2424 const char *Suffix) { 2425 SmallVector<BasicBlock *, 2> NewBBs; 2426 2427 // Collect the frequencies of all predecessors of BB, which will be used to 2428 // update the edge weight of the result of splitting predecessors. 
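// In terms of block frequencies, each new predecessor block created below is
// assigned
//   Freq(NewBB) = sum over its preds Pred of Freq(Pred) * Prob(Pred -> BB)
// using the per-edge values cached in FreqMap before the split.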
2429 DenseMap<BasicBlock *, BlockFrequency> FreqMap;
2430 auto *BFI = getBFI();
2431 if (BFI) {
2432 auto *BPI = getOrCreateBPI(true);
2433 for (auto *Pred : Preds)
2434 FreqMap.insert(std::make_pair(
2435 Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB)));
2436 }
2437
2438 // When BB is a LandingPad block, we create two new predecessors
2439 // instead of just one.
2440 if (BB->isLandingPad()) {
2441 std::string NewName = std::string(Suffix) + ".split-lp";
2442 SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs);
2443 } else {
2444 NewBBs.push_back(SplitBlockPredecessors(BB, Preds, Suffix));
2445 }
2446
2447 std::vector<DominatorTree::UpdateType> Updates;
2448 Updates.reserve((2 * Preds.size()) + NewBBs.size());
2449 for (auto *NewBB : NewBBs) {
2450 BlockFrequency NewBBFreq(0);
2451 Updates.push_back({DominatorTree::Insert, NewBB, BB});
2452 for (auto *Pred : predecessors(NewBB)) {
2453 Updates.push_back({DominatorTree::Delete, Pred, BB});
2454 Updates.push_back({DominatorTree::Insert, Pred, NewBB});
2455 if (BFI) // Update frequencies between Pred -> NewBB.
2456 NewBBFreq += FreqMap.lookup(Pred);
2457 }
2458 if (BFI) // Apply the summed frequency to NewBB.
2459 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency());
2460 }
2461
2462 DTU->applyUpdatesPermissive(Updates);
2463 return NewBBs[0];
2464 }
2465
2466 bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) {
2467 const Instruction *TI = BB->getTerminator();
2468 if (!TI || TI->getNumSuccessors() < 2)
2469 return false;
2470
2471 return hasValidBranchWeightMD(*TI);
2472 }
2473
2474 /// Update the block frequency of BB and the branch weight metadata on the
2475 /// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by 1 -
2476 /// Freq(PredBB->BB) / Freq(BB->SuccBB).
2477 void JumpThreadingPass::updateBlockFreqAndEdgeWeight(BasicBlock *PredBB,
2478 BasicBlock *BB,
2479 BasicBlock *NewBB,
2480 BasicBlock *SuccBB,
2481 BlockFrequencyInfo *BFI,
2482 BranchProbabilityInfo *BPI,
2483 bool HasProfile) {
2484 assert(((BFI && BPI) || (!BFI && !BPI)) &&
2485 "Both BFI & BPI should either be set or unset");
2486
2487 if (!BFI) {
2488 assert(!HasProfile &&
2489 "It's expected to have BFI/BPI when profile info exists");
2490 return;
2491 }
2492
2493 // As the edge from PredBB to BB is deleted, we have to update the block
2494 // frequency of BB.
2495 auto BBOrigFreq = BFI->getBlockFreq(BB);
2496 auto NewBBFreq = BFI->getBlockFreq(NewBB);
2497 auto BB2SuccBBFreq = BBOrigFreq * BPI->getEdgeProbability(BB, SuccBB);
2498 auto BBNewFreq = BBOrigFreq - NewBBFreq;
2499 BFI->setBlockFreq(BB, BBNewFreq.getFrequency());
2500
2501 // Collect updated outgoing edges' frequencies from BB and use them to update
2502 // edge probabilities.
2503 SmallVector<uint64_t, 4> BBSuccFreq;
2504 for (BasicBlock *Succ : successors(BB)) {
2505 auto SuccFreq = (Succ == SuccBB)
2506 ? BB2SuccBBFreq - NewBBFreq
2507 : BBOrigFreq * BPI->getEdgeProbability(BB, Succ);
2508 BBSuccFreq.push_back(SuccFreq.getFrequency());
2509 }
2510
2511 uint64_t MaxBBSuccFreq =
2512 *std::max_element(BBSuccFreq.begin(), BBSuccFreq.end());
2513
2514 SmallVector<BranchProbability, 4> BBSuccProbs;
2515 if (MaxBBSuccFreq == 0)
2516 BBSuccProbs.assign(BBSuccFreq.size(),
2517 {1, static_cast<unsigned>(BBSuccFreq.size())});
2518 else {
2519 for (uint64_t Freq : BBSuccFreq)
2520 BBSuccProbs.push_back(
2521 BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq));
2522 // Normalize edge probabilities so that they sum up to one.
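// For example (hypothetical numbers): successor frequencies {2, 2, 1} with a
// max of 2 give raw probabilities {1, 1, 1/2}, which normalize below to
// {2/5, 2/5, 1/5}.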
2523 BranchProbability::normalizeProbabilities(BBSuccProbs.begin(), 2524 BBSuccProbs.end()); 2525 } 2526 2527 // Update edge probabilities in BPI. 2528 BPI->setEdgeProbability(BB, BBSuccProbs); 2529 2530 // Update the profile metadata as well. 2531 // 2532 // Don't do this if the profile of the transformed blocks was statically 2533 // estimated. (This could occur despite the function having an entry 2534 // frequency in completely cold parts of the CFG.) 2535 // 2536 // In this case we don't want to suggest to subsequent passes that the 2537 // calculated weights are fully consistent. Consider this graph: 2538 // 2539 // check_1 2540 // 50% / | 2541 // eq_1 | 50% 2542 // \ | 2543 // check_2 2544 // 50% / | 2545 // eq_2 | 50% 2546 // \ | 2547 // check_3 2548 // 50% / | 2549 // eq_3 | 50% 2550 // \ | 2551 // 2552 // Assuming the blocks check_* all compare the same value against 1, 2 and 3, 2553 // the overall probabilities are inconsistent; the total probability that the 2554 // value is either 1, 2 or 3 is 150%. 2555 // 2556 // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3 2557 // becomes 0%. This is even worse if the edge whose probability becomes 0% is 2558 // the loop exit edge. Then based solely on static estimation we would assume 2559 // the loop was extremely hot. 2560 // 2561 // FIXME this locally as well so that BPI and BFI are consistent as well. We 2562 // shouldn't make edges extremely likely or unlikely based solely on static 2563 // estimation. 2564 if (BBSuccProbs.size() >= 2 && HasProfile) { 2565 SmallVector<uint32_t, 4> Weights; 2566 for (auto Prob : BBSuccProbs) 2567 Weights.push_back(Prob.getNumerator()); 2568 2569 auto TI = BB->getTerminator(); 2570 TI->setMetadata( 2571 LLVMContext::MD_prof, 2572 MDBuilder(TI->getParent()->getContext()).createBranchWeights(Weights)); 2573 } 2574 } 2575 2576 /// duplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch 2577 /// to BB which contains an i1 PHI node and a conditional branch on that PHI. 2578 /// If we can duplicate the contents of BB up into PredBB do so now, this 2579 /// improves the odds that the branch will be on an analyzable instruction like 2580 /// a compare. 2581 bool JumpThreadingPass::duplicateCondBranchOnPHIIntoPred( 2582 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) { 2583 assert(!PredBBs.empty() && "Can't handle an empty set"); 2584 2585 // If BB is a loop header, then duplicating this block outside the loop would 2586 // cause us to transform this into an irreducible loop, don't do this. 2587 // See the comments above findLoopHeaders for justifications and caveats. 2588 if (LoopHeaders.count(BB)) { 2589 LLVM_DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName() 2590 << "' into predecessor block '" << PredBBs[0]->getName() 2591 << "' - it might create an irreducible loop!\n"); 2592 return false; 2593 } 2594 2595 unsigned DuplicationCost = getJumpThreadDuplicationCost( 2596 TTI, BB, BB->getTerminator(), BBDupThreshold); 2597 if (DuplicationCost > BBDupThreshold) { 2598 LLVM_DEBUG(dbgs() << " Not duplicating BB '" << BB->getName() 2599 << "' - Cost is too high: " << DuplicationCost << "\n"); 2600 return false; 2601 } 2602 2603 // And finally, do it! Start by factoring the predecessors if needed. 
2604 std::vector<DominatorTree::UpdateType> Updates; 2605 BasicBlock *PredBB; 2606 if (PredBBs.size() == 1) 2607 PredBB = PredBBs[0]; 2608 else { 2609 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() 2610 << " common predecessors.\n"); 2611 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm"); 2612 } 2613 Updates.push_back({DominatorTree::Delete, PredBB, BB}); 2614 2615 // Okay, we decided to do this! Clone all the instructions in BB onto the end 2616 // of PredBB. 2617 LLVM_DEBUG(dbgs() << " Duplicating block '" << BB->getName() 2618 << "' into end of '" << PredBB->getName() 2619 << "' to eliminate branch on phi. Cost: " 2620 << DuplicationCost << " block is:" << *BB << "\n"); 2621 2622 // Unless PredBB ends with an unconditional branch, split the edge so that we 2623 // can just clone the bits from BB into the end of the new PredBB. 2624 BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); 2625 2626 if (!OldPredBranch || !OldPredBranch->isUnconditional()) { 2627 BasicBlock *OldPredBB = PredBB; 2628 PredBB = SplitEdge(OldPredBB, BB); 2629 Updates.push_back({DominatorTree::Insert, OldPredBB, PredBB}); 2630 Updates.push_back({DominatorTree::Insert, PredBB, BB}); 2631 Updates.push_back({DominatorTree::Delete, OldPredBB, BB}); 2632 OldPredBranch = cast<BranchInst>(PredBB->getTerminator()); 2633 } 2634 2635 // We are going to have to map operands from the original BB block into the 2636 // PredBB block. Evaluate PHI nodes in BB. 2637 DenseMap<Instruction*, Value*> ValueMapping; 2638 2639 BasicBlock::iterator BI = BB->begin(); 2640 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) 2641 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB); 2642 // Clone the non-phi instructions of BB into PredBB, keeping track of the 2643 // mapping and using it to remap operands in the cloned instructions. 2644 for (; BI != BB->end(); ++BI) { 2645 Instruction *New = BI->clone(); 2646 New->insertInto(PredBB, OldPredBranch->getIterator()); 2647 2648 // Remap operands to patch up intra-block references. 2649 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2650 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 2651 DenseMap<Instruction*, Value*>::iterator I = ValueMapping.find(Inst); 2652 if (I != ValueMapping.end()) 2653 New->setOperand(i, I->second); 2654 } 2655 2656 // If this instruction can be simplified after the operands are updated, 2657 // just use the simplified value instead. This frequently happens due to 2658 // phi translation. 2659 if (Value *IV = simplifyInstruction( 2660 New, 2661 {BB->getModule()->getDataLayout(), TLI, nullptr, nullptr, New})) { 2662 ValueMapping[&*BI] = IV; 2663 if (!New->mayHaveSideEffects()) { 2664 New->eraseFromParent(); 2665 New = nullptr; 2666 } 2667 } else { 2668 ValueMapping[&*BI] = New; 2669 } 2670 if (New) { 2671 // Otherwise, insert the new instruction into the block. 2672 New->setName(BI->getName()); 2673 // Update Dominance from simplified New instruction operands. 2674 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2675 if (BasicBlock *SuccBB = dyn_cast<BasicBlock>(New->getOperand(i))) 2676 Updates.push_back({DominatorTree::Insert, PredBB, SuccBB}); 2677 } 2678 } 2679 2680 // Check to see if the targets of the branch had PHI nodes. If so, we need to 2681 // add entries to the PHI nodes for branch from PredBB now. 
2682 BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator()); 2683 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB, 2684 ValueMapping); 2685 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB, 2686 ValueMapping); 2687 2688 updateSSA(BB, PredBB, ValueMapping); 2689 2690 // PredBB no longer jumps to BB, remove entries in the PHI node for the edge 2691 // that we nuked. 2692 BB->removePredecessor(PredBB, true); 2693 2694 // Remove the unconditional branch at the end of the PredBB block. 2695 OldPredBranch->eraseFromParent(); 2696 if (auto *BPI = getBPI()) 2697 BPI->copyEdgeProbabilities(BB, PredBB); 2698 DTU->applyUpdatesPermissive(Updates); 2699 2700 ++NumDupes; 2701 return true; 2702 } 2703 2704 // Pred is a predecessor of BB with an unconditional branch to BB. SI is 2705 // a Select instruction in Pred. BB has other predecessors and SI is used in 2706 // a PHI node in BB. SI has no other use. 2707 // A new basic block, NewBB, is created and SI is converted to compare and 2708 // conditional branch. SI is erased from parent. 2709 void JumpThreadingPass::unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, 2710 SelectInst *SI, PHINode *SIUse, 2711 unsigned Idx) { 2712 // Expand the select. 2713 // 2714 // Pred -- 2715 // | v 2716 // | NewBB 2717 // | | 2718 // |----- 2719 // v 2720 // BB 2721 BranchInst *PredTerm = cast<BranchInst>(Pred->getTerminator()); 2722 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold", 2723 BB->getParent(), BB); 2724 // Move the unconditional branch to NewBB. 2725 PredTerm->removeFromParent(); 2726 PredTerm->insertInto(NewBB, NewBB->end()); 2727 // Create a conditional branch and update PHI nodes. 2728 auto *BI = BranchInst::Create(NewBB, BB, SI->getCondition(), Pred); 2729 BI->applyMergedLocation(PredTerm->getDebugLoc(), SI->getDebugLoc()); 2730 BI->copyMetadata(*SI, {LLVMContext::MD_prof}); 2731 SIUse->setIncomingValue(Idx, SI->getFalseValue()); 2732 SIUse->addIncoming(SI->getTrueValue(), NewBB); 2733 2734 uint64_t TrueWeight = 1; 2735 uint64_t FalseWeight = 1; 2736 // Copy probabilities from 'SI' to created conditional branch in 'Pred'. 2737 if (extractBranchWeights(*SI, TrueWeight, FalseWeight) && 2738 (TrueWeight + FalseWeight) != 0) { 2739 SmallVector<BranchProbability, 2> BP; 2740 BP.emplace_back(BranchProbability::getBranchProbability( 2741 TrueWeight, TrueWeight + FalseWeight)); 2742 BP.emplace_back(BranchProbability::getBranchProbability( 2743 FalseWeight, TrueWeight + FalseWeight)); 2744 // Update BPI if exists. 2745 if (auto *BPI = getBPI()) 2746 BPI->setEdgeProbability(Pred, BP); 2747 } 2748 // Set the block frequency of NewBB. 2749 if (auto *BFI = getBFI()) { 2750 if ((TrueWeight + FalseWeight) == 0) { 2751 TrueWeight = 1; 2752 FalseWeight = 1; 2753 } 2754 BranchProbability PredToNewBBProb = BranchProbability::getBranchProbability( 2755 TrueWeight, TrueWeight + FalseWeight); 2756 auto NewBBFreq = BFI->getBlockFreq(Pred) * PredToNewBBProb; 2757 BFI->setBlockFreq(NewBB, NewBBFreq.getFrequency()); 2758 } 2759 2760 // The select is now dead. 2761 SI->eraseFromParent(); 2762 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, BB}, 2763 {DominatorTree::Insert, Pred, NewBB}}); 2764 2765 // Update any other PHI nodes in BB. 
2766 for (BasicBlock::iterator BI = BB->begin(); 2767 PHINode *Phi = dyn_cast<PHINode>(BI); ++BI) 2768 if (Phi != SIUse) 2769 Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB); 2770 } 2771 2772 bool JumpThreadingPass::tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) { 2773 PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition()); 2774 2775 if (!CondPHI || CondPHI->getParent() != BB) 2776 return false; 2777 2778 for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) { 2779 BasicBlock *Pred = CondPHI->getIncomingBlock(I); 2780 SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I)); 2781 2782 // The second and third condition can be potentially relaxed. Currently 2783 // the conditions help to simplify the code and allow us to reuse existing 2784 // code, developed for tryToUnfoldSelect(CmpInst *, BasicBlock *) 2785 if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse()) 2786 continue; 2787 2788 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); 2789 if (!PredTerm || !PredTerm->isUnconditional()) 2790 continue; 2791 2792 unfoldSelectInstr(Pred, BB, PredSI, CondPHI, I); 2793 return true; 2794 } 2795 return false; 2796 } 2797 2798 /// tryToUnfoldSelect - Look for blocks of the form 2799 /// bb1: 2800 /// %a = select 2801 /// br bb2 2802 /// 2803 /// bb2: 2804 /// %p = phi [%a, %bb1] ... 2805 /// %c = icmp %p 2806 /// br i1 %c 2807 /// 2808 /// And expand the select into a branch structure if one of its arms allows %c 2809 /// to be folded. This later enables threading from bb1 over bb2. 2810 bool JumpThreadingPass::tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) { 2811 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 2812 PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0)); 2813 Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1)); 2814 2815 if (!CondBr || !CondBr->isConditional() || !CondLHS || 2816 CondLHS->getParent() != BB) 2817 return false; 2818 2819 for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) { 2820 BasicBlock *Pred = CondLHS->getIncomingBlock(I); 2821 SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I)); 2822 2823 // Look if one of the incoming values is a select in the corresponding 2824 // predecessor. 2825 if (!SI || SI->getParent() != Pred || !SI->hasOneUse()) 2826 continue; 2827 2828 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); 2829 if (!PredTerm || !PredTerm->isUnconditional()) 2830 continue; 2831 2832 // Now check if one of the select values would allow us to constant fold the 2833 // terminator in BB. We don't do the transform if both sides fold, those 2834 // cases will be threaded in any case. 2835 LazyValueInfo::Tristate LHSFolds = 2836 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1), 2837 CondRHS, Pred, BB, CondCmp); 2838 LazyValueInfo::Tristate RHSFolds = 2839 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2), 2840 CondRHS, Pred, BB, CondCmp); 2841 if ((LHSFolds != LazyValueInfo::Unknown || 2842 RHSFolds != LazyValueInfo::Unknown) && 2843 LHSFolds != RHSFolds) { 2844 unfoldSelectInstr(Pred, BB, SI, CondLHS, I); 2845 return true; 2846 } 2847 } 2848 return false; 2849 } 2850 2851 /// tryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the 2852 /// same BB in the form 2853 /// bb: 2854 /// %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ... 
2855 /// %s = select %p, trueval, falseval 2856 /// 2857 /// or 2858 /// 2859 /// bb: 2860 /// %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ... 2861 /// %c = cmp %p, 0 2862 /// %s = select %c, trueval, falseval 2863 /// 2864 /// And expand the select into a branch structure. This later enables 2865 /// jump-threading over bb in this pass. 2866 /// 2867 /// Using the similar approach of SimplifyCFG::FoldCondBranchOnPHI(), unfold 2868 /// select if the associated PHI has at least one constant. If the unfolded 2869 /// select is not jump-threaded, it will be folded again in the later 2870 /// optimizations. 2871 bool JumpThreadingPass::tryToUnfoldSelectInCurrBB(BasicBlock *BB) { 2872 // This transform would reduce the quality of msan diagnostics. 2873 // Disable this transform under MemorySanitizer. 2874 if (BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory)) 2875 return false; 2876 2877 // If threading this would thread across a loop header, don't thread the edge. 2878 // See the comments above findLoopHeaders for justifications and caveats. 2879 if (LoopHeaders.count(BB)) 2880 return false; 2881 2882 for (BasicBlock::iterator BI = BB->begin(); 2883 PHINode *PN = dyn_cast<PHINode>(BI); ++BI) { 2884 // Look for a Phi having at least one constant incoming value. 2885 if (llvm::all_of(PN->incoming_values(), 2886 [](Value *V) { return !isa<ConstantInt>(V); })) 2887 continue; 2888 2889 auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) { 2890 using namespace PatternMatch; 2891 2892 // Check if SI is in BB and use V as condition. 2893 if (SI->getParent() != BB) 2894 return false; 2895 Value *Cond = SI->getCondition(); 2896 bool IsAndOr = match(SI, m_CombineOr(m_LogicalAnd(), m_LogicalOr())); 2897 return Cond && Cond == V && Cond->getType()->isIntegerTy(1) && !IsAndOr; 2898 }; 2899 2900 SelectInst *SI = nullptr; 2901 for (Use &U : PN->uses()) { 2902 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) { 2903 // Look for a ICmp in BB that compares PN with a constant and is the 2904 // condition of a Select. 2905 if (Cmp->getParent() == BB && Cmp->hasOneUse() && 2906 isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo()))) 2907 if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back())) 2908 if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) { 2909 SI = SelectI; 2910 break; 2911 } 2912 } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) { 2913 // Look for a Select in BB that uses PN as condition. 2914 if (isUnfoldCandidate(SelectI, U.get())) { 2915 SI = SelectI; 2916 break; 2917 } 2918 } 2919 } 2920 2921 if (!SI) 2922 continue; 2923 // Expand the select. 2924 Value *Cond = SI->getCondition(); 2925 if (!isGuaranteedNotToBeUndefOrPoison(Cond, nullptr, SI)) 2926 Cond = new FreezeInst(Cond, "cond.fr", SI); 2927 Instruction *Term = SplitBlockAndInsertIfThen(Cond, SI, false); 2928 BasicBlock *SplitBB = SI->getParent(); 2929 BasicBlock *NewBB = Term->getParent(); 2930 PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI); 2931 NewPN->addIncoming(SI->getTrueValue(), Term->getParent()); 2932 NewPN->addIncoming(SI->getFalseValue(), BB); 2933 SI->replaceAllUsesWith(NewPN); 2934 SI->eraseFromParent(); 2935 // NewBB and SplitBB are newly created blocks which require insertion. 
    std::vector<DominatorTree::UpdateType> Updates;
    Updates.reserve((2 * SplitBB->getTerminator()->getNumSuccessors()) + 3);
    Updates.push_back({DominatorTree::Insert, BB, SplitBB});
    Updates.push_back({DominatorTree::Insert, BB, NewBB});
    Updates.push_back({DominatorTree::Insert, NewBB, SplitBB});
    // BB's successors were moved to SplitBB; update the DTU accordingly.
    for (auto *Succ : successors(SplitBB)) {
      Updates.push_back({DominatorTree::Delete, BB, Succ});
      Updates.push_back({DominatorTree::Insert, SplitBB, Succ});
    }
    DTU->applyUpdatesPermissive(Updates);
    return true;
  }
  return false;
}

/// Try to propagate a guard from the current BB into one of its predecessors
/// in case another branch of execution implies that the condition of this
/// guard is always true. Currently we only process the simplest case that
/// looks like:
///
/// Start:
///   %cond = ...
///   br i1 %cond, label %T1, label %F1
/// T1:
///   br label %Merge
/// F1:
///   br label %Merge
/// Merge:
///   %condGuard = ...
///   call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ]
///
/// And cond either implies condGuard or !condGuard. In this case all the
/// instructions before the guard can be duplicated in both branches, and the
/// guard is then threaded to one of them.
bool JumpThreadingPass::processGuards(BasicBlock *BB) {
  using namespace PatternMatch;

  // We only want to deal with two predecessors.
  BasicBlock *Pred1, *Pred2;
  auto PI = pred_begin(BB), PE = pred_end(BB);
  if (PI == PE)
    return false;
  Pred1 = *PI++;
  if (PI == PE)
    return false;
  Pred2 = *PI++;
  if (PI != PE)
    return false;
  if (Pred1 == Pred2)
    return false;

  // Try to thread one of the guards of the block.
  // TODO: Look deeper than the immediate predecessors?
  auto *Parent = Pred1->getSinglePredecessor();
  if (!Parent || Parent != Pred2->getSinglePredecessor())
    return false;

  if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator()))
    for (auto &I : *BB)
      if (isGuard(&I) && threadGuard(BB, cast<IntrinsicInst>(&I), BI))
        return true;

  return false;
}

/// Try to propagate the guard from BB, which is the lower block of a diamond,
/// to one of its branches, in case the diamond's condition implies the
/// guard's condition.
bool JumpThreadingPass::threadGuard(BasicBlock *BB, IntrinsicInst *Guard,
                                    BranchInst *BI) {
  assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?");
  assert(BI->isConditional() && "Unconditional branch has 2 successors?");
  Value *GuardCond = Guard->getArgOperand(0);
  Value *BranchCond = BI->getCondition();
  BasicBlock *TrueDest = BI->getSuccessor(0);
  BasicBlock *FalseDest = BI->getSuccessor(1);

  auto &DL = BB->getModule()->getDataLayout();
  bool TrueDestIsSafe = false;
  bool FalseDestIsSafe = false;

  // True dest is safe if BranchCond => GuardCond.
  auto Impl = isImpliedCondition(BranchCond, GuardCond, DL);
  if (Impl && *Impl)
    TrueDestIsSafe = true;
  else {
    // False dest is safe if !BranchCond => GuardCond.
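    // (Passing LHSIsTrue == false asks whether the negation of BranchCond
    // implies GuardCond, i.e. the corresponding check for the false
    // successor.)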
    Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false);
    if (Impl && *Impl)
      FalseDestIsSafe = true;
  }

  if (!TrueDestIsSafe && !FalseDestIsSafe)
    return false;

  BasicBlock *PredUnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest;
  BasicBlock *PredGuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest;

  ValueToValueMapTy UnguardedMapping, GuardedMapping;
  Instruction *AfterGuard = Guard->getNextNode();
  unsigned Cost =
      getJumpThreadDuplicationCost(TTI, BB, AfterGuard, BBDupThreshold);
  if (Cost > BBDupThreshold)
    return false;
  // Duplicate all instructions before the guard and the guard itself to the
  // branch where the implication is not proved.
  BasicBlock *GuardedBlock = DuplicateInstructionsInSplitBetween(
      BB, PredGuardedBlock, AfterGuard, GuardedMapping, *DTU);
  assert(GuardedBlock && "Could not create the guarded block?");
  // Duplicate all instructions before the guard in the unguarded branch.
  // Since we have successfully duplicated the guarded block and this block
  // has fewer instructions, we expect it to succeed.
  BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween(
      BB, PredUnguardedBlock, Guard, UnguardedMapping, *DTU);
  assert(UnguardedBlock && "Could not create the unguarded block?");
  LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block "
                    << GuardedBlock->getName() << "\n");
  // Some instructions before the guard may still have uses. For them, we need
  // to create Phi nodes merging their copies in both guarded and unguarded
  // branches. Instructions that have no uses can simply be removed.
  SmallVector<Instruction *, 4> ToRemove;
  for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI)
    if (!isa<PHINode>(&*BI))
      ToRemove.push_back(&*BI);

  Instruction *InsertionPoint = &*BB->getFirstInsertionPt();
  assert(InsertionPoint && "Empty block?");
  // Substitute with Phis & remove.
  for (auto *Inst : reverse(ToRemove)) {
    if (!Inst->use_empty()) {
      PHINode *NewPN = PHINode::Create(Inst->getType(), 2);
      NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock);
      NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock);
      NewPN->insertBefore(InsertionPoint);
      Inst->replaceAllUsesWith(NewPN);
    }
    Inst->eraseFromParent();
  }
  return true;
}

PreservedAnalyses JumpThreadingPass::getPreservedAnalysis() const {
  PreservedAnalyses PA;
  PA.preserve<LazyValueAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();

  // TODO: We would like to preserve BPI/BFI. Enable once all paths update them.
  // TODO: Would be nice to verify BPI/BFI consistency as well.
  return PA;
}

template <typename AnalysisT>
typename AnalysisT::Result *JumpThreadingPass::runExternalAnalysis() {
  assert(FAM && "Can't run external analysis without FunctionAnalysisManager");

  // If there have been no changes since the last call to 'runExternalAnalysis'
  // then all analyses are either up to date or explicitly invalidated. Just go
  // ahead and run the "external" analysis.
  if (!ChangedSinceLastAnalysisUpdate) {
    assert(!DTU->hasPendingUpdates() &&
           "Lost update of 'ChangedSinceLastAnalysisUpdate'?");
    // Run the "external" analysis.
    return &FAM->getResult<AnalysisT>(*F);
  }
  ChangedSinceLastAnalysisUpdate = false;

  auto PA = getPreservedAnalysis();
  // TODO: This shouldn't be needed once 'getPreservedAnalysis' reports BPI/BFI
  // as preserved.
  PA.preserve<BranchProbabilityAnalysis>();
  PA.preserve<BlockFrequencyAnalysis>();
  // Report everything except the explicitly preserved analyses as invalid.
  FAM->invalidate(*F, PA);
  // Update DT/PDT.
  DTU->flush();
  // Make sure DT/PDT are valid before running the "external" analysis.
  assert(DTU->getDomTree().verify(DominatorTree::VerificationLevel::Fast));
  assert((!DTU->hasPostDomTree() ||
          DTU->getPostDomTree().verify(
              PostDominatorTree::VerificationLevel::Fast)));
  // Run the "external" analysis.
  auto *Result = &FAM->getResult<AnalysisT>(*F);
  // Update analyses that JumpThreading depends on but does not explicitly
  // preserve.
  TTI = &FAM->getResult<TargetIRAnalysis>(*F);
  TLI = &FAM->getResult<TargetLibraryAnalysis>(*F);
  AA = &FAM->getResult<AAManager>(*F);

  return Result;
}

BranchProbabilityInfo *JumpThreadingPass::getBPI() {
  if (!BPI) {
    assert(FAM && "Can't create BPI without FunctionAnalysisManager");
    BPI = FAM->getCachedResult<BranchProbabilityAnalysis>(*F);
  }
  return *BPI;
}

BlockFrequencyInfo *JumpThreadingPass::getBFI() {
  if (!BFI) {
    assert(FAM && "Can't create BFI without FunctionAnalysisManager");
    BFI = FAM->getCachedResult<BlockFrequencyAnalysis>(*F);
  }
  return *BFI;
}

// Important note on the validity of BPI/BFI. JumpThreading tries to preserve
// BPI/BFI as it goes. Thus, if a cached instance exists, it will be updated.
// Otherwise, a new instance of BPI/BFI is created (up to date by definition).
BranchProbabilityInfo *JumpThreadingPass::getOrCreateBPI(bool Force) {
  auto *Res = getBPI();
  if (Res)
    return Res;

  if (Force)
    BPI = runExternalAnalysis<BranchProbabilityAnalysis>();

  return *BPI;
}

BlockFrequencyInfo *JumpThreadingPass::getOrCreateBFI(bool Force) {
  auto *Res = getBFI();
  if (Res)
    return Res;

  if (Force)
    BFI = runExternalAnalysis<BlockFrequencyAnalysis>();

  return *BFI;
}