1 //===- JumpThreading.cpp - Thread control through conditional blocks ------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the Jump Threading pass. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/Transforms/Scalar/JumpThreading.h" 14 #include "llvm/ADT/DenseMap.h" 15 #include "llvm/ADT/MapVector.h" 16 #include "llvm/ADT/STLExtras.h" 17 #include "llvm/ADT/ScopeExit.h" 18 #include "llvm/ADT/SmallPtrSet.h" 19 #include "llvm/ADT/SmallVector.h" 20 #include "llvm/ADT/Statistic.h" 21 #include "llvm/Analysis/AliasAnalysis.h" 22 #include "llvm/Analysis/BlockFrequencyInfo.h" 23 #include "llvm/Analysis/BranchProbabilityInfo.h" 24 #include "llvm/Analysis/CFG.h" 25 #include "llvm/Analysis/ConstantFolding.h" 26 #include "llvm/Analysis/GlobalsModRef.h" 27 #include "llvm/Analysis/GuardUtils.h" 28 #include "llvm/Analysis/InstructionSimplify.h" 29 #include "llvm/Analysis/LazyValueInfo.h" 30 #include "llvm/Analysis/Loads.h" 31 #include "llvm/Analysis/LoopInfo.h" 32 #include "llvm/Analysis/MemoryLocation.h" 33 #include "llvm/Analysis/PostDominators.h" 34 #include "llvm/Analysis/TargetLibraryInfo.h" 35 #include "llvm/Analysis/TargetTransformInfo.h" 36 #include "llvm/Analysis/ValueTracking.h" 37 #include "llvm/IR/BasicBlock.h" 38 #include "llvm/IR/CFG.h" 39 #include "llvm/IR/Constant.h" 40 #include "llvm/IR/ConstantRange.h" 41 #include "llvm/IR/Constants.h" 42 #include "llvm/IR/DataLayout.h" 43 #include "llvm/IR/DebugInfo.h" 44 #include "llvm/IR/Dominators.h" 45 #include "llvm/IR/Function.h" 46 #include "llvm/IR/InstrTypes.h" 47 #include "llvm/IR/Instruction.h" 48 #include "llvm/IR/Instructions.h" 49 #include "llvm/IR/IntrinsicInst.h" 50 #include "llvm/IR/Intrinsics.h" 51 #include "llvm/IR/LLVMContext.h" 52 #include "llvm/IR/MDBuilder.h" 53 #include "llvm/IR/Metadata.h" 54 #include "llvm/IR/Module.h" 55 #include "llvm/IR/PassManager.h" 56 #include "llvm/IR/PatternMatch.h" 57 #include "llvm/IR/ProfDataUtils.h" 58 #include "llvm/IR/Type.h" 59 #include "llvm/IR/Use.h" 60 #include "llvm/IR/Value.h" 61 #include "llvm/Support/BlockFrequency.h" 62 #include "llvm/Support/BranchProbability.h" 63 #include "llvm/Support/Casting.h" 64 #include "llvm/Support/CommandLine.h" 65 #include "llvm/Support/Debug.h" 66 #include "llvm/Support/raw_ostream.h" 67 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 68 #include "llvm/Transforms/Utils/Cloning.h" 69 #include "llvm/Transforms/Utils/Local.h" 70 #include "llvm/Transforms/Utils/SSAUpdater.h" 71 #include "llvm/Transforms/Utils/ValueMapper.h" 72 #include <cassert> 73 #include <cstdint> 74 #include <iterator> 75 #include <memory> 76 #include <utility> 77 78 using namespace llvm; 79 using namespace jumpthreading; 80 81 #define DEBUG_TYPE "jump-threading" 82 83 STATISTIC(NumThreads, "Number of jumps threaded"); 84 STATISTIC(NumFolds, "Number of terminators folded"); 85 STATISTIC(NumDupes, "Number of branch blocks duplicated to eliminate phi"); 86 87 static cl::opt<unsigned> 88 BBDuplicateThreshold("jump-threading-threshold", 89 cl::desc("Max block size to duplicate for jump threading"), 90 cl::init(6), cl::Hidden); 91 92 static cl::opt<unsigned> 93 ImplicationSearchThreshold( 94 "jump-threading-implication-search-threshold", 95 cl::desc("The number of predecessors to search for a stronger " 96 "condition to use to thread over a weaker condition"), 97 cl::init(3), cl::Hidden); 98 99 static cl::opt<unsigned> PhiDuplicateThreshold( 100 "jump-threading-phi-threshold", 101 cl::desc("Max PHIs in BB to duplicate for jump threading"), cl::init(76), 102 cl::Hidden); 103 104 static cl::opt<bool> ThreadAcrossLoopHeaders( 105 "jump-threading-across-loop-headers", 106 cl::desc("Allow JumpThreading to thread across loop headers, for testing"), 107 cl::init(false), cl::Hidden); 108 109 JumpThreadingPass::JumpThreadingPass(int T) { 110 DefaultBBDupThreshold = (T == -1) ? BBDuplicateThreshold : unsigned(T); 111 } 112 113 // Update branch probability information according to conditional 114 // branch probability. This is usually made possible for cloned branches 115 // in inline instances by the context specific profile in the caller. 116 // For instance, 117 // 118 // [Block PredBB] 119 // [Branch PredBr] 120 // if (t) { 121 // Block A; 122 // } else { 123 // Block B; 124 // } 125 // 126 // [Block BB] 127 // cond = PN([true, %A], [..., %B]); // PHI node 128 // [Branch CondBr] 129 // if (cond) { 130 // ... // P(cond == true) = 1% 131 // } 132 // 133 // Here we know that when block A is taken, cond must be true, which means 134 // P(cond == true | A) = 1 135 // 136 // Given that P(cond == true) = P(cond == true | A) * P(A) + 137 // P(cond == true | B) * P(B) 138 // we get: 139 // P(cond == true ) = P(A) + P(cond == true | B) * P(B) 140 // 141 // which gives us: 142 // P(A) is less than P(cond == true), i.e. 143 // P(t == true) <= P(cond == true) 144 // 145 // In other words, if we know P(cond == true) is unlikely, we know 146 // that P(t == true) is also unlikely. 147 // 148 static void updatePredecessorProfileMetadata(PHINode *PN, BasicBlock *BB) { 149 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 150 if (!CondBr) 151 return; 152 153 uint64_t TrueWeight, FalseWeight; 154 if (!extractBranchWeights(*CondBr, TrueWeight, FalseWeight)) 155 return; 156 157 if (TrueWeight + FalseWeight == 0) 158 // Zero branch_weights do not give a hint for getting branch probabilities. 159 // Technically it would result in division by zero denominator, which is 160 // TrueWeight + FalseWeight. 161 return; 162 163 // Returns the outgoing edge of the dominating predecessor block 164 // that leads to the PhiNode's incoming block: 165 auto GetPredOutEdge = 166 [](BasicBlock *IncomingBB, 167 BasicBlock *PhiBB) -> std::pair<BasicBlock *, BasicBlock *> { 168 auto *PredBB = IncomingBB; 169 auto *SuccBB = PhiBB; 170 SmallPtrSet<BasicBlock *, 16> Visited; 171 while (true) { 172 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()); 173 if (PredBr && PredBr->isConditional()) 174 return {PredBB, SuccBB}; 175 Visited.insert(PredBB); 176 auto *SinglePredBB = PredBB->getSinglePredecessor(); 177 if (!SinglePredBB) 178 return {nullptr, nullptr}; 179 180 // Stop searching when SinglePredBB has been visited. It means we see 181 // an unreachable loop. 182 if (Visited.count(SinglePredBB)) 183 return {nullptr, nullptr}; 184 185 SuccBB = PredBB; 186 PredBB = SinglePredBB; 187 } 188 }; 189 190 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 191 Value *PhiOpnd = PN->getIncomingValue(i); 192 ConstantInt *CI = dyn_cast<ConstantInt>(PhiOpnd); 193 194 if (!CI || !CI->getType()->isIntegerTy(1)) 195 continue; 196 197 BranchProbability BP = 198 (CI->isOne() ? BranchProbability::getBranchProbability( 199 TrueWeight, TrueWeight + FalseWeight) 200 : BranchProbability::getBranchProbability( 201 FalseWeight, TrueWeight + FalseWeight)); 202 203 auto PredOutEdge = GetPredOutEdge(PN->getIncomingBlock(i), BB); 204 if (!PredOutEdge.first) 205 return; 206 207 BasicBlock *PredBB = PredOutEdge.first; 208 BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator()); 209 if (!PredBr) 210 return; 211 212 uint64_t PredTrueWeight, PredFalseWeight; 213 // FIXME: We currently only set the profile data when it is missing. 214 // With PGO, this can be used to refine even existing profile data with 215 // context information. This needs to be done after more performance 216 // testing. 217 if (extractBranchWeights(*PredBr, PredTrueWeight, PredFalseWeight)) 218 continue; 219 220 // We can not infer anything useful when BP >= 50%, because BP is the 221 // upper bound probability value. 222 if (BP >= BranchProbability(50, 100)) 223 continue; 224 225 uint32_t Weights[2]; 226 if (PredBr->getSuccessor(0) == PredOutEdge.second) { 227 Weights[0] = BP.getNumerator(); 228 Weights[1] = BP.getCompl().getNumerator(); 229 } else { 230 Weights[0] = BP.getCompl().getNumerator(); 231 Weights[1] = BP.getNumerator(); 232 } 233 setBranchWeights(*PredBr, Weights, hasBranchWeightOrigin(*PredBr)); 234 } 235 } 236 237 PreservedAnalyses JumpThreadingPass::run(Function &F, 238 FunctionAnalysisManager &AM) { 239 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 240 // Jump Threading has no sense for the targets with divergent CF 241 if (TTI.hasBranchDivergence(&F)) 242 return PreservedAnalyses::all(); 243 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 244 auto &LVI = AM.getResult<LazyValueAnalysis>(F); 245 auto &AA = AM.getResult<AAManager>(F); 246 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 247 248 bool Changed = 249 runImpl(F, &AM, &TLI, &TTI, &LVI, &AA, 250 std::make_unique<DomTreeUpdater>( 251 &DT, nullptr, DomTreeUpdater::UpdateStrategy::Lazy), 252 nullptr, nullptr); 253 254 if (!Changed) 255 return PreservedAnalyses::all(); 256 257 258 getDomTreeUpdater()->flush(); 259 260 #if defined(EXPENSIVE_CHECKS) 261 assert(getDomTreeUpdater()->getDomTree().verify( 262 DominatorTree::VerificationLevel::Full) && 263 "DT broken after JumpThreading"); 264 assert((!getDomTreeUpdater()->hasPostDomTree() || 265 getDomTreeUpdater()->getPostDomTree().verify( 266 PostDominatorTree::VerificationLevel::Full)) && 267 "PDT broken after JumpThreading"); 268 #else 269 assert(getDomTreeUpdater()->getDomTree().verify( 270 DominatorTree::VerificationLevel::Fast) && 271 "DT broken after JumpThreading"); 272 assert((!getDomTreeUpdater()->hasPostDomTree() || 273 getDomTreeUpdater()->getPostDomTree().verify( 274 PostDominatorTree::VerificationLevel::Fast)) && 275 "PDT broken after JumpThreading"); 276 #endif 277 278 return getPreservedAnalysis(); 279 } 280 281 bool JumpThreadingPass::runImpl(Function &F_, FunctionAnalysisManager *FAM_, 282 TargetLibraryInfo *TLI_, 283 TargetTransformInfo *TTI_, LazyValueInfo *LVI_, 284 AliasAnalysis *AA_, 285 std::unique_ptr<DomTreeUpdater> DTU_, 286 BlockFrequencyInfo *BFI_, 287 BranchProbabilityInfo *BPI_) { 288 LLVM_DEBUG(dbgs() << "Jump threading on function '" << F_.getName() << "'\n"); 289 F = &F_; 290 FAM = FAM_; 291 TLI = TLI_; 292 TTI = TTI_; 293 LVI = LVI_; 294 AA = AA_; 295 DTU = std::move(DTU_); 296 BFI = BFI_; 297 BPI = BPI_; 298 auto *GuardDecl = Intrinsic::getDeclarationIfExists( 299 F->getParent(), Intrinsic::experimental_guard); 300 HasGuards = GuardDecl && !GuardDecl->use_empty(); 301 302 // Reduce the number of instructions duplicated when optimizing strictly for 303 // size. 304 if (BBDuplicateThreshold.getNumOccurrences()) 305 BBDupThreshold = BBDuplicateThreshold; 306 else if (F->hasMinSize()) 307 BBDupThreshold = 3; 308 else 309 BBDupThreshold = DefaultBBDupThreshold; 310 311 assert(DTU && "DTU isn't passed into JumpThreading before using it."); 312 assert(DTU->hasDomTree() && "JumpThreading relies on DomTree to proceed."); 313 DominatorTree &DT = DTU->getDomTree(); 314 315 Unreachable.clear(); 316 for (auto &BB : *F) 317 if (!DT.isReachableFromEntry(&BB)) 318 Unreachable.insert(&BB); 319 320 if (!ThreadAcrossLoopHeaders) 321 findLoopHeaders(*F); 322 323 bool EverChanged = false; 324 bool Changed; 325 do { 326 Changed = false; 327 for (auto &BB : *F) { 328 if (Unreachable.count(&BB)) 329 continue; 330 while (processBlock(&BB)) // Thread all of the branches we can over BB. 331 Changed = ChangedSinceLastAnalysisUpdate = true; 332 333 // Stop processing BB if it's the entry or is now deleted. The following 334 // routines attempt to eliminate BB and locating a suitable replacement 335 // for the entry is non-trivial. 336 if (&BB == &F->getEntryBlock() || DTU->isBBPendingDeletion(&BB)) 337 continue; 338 339 if (pred_empty(&BB)) { 340 // When processBlock makes BB unreachable it doesn't bother to fix up 341 // the instructions in it. We must remove BB to prevent invalid IR. 342 LLVM_DEBUG(dbgs() << " JT: Deleting dead block '" << BB.getName() 343 << "' with terminator: " << *BB.getTerminator() 344 << '\n'); 345 LoopHeaders.erase(&BB); 346 LVI->eraseBlock(&BB); 347 DeleteDeadBlock(&BB, DTU.get()); 348 Changed = ChangedSinceLastAnalysisUpdate = true; 349 continue; 350 } 351 352 // processBlock doesn't thread BBs with unconditional TIs. However, if BB 353 // is "almost empty", we attempt to merge BB with its sole successor. 354 auto *BI = dyn_cast<BranchInst>(BB.getTerminator()); 355 if (BI && BI->isUnconditional()) { 356 BasicBlock *Succ = BI->getSuccessor(0); 357 if ( 358 // The terminator must be the only non-phi instruction in BB. 359 BB.getFirstNonPHIOrDbg(true)->isTerminator() && 360 // Don't alter Loop headers and latches to ensure another pass can 361 // detect and transform nested loops later. 362 !LoopHeaders.count(&BB) && !LoopHeaders.count(Succ) && 363 TryToSimplifyUncondBranchFromEmptyBlock(&BB, DTU.get())) { 364 // BB is valid for cleanup here because we passed in DTU. F remains 365 // BB's parent until a DTU->getDomTree() event. 366 LVI->eraseBlock(&BB); 367 Changed = ChangedSinceLastAnalysisUpdate = true; 368 } 369 } 370 } 371 EverChanged |= Changed; 372 } while (Changed); 373 374 // Jump threading may have introduced redundant debug values into F which 375 // should be removed. 376 if (EverChanged) 377 for (auto &BB : *F) { 378 RemoveRedundantDbgInstrs(&BB); 379 } 380 381 LoopHeaders.clear(); 382 return EverChanged; 383 } 384 385 // Replace uses of Cond with ToVal when safe to do so. If all uses are 386 // replaced, we can remove Cond. We cannot blindly replace all uses of Cond 387 // because we may incorrectly replace uses when guards/assumes are uses of 388 // of `Cond` and we used the guards/assume to reason about the `Cond` value 389 // at the end of block. RAUW unconditionally replaces all uses 390 // including the guards/assumes themselves and the uses before the 391 // guard/assume. 392 static bool replaceFoldableUses(Instruction *Cond, Value *ToVal, 393 BasicBlock *KnownAtEndOfBB) { 394 bool Changed = false; 395 assert(Cond->getType() == ToVal->getType()); 396 // We can unconditionally replace all uses in non-local blocks (i.e. uses 397 // strictly dominated by BB), since LVI information is true from the 398 // terminator of BB. 399 if (Cond->getParent() == KnownAtEndOfBB) 400 Changed |= replaceNonLocalUsesWith(Cond, ToVal); 401 for (Instruction &I : reverse(*KnownAtEndOfBB)) { 402 // Replace any debug-info record users of Cond with ToVal. 403 for (DbgVariableRecord &DVR : filterDbgVars(I.getDbgRecordRange())) 404 DVR.replaceVariableLocationOp(Cond, ToVal, true); 405 406 // Reached the Cond whose uses we are trying to replace, so there are no 407 // more uses. 408 if (&I == Cond) 409 break; 410 // We only replace uses in instructions that are guaranteed to reach the end 411 // of BB, where we know Cond is ToVal. 412 if (!isGuaranteedToTransferExecutionToSuccessor(&I)) 413 break; 414 Changed |= I.replaceUsesOfWith(Cond, ToVal); 415 } 416 if (Cond->use_empty() && !Cond->mayHaveSideEffects()) { 417 Cond->eraseFromParent(); 418 Changed = true; 419 } 420 return Changed; 421 } 422 423 /// Return the cost of duplicating a piece of this block from first non-phi 424 /// and before StopAt instruction to thread across it. Stop scanning the block 425 /// when exceeding the threshold. If duplication is impossible, returns ~0U. 426 static unsigned getJumpThreadDuplicationCost(const TargetTransformInfo *TTI, 427 BasicBlock *BB, 428 Instruction *StopAt, 429 unsigned Threshold) { 430 assert(StopAt->getParent() == BB && "Not an instruction from proper BB?"); 431 432 // Do not duplicate the BB if it has a lot of PHI nodes. 433 // If a threadable chain is too long then the number of PHI nodes can add up, 434 // leading to a substantial increase in compile time when rewriting the SSA. 435 unsigned PhiCount = 0; 436 Instruction *FirstNonPHI = nullptr; 437 for (Instruction &I : *BB) { 438 if (!isa<PHINode>(&I)) { 439 FirstNonPHI = &I; 440 break; 441 } 442 if (++PhiCount > PhiDuplicateThreshold) 443 return ~0U; 444 } 445 446 /// Ignore PHI nodes, these will be flattened when duplication happens. 447 BasicBlock::const_iterator I(FirstNonPHI); 448 449 // FIXME: THREADING will delete values that are just used to compute the 450 // branch, so they shouldn't count against the duplication cost. 451 452 unsigned Bonus = 0; 453 if (BB->getTerminator() == StopAt) { 454 // Threading through a switch statement is particularly profitable. If this 455 // block ends in a switch, decrease its cost to make it more likely to 456 // happen. 457 if (isa<SwitchInst>(StopAt)) 458 Bonus = 6; 459 460 // The same holds for indirect branches, but slightly more so. 461 if (isa<IndirectBrInst>(StopAt)) 462 Bonus = 8; 463 } 464 465 // Bump the threshold up so the early exit from the loop doesn't skip the 466 // terminator-based Size adjustment at the end. 467 Threshold += Bonus; 468 469 // Sum up the cost of each instruction until we get to the terminator. Don't 470 // include the terminator because the copy won't include it. 471 unsigned Size = 0; 472 for (; &*I != StopAt; ++I) { 473 474 // Stop scanning the block if we've reached the threshold. 475 if (Size > Threshold) 476 return Size; 477 478 // Bail out if this instruction gives back a token type, it is not possible 479 // to duplicate it if it is used outside this BB. 480 if (I->getType()->isTokenTy() && I->isUsedOutsideOfBlock(BB)) 481 return ~0U; 482 483 // Blocks with NoDuplicate are modelled as having infinite cost, so they 484 // are never duplicated. 485 if (const CallInst *CI = dyn_cast<CallInst>(I)) 486 if (CI->cannotDuplicate() || CI->isConvergent()) 487 return ~0U; 488 489 if (TTI->getInstructionCost(&*I, TargetTransformInfo::TCK_SizeAndLatency) == 490 TargetTransformInfo::TCC_Free) 491 continue; 492 493 // All other instructions count for at least one unit. 494 ++Size; 495 496 // Calls are more expensive. If they are non-intrinsic calls, we model them 497 // as having cost of 4. If they are a non-vector intrinsic, we model them 498 // as having cost of 2 total, and if they are a vector intrinsic, we model 499 // them as having cost 1. 500 if (const CallInst *CI = dyn_cast<CallInst>(I)) { 501 if (!isa<IntrinsicInst>(CI)) 502 Size += 3; 503 else if (!CI->getType()->isVectorTy()) 504 Size += 1; 505 } 506 } 507 508 return Size > Bonus ? Size - Bonus : 0; 509 } 510 511 /// findLoopHeaders - We do not want jump threading to turn proper loop 512 /// structures into irreducible loops. Doing this breaks up the loop nesting 513 /// hierarchy and pessimizes later transformations. To prevent this from 514 /// happening, we first have to find the loop headers. Here we approximate this 515 /// by finding targets of backedges in the CFG. 516 /// 517 /// Note that there definitely are cases when we want to allow threading of 518 /// edges across a loop header. For example, threading a jump from outside the 519 /// loop (the preheader) to an exit block of the loop is definitely profitable. 520 /// It is also almost always profitable to thread backedges from within the loop 521 /// to exit blocks, and is often profitable to thread backedges to other blocks 522 /// within the loop (forming a nested loop). This simple analysis is not rich 523 /// enough to track all of these properties and keep it up-to-date as the CFG 524 /// mutates, so we don't allow any of these transformations. 525 void JumpThreadingPass::findLoopHeaders(Function &F) { 526 SmallVector<std::pair<const BasicBlock*,const BasicBlock*>, 32> Edges; 527 FindFunctionBackedges(F, Edges); 528 LoopHeaders.insert_range(llvm::make_second_range(Edges)); 529 } 530 531 /// getKnownConstant - Helper method to determine if we can thread over a 532 /// terminator with the given value as its condition, and if so what value to 533 /// use for that. What kind of value this is depends on whether we want an 534 /// integer or a block address, but an undef is always accepted. 535 /// Returns null if Val is null or not an appropriate constant. 536 static Constant *getKnownConstant(Value *Val, ConstantPreference Preference) { 537 if (!Val) 538 return nullptr; 539 540 // Undef is "known" enough. 541 if (UndefValue *U = dyn_cast<UndefValue>(Val)) 542 return U; 543 544 if (Preference == WantBlockAddress) 545 return dyn_cast<BlockAddress>(Val->stripPointerCasts()); 546 547 return dyn_cast<ConstantInt>(Val); 548 } 549 550 /// computeValueKnownInPredecessors - Given a basic block BB and a value V, see 551 /// if we can infer that the value is a known ConstantInt/BlockAddress or undef 552 /// in any of our predecessors. If so, return the known list of value and pred 553 /// BB in the result vector. 554 /// 555 /// This returns true if there were any known values. 556 bool JumpThreadingPass::computeValueKnownInPredecessorsImpl( 557 Value *V, BasicBlock *BB, PredValueInfo &Result, 558 ConstantPreference Preference, SmallPtrSet<Value *, 4> &RecursionSet, 559 Instruction *CxtI) { 560 const DataLayout &DL = BB->getDataLayout(); 561 562 // This method walks up use-def chains recursively. Because of this, we could 563 // get into an infinite loop going around loops in the use-def chain. To 564 // prevent this, keep track of what (value, block) pairs we've already visited 565 // and terminate the search if we loop back to them 566 if (!RecursionSet.insert(V).second) 567 return false; 568 569 // If V is a constant, then it is known in all predecessors. 570 if (Constant *KC = getKnownConstant(V, Preference)) { 571 for (BasicBlock *Pred : predecessors(BB)) 572 Result.emplace_back(KC, Pred); 573 574 return !Result.empty(); 575 } 576 577 // If V is a non-instruction value, or an instruction in a different block, 578 // then it can't be derived from a PHI. 579 Instruction *I = dyn_cast<Instruction>(V); 580 if (!I || I->getParent() != BB) { 581 582 // Okay, if this is a live-in value, see if it has a known value at the any 583 // edge from our predecessors. 584 for (BasicBlock *P : predecessors(BB)) { 585 using namespace PatternMatch; 586 // If the value is known by LazyValueInfo to be a constant in a 587 // predecessor, use that information to try to thread this block. 588 Constant *PredCst = LVI->getConstantOnEdge(V, P, BB, CxtI); 589 // If I is a non-local compare-with-constant instruction, use more-rich 590 // 'getPredicateOnEdge' method. This would be able to handle value 591 // inequalities better, for example if the compare is "X < 4" and "X < 3" 592 // is known true but "X < 4" itself is not available. 593 CmpPredicate Pred; 594 Value *Val; 595 Constant *Cst; 596 if (!PredCst && match(V, m_Cmp(Pred, m_Value(Val), m_Constant(Cst)))) 597 PredCst = LVI->getPredicateOnEdge(Pred, Val, Cst, P, BB, CxtI); 598 if (Constant *KC = getKnownConstant(PredCst, Preference)) 599 Result.emplace_back(KC, P); 600 } 601 602 return !Result.empty(); 603 } 604 605 /// If I is a PHI node, then we know the incoming values for any constants. 606 if (PHINode *PN = dyn_cast<PHINode>(I)) { 607 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 608 Value *InVal = PN->getIncomingValue(i); 609 if (Constant *KC = getKnownConstant(InVal, Preference)) { 610 Result.emplace_back(KC, PN->getIncomingBlock(i)); 611 } else { 612 Constant *CI = LVI->getConstantOnEdge(InVal, 613 PN->getIncomingBlock(i), 614 BB, CxtI); 615 if (Constant *KC = getKnownConstant(CI, Preference)) 616 Result.emplace_back(KC, PN->getIncomingBlock(i)); 617 } 618 } 619 620 return !Result.empty(); 621 } 622 623 // Handle Cast instructions. 624 if (CastInst *CI = dyn_cast<CastInst>(I)) { 625 Value *Source = CI->getOperand(0); 626 PredValueInfoTy Vals; 627 computeValueKnownInPredecessorsImpl(Source, BB, Vals, Preference, 628 RecursionSet, CxtI); 629 if (Vals.empty()) 630 return false; 631 632 // Convert the known values. 633 for (auto &Val : Vals) 634 if (Constant *Folded = ConstantFoldCastOperand(CI->getOpcode(), Val.first, 635 CI->getType(), DL)) 636 Result.emplace_back(Folded, Val.second); 637 638 return !Result.empty(); 639 } 640 641 if (FreezeInst *FI = dyn_cast<FreezeInst>(I)) { 642 Value *Source = FI->getOperand(0); 643 computeValueKnownInPredecessorsImpl(Source, BB, Result, Preference, 644 RecursionSet, CxtI); 645 646 erase_if(Result, [](auto &Pair) { 647 return !isGuaranteedNotToBeUndefOrPoison(Pair.first); 648 }); 649 650 return !Result.empty(); 651 } 652 653 // Handle some boolean conditions. 654 if (I->getType()->getPrimitiveSizeInBits() == 1) { 655 using namespace PatternMatch; 656 if (Preference != WantInteger) 657 return false; 658 // X | true -> true 659 // X & false -> false 660 Value *Op0, *Op1; 661 if (match(I, m_LogicalOr(m_Value(Op0), m_Value(Op1))) || 662 match(I, m_LogicalAnd(m_Value(Op0), m_Value(Op1)))) { 663 PredValueInfoTy LHSVals, RHSVals; 664 665 computeValueKnownInPredecessorsImpl(Op0, BB, LHSVals, WantInteger, 666 RecursionSet, CxtI); 667 computeValueKnownInPredecessorsImpl(Op1, BB, RHSVals, WantInteger, 668 RecursionSet, CxtI); 669 670 if (LHSVals.empty() && RHSVals.empty()) 671 return false; 672 673 ConstantInt *InterestingVal; 674 if (match(I, m_LogicalOr())) 675 InterestingVal = ConstantInt::getTrue(I->getContext()); 676 else 677 InterestingVal = ConstantInt::getFalse(I->getContext()); 678 679 SmallPtrSet<BasicBlock*, 4> LHSKnownBBs; 680 681 // Scan for the sentinel. If we find an undef, force it to the 682 // interesting value: x|undef -> true and x&undef -> false. 683 for (const auto &LHSVal : LHSVals) 684 if (LHSVal.first == InterestingVal || isa<UndefValue>(LHSVal.first)) { 685 Result.emplace_back(InterestingVal, LHSVal.second); 686 LHSKnownBBs.insert(LHSVal.second); 687 } 688 for (const auto &RHSVal : RHSVals) 689 if (RHSVal.first == InterestingVal || isa<UndefValue>(RHSVal.first)) { 690 // If we already inferred a value for this block on the LHS, don't 691 // re-add it. 692 if (!LHSKnownBBs.count(RHSVal.second)) 693 Result.emplace_back(InterestingVal, RHSVal.second); 694 } 695 696 return !Result.empty(); 697 } 698 699 // Handle the NOT form of XOR. 700 if (I->getOpcode() == Instruction::Xor && 701 isa<ConstantInt>(I->getOperand(1)) && 702 cast<ConstantInt>(I->getOperand(1))->isOne()) { 703 computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, Result, 704 WantInteger, RecursionSet, CxtI); 705 if (Result.empty()) 706 return false; 707 708 // Invert the known values. 709 for (auto &R : Result) 710 R.first = ConstantExpr::getNot(R.first); 711 712 return true; 713 } 714 715 // Try to simplify some other binary operator values. 716 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(I)) { 717 if (Preference != WantInteger) 718 return false; 719 if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) { 720 PredValueInfoTy LHSVals; 721 computeValueKnownInPredecessorsImpl(BO->getOperand(0), BB, LHSVals, 722 WantInteger, RecursionSet, CxtI); 723 724 // Try to use constant folding to simplify the binary operator. 725 for (const auto &LHSVal : LHSVals) { 726 Constant *V = LHSVal.first; 727 Constant *Folded = 728 ConstantFoldBinaryOpOperands(BO->getOpcode(), V, CI, DL); 729 730 if (Constant *KC = getKnownConstant(Folded, WantInteger)) 731 Result.emplace_back(KC, LHSVal.second); 732 } 733 } 734 735 return !Result.empty(); 736 } 737 738 // Handle compare with phi operand, where the PHI is defined in this block. 739 if (CmpInst *Cmp = dyn_cast<CmpInst>(I)) { 740 if (Preference != WantInteger) 741 return false; 742 Type *CmpType = Cmp->getType(); 743 Value *CmpLHS = Cmp->getOperand(0); 744 Value *CmpRHS = Cmp->getOperand(1); 745 CmpInst::Predicate Pred = Cmp->getPredicate(); 746 747 PHINode *PN = dyn_cast<PHINode>(CmpLHS); 748 if (!PN) 749 PN = dyn_cast<PHINode>(CmpRHS); 750 // Do not perform phi translation across a loop header phi, because this 751 // may result in comparison of values from two different loop iterations. 752 // FIXME: This check is broken if LoopHeaders is not populated. 753 if (PN && PN->getParent() == BB && !LoopHeaders.contains(BB)) { 754 const DataLayout &DL = PN->getDataLayout(); 755 // We can do this simplification if any comparisons fold to true or false. 756 // See if any do. 757 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 758 BasicBlock *PredBB = PN->getIncomingBlock(i); 759 Value *LHS, *RHS; 760 if (PN == CmpLHS) { 761 LHS = PN->getIncomingValue(i); 762 RHS = CmpRHS->DoPHITranslation(BB, PredBB); 763 } else { 764 LHS = CmpLHS->DoPHITranslation(BB, PredBB); 765 RHS = PN->getIncomingValue(i); 766 } 767 Value *Res = simplifyCmpInst(Pred, LHS, RHS, {DL}); 768 if (!Res) { 769 if (!isa<Constant>(RHS)) 770 continue; 771 772 // getPredicateOnEdge call will make no sense if LHS is defined in BB. 773 auto LHSInst = dyn_cast<Instruction>(LHS); 774 if (LHSInst && LHSInst->getParent() == BB) 775 continue; 776 777 Res = LVI->getPredicateOnEdge(Pred, LHS, cast<Constant>(RHS), PredBB, 778 BB, CxtI ? CxtI : Cmp); 779 } 780 781 if (Constant *KC = getKnownConstant(Res, WantInteger)) 782 Result.emplace_back(KC, PredBB); 783 } 784 785 return !Result.empty(); 786 } 787 788 // If comparing a live-in value against a constant, see if we know the 789 // live-in value on any predecessors. 790 if (isa<Constant>(CmpRHS) && !CmpType->isVectorTy()) { 791 Constant *CmpConst = cast<Constant>(CmpRHS); 792 793 if (!isa<Instruction>(CmpLHS) || 794 cast<Instruction>(CmpLHS)->getParent() != BB) { 795 for (BasicBlock *P : predecessors(BB)) { 796 // If the value is known by LazyValueInfo to be a constant in a 797 // predecessor, use that information to try to thread this block. 798 Constant *Res = LVI->getPredicateOnEdge(Pred, CmpLHS, CmpConst, P, BB, 799 CxtI ? CxtI : Cmp); 800 if (Constant *KC = getKnownConstant(Res, WantInteger)) 801 Result.emplace_back(KC, P); 802 } 803 804 return !Result.empty(); 805 } 806 807 // InstCombine can fold some forms of constant range checks into 808 // (icmp (add (x, C1)), C2). See if we have we have such a thing with 809 // x as a live-in. 810 { 811 using namespace PatternMatch; 812 813 Value *AddLHS; 814 ConstantInt *AddConst; 815 if (isa<ConstantInt>(CmpConst) && 816 match(CmpLHS, m_Add(m_Value(AddLHS), m_ConstantInt(AddConst)))) { 817 if (!isa<Instruction>(AddLHS) || 818 cast<Instruction>(AddLHS)->getParent() != BB) { 819 for (BasicBlock *P : predecessors(BB)) { 820 // If the value is known by LazyValueInfo to be a ConstantRange in 821 // a predecessor, use that information to try to thread this 822 // block. 823 ConstantRange CR = LVI->getConstantRangeOnEdge( 824 AddLHS, P, BB, CxtI ? CxtI : cast<Instruction>(CmpLHS)); 825 // Propagate the range through the addition. 826 CR = CR.add(AddConst->getValue()); 827 828 // Get the range where the compare returns true. 829 ConstantRange CmpRange = ConstantRange::makeExactICmpRegion( 830 Pred, cast<ConstantInt>(CmpConst)->getValue()); 831 832 Constant *ResC; 833 if (CmpRange.contains(CR)) 834 ResC = ConstantInt::getTrue(CmpType); 835 else if (CmpRange.inverse().contains(CR)) 836 ResC = ConstantInt::getFalse(CmpType); 837 else 838 continue; 839 840 Result.emplace_back(ResC, P); 841 } 842 843 return !Result.empty(); 844 } 845 } 846 } 847 848 // Try to find a constant value for the LHS of a comparison, 849 // and evaluate it statically if we can. 850 PredValueInfoTy LHSVals; 851 computeValueKnownInPredecessorsImpl(I->getOperand(0), BB, LHSVals, 852 WantInteger, RecursionSet, CxtI); 853 854 for (const auto &LHSVal : LHSVals) { 855 Constant *V = LHSVal.first; 856 Constant *Folded = 857 ConstantFoldCompareInstOperands(Pred, V, CmpConst, DL); 858 if (Constant *KC = getKnownConstant(Folded, WantInteger)) 859 Result.emplace_back(KC, LHSVal.second); 860 } 861 862 return !Result.empty(); 863 } 864 } 865 866 if (SelectInst *SI = dyn_cast<SelectInst>(I)) { 867 // Handle select instructions where at least one operand is a known constant 868 // and we can figure out the condition value for any predecessor block. 869 Constant *TrueVal = getKnownConstant(SI->getTrueValue(), Preference); 870 Constant *FalseVal = getKnownConstant(SI->getFalseValue(), Preference); 871 PredValueInfoTy Conds; 872 if ((TrueVal || FalseVal) && 873 computeValueKnownInPredecessorsImpl(SI->getCondition(), BB, Conds, 874 WantInteger, RecursionSet, CxtI)) { 875 for (auto &C : Conds) { 876 Constant *Cond = C.first; 877 878 // Figure out what value to use for the condition. 879 bool KnownCond; 880 if (ConstantInt *CI = dyn_cast<ConstantInt>(Cond)) { 881 // A known boolean. 882 KnownCond = CI->isOne(); 883 } else { 884 assert(isa<UndefValue>(Cond) && "Unexpected condition value"); 885 // Either operand will do, so be sure to pick the one that's a known 886 // constant. 887 // FIXME: Do this more cleverly if both values are known constants? 888 KnownCond = (TrueVal != nullptr); 889 } 890 891 // See if the select has a known constant value for this predecessor. 892 if (Constant *Val = KnownCond ? TrueVal : FalseVal) 893 Result.emplace_back(Val, C.second); 894 } 895 896 return !Result.empty(); 897 } 898 } 899 900 // If all else fails, see if LVI can figure out a constant value for us. 901 assert(CxtI->getParent() == BB && "CxtI should be in BB"); 902 Constant *CI = LVI->getConstant(V, CxtI); 903 if (Constant *KC = getKnownConstant(CI, Preference)) { 904 for (BasicBlock *Pred : predecessors(BB)) 905 Result.emplace_back(KC, Pred); 906 } 907 908 return !Result.empty(); 909 } 910 911 /// GetBestDestForBranchOnUndef - If we determine that the specified block ends 912 /// in an undefined jump, decide which block is best to revector to. 913 /// 914 /// Since we can pick an arbitrary destination, we pick the successor with the 915 /// fewest predecessors. This should reduce the in-degree of the others. 916 static unsigned getBestDestForJumpOnUndef(BasicBlock *BB) { 917 Instruction *BBTerm = BB->getTerminator(); 918 unsigned MinSucc = 0; 919 BasicBlock *TestBB = BBTerm->getSuccessor(MinSucc); 920 // Compute the successor with the minimum number of predecessors. 921 unsigned MinNumPreds = pred_size(TestBB); 922 for (unsigned i = 1, e = BBTerm->getNumSuccessors(); i != e; ++i) { 923 TestBB = BBTerm->getSuccessor(i); 924 unsigned NumPreds = pred_size(TestBB); 925 if (NumPreds < MinNumPreds) { 926 MinSucc = i; 927 MinNumPreds = NumPreds; 928 } 929 } 930 931 return MinSucc; 932 } 933 934 static bool hasAddressTakenAndUsed(BasicBlock *BB) { 935 if (!BB->hasAddressTaken()) return false; 936 937 // If the block has its address taken, it may be a tree of dead constants 938 // hanging off of it. These shouldn't keep the block alive. 939 BlockAddress *BA = BlockAddress::get(BB); 940 BA->removeDeadConstantUsers(); 941 return !BA->use_empty(); 942 } 943 944 /// processBlock - If there are any predecessors whose control can be threaded 945 /// through to a successor, transform them now. 946 bool JumpThreadingPass::processBlock(BasicBlock *BB) { 947 // If the block is trivially dead, just return and let the caller nuke it. 948 // This simplifies other transformations. 949 if (DTU->isBBPendingDeletion(BB) || 950 (pred_empty(BB) && BB != &BB->getParent()->getEntryBlock())) 951 return false; 952 953 // If this block has a single predecessor, and if that pred has a single 954 // successor, merge the blocks. This encourages recursive jump threading 955 // because now the condition in this block can be threaded through 956 // predecessors of our predecessor block. 957 if (maybeMergeBasicBlockIntoOnlyPred(BB)) 958 return true; 959 960 if (tryToUnfoldSelectInCurrBB(BB)) 961 return true; 962 963 // Look if we can propagate guards to predecessors. 964 if (HasGuards && processGuards(BB)) 965 return true; 966 967 // What kind of constant we're looking for. 968 ConstantPreference Preference = WantInteger; 969 970 // Look to see if the terminator is a conditional branch, switch or indirect 971 // branch, if not we can't thread it. 972 Value *Condition; 973 Instruction *Terminator = BB->getTerminator(); 974 if (BranchInst *BI = dyn_cast<BranchInst>(Terminator)) { 975 // Can't thread an unconditional jump. 976 if (BI->isUnconditional()) return false; 977 Condition = BI->getCondition(); 978 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(Terminator)) { 979 Condition = SI->getCondition(); 980 } else if (IndirectBrInst *IB = dyn_cast<IndirectBrInst>(Terminator)) { 981 // Can't thread indirect branch with no successors. 982 if (IB->getNumSuccessors() == 0) return false; 983 Condition = IB->getAddress()->stripPointerCasts(); 984 Preference = WantBlockAddress; 985 } else { 986 return false; // Must be an invoke or callbr. 987 } 988 989 // Keep track if we constant folded the condition in this invocation. 990 bool ConstantFolded = false; 991 992 // Run constant folding to see if we can reduce the condition to a simple 993 // constant. 994 if (Instruction *I = dyn_cast<Instruction>(Condition)) { 995 Value *SimpleVal = 996 ConstantFoldInstruction(I, BB->getDataLayout(), TLI); 997 if (SimpleVal) { 998 I->replaceAllUsesWith(SimpleVal); 999 if (isInstructionTriviallyDead(I, TLI)) 1000 I->eraseFromParent(); 1001 Condition = SimpleVal; 1002 ConstantFolded = true; 1003 } 1004 } 1005 1006 // If the terminator is branching on an undef or freeze undef, we can pick any 1007 // of the successors to branch to. Let getBestDestForJumpOnUndef decide. 1008 auto *FI = dyn_cast<FreezeInst>(Condition); 1009 if (isa<UndefValue>(Condition) || 1010 (FI && isa<UndefValue>(FI->getOperand(0)) && FI->hasOneUse())) { 1011 unsigned BestSucc = getBestDestForJumpOnUndef(BB); 1012 std::vector<DominatorTree::UpdateType> Updates; 1013 1014 // Fold the branch/switch. 1015 Instruction *BBTerm = BB->getTerminator(); 1016 Updates.reserve(BBTerm->getNumSuccessors()); 1017 for (unsigned i = 0, e = BBTerm->getNumSuccessors(); i != e; ++i) { 1018 if (i == BestSucc) continue; 1019 BasicBlock *Succ = BBTerm->getSuccessor(i); 1020 Succ->removePredecessor(BB, true); 1021 Updates.push_back({DominatorTree::Delete, BB, Succ}); 1022 } 1023 1024 LLVM_DEBUG(dbgs() << " In block '" << BB->getName() 1025 << "' folding undef terminator: " << *BBTerm << '\n'); 1026 Instruction *NewBI = BranchInst::Create(BBTerm->getSuccessor(BestSucc), BBTerm->getIterator()); 1027 NewBI->setDebugLoc(BBTerm->getDebugLoc()); 1028 ++NumFolds; 1029 BBTerm->eraseFromParent(); 1030 DTU->applyUpdatesPermissive(Updates); 1031 if (FI) 1032 FI->eraseFromParent(); 1033 return true; 1034 } 1035 1036 // If the terminator of this block is branching on a constant, simplify the 1037 // terminator to an unconditional branch. This can occur due to threading in 1038 // other blocks. 1039 if (getKnownConstant(Condition, Preference)) { 1040 LLVM_DEBUG(dbgs() << " In block '" << BB->getName() 1041 << "' folding terminator: " << *BB->getTerminator() 1042 << '\n'); 1043 ++NumFolds; 1044 ConstantFoldTerminator(BB, true, nullptr, DTU.get()); 1045 if (auto *BPI = getBPI()) 1046 BPI->eraseBlock(BB); 1047 return true; 1048 } 1049 1050 Instruction *CondInst = dyn_cast<Instruction>(Condition); 1051 1052 // All the rest of our checks depend on the condition being an instruction. 1053 if (!CondInst) { 1054 // FIXME: Unify this with code below. 1055 if (processThreadableEdges(Condition, BB, Preference, Terminator)) 1056 return true; 1057 return ConstantFolded; 1058 } 1059 1060 // Some of the following optimization can safely work on the unfrozen cond. 1061 Value *CondWithoutFreeze = CondInst; 1062 if (auto *FI = dyn_cast<FreezeInst>(CondInst)) 1063 CondWithoutFreeze = FI->getOperand(0); 1064 1065 if (CmpInst *CondCmp = dyn_cast<CmpInst>(CondWithoutFreeze)) { 1066 // If we're branching on a conditional, LVI might be able to determine 1067 // it's value at the branch instruction. We only handle comparisons 1068 // against a constant at this time. 1069 if (Constant *CondConst = dyn_cast<Constant>(CondCmp->getOperand(1))) { 1070 Constant *Res = 1071 LVI->getPredicateAt(CondCmp->getPredicate(), CondCmp->getOperand(0), 1072 CondConst, BB->getTerminator(), 1073 /*UseBlockValue=*/false); 1074 if (Res) { 1075 // We can safely replace *some* uses of the CondInst if it has 1076 // exactly one value as returned by LVI. RAUW is incorrect in the 1077 // presence of guards and assumes, that have the `Cond` as the use. This 1078 // is because we use the guards/assume to reason about the `Cond` value 1079 // at the end of block, but RAUW unconditionally replaces all uses 1080 // including the guards/assumes themselves and the uses before the 1081 // guard/assume. 1082 if (replaceFoldableUses(CondCmp, Res, BB)) 1083 return true; 1084 } 1085 1086 // We did not manage to simplify this branch, try to see whether 1087 // CondCmp depends on a known phi-select pattern. 1088 if (tryToUnfoldSelect(CondCmp, BB)) 1089 return true; 1090 } 1091 } 1092 1093 if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) 1094 if (tryToUnfoldSelect(SI, BB)) 1095 return true; 1096 1097 // Check for some cases that are worth simplifying. Right now we want to look 1098 // for loads that are used by a switch or by the condition for the branch. If 1099 // we see one, check to see if it's partially redundant. If so, insert a PHI 1100 // which can then be used to thread the values. 1101 Value *SimplifyValue = CondWithoutFreeze; 1102 1103 if (CmpInst *CondCmp = dyn_cast<CmpInst>(SimplifyValue)) 1104 if (isa<Constant>(CondCmp->getOperand(1))) 1105 SimplifyValue = CondCmp->getOperand(0); 1106 1107 // TODO: There are other places where load PRE would be profitable, such as 1108 // more complex comparisons. 1109 if (LoadInst *LoadI = dyn_cast<LoadInst>(SimplifyValue)) 1110 if (simplifyPartiallyRedundantLoad(LoadI)) 1111 return true; 1112 1113 // Before threading, try to propagate profile data backwards: 1114 if (PHINode *PN = dyn_cast<PHINode>(CondInst)) 1115 if (PN->getParent() == BB && isa<BranchInst>(BB->getTerminator())) 1116 updatePredecessorProfileMetadata(PN, BB); 1117 1118 // Handle a variety of cases where we are branching on something derived from 1119 // a PHI node in the current block. If we can prove that any predecessors 1120 // compute a predictable value based on a PHI node, thread those predecessors. 1121 if (processThreadableEdges(CondInst, BB, Preference, Terminator)) 1122 return true; 1123 1124 // If this is an otherwise-unfoldable branch on a phi node or freeze(phi) in 1125 // the current block, see if we can simplify. 1126 PHINode *PN = dyn_cast<PHINode>(CondWithoutFreeze); 1127 if (PN && PN->getParent() == BB && isa<BranchInst>(BB->getTerminator())) 1128 return processBranchOnPHI(PN); 1129 1130 // If this is an otherwise-unfoldable branch on a XOR, see if we can simplify. 1131 if (CondInst->getOpcode() == Instruction::Xor && 1132 CondInst->getParent() == BB && isa<BranchInst>(BB->getTerminator())) 1133 return processBranchOnXOR(cast<BinaryOperator>(CondInst)); 1134 1135 // Search for a stronger dominating condition that can be used to simplify a 1136 // conditional branch leaving BB. 1137 if (processImpliedCondition(BB)) 1138 return true; 1139 1140 return false; 1141 } 1142 1143 bool JumpThreadingPass::processImpliedCondition(BasicBlock *BB) { 1144 auto *BI = dyn_cast<BranchInst>(BB->getTerminator()); 1145 if (!BI || !BI->isConditional()) 1146 return false; 1147 1148 Value *Cond = BI->getCondition(); 1149 // Assuming that predecessor's branch was taken, if pred's branch condition 1150 // (V) implies Cond, Cond can be either true, undef, or poison. In this case, 1151 // freeze(Cond) is either true or a nondeterministic value. 1152 // If freeze(Cond) has only one use, we can freely fold freeze(Cond) to true 1153 // without affecting other instructions. 1154 auto *FICond = dyn_cast<FreezeInst>(Cond); 1155 if (FICond && FICond->hasOneUse()) 1156 Cond = FICond->getOperand(0); 1157 else 1158 FICond = nullptr; 1159 1160 BasicBlock *CurrentBB = BB; 1161 BasicBlock *CurrentPred = BB->getSinglePredecessor(); 1162 unsigned Iter = 0; 1163 1164 auto &DL = BB->getDataLayout(); 1165 1166 while (CurrentPred && Iter++ < ImplicationSearchThreshold) { 1167 auto *PBI = dyn_cast<BranchInst>(CurrentPred->getTerminator()); 1168 if (!PBI || !PBI->isConditional()) 1169 return false; 1170 if (PBI->getSuccessor(0) != CurrentBB && PBI->getSuccessor(1) != CurrentBB) 1171 return false; 1172 1173 bool CondIsTrue = PBI->getSuccessor(0) == CurrentBB; 1174 std::optional<bool> Implication = 1175 isImpliedCondition(PBI->getCondition(), Cond, DL, CondIsTrue); 1176 1177 // If the branch condition of BB (which is Cond) and CurrentPred are 1178 // exactly the same freeze instruction, Cond can be folded into CondIsTrue. 1179 if (!Implication && FICond && isa<FreezeInst>(PBI->getCondition())) { 1180 if (cast<FreezeInst>(PBI->getCondition())->getOperand(0) == 1181 FICond->getOperand(0)) 1182 Implication = CondIsTrue; 1183 } 1184 1185 if (Implication) { 1186 BasicBlock *KeepSucc = BI->getSuccessor(*Implication ? 0 : 1); 1187 BasicBlock *RemoveSucc = BI->getSuccessor(*Implication ? 1 : 0); 1188 RemoveSucc->removePredecessor(BB); 1189 BranchInst *UncondBI = BranchInst::Create(KeepSucc, BI->getIterator()); 1190 UncondBI->setDebugLoc(BI->getDebugLoc()); 1191 ++NumFolds; 1192 BI->eraseFromParent(); 1193 if (FICond) 1194 FICond->eraseFromParent(); 1195 1196 DTU->applyUpdatesPermissive({{DominatorTree::Delete, BB, RemoveSucc}}); 1197 if (auto *BPI = getBPI()) 1198 BPI->eraseBlock(BB); 1199 return true; 1200 } 1201 CurrentBB = CurrentPred; 1202 CurrentPred = CurrentBB->getSinglePredecessor(); 1203 } 1204 1205 return false; 1206 } 1207 1208 /// Return true if Op is an instruction defined in the given block. 1209 static bool isOpDefinedInBlock(Value *Op, BasicBlock *BB) { 1210 if (Instruction *OpInst = dyn_cast<Instruction>(Op)) 1211 if (OpInst->getParent() == BB) 1212 return true; 1213 return false; 1214 } 1215 1216 /// simplifyPartiallyRedundantLoad - If LoadI is an obviously partially 1217 /// redundant load instruction, eliminate it by replacing it with a PHI node. 1218 /// This is an important optimization that encourages jump threading, and needs 1219 /// to be run interlaced with other jump threading tasks. 1220 bool JumpThreadingPass::simplifyPartiallyRedundantLoad(LoadInst *LoadI) { 1221 // Don't hack volatile and ordered loads. 1222 if (!LoadI->isUnordered()) return false; 1223 1224 // If the load is defined in a block with exactly one predecessor, it can't be 1225 // partially redundant. 1226 BasicBlock *LoadBB = LoadI->getParent(); 1227 if (LoadBB->getSinglePredecessor()) 1228 return false; 1229 1230 // If the load is defined in an EH pad, it can't be partially redundant, 1231 // because the edges between the invoke and the EH pad cannot have other 1232 // instructions between them. 1233 if (LoadBB->isEHPad()) 1234 return false; 1235 1236 Value *LoadedPtr = LoadI->getOperand(0); 1237 1238 // If the loaded operand is defined in the LoadBB and its not a phi, 1239 // it can't be available in predecessors. 1240 if (isOpDefinedInBlock(LoadedPtr, LoadBB) && !isa<PHINode>(LoadedPtr)) 1241 return false; 1242 1243 // Scan a few instructions up from the load, to see if it is obviously live at 1244 // the entry to its block. 1245 BasicBlock::iterator BBIt(LoadI); 1246 bool IsLoadCSE; 1247 BatchAAResults BatchAA(*AA); 1248 // The dominator tree is updated lazily and may not be valid at this point. 1249 BatchAA.disableDominatorTree(); 1250 if (Value *AvailableVal = FindAvailableLoadedValue( 1251 LoadI, LoadBB, BBIt, DefMaxInstsToScan, &BatchAA, &IsLoadCSE)) { 1252 // If the value of the load is locally available within the block, just use 1253 // it. This frequently occurs for reg2mem'd allocas. 1254 1255 if (IsLoadCSE) { 1256 LoadInst *NLoadI = cast<LoadInst>(AvailableVal); 1257 combineMetadataForCSE(NLoadI, LoadI, false); 1258 LVI->forgetValue(NLoadI); 1259 }; 1260 1261 // If the returned value is the load itself, replace with poison. This can 1262 // only happen in dead loops. 1263 if (AvailableVal == LoadI) 1264 AvailableVal = PoisonValue::get(LoadI->getType()); 1265 if (AvailableVal->getType() != LoadI->getType()) { 1266 AvailableVal = CastInst::CreateBitOrPointerCast( 1267 AvailableVal, LoadI->getType(), "", LoadI->getIterator()); 1268 cast<Instruction>(AvailableVal)->setDebugLoc(LoadI->getDebugLoc()); 1269 } 1270 LoadI->replaceAllUsesWith(AvailableVal); 1271 LoadI->eraseFromParent(); 1272 return true; 1273 } 1274 1275 // Otherwise, if we scanned the whole block and got to the top of the block, 1276 // we know the block is locally transparent to the load. If not, something 1277 // might clobber its value. 1278 if (BBIt != LoadBB->begin()) 1279 return false; 1280 1281 // If all of the loads and stores that feed the value have the same AA tags, 1282 // then we can propagate them onto any newly inserted loads. 1283 AAMDNodes AATags = LoadI->getAAMetadata(); 1284 1285 SmallPtrSet<BasicBlock*, 8> PredsScanned; 1286 1287 using AvailablePredsTy = SmallVector<std::pair<BasicBlock *, Value *>, 8>; 1288 1289 AvailablePredsTy AvailablePreds; 1290 BasicBlock *OneUnavailablePred = nullptr; 1291 SmallVector<LoadInst*, 8> CSELoads; 1292 1293 // If we got here, the loaded value is transparent through to the start of the 1294 // block. Check to see if it is available in any of the predecessor blocks. 1295 for (BasicBlock *PredBB : predecessors(LoadBB)) { 1296 // If we already scanned this predecessor, skip it. 1297 if (!PredsScanned.insert(PredBB).second) 1298 continue; 1299 1300 BBIt = PredBB->end(); 1301 unsigned NumScanedInst = 0; 1302 Value *PredAvailable = nullptr; 1303 // NOTE: We don't CSE load that is volatile or anything stronger than 1304 // unordered, that should have been checked when we entered the function. 1305 assert(LoadI->isUnordered() && 1306 "Attempting to CSE volatile or atomic loads"); 1307 // If this is a load on a phi pointer, phi-translate it and search 1308 // for available load/store to the pointer in predecessors. 1309 Type *AccessTy = LoadI->getType(); 1310 const auto &DL = LoadI->getDataLayout(); 1311 MemoryLocation Loc(LoadedPtr->DoPHITranslation(LoadBB, PredBB), 1312 LocationSize::precise(DL.getTypeStoreSize(AccessTy)), 1313 AATags); 1314 PredAvailable = findAvailablePtrLoadStore( 1315 Loc, AccessTy, LoadI->isAtomic(), PredBB, BBIt, DefMaxInstsToScan, 1316 &BatchAA, &IsLoadCSE, &NumScanedInst); 1317 1318 // If PredBB has a single predecessor, continue scanning through the 1319 // single predecessor. 1320 BasicBlock *SinglePredBB = PredBB; 1321 while (!PredAvailable && SinglePredBB && BBIt == SinglePredBB->begin() && 1322 NumScanedInst < DefMaxInstsToScan) { 1323 SinglePredBB = SinglePredBB->getSinglePredecessor(); 1324 if (SinglePredBB) { 1325 BBIt = SinglePredBB->end(); 1326 PredAvailable = findAvailablePtrLoadStore( 1327 Loc, AccessTy, LoadI->isAtomic(), SinglePredBB, BBIt, 1328 (DefMaxInstsToScan - NumScanedInst), &BatchAA, &IsLoadCSE, 1329 &NumScanedInst); 1330 } 1331 } 1332 1333 if (!PredAvailable) { 1334 OneUnavailablePred = PredBB; 1335 continue; 1336 } 1337 1338 if (IsLoadCSE) 1339 CSELoads.push_back(cast<LoadInst>(PredAvailable)); 1340 1341 // If so, this load is partially redundant. Remember this info so that we 1342 // can create a PHI node. 1343 AvailablePreds.emplace_back(PredBB, PredAvailable); 1344 } 1345 1346 // If the loaded value isn't available in any predecessor, it isn't partially 1347 // redundant. 1348 if (AvailablePreds.empty()) return false; 1349 1350 // Okay, the loaded value is available in at least one (and maybe all!) 1351 // predecessors. If the value is unavailable in more than one unique 1352 // predecessor, we want to insert a merge block for those common predecessors. 1353 // This ensures that we only have to insert one reload, thus not increasing 1354 // code size. 1355 BasicBlock *UnavailablePred = nullptr; 1356 1357 // If the value is unavailable in one of predecessors, we will end up 1358 // inserting a new instruction into them. It is only valid if all the 1359 // instructions before LoadI are guaranteed to pass execution to its 1360 // successor, or if LoadI is safe to speculate. 1361 // TODO: If this logic becomes more complex, and we will perform PRE insertion 1362 // farther than to a predecessor, we need to reuse the code from GVN's PRE. 1363 // It requires domination tree analysis, so for this simple case it is an 1364 // overkill. 1365 if (PredsScanned.size() != AvailablePreds.size() && 1366 !isSafeToSpeculativelyExecute(LoadI)) 1367 for (auto I = LoadBB->begin(); &*I != LoadI; ++I) 1368 if (!isGuaranteedToTransferExecutionToSuccessor(&*I)) 1369 return false; 1370 1371 // If there is exactly one predecessor where the value is unavailable, the 1372 // already computed 'OneUnavailablePred' block is it. If it ends in an 1373 // unconditional branch, we know that it isn't a critical edge. 1374 if (PredsScanned.size() == AvailablePreds.size()+1 && 1375 OneUnavailablePred->getTerminator()->getNumSuccessors() == 1) { 1376 UnavailablePred = OneUnavailablePred; 1377 } else if (PredsScanned.size() != AvailablePreds.size()) { 1378 // Otherwise, we had multiple unavailable predecessors or we had a critical 1379 // edge from the one. 1380 SmallVector<BasicBlock*, 8> PredsToSplit; 1381 SmallPtrSet<BasicBlock *, 8> AvailablePredSet( 1382 llvm::from_range, llvm::make_first_range(AvailablePreds)); 1383 1384 // Add all the unavailable predecessors to the PredsToSplit list. 1385 for (BasicBlock *P : predecessors(LoadBB)) { 1386 // If the predecessor is an indirect goto, we can't split the edge. 1387 if (isa<IndirectBrInst>(P->getTerminator())) 1388 return false; 1389 1390 if (!AvailablePredSet.count(P)) 1391 PredsToSplit.push_back(P); 1392 } 1393 1394 // Split them out to their own block. 1395 UnavailablePred = splitBlockPreds(LoadBB, PredsToSplit, "thread-pre-split"); 1396 } 1397 1398 // If the value isn't available in all predecessors, then there will be 1399 // exactly one where it isn't available. Insert a load on that edge and add 1400 // it to the AvailablePreds list. 1401 if (UnavailablePred) { 1402 assert(UnavailablePred->getTerminator()->getNumSuccessors() == 1 && 1403 "Can't handle critical edge here!"); 1404 LoadInst *NewVal = new LoadInst( 1405 LoadI->getType(), LoadedPtr->DoPHITranslation(LoadBB, UnavailablePred), 1406 LoadI->getName() + ".pr", false, LoadI->getAlign(), 1407 LoadI->getOrdering(), LoadI->getSyncScopeID(), 1408 UnavailablePred->getTerminator()->getIterator()); 1409 NewVal->setDebugLoc(LoadI->getDebugLoc()); 1410 if (AATags) 1411 NewVal->setAAMetadata(AATags); 1412 1413 AvailablePreds.emplace_back(UnavailablePred, NewVal); 1414 } 1415 1416 // Now we know that each predecessor of this block has a value in 1417 // AvailablePreds, sort them for efficient access as we're walking the preds. 1418 array_pod_sort(AvailablePreds.begin(), AvailablePreds.end()); 1419 1420 // Create a PHI node at the start of the block for the PRE'd load value. 1421 PHINode *PN = PHINode::Create(LoadI->getType(), pred_size(LoadBB), ""); 1422 PN->insertBefore(LoadBB->begin()); 1423 PN->takeName(LoadI); 1424 PN->setDebugLoc(LoadI->getDebugLoc()); 1425 1426 // Insert new entries into the PHI for each predecessor. A single block may 1427 // have multiple entries here. 1428 for (BasicBlock *P : predecessors(LoadBB)) { 1429 AvailablePredsTy::iterator I = 1430 llvm::lower_bound(AvailablePreds, std::make_pair(P, (Value *)nullptr)); 1431 1432 assert(I != AvailablePreds.end() && I->first == P && 1433 "Didn't find entry for predecessor!"); 1434 1435 // If we have an available predecessor but it requires casting, insert the 1436 // cast in the predecessor and use the cast. Note that we have to update the 1437 // AvailablePreds vector as we go so that all of the PHI entries for this 1438 // predecessor use the same bitcast. 1439 Value *&PredV = I->second; 1440 if (PredV->getType() != LoadI->getType()) 1441 PredV = CastInst::CreateBitOrPointerCast( 1442 PredV, LoadI->getType(), "", P->getTerminator()->getIterator()); 1443 1444 PN->addIncoming(PredV, I->first); 1445 } 1446 1447 for (LoadInst *PredLoadI : CSELoads) { 1448 combineMetadataForCSE(PredLoadI, LoadI, true); 1449 LVI->forgetValue(PredLoadI); 1450 } 1451 1452 LoadI->replaceAllUsesWith(PN); 1453 LoadI->eraseFromParent(); 1454 1455 return true; 1456 } 1457 1458 /// findMostPopularDest - The specified list contains multiple possible 1459 /// threadable destinations. Pick the one that occurs the most frequently in 1460 /// the list. 1461 static BasicBlock * 1462 findMostPopularDest(BasicBlock *BB, 1463 const SmallVectorImpl<std::pair<BasicBlock *, 1464 BasicBlock *>> &PredToDestList) { 1465 assert(!PredToDestList.empty()); 1466 1467 // Determine popularity. If there are multiple possible destinations, we 1468 // explicitly choose to ignore 'undef' destinations. We prefer to thread 1469 // blocks with known and real destinations to threading undef. We'll handle 1470 // them later if interesting. 1471 MapVector<BasicBlock *, unsigned> DestPopularity; 1472 1473 // Populate DestPopularity with the successors in the order they appear in the 1474 // successor list. This way, we ensure determinism by iterating it in the 1475 // same order in llvm::max_element below. We map nullptr to 0 so that we can 1476 // return nullptr when PredToDestList contains nullptr only. 1477 DestPopularity[nullptr] = 0; 1478 for (auto *SuccBB : successors(BB)) 1479 DestPopularity[SuccBB] = 0; 1480 1481 for (const auto &PredToDest : PredToDestList) 1482 if (PredToDest.second) 1483 DestPopularity[PredToDest.second]++; 1484 1485 // Find the most popular dest. 1486 auto MostPopular = llvm::max_element(DestPopularity, llvm::less_second()); 1487 1488 // Okay, we have finally picked the most popular destination. 1489 return MostPopular->first; 1490 } 1491 1492 // Try to evaluate the value of V when the control flows from PredPredBB to 1493 // BB->getSinglePredecessor() and then on to BB. 1494 Constant *JumpThreadingPass::evaluateOnPredecessorEdge(BasicBlock *BB, 1495 BasicBlock *PredPredBB, 1496 Value *V, 1497 const DataLayout &DL) { 1498 SmallPtrSet<Value *, 8> Visited; 1499 return evaluateOnPredecessorEdge(BB, PredPredBB, V, DL, Visited); 1500 } 1501 1502 Constant *JumpThreadingPass::evaluateOnPredecessorEdge( 1503 BasicBlock *BB, BasicBlock *PredPredBB, Value *V, const DataLayout &DL, 1504 SmallPtrSet<Value *, 8> &Visited) { 1505 if (!Visited.insert(V).second) 1506 return nullptr; 1507 auto _ = make_scope_exit([&Visited, V]() { Visited.erase(V); }); 1508 1509 BasicBlock *PredBB = BB->getSinglePredecessor(); 1510 assert(PredBB && "Expected a single predecessor"); 1511 1512 if (Constant *Cst = dyn_cast<Constant>(V)) { 1513 return Cst; 1514 } 1515 1516 // Consult LVI if V is not an instruction in BB or PredBB. 1517 Instruction *I = dyn_cast<Instruction>(V); 1518 if (!I || (I->getParent() != BB && I->getParent() != PredBB)) { 1519 return LVI->getConstantOnEdge(V, PredPredBB, PredBB, nullptr); 1520 } 1521 1522 // Look into a PHI argument. 1523 if (PHINode *PHI = dyn_cast<PHINode>(V)) { 1524 if (PHI->getParent() == PredBB) 1525 return dyn_cast<Constant>(PHI->getIncomingValueForBlock(PredPredBB)); 1526 return nullptr; 1527 } 1528 1529 // If we have a CmpInst, try to fold it for each incoming edge into PredBB. 1530 // Note that during the execution of the pass, phi nodes may become constant 1531 // and may be removed, which can lead to self-referencing instructions in 1532 // code that becomes unreachable. Consequently, we need to handle those 1533 // instructions in unreachable code and check before going into recursion. 1534 if (CmpInst *CondCmp = dyn_cast<CmpInst>(V)) { 1535 if (CondCmp->getParent() == BB) { 1536 Constant *Op0 = evaluateOnPredecessorEdge( 1537 BB, PredPredBB, CondCmp->getOperand(0), DL, Visited); 1538 Constant *Op1 = evaluateOnPredecessorEdge( 1539 BB, PredPredBB, CondCmp->getOperand(1), DL, Visited); 1540 if (Op0 && Op1) { 1541 return ConstantFoldCompareInstOperands(CondCmp->getPredicate(), Op0, 1542 Op1, DL); 1543 } 1544 } 1545 return nullptr; 1546 } 1547 1548 return nullptr; 1549 } 1550 1551 bool JumpThreadingPass::processThreadableEdges(Value *Cond, BasicBlock *BB, 1552 ConstantPreference Preference, 1553 Instruction *CxtI) { 1554 // If threading this would thread across a loop header, don't even try to 1555 // thread the edge. 1556 if (LoopHeaders.count(BB)) 1557 return false; 1558 1559 PredValueInfoTy PredValues; 1560 if (!computeValueKnownInPredecessors(Cond, BB, PredValues, Preference, 1561 CxtI)) { 1562 // We don't have known values in predecessors. See if we can thread through 1563 // BB and its sole predecessor. 1564 return maybethreadThroughTwoBasicBlocks(BB, Cond); 1565 } 1566 1567 assert(!PredValues.empty() && 1568 "computeValueKnownInPredecessors returned true with no values"); 1569 1570 LLVM_DEBUG(dbgs() << "IN BB: " << *BB; 1571 for (const auto &PredValue : PredValues) { 1572 dbgs() << " BB '" << BB->getName() 1573 << "': FOUND condition = " << *PredValue.first 1574 << " for pred '" << PredValue.second->getName() << "'.\n"; 1575 }); 1576 1577 // Decide what we want to thread through. Convert our list of known values to 1578 // a list of known destinations for each pred. This also discards duplicate 1579 // predecessors and keeps track of the undefined inputs (which are represented 1580 // as a null dest in the PredToDestList). 1581 SmallPtrSet<BasicBlock*, 16> SeenPreds; 1582 SmallVector<std::pair<BasicBlock*, BasicBlock*>, 16> PredToDestList; 1583 1584 BasicBlock *OnlyDest = nullptr; 1585 BasicBlock *MultipleDestSentinel = (BasicBlock*)(intptr_t)~0ULL; 1586 Constant *OnlyVal = nullptr; 1587 Constant *MultipleVal = (Constant *)(intptr_t)~0ULL; 1588 1589 for (const auto &PredValue : PredValues) { 1590 BasicBlock *Pred = PredValue.second; 1591 if (!SeenPreds.insert(Pred).second) 1592 continue; // Duplicate predecessor entry. 1593 1594 Constant *Val = PredValue.first; 1595 1596 BasicBlock *DestBB; 1597 if (isa<UndefValue>(Val)) 1598 DestBB = nullptr; 1599 else if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator())) { 1600 assert(isa<ConstantInt>(Val) && "Expecting a constant integer"); 1601 DestBB = BI->getSuccessor(cast<ConstantInt>(Val)->isZero()); 1602 } else if (SwitchInst *SI = dyn_cast<SwitchInst>(BB->getTerminator())) { 1603 assert(isa<ConstantInt>(Val) && "Expecting a constant integer"); 1604 DestBB = SI->findCaseValue(cast<ConstantInt>(Val))->getCaseSuccessor(); 1605 } else { 1606 assert(isa<IndirectBrInst>(BB->getTerminator()) 1607 && "Unexpected terminator"); 1608 assert(isa<BlockAddress>(Val) && "Expecting a constant blockaddress"); 1609 DestBB = cast<BlockAddress>(Val)->getBasicBlock(); 1610 } 1611 1612 // If we have exactly one destination, remember it for efficiency below. 1613 if (PredToDestList.empty()) { 1614 OnlyDest = DestBB; 1615 OnlyVal = Val; 1616 } else { 1617 if (OnlyDest != DestBB) 1618 OnlyDest = MultipleDestSentinel; 1619 // It possible we have same destination, but different value, e.g. default 1620 // case in switchinst. 1621 if (Val != OnlyVal) 1622 OnlyVal = MultipleVal; 1623 } 1624 1625 // If the predecessor ends with an indirect goto, we can't change its 1626 // destination. 1627 if (isa<IndirectBrInst>(Pred->getTerminator())) 1628 continue; 1629 1630 PredToDestList.emplace_back(Pred, DestBB); 1631 } 1632 1633 // If all edges were unthreadable, we fail. 1634 if (PredToDestList.empty()) 1635 return false; 1636 1637 // If all the predecessors go to a single known successor, we want to fold, 1638 // not thread. By doing so, we do not need to duplicate the current block and 1639 // also miss potential opportunities in case we dont/cant duplicate. 1640 if (OnlyDest && OnlyDest != MultipleDestSentinel) { 1641 if (BB->hasNPredecessors(PredToDestList.size())) { 1642 bool SeenFirstBranchToOnlyDest = false; 1643 std::vector <DominatorTree::UpdateType> Updates; 1644 Updates.reserve(BB->getTerminator()->getNumSuccessors() - 1); 1645 for (BasicBlock *SuccBB : successors(BB)) { 1646 if (SuccBB == OnlyDest && !SeenFirstBranchToOnlyDest) { 1647 SeenFirstBranchToOnlyDest = true; // Don't modify the first branch. 1648 } else { 1649 SuccBB->removePredecessor(BB, true); // This is unreachable successor. 1650 Updates.push_back({DominatorTree::Delete, BB, SuccBB}); 1651 } 1652 } 1653 1654 // Finally update the terminator. 1655 Instruction *Term = BB->getTerminator(); 1656 Instruction *NewBI = BranchInst::Create(OnlyDest, Term->getIterator()); 1657 NewBI->setDebugLoc(Term->getDebugLoc()); 1658 ++NumFolds; 1659 Term->eraseFromParent(); 1660 DTU->applyUpdatesPermissive(Updates); 1661 if (auto *BPI = getBPI()) 1662 BPI->eraseBlock(BB); 1663 1664 // If the condition is now dead due to the removal of the old terminator, 1665 // erase it. 1666 if (auto *CondInst = dyn_cast<Instruction>(Cond)) { 1667 if (CondInst->use_empty() && !CondInst->mayHaveSideEffects()) 1668 CondInst->eraseFromParent(); 1669 // We can safely replace *some* uses of the CondInst if it has 1670 // exactly one value as returned by LVI. RAUW is incorrect in the 1671 // presence of guards and assumes, that have the `Cond` as the use. This 1672 // is because we use the guards/assume to reason about the `Cond` value 1673 // at the end of block, but RAUW unconditionally replaces all uses 1674 // including the guards/assumes themselves and the uses before the 1675 // guard/assume. 1676 else if (OnlyVal && OnlyVal != MultipleVal) 1677 replaceFoldableUses(CondInst, OnlyVal, BB); 1678 } 1679 return true; 1680 } 1681 } 1682 1683 // Determine which is the most common successor. If we have many inputs and 1684 // this block is a switch, we want to start by threading the batch that goes 1685 // to the most popular destination first. If we only know about one 1686 // threadable destination (the common case) we can avoid this. 1687 BasicBlock *MostPopularDest = OnlyDest; 1688 1689 if (MostPopularDest == MultipleDestSentinel) { 1690 // Remove any loop headers from the Dest list, threadEdge conservatively 1691 // won't process them, but we might have other destination that are eligible 1692 // and we still want to process. 1693 erase_if(PredToDestList, 1694 [&](const std::pair<BasicBlock *, BasicBlock *> &PredToDest) { 1695 return LoopHeaders.contains(PredToDest.second); 1696 }); 1697 1698 if (PredToDestList.empty()) 1699 return false; 1700 1701 MostPopularDest = findMostPopularDest(BB, PredToDestList); 1702 } 1703 1704 // Now that we know what the most popular destination is, factor all 1705 // predecessors that will jump to it into a single predecessor. 1706 SmallVector<BasicBlock*, 16> PredsToFactor; 1707 for (const auto &PredToDest : PredToDestList) 1708 if (PredToDest.second == MostPopularDest) { 1709 BasicBlock *Pred = PredToDest.first; 1710 1711 // This predecessor may be a switch or something else that has multiple 1712 // edges to the block. Factor each of these edges by listing them 1713 // according to # occurrences in PredsToFactor. 1714 for (BasicBlock *Succ : successors(Pred)) 1715 if (Succ == BB) 1716 PredsToFactor.push_back(Pred); 1717 } 1718 1719 // If the threadable edges are branching on an undefined value, we get to pick 1720 // the destination that these predecessors should get to. 1721 if (!MostPopularDest) 1722 MostPopularDest = BB->getTerminator()-> 1723 getSuccessor(getBestDestForJumpOnUndef(BB)); 1724 1725 // Ok, try to thread it! 1726 return tryThreadEdge(BB, PredsToFactor, MostPopularDest); 1727 } 1728 1729 /// processBranchOnPHI - We have an otherwise unthreadable conditional branch on 1730 /// a PHI node (or freeze PHI) in the current block. See if there are any 1731 /// simplifications we can do based on inputs to the phi node. 1732 bool JumpThreadingPass::processBranchOnPHI(PHINode *PN) { 1733 BasicBlock *BB = PN->getParent(); 1734 1735 // TODO: We could make use of this to do it once for blocks with common PHI 1736 // values. 1737 SmallVector<BasicBlock*, 1> PredBBs; 1738 PredBBs.resize(1); 1739 1740 // If any of the predecessor blocks end in an unconditional branch, we can 1741 // *duplicate* the conditional branch into that block in order to further 1742 // encourage jump threading and to eliminate cases where we have branch on a 1743 // phi of an icmp (branch on icmp is much better). 1744 // This is still beneficial when a frozen phi is used as the branch condition 1745 // because it allows CodeGenPrepare to further canonicalize br(freeze(icmp)) 1746 // to br(icmp(freeze ...)). 1747 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { 1748 BasicBlock *PredBB = PN->getIncomingBlock(i); 1749 if (BranchInst *PredBr = dyn_cast<BranchInst>(PredBB->getTerminator())) 1750 if (PredBr->isUnconditional()) { 1751 PredBBs[0] = PredBB; 1752 // Try to duplicate BB into PredBB. 1753 if (duplicateCondBranchOnPHIIntoPred(BB, PredBBs)) 1754 return true; 1755 } 1756 } 1757 1758 return false; 1759 } 1760 1761 /// processBranchOnXOR - We have an otherwise unthreadable conditional branch on 1762 /// a xor instruction in the current block. See if there are any 1763 /// simplifications we can do based on inputs to the xor. 1764 bool JumpThreadingPass::processBranchOnXOR(BinaryOperator *BO) { 1765 BasicBlock *BB = BO->getParent(); 1766 1767 // If either the LHS or RHS of the xor is a constant, don't do this 1768 // optimization. 1769 if (isa<ConstantInt>(BO->getOperand(0)) || 1770 isa<ConstantInt>(BO->getOperand(1))) 1771 return false; 1772 1773 // If the first instruction in BB isn't a phi, we won't be able to infer 1774 // anything special about any particular predecessor. 1775 if (!isa<PHINode>(BB->front())) 1776 return false; 1777 1778 // If this BB is a landing pad, we won't be able to split the edge into it. 1779 if (BB->isEHPad()) 1780 return false; 1781 1782 // If we have a xor as the branch input to this block, and we know that the 1783 // LHS or RHS of the xor in any predecessor is true/false, then we can clone 1784 // the condition into the predecessor and fix that value to true, saving some 1785 // logical ops on that path and encouraging other paths to simplify. 1786 // 1787 // This copies something like this: 1788 // 1789 // BB: 1790 // %X = phi i1 [1], [%X'] 1791 // %Y = icmp eq i32 %A, %B 1792 // %Z = xor i1 %X, %Y 1793 // br i1 %Z, ... 1794 // 1795 // Into: 1796 // BB': 1797 // %Y = icmp ne i32 %A, %B 1798 // br i1 %Y, ... 1799 1800 PredValueInfoTy XorOpValues; 1801 bool isLHS = true; 1802 if (!computeValueKnownInPredecessors(BO->getOperand(0), BB, XorOpValues, 1803 WantInteger, BO)) { 1804 assert(XorOpValues.empty()); 1805 if (!computeValueKnownInPredecessors(BO->getOperand(1), BB, XorOpValues, 1806 WantInteger, BO)) 1807 return false; 1808 isLHS = false; 1809 } 1810 1811 assert(!XorOpValues.empty() && 1812 "computeValueKnownInPredecessors returned true with no values"); 1813 1814 // Scan the information to see which is most popular: true or false. The 1815 // predecessors can be of the set true, false, or undef. 1816 unsigned NumTrue = 0, NumFalse = 0; 1817 for (const auto &XorOpValue : XorOpValues) { 1818 if (isa<UndefValue>(XorOpValue.first)) 1819 // Ignore undefs for the count. 1820 continue; 1821 if (cast<ConstantInt>(XorOpValue.first)->isZero()) 1822 ++NumFalse; 1823 else 1824 ++NumTrue; 1825 } 1826 1827 // Determine which value to split on, true, false, or undef if neither. 1828 ConstantInt *SplitVal = nullptr; 1829 if (NumTrue > NumFalse) 1830 SplitVal = ConstantInt::getTrue(BB->getContext()); 1831 else if (NumTrue != 0 || NumFalse != 0) 1832 SplitVal = ConstantInt::getFalse(BB->getContext()); 1833 1834 // Collect all of the blocks that this can be folded into so that we can 1835 // factor this once and clone it once. 1836 SmallVector<BasicBlock*, 8> BlocksToFoldInto; 1837 for (const auto &XorOpValue : XorOpValues) { 1838 if (XorOpValue.first != SplitVal && !isa<UndefValue>(XorOpValue.first)) 1839 continue; 1840 1841 BlocksToFoldInto.push_back(XorOpValue.second); 1842 } 1843 1844 // If we inferred a value for all of the predecessors, then duplication won't 1845 // help us. However, we can just replace the LHS or RHS with the constant. 1846 if (BlocksToFoldInto.size() == 1847 cast<PHINode>(BB->front()).getNumIncomingValues()) { 1848 if (!SplitVal) { 1849 // If all preds provide undef, just nuke the xor, because it is undef too. 1850 BO->replaceAllUsesWith(UndefValue::get(BO->getType())); 1851 BO->eraseFromParent(); 1852 } else if (SplitVal->isZero() && BO != BO->getOperand(isLHS)) { 1853 // If all preds provide 0, replace the xor with the other input. 1854 BO->replaceAllUsesWith(BO->getOperand(isLHS)); 1855 BO->eraseFromParent(); 1856 } else { 1857 // If all preds provide 1, set the computed value to 1. 1858 BO->setOperand(!isLHS, SplitVal); 1859 } 1860 1861 return true; 1862 } 1863 1864 // If any of predecessors end with an indirect goto, we can't change its 1865 // destination. 1866 if (any_of(BlocksToFoldInto, [](BasicBlock *Pred) { 1867 return isa<IndirectBrInst>(Pred->getTerminator()); 1868 })) 1869 return false; 1870 1871 // Try to duplicate BB into PredBB. 1872 return duplicateCondBranchOnPHIIntoPred(BB, BlocksToFoldInto); 1873 } 1874 1875 /// addPHINodeEntriesForMappedBlock - We're adding 'NewPred' as a new 1876 /// predecessor to the PHIBB block. If it has PHI nodes, add entries for 1877 /// NewPred using the entries from OldPred (suitably mapped). 1878 static void addPHINodeEntriesForMappedBlock(BasicBlock *PHIBB, 1879 BasicBlock *OldPred, 1880 BasicBlock *NewPred, 1881 ValueToValueMapTy &ValueMap) { 1882 for (PHINode &PN : PHIBB->phis()) { 1883 // Ok, we have a PHI node. Figure out what the incoming value was for the 1884 // DestBlock. 1885 Value *IV = PN.getIncomingValueForBlock(OldPred); 1886 1887 // Remap the value if necessary. 1888 if (Instruction *Inst = dyn_cast<Instruction>(IV)) { 1889 ValueToValueMapTy::iterator I = ValueMap.find(Inst); 1890 if (I != ValueMap.end()) 1891 IV = I->second; 1892 } 1893 1894 PN.addIncoming(IV, NewPred); 1895 } 1896 } 1897 1898 /// Merge basic block BB into its sole predecessor if possible. 1899 bool JumpThreadingPass::maybeMergeBasicBlockIntoOnlyPred(BasicBlock *BB) { 1900 BasicBlock *SinglePred = BB->getSinglePredecessor(); 1901 if (!SinglePred) 1902 return false; 1903 1904 const Instruction *TI = SinglePred->getTerminator(); 1905 if (TI->isSpecialTerminator() || TI->getNumSuccessors() != 1 || 1906 SinglePred == BB || hasAddressTakenAndUsed(BB)) 1907 return false; 1908 1909 // MergeBasicBlockIntoOnlyPred may delete SinglePred, we need to avoid 1910 // deleting a BB pointer from Unreachable. 1911 if (Unreachable.count(SinglePred)) 1912 return false; 1913 1914 // If SinglePred was a loop header, BB becomes one. 1915 if (LoopHeaders.erase(SinglePred)) 1916 LoopHeaders.insert(BB); 1917 1918 LVI->eraseBlock(SinglePred); 1919 MergeBasicBlockIntoOnlyPred(BB, DTU.get()); 1920 1921 // Now that BB is merged into SinglePred (i.e. SinglePred code followed by 1922 // BB code within one basic block `BB`), we need to invalidate the LVI 1923 // information associated with BB, because the LVI information need not be 1924 // true for all of BB after the merge. For example, 1925 // Before the merge, LVI info and code is as follows: 1926 // SinglePred: <LVI info1 for %p val> 1927 // %y = use of %p 1928 // call @exit() // need not transfer execution to successor. 1929 // assume(%p) // from this point on %p is true 1930 // br label %BB 1931 // BB: <LVI info2 for %p val, i.e. %p is true> 1932 // %x = use of %p 1933 // br label exit 1934 // 1935 // Note that this LVI info for blocks BB and SinglPred is correct for %p 1936 // (info2 and info1 respectively). After the merge and the deletion of the 1937 // LVI info1 for SinglePred. We have the following code: 1938 // BB: <LVI info2 for %p val> 1939 // %y = use of %p 1940 // call @exit() 1941 // assume(%p) 1942 // %x = use of %p <-- LVI info2 is correct from here onwards. 1943 // br label exit 1944 // LVI info2 for BB is incorrect at the beginning of BB. 1945 1946 // Invalidate LVI information for BB if the LVI is not provably true for 1947 // all of BB. 1948 if (!isGuaranteedToTransferExecutionToSuccessor(BB)) 1949 LVI->eraseBlock(BB); 1950 return true; 1951 } 1952 1953 /// Update the SSA form. NewBB contains instructions that are copied from BB. 1954 /// ValueMapping maps old values in BB to new ones in NewBB. 1955 void JumpThreadingPass::updateSSA(BasicBlock *BB, BasicBlock *NewBB, 1956 ValueToValueMapTy &ValueMapping) { 1957 // If there were values defined in BB that are used outside the block, then we 1958 // now have to update all uses of the value to use either the original value, 1959 // the cloned value, or some PHI derived value. This can require arbitrary 1960 // PHI insertion, of which we are prepared to do, clean these up now. 1961 SSAUpdater SSAUpdate; 1962 SmallVector<Use *, 16> UsesToRename; 1963 SmallVector<DbgValueInst *, 4> DbgValues; 1964 SmallVector<DbgVariableRecord *, 4> DbgVariableRecords; 1965 1966 for (Instruction &I : *BB) { 1967 // Scan all uses of this instruction to see if it is used outside of its 1968 // block, and if so, record them in UsesToRename. 1969 for (Use &U : I.uses()) { 1970 Instruction *User = cast<Instruction>(U.getUser()); 1971 if (PHINode *UserPN = dyn_cast<PHINode>(User)) { 1972 if (UserPN->getIncomingBlock(U) == BB) 1973 continue; 1974 } else if (User->getParent() == BB) 1975 continue; 1976 1977 UsesToRename.push_back(&U); 1978 } 1979 1980 // Find debug values outside of the block 1981 findDbgValues(DbgValues, &I, &DbgVariableRecords); 1982 llvm::erase_if(DbgValues, [&](const DbgValueInst *DbgVal) { 1983 return DbgVal->getParent() == BB; 1984 }); 1985 llvm::erase_if(DbgVariableRecords, [&](const DbgVariableRecord *DbgVarRec) { 1986 return DbgVarRec->getParent() == BB; 1987 }); 1988 1989 // If there are no uses outside the block, we're done with this instruction. 1990 if (UsesToRename.empty() && DbgValues.empty() && DbgVariableRecords.empty()) 1991 continue; 1992 LLVM_DEBUG(dbgs() << "JT: Renaming non-local uses of: " << I << "\n"); 1993 1994 // We found a use of I outside of BB. Rename all uses of I that are outside 1995 // its block to be uses of the appropriate PHI node etc. See ValuesInBlocks 1996 // with the two values we know. 1997 SSAUpdate.Initialize(I.getType(), I.getName()); 1998 SSAUpdate.AddAvailableValue(BB, &I); 1999 SSAUpdate.AddAvailableValue(NewBB, ValueMapping[&I]); 2000 2001 while (!UsesToRename.empty()) 2002 SSAUpdate.RewriteUse(*UsesToRename.pop_back_val()); 2003 if (!DbgValues.empty() || !DbgVariableRecords.empty()) { 2004 SSAUpdate.UpdateDebugValues(&I, DbgValues); 2005 SSAUpdate.UpdateDebugValues(&I, DbgVariableRecords); 2006 DbgValues.clear(); 2007 DbgVariableRecords.clear(); 2008 } 2009 2010 LLVM_DEBUG(dbgs() << "\n"); 2011 } 2012 } 2013 2014 static void remapSourceAtoms(ValueToValueMapTy &VM, BasicBlock::iterator Begin, 2015 BasicBlock::iterator End) { 2016 if (VM.AtomMap.empty()) 2017 return; 2018 for (auto It = Begin; It != End; ++It) 2019 RemapSourceAtom(&*It, VM); 2020 } 2021 2022 /// Clone instructions in range [BI, BE) to NewBB. For PHI nodes, we only clone 2023 /// arguments that come from PredBB. Return the map from the variables in the 2024 /// source basic block to the variables in the newly created basic block. 2025 2026 void JumpThreadingPass::cloneInstructions(ValueToValueMapTy &ValueMapping, 2027 BasicBlock::iterator BI, 2028 BasicBlock::iterator BE, 2029 BasicBlock *NewBB, 2030 BasicBlock *PredBB) { 2031 // We are going to have to map operands from the source basic block to the new 2032 // copy of the block 'NewBB'. If there are PHI nodes in the source basic 2033 // block, evaluate them to account for entry from PredBB. 2034 2035 // Retargets llvm.dbg.value to any renamed variables. 2036 auto RetargetDbgValueIfPossible = [&](Instruction *NewInst) -> bool { 2037 auto DbgInstruction = dyn_cast<DbgValueInst>(NewInst); 2038 if (!DbgInstruction) 2039 return false; 2040 2041 SmallSet<std::pair<Value *, Value *>, 16> OperandsToRemap; 2042 for (auto DbgOperand : DbgInstruction->location_ops()) { 2043 auto DbgOperandInstruction = dyn_cast<Instruction>(DbgOperand); 2044 if (!DbgOperandInstruction) 2045 continue; 2046 2047 auto I = ValueMapping.find(DbgOperandInstruction); 2048 if (I != ValueMapping.end()) { 2049 OperandsToRemap.insert( 2050 std::pair<Value *, Value *>(DbgOperand, I->second)); 2051 } 2052 } 2053 2054 for (auto &[OldOp, MappedOp] : OperandsToRemap) 2055 DbgInstruction->replaceVariableLocationOp(OldOp, MappedOp); 2056 return true; 2057 }; 2058 2059 // Duplicate implementation of the above dbg.value code, using 2060 // DbgVariableRecords instead. 2061 auto RetargetDbgVariableRecordIfPossible = [&](DbgVariableRecord *DVR) { 2062 SmallSet<std::pair<Value *, Value *>, 16> OperandsToRemap; 2063 for (auto *Op : DVR->location_ops()) { 2064 Instruction *OpInst = dyn_cast<Instruction>(Op); 2065 if (!OpInst) 2066 continue; 2067 2068 auto I = ValueMapping.find(OpInst); 2069 if (I != ValueMapping.end()) 2070 OperandsToRemap.insert({OpInst, I->second}); 2071 } 2072 2073 for (auto &[OldOp, MappedOp] : OperandsToRemap) 2074 DVR->replaceVariableLocationOp(OldOp, MappedOp); 2075 }; 2076 2077 BasicBlock *RangeBB = BI->getParent(); 2078 2079 // Clone the phi nodes of the source basic block into NewBB. The resulting 2080 // phi nodes are trivial since NewBB only has one predecessor, but SSAUpdater 2081 // might need to rewrite the operand of the cloned phi. 2082 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) { 2083 PHINode *NewPN = PHINode::Create(PN->getType(), 1, PN->getName(), NewBB); 2084 NewPN->addIncoming(PN->getIncomingValueForBlock(PredBB), PredBB); 2085 ValueMapping[PN] = NewPN; 2086 if (const DebugLoc &DL = PN->getDebugLoc()) 2087 mapAtomInstance(DL, ValueMapping); 2088 } 2089 2090 // Clone noalias scope declarations in the threaded block. When threading a 2091 // loop exit, we would otherwise end up with two idential scope declarations 2092 // visible at the same time. 2093 SmallVector<MDNode *> NoAliasScopes; 2094 DenseMap<MDNode *, MDNode *> ClonedScopes; 2095 LLVMContext &Context = PredBB->getContext(); 2096 identifyNoAliasScopesToClone(BI, BE, NoAliasScopes); 2097 cloneNoAliasScopes(NoAliasScopes, ClonedScopes, "thread", Context); 2098 2099 auto CloneAndRemapDbgInfo = [&](Instruction *NewInst, Instruction *From) { 2100 auto DVRRange = NewInst->cloneDebugInfoFrom(From); 2101 for (DbgVariableRecord &DVR : filterDbgVars(DVRRange)) 2102 RetargetDbgVariableRecordIfPossible(&DVR); 2103 }; 2104 2105 // Clone the non-phi instructions of the source basic block into NewBB, 2106 // keeping track of the mapping and using it to remap operands in the cloned 2107 // instructions. 2108 for (; BI != BE; ++BI) { 2109 Instruction *New = BI->clone(); 2110 New->setName(BI->getName()); 2111 New->insertInto(NewBB, NewBB->end()); 2112 ValueMapping[&*BI] = New; 2113 adaptNoAliasScopes(New, ClonedScopes, Context); 2114 2115 CloneAndRemapDbgInfo(New, &*BI); 2116 if (const DebugLoc &DL = New->getDebugLoc()) 2117 mapAtomInstance(DL, ValueMapping); 2118 2119 if (RetargetDbgValueIfPossible(New)) 2120 continue; 2121 2122 // Remap operands to patch up intra-block references. 2123 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2124 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 2125 ValueToValueMapTy::iterator I = ValueMapping.find(Inst); 2126 if (I != ValueMapping.end()) 2127 New->setOperand(i, I->second); 2128 } 2129 } 2130 2131 // There may be DbgVariableRecords on the terminator, clone directly from 2132 // marker to marker as there isn't an instruction there. 2133 if (BE != RangeBB->end() && BE->hasDbgRecords()) { 2134 // Dump them at the end. 2135 DbgMarker *Marker = RangeBB->getMarker(BE); 2136 DbgMarker *EndMarker = NewBB->createMarker(NewBB->end()); 2137 auto DVRRange = EndMarker->cloneDebugInfoFrom(Marker, std::nullopt); 2138 for (DbgVariableRecord &DVR : filterDbgVars(DVRRange)) 2139 RetargetDbgVariableRecordIfPossible(&DVR); 2140 } 2141 } 2142 2143 /// Attempt to thread through two successive basic blocks. 2144 bool JumpThreadingPass::maybethreadThroughTwoBasicBlocks(BasicBlock *BB, 2145 Value *Cond) { 2146 // Consider: 2147 // 2148 // PredBB: 2149 // %var = phi i32* [ null, %bb1 ], [ @a, %bb2 ] 2150 // %tobool = icmp eq i32 %cond, 0 2151 // br i1 %tobool, label %BB, label ... 2152 // 2153 // BB: 2154 // %cmp = icmp eq i32* %var, null 2155 // br i1 %cmp, label ..., label ... 2156 // 2157 // We don't know the value of %var at BB even if we know which incoming edge 2158 // we take to BB. However, once we duplicate PredBB for each of its incoming 2159 // edges (say, PredBB1 and PredBB2), we know the value of %var in each copy of 2160 // PredBB. Then we can thread edges PredBB1->BB and PredBB2->BB through BB. 2161 2162 // Require that BB end with a Branch for simplicity. 2163 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 2164 if (!CondBr) 2165 return false; 2166 2167 // BB must have exactly one predecessor. 2168 BasicBlock *PredBB = BB->getSinglePredecessor(); 2169 if (!PredBB) 2170 return false; 2171 2172 // Require that PredBB end with a conditional Branch. If PredBB ends with an 2173 // unconditional branch, we should be merging PredBB and BB instead. For 2174 // simplicity, we don't deal with a switch. 2175 BranchInst *PredBBBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); 2176 if (!PredBBBranch || PredBBBranch->isUnconditional()) 2177 return false; 2178 2179 // If PredBB has exactly one incoming edge, we don't gain anything by copying 2180 // PredBB. 2181 if (PredBB->getSinglePredecessor()) 2182 return false; 2183 2184 // Don't thread through PredBB if it contains a successor edge to itself, in 2185 // which case we would infinite loop. Suppose we are threading an edge from 2186 // PredPredBB through PredBB and BB to SuccBB with PredBB containing a 2187 // successor edge to itself. If we allowed jump threading in this case, we 2188 // could duplicate PredBB and BB as, say, PredBB.thread and BB.thread. Since 2189 // PredBB.thread has a successor edge to PredBB, we would immediately come up 2190 // with another jump threading opportunity from PredBB.thread through PredBB 2191 // and BB to SuccBB. This jump threading would repeatedly occur. That is, we 2192 // would keep peeling one iteration from PredBB. 2193 if (llvm::is_contained(successors(PredBB), PredBB)) 2194 return false; 2195 2196 // Don't thread across a loop header. 2197 if (LoopHeaders.count(PredBB)) 2198 return false; 2199 2200 // Avoid complication with duplicating EH pads. 2201 if (PredBB->isEHPad()) 2202 return false; 2203 2204 // Find a predecessor that we can thread. For simplicity, we only consider a 2205 // successor edge out of BB to which we thread exactly one incoming edge into 2206 // PredBB. 2207 unsigned ZeroCount = 0; 2208 unsigned OneCount = 0; 2209 BasicBlock *ZeroPred = nullptr; 2210 BasicBlock *OnePred = nullptr; 2211 const DataLayout &DL = BB->getDataLayout(); 2212 for (BasicBlock *P : predecessors(PredBB)) { 2213 // If PredPred ends with IndirectBrInst, we can't handle it. 2214 if (isa<IndirectBrInst>(P->getTerminator())) 2215 continue; 2216 if (ConstantInt *CI = dyn_cast_or_null<ConstantInt>( 2217 evaluateOnPredecessorEdge(BB, P, Cond, DL))) { 2218 if (CI->isZero()) { 2219 ZeroCount++; 2220 ZeroPred = P; 2221 } else if (CI->isOne()) { 2222 OneCount++; 2223 OnePred = P; 2224 } 2225 } 2226 } 2227 2228 // Disregard complicated cases where we have to thread multiple edges. 2229 BasicBlock *PredPredBB; 2230 if (ZeroCount == 1) { 2231 PredPredBB = ZeroPred; 2232 } else if (OneCount == 1) { 2233 PredPredBB = OnePred; 2234 } else { 2235 return false; 2236 } 2237 2238 BasicBlock *SuccBB = CondBr->getSuccessor(PredPredBB == ZeroPred); 2239 2240 // If threading to the same block as we come from, we would infinite loop. 2241 if (SuccBB == BB) { 2242 LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName() 2243 << "' - would thread to self!\n"); 2244 return false; 2245 } 2246 2247 // If threading this would thread across a loop header, don't thread the edge. 2248 // See the comments above findLoopHeaders for justifications and caveats. 2249 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) { 2250 LLVM_DEBUG({ 2251 bool BBIsHeader = LoopHeaders.count(BB); 2252 bool SuccIsHeader = LoopHeaders.count(SuccBB); 2253 dbgs() << " Not threading across " 2254 << (BBIsHeader ? "loop header BB '" : "block BB '") 2255 << BB->getName() << "' to dest " 2256 << (SuccIsHeader ? "loop header BB '" : "block BB '") 2257 << SuccBB->getName() 2258 << "' - it might create an irreducible loop!\n"; 2259 }); 2260 return false; 2261 } 2262 2263 // Compute the cost of duplicating BB and PredBB. 2264 unsigned BBCost = getJumpThreadDuplicationCost( 2265 TTI, BB, BB->getTerminator(), BBDupThreshold); 2266 unsigned PredBBCost = getJumpThreadDuplicationCost( 2267 TTI, PredBB, PredBB->getTerminator(), BBDupThreshold); 2268 2269 // Give up if costs are too high. We need to check BBCost and PredBBCost 2270 // individually before checking their sum because getJumpThreadDuplicationCost 2271 // return (unsigned)~0 for those basic blocks that cannot be duplicated. 2272 if (BBCost > BBDupThreshold || PredBBCost > BBDupThreshold || 2273 BBCost + PredBBCost > BBDupThreshold) { 2274 LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName() 2275 << "' - Cost is too high: " << PredBBCost 2276 << " for PredBB, " << BBCost << "for BB\n"); 2277 return false; 2278 } 2279 2280 // Now we are ready to duplicate PredBB. 2281 threadThroughTwoBasicBlocks(PredPredBB, PredBB, BB, SuccBB); 2282 return true; 2283 } 2284 2285 void JumpThreadingPass::threadThroughTwoBasicBlocks(BasicBlock *PredPredBB, 2286 BasicBlock *PredBB, 2287 BasicBlock *BB, 2288 BasicBlock *SuccBB) { 2289 LLVM_DEBUG(dbgs() << " Threading through '" << PredBB->getName() << "' and '" 2290 << BB->getName() << "'\n"); 2291 2292 // Build BPI/BFI before any changes are made to IR. 2293 bool HasProfile = doesBlockHaveProfileData(BB); 2294 auto *BFI = getOrCreateBFI(HasProfile); 2295 auto *BPI = getOrCreateBPI(BFI != nullptr); 2296 2297 BranchInst *CondBr = cast<BranchInst>(BB->getTerminator()); 2298 BranchInst *PredBBBranch = cast<BranchInst>(PredBB->getTerminator()); 2299 2300 BasicBlock *NewBB = 2301 BasicBlock::Create(PredBB->getContext(), PredBB->getName() + ".thread", 2302 PredBB->getParent(), PredBB); 2303 NewBB->moveAfter(PredBB); 2304 2305 // Set the block frequency of NewBB. 2306 if (BFI) { 2307 assert(BPI && "It's expected BPI to exist along with BFI"); 2308 auto NewBBFreq = BFI->getBlockFreq(PredPredBB) * 2309 BPI->getEdgeProbability(PredPredBB, PredBB); 2310 BFI->setBlockFreq(NewBB, NewBBFreq); 2311 } 2312 2313 // We are going to have to map operands from the original BB block to the new 2314 // copy of the block 'NewBB'. If there are PHI nodes in PredBB, evaluate them 2315 // to account for entry from PredPredBB. 2316 ValueToValueMapTy ValueMapping; 2317 cloneInstructions(ValueMapping, PredBB->begin(), PredBB->end(), NewBB, 2318 PredPredBB); 2319 2320 // Copy the edge probabilities from PredBB to NewBB. 2321 if (BPI) 2322 BPI->copyEdgeProbabilities(PredBB, NewBB); 2323 2324 // Update the terminator of PredPredBB to jump to NewBB instead of PredBB. 2325 // This eliminates predecessors from PredPredBB, which requires us to simplify 2326 // any PHI nodes in PredBB. 2327 Instruction *PredPredTerm = PredPredBB->getTerminator(); 2328 for (unsigned i = 0, e = PredPredTerm->getNumSuccessors(); i != e; ++i) 2329 if (PredPredTerm->getSuccessor(i) == PredBB) { 2330 PredBB->removePredecessor(PredPredBB, true); 2331 PredPredTerm->setSuccessor(i, NewBB); 2332 } 2333 2334 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(0), PredBB, NewBB, 2335 ValueMapping); 2336 addPHINodeEntriesForMappedBlock(PredBBBranch->getSuccessor(1), PredBB, NewBB, 2337 ValueMapping); 2338 2339 DTU->applyUpdatesPermissive( 2340 {{DominatorTree::Insert, NewBB, CondBr->getSuccessor(0)}, 2341 {DominatorTree::Insert, NewBB, CondBr->getSuccessor(1)}, 2342 {DominatorTree::Insert, PredPredBB, NewBB}, 2343 {DominatorTree::Delete, PredPredBB, PredBB}}); 2344 2345 // Remap source location atoms beacuse we're duplicating control flow. 2346 remapSourceAtoms(ValueMapping, NewBB->begin(), NewBB->end()); 2347 2348 updateSSA(PredBB, NewBB, ValueMapping); 2349 2350 // Clean up things like PHI nodes with single operands, dead instructions, 2351 // etc. 2352 SimplifyInstructionsInBlock(NewBB, TLI); 2353 SimplifyInstructionsInBlock(PredBB, TLI); 2354 2355 SmallVector<BasicBlock *, 1> PredsToFactor; 2356 PredsToFactor.push_back(NewBB); 2357 threadEdge(BB, PredsToFactor, SuccBB); 2358 } 2359 2360 /// tryThreadEdge - Thread an edge if it's safe and profitable to do so. 2361 bool JumpThreadingPass::tryThreadEdge( 2362 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs, 2363 BasicBlock *SuccBB) { 2364 // If threading to the same block as we come from, we would infinite loop. 2365 if (SuccBB == BB) { 2366 LLVM_DEBUG(dbgs() << " Not threading across BB '" << BB->getName() 2367 << "' - would thread to self!\n"); 2368 return false; 2369 } 2370 2371 // If threading this would thread across a loop header, don't thread the edge. 2372 // See the comments above findLoopHeaders for justifications and caveats. 2373 if (LoopHeaders.count(BB) || LoopHeaders.count(SuccBB)) { 2374 LLVM_DEBUG({ 2375 bool BBIsHeader = LoopHeaders.count(BB); 2376 bool SuccIsHeader = LoopHeaders.count(SuccBB); 2377 dbgs() << " Not threading across " 2378 << (BBIsHeader ? "loop header BB '" : "block BB '") << BB->getName() 2379 << "' to dest " << (SuccIsHeader ? "loop header BB '" : "block BB '") 2380 << SuccBB->getName() << "' - it might create an irreducible loop!\n"; 2381 }); 2382 return false; 2383 } 2384 2385 unsigned JumpThreadCost = getJumpThreadDuplicationCost( 2386 TTI, BB, BB->getTerminator(), BBDupThreshold); 2387 if (JumpThreadCost > BBDupThreshold) { 2388 LLVM_DEBUG(dbgs() << " Not threading BB '" << BB->getName() 2389 << "' - Cost is too high: " << JumpThreadCost << "\n"); 2390 return false; 2391 } 2392 2393 threadEdge(BB, PredBBs, SuccBB); 2394 return true; 2395 } 2396 2397 /// threadEdge - We have decided that it is safe and profitable to factor the 2398 /// blocks in PredBBs to one predecessor, then thread an edge from it to SuccBB 2399 /// across BB. Transform the IR to reflect this change. 2400 void JumpThreadingPass::threadEdge(BasicBlock *BB, 2401 const SmallVectorImpl<BasicBlock *> &PredBBs, 2402 BasicBlock *SuccBB) { 2403 assert(SuccBB != BB && "Don't create an infinite loop"); 2404 2405 assert(!LoopHeaders.count(BB) && !LoopHeaders.count(SuccBB) && 2406 "Don't thread across loop headers"); 2407 2408 // Build BPI/BFI before any changes are made to IR. 2409 bool HasProfile = doesBlockHaveProfileData(BB); 2410 auto *BFI = getOrCreateBFI(HasProfile); 2411 auto *BPI = getOrCreateBPI(BFI != nullptr); 2412 2413 // And finally, do it! Start by factoring the predecessors if needed. 2414 BasicBlock *PredBB; 2415 if (PredBBs.size() == 1) 2416 PredBB = PredBBs[0]; 2417 else { 2418 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() 2419 << " common predecessors.\n"); 2420 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm"); 2421 } 2422 2423 // And finally, do it! 2424 LLVM_DEBUG(dbgs() << " Threading edge from '" << PredBB->getName() 2425 << "' to '" << SuccBB->getName() 2426 << ", across block:\n " << *BB << "\n"); 2427 2428 LVI->threadEdge(PredBB, BB, SuccBB); 2429 2430 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), 2431 BB->getName()+".thread", 2432 BB->getParent(), BB); 2433 NewBB->moveAfter(PredBB); 2434 2435 // Set the block frequency of NewBB. 2436 if (BFI) { 2437 assert(BPI && "It's expected BPI to exist along with BFI"); 2438 auto NewBBFreq = 2439 BFI->getBlockFreq(PredBB) * BPI->getEdgeProbability(PredBB, BB); 2440 BFI->setBlockFreq(NewBB, NewBBFreq); 2441 } 2442 2443 // Copy all the instructions from BB to NewBB except the terminator. 2444 ValueToValueMapTy ValueMapping; 2445 cloneInstructions(ValueMapping, BB->begin(), std::prev(BB->end()), NewBB, 2446 PredBB); 2447 2448 // We didn't copy the terminator from BB over to NewBB, because there is now 2449 // an unconditional jump to SuccBB. Insert the unconditional jump. 2450 BranchInst *NewBI = BranchInst::Create(SuccBB, NewBB); 2451 NewBI->setDebugLoc(BB->getTerminator()->getDebugLoc()); 2452 2453 // Check to see if SuccBB has PHI nodes. If so, we need to add entries to the 2454 // PHI nodes for NewBB now. 2455 addPHINodeEntriesForMappedBlock(SuccBB, BB, NewBB, ValueMapping); 2456 2457 // Update the terminator of PredBB to jump to NewBB instead of BB. This 2458 // eliminates predecessors from BB, which requires us to simplify any PHI 2459 // nodes in BB. 2460 Instruction *PredTerm = PredBB->getTerminator(); 2461 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) 2462 if (PredTerm->getSuccessor(i) == BB) { 2463 BB->removePredecessor(PredBB, true); 2464 PredTerm->setSuccessor(i, NewBB); 2465 } 2466 2467 // Enqueue required DT updates. 2468 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, SuccBB}, 2469 {DominatorTree::Insert, PredBB, NewBB}, 2470 {DominatorTree::Delete, PredBB, BB}}); 2471 2472 remapSourceAtoms(ValueMapping, NewBB->begin(), NewBB->end()); 2473 updateSSA(BB, NewBB, ValueMapping); 2474 2475 // At this point, the IR is fully up to date and consistent. Do a quick scan 2476 // over the new instructions and zap any that are constants or dead. This 2477 // frequently happens because of phi translation. 2478 SimplifyInstructionsInBlock(NewBB, TLI); 2479 2480 // Update the edge weight from BB to SuccBB, which should be less than before. 2481 updateBlockFreqAndEdgeWeight(PredBB, BB, NewBB, SuccBB, BFI, BPI, HasProfile); 2482 2483 // Threaded an edge! 2484 ++NumThreads; 2485 } 2486 2487 /// Create a new basic block that will be the predecessor of BB and successor of 2488 /// all blocks in Preds. When profile data is available, update the frequency of 2489 /// this new block. 2490 BasicBlock *JumpThreadingPass::splitBlockPreds(BasicBlock *BB, 2491 ArrayRef<BasicBlock *> Preds, 2492 const char *Suffix) { 2493 SmallVector<BasicBlock *, 2> NewBBs; 2494 2495 // Collect the frequencies of all predecessors of BB, which will be used to 2496 // update the edge weight of the result of splitting predecessors. 2497 DenseMap<BasicBlock *, BlockFrequency> FreqMap; 2498 auto *BFI = getBFI(); 2499 if (BFI) { 2500 auto *BPI = getOrCreateBPI(true); 2501 for (auto *Pred : Preds) 2502 FreqMap.insert(std::make_pair( 2503 Pred, BFI->getBlockFreq(Pred) * BPI->getEdgeProbability(Pred, BB))); 2504 } 2505 2506 // In the case when BB is a LandingPad block we create 2 new predecessors 2507 // instead of just one. 2508 if (BB->isLandingPad()) { 2509 std::string NewName = std::string(Suffix) + ".split-lp"; 2510 SplitLandingPadPredecessors(BB, Preds, Suffix, NewName.c_str(), NewBBs); 2511 } else { 2512 NewBBs.push_back(SplitBlockPredecessors(BB, Preds, Suffix)); 2513 } 2514 2515 std::vector<DominatorTree::UpdateType> Updates; 2516 Updates.reserve((2 * Preds.size()) + NewBBs.size()); 2517 for (auto *NewBB : NewBBs) { 2518 BlockFrequency NewBBFreq(0); 2519 Updates.push_back({DominatorTree::Insert, NewBB, BB}); 2520 for (auto *Pred : predecessors(NewBB)) { 2521 Updates.push_back({DominatorTree::Delete, Pred, BB}); 2522 Updates.push_back({DominatorTree::Insert, Pred, NewBB}); 2523 if (BFI) // Update frequencies between Pred -> NewBB. 2524 NewBBFreq += FreqMap.lookup(Pred); 2525 } 2526 if (BFI) // Apply the summed frequency to NewBB. 2527 BFI->setBlockFreq(NewBB, NewBBFreq); 2528 } 2529 2530 DTU->applyUpdatesPermissive(Updates); 2531 return NewBBs[0]; 2532 } 2533 2534 bool JumpThreadingPass::doesBlockHaveProfileData(BasicBlock *BB) { 2535 const Instruction *TI = BB->getTerminator(); 2536 if (!TI || TI->getNumSuccessors() < 2) 2537 return false; 2538 2539 return hasValidBranchWeightMD(*TI); 2540 } 2541 2542 /// Update the block frequency of BB and branch weight and the metadata on the 2543 /// edge BB->SuccBB. This is done by scaling the weight of BB->SuccBB by 1 - 2544 /// Freq(PredBB->BB) / Freq(BB->SuccBB). 2545 void JumpThreadingPass::updateBlockFreqAndEdgeWeight(BasicBlock *PredBB, 2546 BasicBlock *BB, 2547 BasicBlock *NewBB, 2548 BasicBlock *SuccBB, 2549 BlockFrequencyInfo *BFI, 2550 BranchProbabilityInfo *BPI, 2551 bool HasProfile) { 2552 assert(((BFI && BPI) || (!BFI && !BFI)) && 2553 "Both BFI & BPI should either be set or unset"); 2554 2555 if (!BFI) { 2556 assert(!HasProfile && 2557 "It's expected to have BFI/BPI when profile info exists"); 2558 return; 2559 } 2560 2561 // As the edge from PredBB to BB is deleted, we have to update the block 2562 // frequency of BB. 2563 auto BBOrigFreq = BFI->getBlockFreq(BB); 2564 auto NewBBFreq = BFI->getBlockFreq(NewBB); 2565 auto BBNewFreq = BBOrigFreq - NewBBFreq; 2566 BFI->setBlockFreq(BB, BBNewFreq); 2567 2568 // Collect updated outgoing edges' frequencies from BB and use them to update 2569 // edge probabilities. 2570 SmallVector<uint64_t, 4> BBSuccFreq; 2571 for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) { 2572 auto BB2SuccBBFreq = 2573 BBOrigFreq * BPI->getEdgeProbability(BB, I.getSuccessorIndex()); 2574 auto SuccFreq = (*I == SuccBB) ? BB2SuccBBFreq - NewBBFreq : BB2SuccBBFreq; 2575 BBSuccFreq.push_back(SuccFreq.getFrequency()); 2576 } 2577 2578 uint64_t MaxBBSuccFreq = *llvm::max_element(BBSuccFreq); 2579 2580 SmallVector<BranchProbability, 4> BBSuccProbs; 2581 if (MaxBBSuccFreq == 0) 2582 BBSuccProbs.assign(BBSuccFreq.size(), 2583 {1, static_cast<unsigned>(BBSuccFreq.size())}); 2584 else { 2585 for (uint64_t Freq : BBSuccFreq) 2586 BBSuccProbs.push_back( 2587 BranchProbability::getBranchProbability(Freq, MaxBBSuccFreq)); 2588 // Normalize edge probabilities so that they sum up to one. 2589 BranchProbability::normalizeProbabilities(BBSuccProbs.begin(), 2590 BBSuccProbs.end()); 2591 } 2592 2593 // Update edge probabilities in BPI. 2594 BPI->setEdgeProbability(BB, BBSuccProbs); 2595 2596 // Update the profile metadata as well. 2597 // 2598 // Don't do this if the profile of the transformed blocks was statically 2599 // estimated. (This could occur despite the function having an entry 2600 // frequency in completely cold parts of the CFG.) 2601 // 2602 // In this case we don't want to suggest to subsequent passes that the 2603 // calculated weights are fully consistent. Consider this graph: 2604 // 2605 // check_1 2606 // 50% / | 2607 // eq_1 | 50% 2608 // \ | 2609 // check_2 2610 // 50% / | 2611 // eq_2 | 50% 2612 // \ | 2613 // check_3 2614 // 50% / | 2615 // eq_3 | 50% 2616 // \ | 2617 // 2618 // Assuming the blocks check_* all compare the same value against 1, 2 and 3, 2619 // the overall probabilities are inconsistent; the total probability that the 2620 // value is either 1, 2 or 3 is 150%. 2621 // 2622 // As a consequence if we thread eq_1 -> check_2 to check_3, check_2->check_3 2623 // becomes 0%. This is even worse if the edge whose probability becomes 0% is 2624 // the loop exit edge. Then based solely on static estimation we would assume 2625 // the loop was extremely hot. 2626 // 2627 // FIXME this locally as well so that BPI and BFI are consistent as well. We 2628 // shouldn't make edges extremely likely or unlikely based solely on static 2629 // estimation. 2630 if (BBSuccProbs.size() >= 2 && HasProfile) { 2631 SmallVector<uint32_t, 4> Weights; 2632 for (auto Prob : BBSuccProbs) 2633 Weights.push_back(Prob.getNumerator()); 2634 2635 auto TI = BB->getTerminator(); 2636 setBranchWeights(*TI, Weights, hasBranchWeightOrigin(*TI)); 2637 } 2638 } 2639 2640 /// duplicateCondBranchOnPHIIntoPred - PredBB contains an unconditional branch 2641 /// to BB which contains an i1 PHI node and a conditional branch on that PHI. 2642 /// If we can duplicate the contents of BB up into PredBB do so now, this 2643 /// improves the odds that the branch will be on an analyzable instruction like 2644 /// a compare. 2645 bool JumpThreadingPass::duplicateCondBranchOnPHIIntoPred( 2646 BasicBlock *BB, const SmallVectorImpl<BasicBlock *> &PredBBs) { 2647 assert(!PredBBs.empty() && "Can't handle an empty set"); 2648 2649 // If BB is a loop header, then duplicating this block outside the loop would 2650 // cause us to transform this into an irreducible loop, don't do this. 2651 // See the comments above findLoopHeaders for justifications and caveats. 2652 if (LoopHeaders.count(BB)) { 2653 LLVM_DEBUG(dbgs() << " Not duplicating loop header '" << BB->getName() 2654 << "' into predecessor block '" << PredBBs[0]->getName() 2655 << "' - it might create an irreducible loop!\n"); 2656 return false; 2657 } 2658 2659 unsigned DuplicationCost = getJumpThreadDuplicationCost( 2660 TTI, BB, BB->getTerminator(), BBDupThreshold); 2661 if (DuplicationCost > BBDupThreshold) { 2662 LLVM_DEBUG(dbgs() << " Not duplicating BB '" << BB->getName() 2663 << "' - Cost is too high: " << DuplicationCost << "\n"); 2664 return false; 2665 } 2666 2667 // And finally, do it! Start by factoring the predecessors if needed. 2668 std::vector<DominatorTree::UpdateType> Updates; 2669 BasicBlock *PredBB; 2670 if (PredBBs.size() == 1) 2671 PredBB = PredBBs[0]; 2672 else { 2673 LLVM_DEBUG(dbgs() << " Factoring out " << PredBBs.size() 2674 << " common predecessors.\n"); 2675 PredBB = splitBlockPreds(BB, PredBBs, ".thr_comm"); 2676 } 2677 Updates.push_back({DominatorTree::Delete, PredBB, BB}); 2678 2679 // Okay, we decided to do this! Clone all the instructions in BB onto the end 2680 // of PredBB. 2681 LLVM_DEBUG(dbgs() << " Duplicating block '" << BB->getName() 2682 << "' into end of '" << PredBB->getName() 2683 << "' to eliminate branch on phi. Cost: " 2684 << DuplicationCost << " block is:" << *BB << "\n"); 2685 2686 // Unless PredBB ends with an unconditional branch, split the edge so that we 2687 // can just clone the bits from BB into the end of the new PredBB. 2688 BranchInst *OldPredBranch = dyn_cast<BranchInst>(PredBB->getTerminator()); 2689 2690 if (!OldPredBranch || !OldPredBranch->isUnconditional()) { 2691 BasicBlock *OldPredBB = PredBB; 2692 PredBB = SplitEdge(OldPredBB, BB); 2693 Updates.push_back({DominatorTree::Insert, OldPredBB, PredBB}); 2694 Updates.push_back({DominatorTree::Insert, PredBB, BB}); 2695 Updates.push_back({DominatorTree::Delete, OldPredBB, BB}); 2696 OldPredBranch = cast<BranchInst>(PredBB->getTerminator()); 2697 } 2698 2699 // We are going to have to map operands from the original BB block into the 2700 // PredBB block. Evaluate PHI nodes in BB. 2701 ValueToValueMapTy ValueMapping; 2702 2703 // Remember the position before the inserted instructions. 2704 auto RItBeforeInsertPt = std::next(OldPredBranch->getReverseIterator()); 2705 2706 BasicBlock::iterator BI = BB->begin(); 2707 for (; PHINode *PN = dyn_cast<PHINode>(BI); ++BI) 2708 ValueMapping[PN] = PN->getIncomingValueForBlock(PredBB); 2709 // Clone the non-phi instructions of BB into PredBB, keeping track of the 2710 // mapping and using it to remap operands in the cloned instructions. 2711 for (; BI != BB->end(); ++BI) { 2712 Instruction *New = BI->clone(); 2713 New->insertInto(PredBB, OldPredBranch->getIterator()); 2714 2715 // Remap operands to patch up intra-block references. 2716 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2717 if (Instruction *Inst = dyn_cast<Instruction>(New->getOperand(i))) { 2718 ValueToValueMapTy::iterator I = ValueMapping.find(Inst); 2719 if (I != ValueMapping.end()) 2720 New->setOperand(i, I->second); 2721 } 2722 2723 // Remap debug variable operands. 2724 remapDebugVariable(ValueMapping, New); 2725 if (const DebugLoc &DL = New->getDebugLoc()) 2726 mapAtomInstance(DL, ValueMapping); 2727 2728 // If this instruction can be simplified after the operands are updated, 2729 // just use the simplified value instead. This frequently happens due to 2730 // phi translation. 2731 if (Value *IV = simplifyInstruction( 2732 New, 2733 {BB->getDataLayout(), TLI, nullptr, nullptr, New})) { 2734 ValueMapping[&*BI] = IV; 2735 if (!New->mayHaveSideEffects()) { 2736 New->eraseFromParent(); 2737 New = nullptr; 2738 // Clone debug-info on the elided instruction to the destination 2739 // position. 2740 OldPredBranch->cloneDebugInfoFrom(&*BI, std::nullopt, true); 2741 } 2742 } else { 2743 ValueMapping[&*BI] = New; 2744 } 2745 if (New) { 2746 // Otherwise, insert the new instruction into the block. 2747 New->setName(BI->getName()); 2748 // Clone across any debug-info attached to the old instruction. 2749 New->cloneDebugInfoFrom(&*BI); 2750 // Update Dominance from simplified New instruction operands. 2751 for (unsigned i = 0, e = New->getNumOperands(); i != e; ++i) 2752 if (BasicBlock *SuccBB = dyn_cast<BasicBlock>(New->getOperand(i))) 2753 Updates.push_back({DominatorTree::Insert, PredBB, SuccBB}); 2754 } 2755 } 2756 2757 // Check to see if the targets of the branch had PHI nodes. If so, we need to 2758 // add entries to the PHI nodes for branch from PredBB now. 2759 BranchInst *BBBranch = cast<BranchInst>(BB->getTerminator()); 2760 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(0), BB, PredBB, 2761 ValueMapping); 2762 addPHINodeEntriesForMappedBlock(BBBranch->getSuccessor(1), BB, PredBB, 2763 ValueMapping); 2764 2765 // KeyInstructions: Remap the cloned instructions' atoms only. 2766 remapSourceAtoms(ValueMapping, std::prev(RItBeforeInsertPt)->getIterator(), 2767 OldPredBranch->getIterator()); 2768 2769 updateSSA(BB, PredBB, ValueMapping); 2770 2771 // PredBB no longer jumps to BB, remove entries in the PHI node for the edge 2772 // that we nuked. 2773 BB->removePredecessor(PredBB, true); 2774 2775 // Remove the unconditional branch at the end of the PredBB block. 2776 OldPredBranch->eraseFromParent(); 2777 if (auto *BPI = getBPI()) 2778 BPI->copyEdgeProbabilities(BB, PredBB); 2779 DTU->applyUpdatesPermissive(Updates); 2780 2781 ++NumDupes; 2782 return true; 2783 } 2784 2785 // Pred is a predecessor of BB with an unconditional branch to BB. SI is 2786 // a Select instruction in Pred. BB has other predecessors and SI is used in 2787 // a PHI node in BB. SI has no other use. 2788 // A new basic block, NewBB, is created and SI is converted to compare and 2789 // conditional branch. SI is erased from parent. 2790 void JumpThreadingPass::unfoldSelectInstr(BasicBlock *Pred, BasicBlock *BB, 2791 SelectInst *SI, PHINode *SIUse, 2792 unsigned Idx) { 2793 // Expand the select. 2794 // 2795 // Pred -- 2796 // | v 2797 // | NewBB 2798 // | | 2799 // |----- 2800 // v 2801 // BB 2802 BranchInst *PredTerm = cast<BranchInst>(Pred->getTerminator()); 2803 BasicBlock *NewBB = BasicBlock::Create(BB->getContext(), "select.unfold", 2804 BB->getParent(), BB); 2805 // Move the unconditional branch to NewBB. 2806 PredTerm->removeFromParent(); 2807 PredTerm->insertInto(NewBB, NewBB->end()); 2808 // Create a conditional branch and update PHI nodes. 2809 auto *BI = BranchInst::Create(NewBB, BB, SI->getCondition(), Pred); 2810 BI->applyMergedLocation(PredTerm->getDebugLoc(), SI->getDebugLoc()); 2811 BI->copyMetadata(*SI, {LLVMContext::MD_prof}); 2812 SIUse->setIncomingValue(Idx, SI->getFalseValue()); 2813 SIUse->addIncoming(SI->getTrueValue(), NewBB); 2814 2815 uint64_t TrueWeight = 1; 2816 uint64_t FalseWeight = 1; 2817 // Copy probabilities from 'SI' to created conditional branch in 'Pred'. 2818 if (extractBranchWeights(*SI, TrueWeight, FalseWeight) && 2819 (TrueWeight + FalseWeight) != 0) { 2820 SmallVector<BranchProbability, 2> BP; 2821 BP.emplace_back(BranchProbability::getBranchProbability( 2822 TrueWeight, TrueWeight + FalseWeight)); 2823 BP.emplace_back(BranchProbability::getBranchProbability( 2824 FalseWeight, TrueWeight + FalseWeight)); 2825 // Update BPI if exists. 2826 if (auto *BPI = getBPI()) 2827 BPI->setEdgeProbability(Pred, BP); 2828 } 2829 // Set the block frequency of NewBB. 2830 if (auto *BFI = getBFI()) { 2831 if ((TrueWeight + FalseWeight) == 0) { 2832 TrueWeight = 1; 2833 FalseWeight = 1; 2834 } 2835 BranchProbability PredToNewBBProb = BranchProbability::getBranchProbability( 2836 TrueWeight, TrueWeight + FalseWeight); 2837 auto NewBBFreq = BFI->getBlockFreq(Pred) * PredToNewBBProb; 2838 BFI->setBlockFreq(NewBB, NewBBFreq); 2839 } 2840 2841 // The select is now dead. 2842 SI->eraseFromParent(); 2843 DTU->applyUpdatesPermissive({{DominatorTree::Insert, NewBB, BB}, 2844 {DominatorTree::Insert, Pred, NewBB}}); 2845 2846 // Update any other PHI nodes in BB. 2847 for (BasicBlock::iterator BI = BB->begin(); 2848 PHINode *Phi = dyn_cast<PHINode>(BI); ++BI) 2849 if (Phi != SIUse) 2850 Phi->addIncoming(Phi->getIncomingValueForBlock(Pred), NewBB); 2851 } 2852 2853 bool JumpThreadingPass::tryToUnfoldSelect(SwitchInst *SI, BasicBlock *BB) { 2854 PHINode *CondPHI = dyn_cast<PHINode>(SI->getCondition()); 2855 2856 if (!CondPHI || CondPHI->getParent() != BB) 2857 return false; 2858 2859 for (unsigned I = 0, E = CondPHI->getNumIncomingValues(); I != E; ++I) { 2860 BasicBlock *Pred = CondPHI->getIncomingBlock(I); 2861 SelectInst *PredSI = dyn_cast<SelectInst>(CondPHI->getIncomingValue(I)); 2862 2863 // The second and third condition can be potentially relaxed. Currently 2864 // the conditions help to simplify the code and allow us to reuse existing 2865 // code, developed for tryToUnfoldSelect(CmpInst *, BasicBlock *) 2866 if (!PredSI || PredSI->getParent() != Pred || !PredSI->hasOneUse()) 2867 continue; 2868 2869 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); 2870 if (!PredTerm || !PredTerm->isUnconditional()) 2871 continue; 2872 2873 unfoldSelectInstr(Pred, BB, PredSI, CondPHI, I); 2874 return true; 2875 } 2876 return false; 2877 } 2878 2879 /// tryToUnfoldSelect - Look for blocks of the form 2880 /// bb1: 2881 /// %a = select 2882 /// br bb2 2883 /// 2884 /// bb2: 2885 /// %p = phi [%a, %bb1] ... 2886 /// %c = icmp %p 2887 /// br i1 %c 2888 /// 2889 /// And expand the select into a branch structure if one of its arms allows %c 2890 /// to be folded. This later enables threading from bb1 over bb2. 2891 bool JumpThreadingPass::tryToUnfoldSelect(CmpInst *CondCmp, BasicBlock *BB) { 2892 BranchInst *CondBr = dyn_cast<BranchInst>(BB->getTerminator()); 2893 PHINode *CondLHS = dyn_cast<PHINode>(CondCmp->getOperand(0)); 2894 Constant *CondRHS = cast<Constant>(CondCmp->getOperand(1)); 2895 2896 if (!CondBr || !CondBr->isConditional() || !CondLHS || 2897 CondLHS->getParent() != BB) 2898 return false; 2899 2900 for (unsigned I = 0, E = CondLHS->getNumIncomingValues(); I != E; ++I) { 2901 BasicBlock *Pred = CondLHS->getIncomingBlock(I); 2902 SelectInst *SI = dyn_cast<SelectInst>(CondLHS->getIncomingValue(I)); 2903 2904 // Look if one of the incoming values is a select in the corresponding 2905 // predecessor. 2906 if (!SI || SI->getParent() != Pred || !SI->hasOneUse()) 2907 continue; 2908 2909 BranchInst *PredTerm = dyn_cast<BranchInst>(Pred->getTerminator()); 2910 if (!PredTerm || !PredTerm->isUnconditional()) 2911 continue; 2912 2913 // Now check if one of the select values would allow us to constant fold the 2914 // terminator in BB. We don't do the transform if both sides fold, those 2915 // cases will be threaded in any case. 2916 Constant *LHSRes = 2917 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(1), 2918 CondRHS, Pred, BB, CondCmp); 2919 Constant *RHSRes = 2920 LVI->getPredicateOnEdge(CondCmp->getPredicate(), SI->getOperand(2), 2921 CondRHS, Pred, BB, CondCmp); 2922 if ((LHSRes || RHSRes) && LHSRes != RHSRes) { 2923 unfoldSelectInstr(Pred, BB, SI, CondLHS, I); 2924 return true; 2925 } 2926 } 2927 return false; 2928 } 2929 2930 /// tryToUnfoldSelectInCurrBB - Look for PHI/Select or PHI/CMP/Select in the 2931 /// same BB in the form 2932 /// bb: 2933 /// %p = phi [false, %bb1], [true, %bb2], [false, %bb3], [true, %bb4], ... 2934 /// %s = select %p, trueval, falseval 2935 /// 2936 /// or 2937 /// 2938 /// bb: 2939 /// %p = phi [0, %bb1], [1, %bb2], [0, %bb3], [1, %bb4], ... 2940 /// %c = cmp %p, 0 2941 /// %s = select %c, trueval, falseval 2942 /// 2943 /// And expand the select into a branch structure. This later enables 2944 /// jump-threading over bb in this pass. 2945 /// 2946 /// Using the similar approach of SimplifyCFG::FoldCondBranchOnPHI(), unfold 2947 /// select if the associated PHI has at least one constant. If the unfolded 2948 /// select is not jump-threaded, it will be folded again in the later 2949 /// optimizations. 2950 bool JumpThreadingPass::tryToUnfoldSelectInCurrBB(BasicBlock *BB) { 2951 // This transform would reduce the quality of msan diagnostics. 2952 // Disable this transform under MemorySanitizer. 2953 if (BB->getParent()->hasFnAttribute(Attribute::SanitizeMemory)) 2954 return false; 2955 2956 // If threading this would thread across a loop header, don't thread the edge. 2957 // See the comments above findLoopHeaders for justifications and caveats. 2958 if (LoopHeaders.count(BB)) 2959 return false; 2960 2961 for (BasicBlock::iterator BI = BB->begin(); 2962 PHINode *PN = dyn_cast<PHINode>(BI); ++BI) { 2963 // Look for a Phi having at least one constant incoming value. 2964 if (llvm::all_of(PN->incoming_values(), 2965 [](Value *V) { return !isa<ConstantInt>(V); })) 2966 continue; 2967 2968 auto isUnfoldCandidate = [BB](SelectInst *SI, Value *V) { 2969 using namespace PatternMatch; 2970 2971 // Check if SI is in BB and use V as condition. 2972 if (SI->getParent() != BB) 2973 return false; 2974 Value *Cond = SI->getCondition(); 2975 bool IsAndOr = match(SI, m_CombineOr(m_LogicalAnd(), m_LogicalOr())); 2976 return Cond && Cond == V && Cond->getType()->isIntegerTy(1) && !IsAndOr; 2977 }; 2978 2979 SelectInst *SI = nullptr; 2980 for (Use &U : PN->uses()) { 2981 if (ICmpInst *Cmp = dyn_cast<ICmpInst>(U.getUser())) { 2982 // Look for a ICmp in BB that compares PN with a constant and is the 2983 // condition of a Select. 2984 if (Cmp->getParent() == BB && Cmp->hasOneUse() && 2985 isa<ConstantInt>(Cmp->getOperand(1 - U.getOperandNo()))) 2986 if (SelectInst *SelectI = dyn_cast<SelectInst>(Cmp->user_back())) 2987 if (isUnfoldCandidate(SelectI, Cmp->use_begin()->get())) { 2988 SI = SelectI; 2989 break; 2990 } 2991 } else if (SelectInst *SelectI = dyn_cast<SelectInst>(U.getUser())) { 2992 // Look for a Select in BB that uses PN as condition. 2993 if (isUnfoldCandidate(SelectI, U.get())) { 2994 SI = SelectI; 2995 break; 2996 } 2997 } 2998 } 2999 3000 if (!SI) 3001 continue; 3002 // Expand the select. 3003 Value *Cond = SI->getCondition(); 3004 if (!isGuaranteedNotToBeUndefOrPoison(Cond, nullptr, SI)) { 3005 Cond = new FreezeInst(Cond, "cond.fr", SI->getIterator()); 3006 cast<FreezeInst>(Cond)->setDebugLoc(DebugLoc::getTemporary()); 3007 } 3008 MDNode *BranchWeights = getBranchWeightMDNode(*SI); 3009 Instruction *Term = 3010 SplitBlockAndInsertIfThen(Cond, SI, false, BranchWeights); 3011 BasicBlock *SplitBB = SI->getParent(); 3012 BasicBlock *NewBB = Term->getParent(); 3013 PHINode *NewPN = PHINode::Create(SI->getType(), 2, "", SI->getIterator()); 3014 NewPN->addIncoming(SI->getTrueValue(), Term->getParent()); 3015 NewPN->addIncoming(SI->getFalseValue(), BB); 3016 NewPN->setDebugLoc(SI->getDebugLoc()); 3017 SI->replaceAllUsesWith(NewPN); 3018 SI->eraseFromParent(); 3019 // NewBB and SplitBB are newly created blocks which require insertion. 3020 std::vector<DominatorTree::UpdateType> Updates; 3021 Updates.reserve((2 * SplitBB->getTerminator()->getNumSuccessors()) + 3); 3022 Updates.push_back({DominatorTree::Insert, BB, SplitBB}); 3023 Updates.push_back({DominatorTree::Insert, BB, NewBB}); 3024 Updates.push_back({DominatorTree::Insert, NewBB, SplitBB}); 3025 // BB's successors were moved to SplitBB, update DTU accordingly. 3026 for (auto *Succ : successors(SplitBB)) { 3027 Updates.push_back({DominatorTree::Delete, BB, Succ}); 3028 Updates.push_back({DominatorTree::Insert, SplitBB, Succ}); 3029 } 3030 DTU->applyUpdatesPermissive(Updates); 3031 return true; 3032 } 3033 return false; 3034 } 3035 3036 /// Try to propagate a guard from the current BB into one of its predecessors 3037 /// in case if another branch of execution implies that the condition of this 3038 /// guard is always true. Currently we only process the simplest case that 3039 /// looks like: 3040 /// 3041 /// Start: 3042 /// %cond = ... 3043 /// br i1 %cond, label %T1, label %F1 3044 /// T1: 3045 /// br label %Merge 3046 /// F1: 3047 /// br label %Merge 3048 /// Merge: 3049 /// %condGuard = ... 3050 /// call void(i1, ...) @llvm.experimental.guard( i1 %condGuard )[ "deopt"() ] 3051 /// 3052 /// And cond either implies condGuard or !condGuard. In this case all the 3053 /// instructions before the guard can be duplicated in both branches, and the 3054 /// guard is then threaded to one of them. 3055 bool JumpThreadingPass::processGuards(BasicBlock *BB) { 3056 using namespace PatternMatch; 3057 3058 // We only want to deal with two predecessors. 3059 BasicBlock *Pred1, *Pred2; 3060 auto PI = pred_begin(BB), PE = pred_end(BB); 3061 if (PI == PE) 3062 return false; 3063 Pred1 = *PI++; 3064 if (PI == PE) 3065 return false; 3066 Pred2 = *PI++; 3067 if (PI != PE) 3068 return false; 3069 if (Pred1 == Pred2) 3070 return false; 3071 3072 // Try to thread one of the guards of the block. 3073 // TODO: Look up deeper than to immediate predecessor? 3074 auto *Parent = Pred1->getSinglePredecessor(); 3075 if (!Parent || Parent != Pred2->getSinglePredecessor()) 3076 return false; 3077 3078 if (auto *BI = dyn_cast<BranchInst>(Parent->getTerminator())) 3079 for (auto &I : *BB) 3080 if (isGuard(&I) && threadGuard(BB, cast<IntrinsicInst>(&I), BI)) 3081 return true; 3082 3083 return false; 3084 } 3085 3086 /// Try to propagate the guard from BB which is the lower block of a diamond 3087 /// to one of its branches, in case if diamond's condition implies guard's 3088 /// condition. 3089 bool JumpThreadingPass::threadGuard(BasicBlock *BB, IntrinsicInst *Guard, 3090 BranchInst *BI) { 3091 assert(BI->getNumSuccessors() == 2 && "Wrong number of successors?"); 3092 assert(BI->isConditional() && "Unconditional branch has 2 successors?"); 3093 Value *GuardCond = Guard->getArgOperand(0); 3094 Value *BranchCond = BI->getCondition(); 3095 BasicBlock *TrueDest = BI->getSuccessor(0); 3096 BasicBlock *FalseDest = BI->getSuccessor(1); 3097 3098 auto &DL = BB->getDataLayout(); 3099 bool TrueDestIsSafe = false; 3100 bool FalseDestIsSafe = false; 3101 3102 // True dest is safe if BranchCond => GuardCond. 3103 auto Impl = isImpliedCondition(BranchCond, GuardCond, DL); 3104 if (Impl && *Impl) 3105 TrueDestIsSafe = true; 3106 else { 3107 // False dest is safe if !BranchCond => GuardCond. 3108 Impl = isImpliedCondition(BranchCond, GuardCond, DL, /* LHSIsTrue */ false); 3109 if (Impl && *Impl) 3110 FalseDestIsSafe = true; 3111 } 3112 3113 if (!TrueDestIsSafe && !FalseDestIsSafe) 3114 return false; 3115 3116 BasicBlock *PredUnguardedBlock = TrueDestIsSafe ? TrueDest : FalseDest; 3117 BasicBlock *PredGuardedBlock = FalseDestIsSafe ? TrueDest : FalseDest; 3118 3119 ValueToValueMapTy UnguardedMapping, GuardedMapping; 3120 Instruction *AfterGuard = Guard->getNextNode(); 3121 unsigned Cost = 3122 getJumpThreadDuplicationCost(TTI, BB, AfterGuard, BBDupThreshold); 3123 if (Cost > BBDupThreshold) 3124 return false; 3125 // Duplicate all instructions before the guard and the guard itself to the 3126 // branch where implication is not proved. 3127 BasicBlock *GuardedBlock = DuplicateInstructionsInSplitBetween( 3128 BB, PredGuardedBlock, AfterGuard, GuardedMapping, *DTU); 3129 assert(GuardedBlock && "Could not create the guarded block?"); 3130 // Duplicate all instructions before the guard in the unguarded branch. 3131 // Since we have successfully duplicated the guarded block and this block 3132 // has fewer instructions, we expect it to succeed. 3133 BasicBlock *UnguardedBlock = DuplicateInstructionsInSplitBetween( 3134 BB, PredUnguardedBlock, Guard, UnguardedMapping, *DTU); 3135 assert(UnguardedBlock && "Could not create the unguarded block?"); 3136 LLVM_DEBUG(dbgs() << "Moved guard " << *Guard << " to block " 3137 << GuardedBlock->getName() << "\n"); 3138 // Some instructions before the guard may still have uses. For them, we need 3139 // to create Phi nodes merging their copies in both guarded and unguarded 3140 // branches. Those instructions that have no uses can be just removed. 3141 SmallVector<Instruction *, 4> ToRemove; 3142 for (auto BI = BB->begin(); &*BI != AfterGuard; ++BI) 3143 if (!isa<PHINode>(&*BI)) 3144 ToRemove.push_back(&*BI); 3145 3146 BasicBlock::iterator InsertionPoint = BB->getFirstInsertionPt(); 3147 assert(InsertionPoint != BB->end() && "Empty block?"); 3148 // Substitute with Phis & remove. 3149 for (auto *Inst : reverse(ToRemove)) { 3150 if (!Inst->use_empty()) { 3151 PHINode *NewPN = PHINode::Create(Inst->getType(), 2); 3152 NewPN->addIncoming(UnguardedMapping[Inst], UnguardedBlock); 3153 NewPN->addIncoming(GuardedMapping[Inst], GuardedBlock); 3154 NewPN->setDebugLoc(Inst->getDebugLoc()); 3155 NewPN->insertBefore(InsertionPoint); 3156 Inst->replaceAllUsesWith(NewPN); 3157 } 3158 Inst->dropDbgRecords(); 3159 Inst->eraseFromParent(); 3160 } 3161 return true; 3162 } 3163 3164 PreservedAnalyses JumpThreadingPass::getPreservedAnalysis() const { 3165 PreservedAnalyses PA; 3166 PA.preserve<LazyValueAnalysis>(); 3167 PA.preserve<DominatorTreeAnalysis>(); 3168 3169 // TODO: We would like to preserve BPI/BFI. Enable once all paths update them. 3170 // TODO: Would be nice to verify BPI/BFI consistency as well. 3171 return PA; 3172 } 3173 3174 template <typename AnalysisT> 3175 typename AnalysisT::Result *JumpThreadingPass::runExternalAnalysis() { 3176 assert(FAM && "Can't run external analysis without FunctionAnalysisManager"); 3177 3178 // If there were no changes since last call to 'runExternalAnalysis' then all 3179 // analysis is either up to date or explicitly invalidated. Just go ahead and 3180 // run the "external" analysis. 3181 if (!ChangedSinceLastAnalysisUpdate) { 3182 assert(!DTU->hasPendingUpdates() && 3183 "Lost update of 'ChangedSinceLastAnalysisUpdate'?"); 3184 // Run the "external" analysis. 3185 return &FAM->getResult<AnalysisT>(*F); 3186 } 3187 ChangedSinceLastAnalysisUpdate = false; 3188 3189 auto PA = getPreservedAnalysis(); 3190 // TODO: This shouldn't be needed once 'getPreservedAnalysis' reports BPI/BFI 3191 // as preserved. 3192 PA.preserve<BranchProbabilityAnalysis>(); 3193 PA.preserve<BlockFrequencyAnalysis>(); 3194 // Report everything except explicitly preserved as invalid. 3195 FAM->invalidate(*F, PA); 3196 // Update DT/PDT. 3197 DTU->flush(); 3198 // Make sure DT/PDT are valid before running "external" analysis. 3199 assert(DTU->getDomTree().verify(DominatorTree::VerificationLevel::Fast)); 3200 assert((!DTU->hasPostDomTree() || 3201 DTU->getPostDomTree().verify( 3202 PostDominatorTree::VerificationLevel::Fast))); 3203 // Run the "external" analysis. 3204 auto *Result = &FAM->getResult<AnalysisT>(*F); 3205 // Update analysis JumpThreading depends on and not explicitly preserved. 3206 TTI = &FAM->getResult<TargetIRAnalysis>(*F); 3207 TLI = &FAM->getResult<TargetLibraryAnalysis>(*F); 3208 AA = &FAM->getResult<AAManager>(*F); 3209 3210 return Result; 3211 } 3212 3213 BranchProbabilityInfo *JumpThreadingPass::getBPI() { 3214 if (!BPI) { 3215 assert(FAM && "Can't create BPI without FunctionAnalysisManager"); 3216 BPI = FAM->getCachedResult<BranchProbabilityAnalysis>(*F); 3217 } 3218 return BPI; 3219 } 3220 3221 BlockFrequencyInfo *JumpThreadingPass::getBFI() { 3222 if (!BFI) { 3223 assert(FAM && "Can't create BFI without FunctionAnalysisManager"); 3224 BFI = FAM->getCachedResult<BlockFrequencyAnalysis>(*F); 3225 } 3226 return BFI; 3227 } 3228 3229 // Important note on validity of BPI/BFI. JumpThreading tries to preserve 3230 // BPI/BFI as it goes. Thus if cached instance exists it will be updated. 3231 // Otherwise, new instance of BPI/BFI is created (up to date by definition). 3232 BranchProbabilityInfo *JumpThreadingPass::getOrCreateBPI(bool Force) { 3233 auto *Res = getBPI(); 3234 if (Res) 3235 return Res; 3236 3237 if (Force) 3238 BPI = runExternalAnalysis<BranchProbabilityAnalysis>(); 3239 3240 return BPI; 3241 } 3242 3243 BlockFrequencyInfo *JumpThreadingPass::getOrCreateBFI(bool Force) { 3244 auto *Res = getBFI(); 3245 if (Res) 3246 return Res; 3247 3248 if (Force) 3249 BFI = runExternalAnalysis<BlockFrequencyAnalysis>(); 3250 3251 return BFI; 3252 } 3253