//===- BranchProbabilityInfo.cpp - Branch Probability Analysis ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "branch-prob"

static cl::opt<bool> PrintBranchProb(
    "print-bpi", cl::init(false), cl::Hidden,
    cl::desc("Print the branch probability info."));

cl::opt<std::string> PrintBranchProbFuncName(
    "print-bpi-func-name", cl::Hidden,
    cl::desc("The option to specify the name of the function "
             "whose branch probability info is printed."));

INITIALIZE_PASS_BEGIN(BranchProbabilityInfoWrapperPass, "branch-prob",
                      "Branch Probability Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_END(BranchProbabilityInfoWrapperPass, "branch-prob",
                    "Branch Probability Analysis", false, true)

BranchProbabilityInfoWrapperPass::BranchProbabilityInfoWrapperPass()
    : FunctionPass(ID) {
  initializeBranchProbabilityInfoWrapperPassPass(
      *PassRegistry::getPassRegistry());
}

char BranchProbabilityInfoWrapperPass::ID = 0;

// Weights are for internal use only. They are used by heuristics to help
// estimate edge probabilities. Example:
//
// Using "Loop Branch Heuristics" we predict weights of edges for the
// block BB2.
//         ...
//          |
//          V
//         BB1<-+
//          |   |
//          |   | (Weight = 124)
//          V   |
//         BB2--+
//          |
//          | (Weight = 4)
//          V
//         BB3
//
// Probability of the edge BB2->BB1 = 124 / (124 + 4) = 0.96875
// Probability of the edge BB2->BB3 = 4 / (124 + 4) = 0.03125
static const uint32_t LBH_TAKEN_WEIGHT = 124;
static const uint32_t LBH_NONTAKEN_WEIGHT = 4;

/// Unreachable-terminating branch taken probability.
///
/// This is the probability for a branch being taken to a block that terminates
/// (eventually) in unreachable. Such branches are predicted as unlikely as
/// possible. The remaining probability is shared proportionally among all
/// reachable successors.
static const BranchProbability UR_TAKEN_PROB = BranchProbability::getRaw(1);

/// Heuristics and lookup tables for non-loop branches:
/// Pointer Heuristics (PH)
static const uint32_t PH_TAKEN_WEIGHT = 20;
static const uint32_t PH_NONTAKEN_WEIGHT = 12;
static const BranchProbability
    PtrTakenProb(PH_TAKEN_WEIGHT, PH_TAKEN_WEIGHT + PH_NONTAKEN_WEIGHT);
static const BranchProbability
    PtrUntakenProb(PH_NONTAKEN_WEIGHT, PH_TAKEN_WEIGHT + PH_NONTAKEN_WEIGHT);

using ProbabilityList = SmallVector<BranchProbability>;
using ProbabilityTable = std::map<CmpInst::Predicate, ProbabilityList>;

/// Pointer comparisons:
static const ProbabilityTable PointerTable{
    {ICmpInst::ICMP_NE, {PtrTakenProb, PtrUntakenProb}}, /// p != q -> Likely
    {ICmpInst::ICMP_EQ, {PtrUntakenProb, PtrTakenProb}}, /// p == q -> Unlikely
};

/// Zero Heuristics (ZH)
static const uint32_t ZH_TAKEN_WEIGHT = 20;
static const uint32_t ZH_NONTAKEN_WEIGHT = 12;
static const BranchProbability
    ZeroTakenProb(ZH_TAKEN_WEIGHT, ZH_TAKEN_WEIGHT + ZH_NONTAKEN_WEIGHT);
static const BranchProbability
    ZeroUntakenProb(ZH_NONTAKEN_WEIGHT, ZH_TAKEN_WEIGHT + ZH_NONTAKEN_WEIGHT);

/// Integer compares with 0:
static const ProbabilityTable ICmpWithZeroTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}},  /// X == 0 -> Unlikely
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}},  /// X != 0 -> Likely
    {CmpInst::ICMP_SLT, {ZeroUntakenProb, ZeroTakenProb}}, /// X < 0 -> Unlikely
    {CmpInst::ICMP_SGT, {ZeroTakenProb, ZeroUntakenProb}}, /// X > 0 -> Likely
};

/// Integer compares with -1:
static const ProbabilityTable ICmpWithMinusOneTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}}, /// X == -1 -> Unlikely
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}}, /// X != -1 -> Likely
    // InstCombine canonicalizes X >= 0 into X > -1
    {CmpInst::ICMP_SGT, {ZeroTakenProb, ZeroUntakenProb}}, /// X >= 0 -> Likely
};

/// Integer compares with 1:
static const ProbabilityTable ICmpWithOneTable{
    // InstCombine canonicalizes X <= 0 into X < 1
    {CmpInst::ICMP_SLT, {ZeroUntakenProb, ZeroTakenProb}}, /// X <= 0 -> Unlikely
};

/// strcmp and similar functions return zero, negative, or positive, if the
/// first string is equal, less, or greater than the second. We consider it
/// likely that the strings are not equal, so a comparison with zero is
/// probably false; a comparison with any other number is also probably false,
/// given that exactly what is returned for nonzero values is not specified.
/// We know nothing about any kind of comparison other than equality.
static const ProbabilityTable ICmpWithLibCallTable{
    {CmpInst::ICMP_EQ, {ZeroUntakenProb, ZeroTakenProb}},
    {CmpInst::ICMP_NE, {ZeroTakenProb, ZeroUntakenProb}},
};

// Floating-Point Heuristics (FPH)
static const uint32_t FPH_TAKEN_WEIGHT = 20;
static const uint32_t FPH_NONTAKEN_WEIGHT = 12;

/// This is the probability for an ordered floating point comparison.
static const uint32_t FPH_ORD_WEIGHT = 1024 * 1024 - 1;
/// This is the probability for an unordered floating point comparison, which
/// means one or both of the operands are NaN. Usually it is used to test for
/// an exceptional case, so the result is unlikely.
static const uint32_t FPH_UNO_WEIGHT = 1;

static const BranchProbability FPOrdTakenProb(FPH_ORD_WEIGHT,
                                              FPH_ORD_WEIGHT + FPH_UNO_WEIGHT);
static const BranchProbability
    FPOrdUntakenProb(FPH_UNO_WEIGHT, FPH_ORD_WEIGHT + FPH_UNO_WEIGHT);
static const BranchProbability
    FPTakenProb(FPH_TAKEN_WEIGHT, FPH_TAKEN_WEIGHT + FPH_NONTAKEN_WEIGHT);
static const BranchProbability
    FPUntakenProb(FPH_NONTAKEN_WEIGHT, FPH_TAKEN_WEIGHT + FPH_NONTAKEN_WEIGHT);

/// Floating-Point compares:
static const ProbabilityTable FCmpTable{
    {FCmpInst::FCMP_ORD, {FPOrdTakenProb, FPOrdUntakenProb}}, /// !isnan -> Likely
    {FCmpInst::FCMP_UNO, {FPOrdUntakenProb, FPOrdTakenProb}}, /// isnan -> Unlikely
};

/// Set of dedicated "absolute" execution weights for a block. These weights are
/// meaningful relative to each other and their derivatives only.
enum class BlockExecWeight : std::uint32_t {
  /// Special weight used for cases with exact zero probability.
  ZERO = 0x0,
  /// Minimal possible non-zero weight.
  LOWEST_NON_ZERO = 0x1,
  /// Weight to an 'unreachable' block.
  UNREACHABLE = ZERO,
  /// Weight to a block containing a non-returning call.
  NORETURN = LOWEST_NON_ZERO,
  /// Weight to the 'unwind' block of an invoke instruction.
  UNWIND = LOWEST_NON_ZERO,
  /// Weight to a 'cold' block. Cold blocks are the ones containing calls marked
  /// with attribute 'cold'.
  COLD = 0xffff,
  /// Default weight is used in cases when there is no dedicated execution
  /// weight set. It is not propagated through the domination line either.
  DEFAULT = 0xfffff
};

BranchProbabilityInfo::SccInfo::SccInfo(const Function &F) {
  // Record SCC numbers of blocks in the CFG to identify irreducible loops.
  // FIXME: We could only calculate this if the CFG is known to be irreducible
  // (perhaps cache this info in LoopInfo if we can easily calculate it there?).
  int SccNum = 0;
  for (scc_iterator<const Function *> It = scc_begin(&F); !It.isAtEnd();
       ++It, ++SccNum) {
    // Ignore single-block SCCs since they either aren't loops or LoopInfo will
    // catch them.
    const std::vector<const BasicBlock *> &Scc = *It;
    if (Scc.size() == 1)
      continue;

    LLVM_DEBUG(dbgs() << "BPI: SCC " << SccNum << ":");
    for (const auto *BB : Scc) {
      LLVM_DEBUG(dbgs() << " " << BB->getName());
      SccNums[BB] = SccNum;
      calculateSccBlockType(BB, SccNum);
    }
    LLVM_DEBUG(dbgs() << "\n");
  }
}

int BranchProbabilityInfo::SccInfo::getSCCNum(const BasicBlock *BB) const {
  auto SccIt = SccNums.find(BB);
  if (SccIt == SccNums.end())
    return -1;
  return SccIt->second;
}

void BranchProbabilityInfo::SccInfo::getSccEnterBlocks(
    int SccNum, SmallVectorImpl<BasicBlock *> &Enters) const {

  for (auto MapIt : SccBlocks[SccNum]) {
    const auto *BB = MapIt.first;
    if (isSCCHeader(BB, SccNum))
      for (const auto *Pred : predecessors(BB))
        if (getSCCNum(Pred) != SccNum)
          Enters.push_back(const_cast<BasicBlock *>(BB));
  }
}

void BranchProbabilityInfo::SccInfo::getSccExitBlocks(
    int SccNum, SmallVectorImpl<BasicBlock *> &Exits) const {
  for (auto MapIt : SccBlocks[SccNum]) {
    const auto *BB = MapIt.first;
    if (isSCCExitingBlock(BB, SccNum))
      for (const auto *Succ : successors(BB))
        if (getSCCNum(Succ) != SccNum)
          Exits.push_back(const_cast<BasicBlock *>(Succ));
  }
}

uint32_t BranchProbabilityInfo::SccInfo::getSccBlockType(const BasicBlock *BB,
                                                         int SccNum) const {
  assert(getSCCNum(BB) == SccNum);

  assert(SccBlocks.size() > static_cast<unsigned>(SccNum) && "Unknown SCC");
  const auto &SccBlockTypes = SccBlocks[SccNum];

  auto It = SccBlockTypes.find(BB);
  if (It != SccBlockTypes.end()) {
    return It->second;
  }
  return Inner;
}

void BranchProbabilityInfo::SccInfo::calculateSccBlockType(const BasicBlock *BB,
                                                           int SccNum) {
  assert(getSCCNum(BB) == SccNum);
  uint32_t BlockType = Inner;

  if (llvm::any_of(predecessors(BB), [&](const BasicBlock *Pred) {
        // Consider any block that is an entry point to the SCC as
        // a header.
        return getSCCNum(Pred) != SccNum;
      }))
    BlockType |= Header;

  if (llvm::any_of(successors(BB), [&](const BasicBlock *Succ) {
        return getSCCNum(Succ) != SccNum;
      }))
    BlockType |= Exiting;

  // Lazily compute the set of headers for a given SCC and cache the results
  // in the SccBlocks map.
  if (SccBlocks.size() <= static_cast<unsigned>(SccNum))
    SccBlocks.resize(SccNum + 1);
  auto &SccBlockTypes = SccBlocks[SccNum];

  if (BlockType != Inner) {
    bool IsInserted;
    std::tie(std::ignore, IsInserted) =
        SccBlockTypes.insert(std::make_pair(BB, BlockType));
    assert(IsInserted && "Duplicated block in SCC");
  }
}

BranchProbabilityInfo::LoopBlock::LoopBlock(const BasicBlock *BB,
                                            const LoopInfo &LI,
                                            const SccInfo &SccI)
    : BB(BB) {
  LD.first = LI.getLoopFor(BB);
  if (!LD.first) {
    LD.second = SccI.getSCCNum(BB);
  }
}

bool BranchProbabilityInfo::isLoopEnteringEdge(const LoopEdge &Edge) const {
  const auto &SrcBlock = Edge.first;
  const auto &DstBlock = Edge.second;
  return (DstBlock.getLoop() &&
          !DstBlock.getLoop()->contains(SrcBlock.getLoop())) ||
         // Assume that SCCs can't be nested.
         (DstBlock.getSccNum() != -1 &&
          SrcBlock.getSccNum() != DstBlock.getSccNum());
}

bool BranchProbabilityInfo::isLoopExitingEdge(const LoopEdge &Edge) const {
  return isLoopEnteringEdge({Edge.second, Edge.first});
}

bool BranchProbabilityInfo::isLoopEnteringExitingEdge(
    const LoopEdge &Edge) const {
  return isLoopEnteringEdge(Edge) || isLoopExitingEdge(Edge);
}

bool BranchProbabilityInfo::isLoopBackEdge(const LoopEdge &Edge) const {
  const auto &SrcBlock = Edge.first;
  const auto &DstBlock = Edge.second;
  return SrcBlock.belongsToSameLoop(DstBlock) &&
         ((DstBlock.getLoop() &&
           DstBlock.getLoop()->getHeader() == DstBlock.getBlock()) ||
          (DstBlock.getSccNum() != -1 &&
           SccI->isSCCHeader(DstBlock.getBlock(), DstBlock.getSccNum())));
}

void BranchProbabilityInfo::getLoopEnterBlocks(
    const LoopBlock &LB, SmallVectorImpl<BasicBlock *> &Enters) const {
  if (LB.getLoop()) {
    auto *Header = LB.getLoop()->getHeader();
    Enters.append(pred_begin(Header), pred_end(Header));
  } else {
    assert(LB.getSccNum() != -1 && "LB doesn't belong to any loop?");
    SccI->getSccEnterBlocks(LB.getSccNum(), Enters);
  }
}

void BranchProbabilityInfo::getLoopExitBlocks(
    const LoopBlock &LB, SmallVectorImpl<BasicBlock *> &Exits) const {
  if (LB.getLoop()) {
    LB.getLoop()->getExitBlocks(Exits);
  } else {
    assert(LB.getSccNum() != -1 && "LB doesn't belong to any loop?");
    SccI->getSccExitBlocks(LB.getSccNum(), Exits);
  }
}

// Propagate existing explicit probabilities from either profile data or
// 'expect' intrinsic processing. Examine the metadata against the unreachable
// heuristic. The probability of an edge coming to an unreachable block is
// set to the minimum of the metadata and unreachable heuristic values.
bool BranchProbabilityInfo::calcMetadataWeights(const BasicBlock *BB) {
  const Instruction *TI = BB->getTerminator();
  assert(TI->getNumSuccessors() > 1 && "expected more than one successor!");
  if (!(isa<BranchInst>(TI) || isa<SwitchInst>(TI) || isa<IndirectBrInst>(TI) ||
        isa<InvokeInst>(TI)))
    return false;

  MDNode *WeightsNode = TI->getMetadata(LLVMContext::MD_prof);
  if (!WeightsNode)
    return false;

  // Check that the number of successors is manageable.
  assert(TI->getNumSuccessors() < UINT32_MAX && "Too many successors");

  // Ensure there are weights for all of the successors. Note that the first
  // operand to the metadata node is a name, not a weight.
  if (WeightsNode->getNumOperands() != TI->getNumSuccessors() + 1)
    return false;

  // Build up the final weights that will be used in a temporary buffer.
  // Compute the sum of all weights to later decide whether they need to
  // be scaled to fit in 32 bits.
  uint64_t WeightSum = 0;
  SmallVector<uint32_t, 2> Weights;
  SmallVector<unsigned, 2> UnreachableIdxs;
  SmallVector<unsigned, 2> ReachableIdxs;
  Weights.reserve(TI->getNumSuccessors());
  for (unsigned I = 1, E = WeightsNode->getNumOperands(); I != E; ++I) {
    ConstantInt *Weight =
        mdconst::dyn_extract<ConstantInt>(WeightsNode->getOperand(I));
    if (!Weight)
      return false;
    assert(Weight->getValue().getActiveBits() <= 32 &&
           "Too many bits for uint32_t");
    Weights.push_back(Weight->getZExtValue());
    WeightSum += Weights.back();
    const LoopBlock SrcLoopBB = getLoopBlock(BB);
    const LoopBlock DstLoopBB = getLoopBlock(TI->getSuccessor(I - 1));
    auto EstimatedWeight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});
    if (EstimatedWeight &&
        *EstimatedWeight <= static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
      UnreachableIdxs.push_back(I - 1);
    else
      ReachableIdxs.push_back(I - 1);
  }
  assert(Weights.size() == TI->getNumSuccessors() && "Checked above");

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  uint64_t ScalingFactor =
      (WeightSum > UINT32_MAX) ? WeightSum / UINT32_MAX + 1 : 1;

  if (ScalingFactor > 1) {
    WeightSum = 0;
    for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I) {
      Weights[I] /= ScalingFactor;
      WeightSum += Weights[I];
    }
  }
  assert(WeightSum <= UINT32_MAX &&
         "Expected weights to scale down to 32 bits");

  if (WeightSum == 0 || ReachableIdxs.size() == 0) {
    for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I)
      Weights[I] = 1;
    WeightSum = TI->getNumSuccessors();
  }

  // Set the probability.
  SmallVector<BranchProbability, 2> BP;
  for (unsigned I = 0, E = TI->getNumSuccessors(); I != E; ++I)
    BP.push_back({ Weights[I], static_cast<uint32_t>(WeightSum) });

  // Examine the metadata against the unreachable heuristic.
  // If the unreachable heuristic is stronger, use it for this edge.
  if (UnreachableIdxs.size() == 0 || ReachableIdxs.size() == 0) {
    setEdgeProbability(BB, BP);
    return true;
  }

  auto UnreachableProb = UR_TAKEN_PROB;
  for (auto I : UnreachableIdxs)
    if (UnreachableProb < BP[I]) {
      BP[I] = UnreachableProb;
    }

  // Sum of all edge probabilities must be 1.0. If we modified the probability
  // of some edges then we must distribute the introduced difference over the
  // reachable blocks.
  //
  // Proportional distribution: the relation between probabilities of the
  // reachable edges is kept unchanged. That is for any reachable edges i and j:
  //   newBP[i] / newBP[j] == oldBP[i] / oldBP[j] =>
  //   newBP[i] / oldBP[i] == newBP[j] / oldBP[j] == K
  // Where K is independent of i,j.
  //   newBP[i] == oldBP[i] * K
  // We need to find K.
  // Make sum of all reachables of the left and right parts:
  //   sum_of_reachable(newBP) == K * sum_of_reachable(oldBP)
  // Sum of newBP must be equal to 1.0:
  //   sum_of_reachable(newBP) + sum_of_unreachable(newBP) == 1.0 =>
  //   sum_of_reachable(newBP) = 1.0 - sum_of_unreachable(newBP)
  // Where sum_of_unreachable(newBP) is what has been just changed.
  // Finally:
  //   K == sum_of_reachable(newBP) / sum_of_reachable(oldBP) =>
  //   K == (1.0 - sum_of_unreachable(newBP)) / sum_of_reachable(oldBP)
  BranchProbability NewUnreachableSum = BranchProbability::getZero();
  for (auto I : UnreachableIdxs)
    NewUnreachableSum += BP[I];

  BranchProbability NewReachableSum =
      BranchProbability::getOne() - NewUnreachableSum;

  BranchProbability OldReachableSum = BranchProbability::getZero();
  for (auto I : ReachableIdxs)
    OldReachableSum += BP[I];

  if (OldReachableSum != NewReachableSum) { // Anything to distribute?
    if (OldReachableSum.isZero()) {
      // If all oldBP[i] are zeroes then the proportional distribution results
      // in all zero probabilities and the error stays big. In this case we
      // evenly spread NewReachableSum over the reachable edges.
      BranchProbability PerEdge = NewReachableSum / ReachableIdxs.size();
      for (auto I : ReachableIdxs)
        BP[I] = PerEdge;
    } else {
      for (auto I : ReachableIdxs) {
        // We use uint64_t to avoid double rounding error of the following
        // calculation: BP[i] = BP[i] * NewReachableSum / OldReachableSum
        // The formula is taken from the private constructor
        // BranchProbability(uint32_t Numerator, uint32_t Denominator)
        uint64_t Mul = static_cast<uint64_t>(NewReachableSum.getNumerator()) *
                       BP[I].getNumerator();
        uint32_t Div = static_cast<uint32_t>(
            divideNearest(Mul, OldReachableSum.getNumerator()));
        BP[I] = BranchProbability::getRaw(Div);
      }
    }
  }

  setEdgeProbability(BB, BP);

  return true;
}

// Calculate Edge Weights using "Pointer Heuristics". Predict that a comparison
// between two pointers, or between a pointer and NULL, will fail.
bool BranchProbabilityInfo::calcPointerHeuristics(const BasicBlock *BB) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
  if (!CI || !CI->isEquality())
    return false;

  Value *LHS = CI->getOperand(0);

  if (!LHS->getType()->isPointerTy())
    return false;

  assert(CI->getOperand(1)->getType()->isPointerTy());

  auto Search = PointerTable.find(CI->getPredicate());
  if (Search == PointerTable.end())
    return false;
  setEdgeProbability(BB, Search->second);
  return true;
}

// Compute the unlikely successors to the block BB in the loop L, specifically
// those that are unlikely because this is a loop, and add them to the
// UnlikelyBlocks set.
static void
computeUnlikelySuccessors(const BasicBlock *BB, Loop *L,
                          SmallPtrSetImpl<const BasicBlock*> &UnlikelyBlocks) {
  // Sometimes in a loop we have a branch whose condition is made false by
  // taking it. This is typically something like
  //   int n = 0;
  //   while (...) {
  //     if (++n >= MAX) {
  //       n = 0;
  //     }
  //   }
  // In this sort of situation taking the branch means that at the very least it
  // won't be taken again in the next iteration of the loop, so we should
  // consider it less likely than a typical branch.
  //
  // We detect this by looking back through the graph of PHI nodes that sets the
  // value that the condition depends on, and seeing if we can reach a successor
  // block which can be determined to make the condition false.
  //
  // FIXME: We currently consider unlikely blocks to be half as likely as other
  // blocks, but if we consider the example above the likelihood is actually
  // 1/MAX. We could therefore be more precise in how unlikely we consider
  // blocks to be, but it would require more careful examination of the form
  // of the comparison expression.
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return;

  // Check if the branch is based on an instruction compared with a constant
  CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition());
  if (!CI || !isa<Instruction>(CI->getOperand(0)) ||
      !isa<Constant>(CI->getOperand(1)))
    return;

  // Either the instruction must be a PHI, or a chain of operations involving
  // constants that ends in a PHI which we can then collapse into a single value
  // if the PHI value is known.
  Instruction *CmpLHS = dyn_cast<Instruction>(CI->getOperand(0));
  PHINode *CmpPHI = dyn_cast<PHINode>(CmpLHS);
  Constant *CmpConst = dyn_cast<Constant>(CI->getOperand(1));
  // Collect the instructions until we hit a PHI
  SmallVector<BinaryOperator *, 1> InstChain;
  while (!CmpPHI && CmpLHS && isa<BinaryOperator>(CmpLHS) &&
         isa<Constant>(CmpLHS->getOperand(1))) {
    // Stop if the chain extends outside of the loop
    if (!L->contains(CmpLHS))
      return;
    InstChain.push_back(cast<BinaryOperator>(CmpLHS));
    CmpLHS = dyn_cast<Instruction>(CmpLHS->getOperand(0));
    if (CmpLHS)
      CmpPHI = dyn_cast<PHINode>(CmpLHS);
  }
  if (!CmpPHI || !L->contains(CmpPHI))
    return;

  // Trace the phi node to find all values that come from successors of BB
  SmallPtrSet<PHINode*, 8> VisitedInsts;
  SmallVector<PHINode*, 8> WorkList;
  WorkList.push_back(CmpPHI);
  VisitedInsts.insert(CmpPHI);
  while (!WorkList.empty()) {
    PHINode *P = WorkList.pop_back_val();
    for (BasicBlock *B : P->blocks()) {
      // Skip blocks that aren't part of the loop
      if (!L->contains(B))
        continue;
      Value *V = P->getIncomingValueForBlock(B);
      // If the source is a PHI add it to the work list if we haven't
      // already visited it.
      if (PHINode *PN = dyn_cast<PHINode>(V)) {
        if (VisitedInsts.insert(PN).second)
          WorkList.push_back(PN);
        continue;
      }
      // If this incoming value is a constant and B is a successor of BB, then
      // we can constant-evaluate the compare to see if it makes the branch be
      // taken or not.
      Constant *CmpLHSConst = dyn_cast<Constant>(V);
      if (!CmpLHSConst || !llvm::is_contained(successors(BB), B))
        continue;
      // First collapse InstChain
      const DataLayout &DL = BB->getModule()->getDataLayout();
      for (Instruction *I : llvm::reverse(InstChain)) {
        CmpLHSConst = ConstantFoldBinaryOpOperands(
            I->getOpcode(), CmpLHSConst, cast<Constant>(I->getOperand(1)), DL);
        if (!CmpLHSConst)
          break;
      }
      if (!CmpLHSConst)
        continue;
      // Now constant-evaluate the compare
      Constant *Result = ConstantExpr::getCompare(CI->getPredicate(),
                                                  CmpLHSConst, CmpConst, true);
      // If the result means we don't branch to the block then that block is
      // unlikely.
      if (Result &&
          ((Result->isZeroValue() && B == BI->getSuccessor(0)) ||
           (Result->isOneValue() && B == BI->getSuccessor(1))))
        UnlikelyBlocks.insert(B);
    }
  }
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedBlockWeight(const BasicBlock *BB) const {
  auto WeightIt = EstimatedBlockWeight.find(BB);
  if (WeightIt == EstimatedBlockWeight.end())
    return None;
  return WeightIt->second;
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedLoopWeight(const LoopData &L) const {
  auto WeightIt = EstimatedLoopWeight.find(L);
  if (WeightIt == EstimatedLoopWeight.end())
    return None;
  return WeightIt->second;
}

Optional<uint32_t>
BranchProbabilityInfo::getEstimatedEdgeWeight(const LoopEdge &Edge) const {
  // For edges entering a loop take the weight of the loop rather than that of
  // an individual block in the loop.
  return isLoopEnteringEdge(Edge)
             ? getEstimatedLoopWeight(Edge.second.getLoopData())
             : getEstimatedBlockWeight(Edge.second.getBlock());
}

template <class IterT>
Optional<uint32_t> BranchProbabilityInfo::getMaxEstimatedEdgeWeight(
    const LoopBlock &SrcLoopBB, iterator_range<IterT> Successors) const {
  SmallVector<uint32_t, 4> Weights;
  Optional<uint32_t> MaxWeight;
  for (const BasicBlock *DstBB : Successors) {
    const LoopBlock DstLoopBB = getLoopBlock(DstBB);
    auto Weight = getEstimatedEdgeWeight({SrcLoopBB, DstLoopBB});

    if (!Weight)
      return None;

    if (!MaxWeight || *MaxWeight < *Weight)
      MaxWeight = Weight;
  }

  return MaxWeight;
}

// Updates \p LoopBB's weight and returns true. If \p LoopBB already has an
// associated weight it is unchanged and false is returned.
//
// Please note that the weight is not expected to change once set, thus the
// 'false' status is used to track visited blocks.
bool BranchProbabilityInfo::updateEstimatedBlockWeight(
    LoopBlock &LoopBB, uint32_t BBWeight,
    SmallVectorImpl<BasicBlock *> &BlockWorkList,
    SmallVectorImpl<LoopBlock> &LoopWorkList) {
  BasicBlock *BB = LoopBB.getBlock();

  // In general, weight is assigned to a block when it has final value and
  // can't/shouldn't be changed. However, there are cases when a block
  // inherently has several (possibly "contradicting") weights. For example,
  // an "unwind" block may also contain a "cold" call. In that case the first
  // set weight is favored and all subsequent weights are ignored.
  if (!EstimatedBlockWeight.insert({BB, BBWeight}).second)
    return false;

  for (BasicBlock *PredBlock : predecessors(BB)) {
    LoopBlock PredLoop = getLoopBlock(PredBlock);
    // Add affected block/loop to a working list.
    if (isLoopExitingEdge({PredLoop, LoopBB})) {
      if (!EstimatedLoopWeight.count(PredLoop.getLoopData()))
        LoopWorkList.push_back(PredLoop);
    } else if (!EstimatedBlockWeight.count(PredBlock))
      BlockWorkList.push_back(PredBlock);
  }
  return true;
}

// Starting from \p BB, traverse through dominator blocks and assign \p BBWeight
// to all blocks that are post-dominated by \p BB, i.e. to all blocks that are
// executed if and only if \p BB is executed. Importantly, we skip loops here
// for two reasons. First, weights of blocks in a loop should be scaled by the
// trip count (which is possibly unknown). Second,
// there is no value in doing so because it would not give any additional
// information about the distribution of probabilities inside the loop.
// The exception is loop 'enter' and 'exit' edges, which are handled in a
// special way in calcEstimatedHeuristics.
//
// In addition, \p BlockWorkList is populated with basic blocks if at least one
// successor has an updated estimated weight.
void BranchProbabilityInfo::propagateEstimatedBlockWeight(
    const LoopBlock &LoopBB, DominatorTree *DT, PostDominatorTree *PDT,
    uint32_t BBWeight, SmallVectorImpl<BasicBlock *> &BlockWorkList,
    SmallVectorImpl<LoopBlock> &LoopWorkList) {
  const BasicBlock *BB = LoopBB.getBlock();
  const auto *DTStartNode = DT->getNode(BB);
  const auto *PDTStartNode = PDT->getNode(BB);

  // TODO: Consider propagating weight down the domination line as well.
  for (const auto *DTNode = DTStartNode; DTNode != nullptr;
       DTNode = DTNode->getIDom()) {
    auto *DomBB = DTNode->getBlock();
    // Consider blocks which lie on one 'line'.
    if (!PDT->dominates(PDTStartNode, PDT->getNode(DomBB)))
      // If BB doesn't post dominate DomBB it will not post dominate dominators
      // of DomBB as well.
      break;

    LoopBlock DomLoopBB = getLoopBlock(DomBB);
    const LoopEdge Edge{DomLoopBB, LoopBB};
    // Don't propagate weight to blocks belonging to different loops.
    if (!isLoopEnteringExitingEdge(Edge)) {
      if (!updateEstimatedBlockWeight(DomLoopBB, BBWeight, BlockWorkList,
                                      LoopWorkList))
        // If DomBB has a weight set then all its predecessors are already
        // processed (since we propagate weight up to the top of IR each time).
        break;
    } else if (isLoopExitingEdge(Edge)) {
      LoopWorkList.push_back(DomLoopBB);
    }
  }
}

Optional<uint32_t> BranchProbabilityInfo::getInitialEstimatedBlockWeight(
    const BasicBlock *BB) {
  // Returns true if \p BB has a call marked with the "NoReturn" attribute.
  auto hasNoReturn = [&](const BasicBlock *BB) {
    for (const auto &I : reverse(*BB))
      if (const CallInst *CI = dyn_cast<CallInst>(&I))
        if (CI->hasFnAttr(Attribute::NoReturn))
          return true;

    return false;
  };

  // Important note regarding the order of checks. They are ordered by weight
  // from lowest to highest. Doing so avoids "unstable" results when several
  // heuristics could be applied simultaneously.
  if (isa<UnreachableInst>(BB->getTerminator()) ||
      // If this block is terminated by a call to
      // @llvm.experimental.deoptimize then treat it like an unreachable
      // since it is expected to practically never execute.
      // TODO: Should we actually treat it as a never-returning call?
      BB->getTerminatingDeoptimizeCall())
    return hasNoReturn(BB)
               ? static_cast<uint32_t>(BlockExecWeight::NORETURN)
               : static_cast<uint32_t>(BlockExecWeight::UNREACHABLE);

  // Check if the block is the 'unwind' handler of some invoke instruction.
  for (const auto *Pred : predecessors(BB))
    if (Pred)
      if (const auto *II = dyn_cast<InvokeInst>(Pred->getTerminator()))
        if (II->getUnwindDest() == BB)
          return static_cast<uint32_t>(BlockExecWeight::UNWIND);

  // Check if the block contains a 'cold' call.
  for (const auto &I : *BB)
    if (const CallInst *CI = dyn_cast<CallInst>(&I))
      if (CI->hasFnAttr(Attribute::Cold))
        return static_cast<uint32_t>(BlockExecWeight::COLD);

  return None;
}

// Does an RPO traversal over all blocks in \p F and assigns weights to
// 'unreachable', 'noreturn', 'cold' and 'unwind' blocks. In addition it does
// its best to propagate the weights up/down the IR.
void BranchProbabilityInfo::computeEestimateBlockWeight(
    const Function &F, DominatorTree *DT, PostDominatorTree *PDT) {
  SmallVector<BasicBlock *, 8> BlockWorkList;
  SmallVector<LoopBlock, 8> LoopWorkList;

  // By doing RPO we make sure that all predecessors already have weights
  // calculated before visiting their successors.
  ReversePostOrderTraversal<const Function *> RPOT(&F);
  for (const auto *BB : RPOT)
    if (auto BBWeight = getInitialEstimatedBlockWeight(BB))
      // If we were able to find an estimated weight for the block, set it to
      // this block and propagate up the IR.
      propagateEstimatedBlockWeight(getLoopBlock(BB), DT, PDT, BBWeight.value(),
                                    BlockWorkList, LoopWorkList);

  // BlockWorkList/LoopWorkList contains blocks/loops with at least one
  // successor/exit having estimated weight. Try to propagate weight to such
  // blocks/loops from successors/exits.
  // Process loops and blocks. Order is not important.
  do {
    while (!LoopWorkList.empty()) {
      const LoopBlock LoopBB = LoopWorkList.pop_back_val();

      if (EstimatedLoopWeight.count(LoopBB.getLoopData()))
        continue;

      SmallVector<BasicBlock *, 4> Exits;
      getLoopExitBlocks(LoopBB, Exits);
      auto LoopWeight = getMaxEstimatedEdgeWeight(
          LoopBB, make_range(Exits.begin(), Exits.end()));

      if (LoopWeight) {
        // If we never exit the loop then we can enter it once at maximum.
        if (LoopWeight <= static_cast<uint32_t>(BlockExecWeight::UNREACHABLE))
          LoopWeight = static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO);

        EstimatedLoopWeight.insert({LoopBB.getLoopData(), *LoopWeight});
        // Add all blocks entering the loop into the working list.
        getLoopEnterBlocks(LoopBB, BlockWorkList);
      }
    }

    while (!BlockWorkList.empty()) {
      // We can reach here only if BlockWorkList is not empty.
      const BasicBlock *BB = BlockWorkList.pop_back_val();
      if (EstimatedBlockWeight.count(BB))
        continue;

      // We take the maximum over all weights of successors. In other words we
      // take the weight of the "hot" path. In theory we can probably find a
      // better function which gives higher accuracy results (compared to
      // "maximum") but I can't think of any right now. And I doubt it will
      // make any difference in practice.
      const LoopBlock LoopBB = getLoopBlock(BB);
      auto MaxWeight = getMaxEstimatedEdgeWeight(LoopBB, successors(BB));

      if (MaxWeight)
        propagateEstimatedBlockWeight(LoopBB, DT, PDT, *MaxWeight,
                                      BlockWorkList, LoopWorkList);
    }
  } while (!BlockWorkList.empty() || !LoopWorkList.empty());
}

// Calculate edge probabilities based on the block's estimated weight.
// Note that the gathered weights were not scaled for loops. Thus edges entering
// and exiting loops require special processing.
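// Loop-exiting edges have their weight divided by an assumed trip count of
// LBH_TAKEN_WEIGHT / LBH_NONTAKEN_WEIGHT (the local variable TC below).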
bool BranchProbabilityInfo::calcEstimatedHeuristics(const BasicBlock *BB) {
  assert(BB->getTerminator()->getNumSuccessors() > 1 &&
         "expected more than one successor!");

  const LoopBlock LoopBB = getLoopBlock(BB);

  SmallPtrSet<const BasicBlock *, 8> UnlikelyBlocks;
  uint32_t TC = LBH_TAKEN_WEIGHT / LBH_NONTAKEN_WEIGHT;
  if (LoopBB.getLoop())
    computeUnlikelySuccessors(BB, LoopBB.getLoop(), UnlikelyBlocks);

  // Changed to 'true' if at least one successor has estimated weight.
  bool FoundEstimatedWeight = false;
  SmallVector<uint32_t, 4> SuccWeights;
  uint64_t TotalWeight = 0;
  // Go over all successors of BB and put their weights into SuccWeights.
  for (const BasicBlock *SuccBB : successors(BB)) {
    Optional<uint32_t> Weight;
    const LoopBlock SuccLoopBB = getLoopBlock(SuccBB);
    const LoopEdge Edge{LoopBB, SuccLoopBB};

    Weight = getEstimatedEdgeWeight(Edge);

    if (isLoopExitingEdge(Edge) &&
        // Avoid adjustment of ZERO weight since it should remain unchanged.
        Weight != static_cast<uint32_t>(BlockExecWeight::ZERO)) {
      // Scale down loop exiting weight by trip count.
      Weight = std::max(
          static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO),
          Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT)) /
              TC);
    }
    bool IsUnlikelyEdge = LoopBB.getLoop() && UnlikelyBlocks.contains(SuccBB);
    if (IsUnlikelyEdge &&
        // Avoid adjustment of ZERO weight since it should remain unchanged.
        Weight != static_cast<uint32_t>(BlockExecWeight::ZERO)) {
      // 'Unlikely' blocks have half the weight.
      Weight = std::max(
          static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO),
          Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT)) / 2);
    }

    if (Weight)
      FoundEstimatedWeight = true;

    auto WeightVal =
        Weight.value_or(static_cast<uint32_t>(BlockExecWeight::DEFAULT));
    TotalWeight += WeightVal;
    SuccWeights.push_back(WeightVal);
  }

  // If none of the blocks have an estimated weight, bail out.
  // If TotalWeight is 0 then the weight of each successor is 0 as well and they
  // are equally likely. Bail out early to not deal with division by zero.
  if (!FoundEstimatedWeight || TotalWeight == 0)
    return false;

  assert(SuccWeights.size() == succ_size(BB) && "Missed successor?");
  const unsigned SuccCount = SuccWeights.size();

  // If the sum of weights does not fit in 32 bits, scale every weight down
  // accordingly.
  if (TotalWeight > UINT32_MAX) {
    uint64_t ScalingFactor = TotalWeight / UINT32_MAX + 1;
    TotalWeight = 0;
    for (unsigned Idx = 0; Idx < SuccCount; ++Idx) {
      SuccWeights[Idx] /= ScalingFactor;
      if (SuccWeights[Idx] == static_cast<uint32_t>(BlockExecWeight::ZERO))
        SuccWeights[Idx] =
            static_cast<uint32_t>(BlockExecWeight::LOWEST_NON_ZERO);
      TotalWeight += SuccWeights[Idx];
    }
    assert(TotalWeight <= UINT32_MAX && "Total weight overflows");
  }

  // Finally set probabilities to edges according to estimated block weights.
  SmallVector<BranchProbability, 4> EdgeProbabilities(
      SuccCount, BranchProbability::getUnknown());

  for (unsigned Idx = 0; Idx < SuccCount; ++Idx) {
    EdgeProbabilities[Idx] =
        BranchProbability(SuccWeights[Idx], (uint32_t)TotalWeight);
  }
  setEdgeProbability(BB, EdgeProbabilities);
  return true;
}

bool BranchProbabilityInfo::calcZeroHeuristics(const BasicBlock *BB,
                                               const TargetLibraryInfo *TLI) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
  if (!CI)
    return false;

  auto GetConstantInt = [](Value *V) {
    if (auto *I = dyn_cast<BitCastInst>(V))
      return dyn_cast<ConstantInt>(I->getOperand(0));
    return dyn_cast<ConstantInt>(V);
  };

  Value *RHS = CI->getOperand(1);
  ConstantInt *CV = GetConstantInt(RHS);
  if (!CV)
    return false;

  // If the LHS is the result of AND'ing a value with a single bit bitmask,
  // we don't have information about probabilities.
  if (Instruction *LHS = dyn_cast<Instruction>(CI->getOperand(0)))
    if (LHS->getOpcode() == Instruction::And)
      if (ConstantInt *AndRHS = GetConstantInt(LHS->getOperand(1)))
        if (AndRHS->getValue().isPowerOf2())
          return false;

  // Check if the LHS is the return value of a library function
  LibFunc Func = NumLibFuncs;
  if (TLI)
    if (CallInst *Call = dyn_cast<CallInst>(CI->getOperand(0)))
      if (Function *CalledFn = Call->getCalledFunction())
        TLI->getLibFunc(*CalledFn, Func);

  ProbabilityTable::const_iterator Search;
  if (Func == LibFunc_strcasecmp ||
      Func == LibFunc_strcmp ||
      Func == LibFunc_strncasecmp ||
      Func == LibFunc_strncmp ||
      Func == LibFunc_memcmp ||
      Func == LibFunc_bcmp) {
    Search = ICmpWithLibCallTable.find(CI->getPredicate());
    if (Search == ICmpWithLibCallTable.end())
      return false;
  } else if (CV->isZero()) {
    Search = ICmpWithZeroTable.find(CI->getPredicate());
    if (Search == ICmpWithZeroTable.end())
      return false;
  } else if (CV->isOne()) {
    Search = ICmpWithOneTable.find(CI->getPredicate());
    if (Search == ICmpWithOneTable.end())
      return false;
  } else if (CV->isMinusOne()) {
    Search = ICmpWithMinusOneTable.find(CI->getPredicate());
    if (Search == ICmpWithMinusOneTable.end())
      return false;
  } else {
    return false;
  }

  setEdgeProbability(BB, Search->second);
  return true;
}

bool BranchProbabilityInfo::calcFloatingPointHeuristics(const BasicBlock *BB) {
  const BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator());
  if (!BI || !BI->isConditional())
    return false;

  Value *Cond = BI->getCondition();
  FCmpInst *FCmp = dyn_cast<FCmpInst>(Cond);
  if (!FCmp)
    return false;

  ProbabilityList ProbList;
  if (FCmp->isEquality()) {
    ProbList = !FCmp->isTrueWhenEqual() ?
        // f1 == f2 -> Unlikely
        ProbabilityList({FPTakenProb, FPUntakenProb}) :
        // f1 != f2 -> Likely
        ProbabilityList({FPUntakenProb, FPTakenProb});
  } else {
    auto Search = FCmpTable.find(FCmp->getPredicate());
    if (Search == FCmpTable.end())
      return false;
    ProbList = Search->second;
  }

  setEdgeProbability(BB, ProbList);
  return true;
}

void BranchProbabilityInfo::releaseMemory() {
  Probs.clear();
  Handles.clear();
}

bool BranchProbabilityInfo::invalidate(Function &, const PreservedAnalyses &PA,
                                       FunctionAnalysisManager::Invalidator &) {
  // Check whether the analysis, all analyses on functions, or the function's
  // CFG have been preserved.
  auto PAC = PA.getChecker<BranchProbabilityAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>() ||
           PAC.preservedSet<CFGAnalyses>());
}

void BranchProbabilityInfo::print(raw_ostream &OS) const {
  OS << "---- Branch Probabilities ----\n";
  // We print the probabilities from the last function the analysis ran over,
  // or the function it is currently running over.
  assert(LastF && "Cannot print prior to running over a function");
  for (const auto &BI : *LastF) {
    for (const BasicBlock *Succ : successors(&BI))
      printEdgeProbability(OS << " ", &BI, Succ);
  }
}

bool BranchProbabilityInfo::
isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const {
  // Hot probability is at least 4/5 = 80%
  // FIXME: Compare against a static "hot" BranchProbability.
  return getEdgeProbability(Src, Dst) > BranchProbability(4, 5);
}

/// Get the raw edge probability for the edge. If we can't find it, return a
/// default probability 1/N where N is the number of successors. Here an edge
/// is specified using the predecessor block and an index into its successors.
BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          unsigned IndexInSuccessors) const {
  auto I = Probs.find(std::make_pair(Src, IndexInSuccessors));
  assert((Probs.end() == Probs.find(std::make_pair(Src, 0))) ==
             (Probs.end() == I) &&
         "Probability for I-th successor must always be defined along with the "
         "probability for the first successor");

  if (I != Probs.end())
    return I->second;

  return {1, static_cast<uint32_t>(succ_size(Src))};
}

BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          const_succ_iterator Dst) const {
  return getEdgeProbability(Src, Dst.getSuccessorIndex());
}

/// Get the raw edge probability calculated for the block pair. This returns the
/// sum of all raw edge probabilities from Src to Dst.
BranchProbability
BranchProbabilityInfo::getEdgeProbability(const BasicBlock *Src,
                                          const BasicBlock *Dst) const {
  if (!Probs.count(std::make_pair(Src, 0)))
    return BranchProbability(llvm::count(successors(Src), Dst), succ_size(Src));

  auto Prob = BranchProbability::getZero();
  for (const_succ_iterator I = succ_begin(Src), E = succ_end(Src); I != E; ++I)
    if (*I == Dst)
      Prob += Probs.find(std::make_pair(Src, I.getSuccessorIndex()))->second;

  return Prob;
}

/// Set the edge probability for all edges at once.
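/// \p Probs must have one entry per successor of \p Src, in successor order,
/// and the entries are expected to sum (approximately) to one.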
void BranchProbabilityInfo::setEdgeProbability(
    const BasicBlock *Src, const SmallVectorImpl<BranchProbability> &Probs) {
  assert(Src->getTerminator()->getNumSuccessors() == Probs.size());
  eraseBlock(Src); // Erase stale data if any.
  if (Probs.size() == 0)
    return; // Nothing to set.

  Handles.insert(BasicBlockCallbackVH(Src, this));
  uint64_t TotalNumerator = 0;
  for (unsigned SuccIdx = 0; SuccIdx < Probs.size(); ++SuccIdx) {
    this->Probs[std::make_pair(Src, SuccIdx)] = Probs[SuccIdx];
    LLVM_DEBUG(dbgs() << "set edge " << Src->getName() << " -> " << SuccIdx
                      << " successor probability to " << Probs[SuccIdx]
                      << "\n");
    TotalNumerator += Probs[SuccIdx].getNumerator();
  }

  // Because of rounding errors the total probability cannot be checked to be
  // 1.0 exactly, i.e. that TotalNumerator == BranchProbability::getDenominator.
  // Instead, every single probability in Probs must be as accurate as possible.
  // This results in an error of 1/denominator at most, thus the total absolute
  // error should be within Probs.size / BranchProbability::getDenominator.
  assert(TotalNumerator <= BranchProbability::getDenominator() + Probs.size());
  assert(TotalNumerator >= BranchProbability::getDenominator() - Probs.size());
  (void)TotalNumerator;
}

void BranchProbabilityInfo::copyEdgeProbabilities(BasicBlock *Src,
                                                  BasicBlock *Dst) {
  eraseBlock(Dst); // Erase stale data if any.
  unsigned NumSuccessors = Src->getTerminator()->getNumSuccessors();
  assert(NumSuccessors == Dst->getTerminator()->getNumSuccessors());
  if (NumSuccessors == 0)
    return; // Nothing to set.
  if (this->Probs.find(std::make_pair(Src, 0)) == this->Probs.end())
    return; // No probability is set for edges from Src. Keep the same for Dst.

  Handles.insert(BasicBlockCallbackVH(Dst, this));
  for (unsigned SuccIdx = 0; SuccIdx < NumSuccessors; ++SuccIdx) {
    auto Prob = this->Probs[std::make_pair(Src, SuccIdx)];
    this->Probs[std::make_pair(Dst, SuccIdx)] = Prob;
    LLVM_DEBUG(dbgs() << "set edge " << Dst->getName() << " -> " << SuccIdx
                      << " successor probability to " << Prob << "\n");
  }
}

raw_ostream &
BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS,
                                            const BasicBlock *Src,
                                            const BasicBlock *Dst) const {
  const BranchProbability Prob = getEdgeProbability(Src, Dst);
  OS << "edge " << Src->getName() << " -> " << Dst->getName()
     << " probability is " << Prob
     << (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");

  return OS;
}

void BranchProbabilityInfo::eraseBlock(const BasicBlock *BB) {
  LLVM_DEBUG(dbgs() << "eraseBlock " << BB->getName() << "\n");

  // Note that we cannot use successors of BB because the terminator of BB may
  // have changed when eraseBlock is called as a BasicBlockCallbackVH callback.
  // Instead we remove prob data for the block by iterating successors by their
  // indices from 0 till the last which exists. There cannot be prob data for
  // a pair (BB, N) if there is no data for (BB, N-1) because the data is always
  // set for all successors from 0 to M at once by the method
  // setEdgeProbability().
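  // Drop the callback handle registered for this block (if any), then remove
  // its probability entries one successor index at a time.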
  Handles.erase(BasicBlockCallbackVH(BB, this));
  for (unsigned I = 0;; ++I) {
    auto MapI = Probs.find(std::make_pair(BB, I));
    if (MapI == Probs.end()) {
      assert(Probs.count(std::make_pair(BB, I + 1)) == 0 &&
             "Must be no more successors");
      return;
    }
    Probs.erase(MapI);
  }
}

void BranchProbabilityInfo::calculate(const Function &F, const LoopInfo &LoopI,
                                      const TargetLibraryInfo *TLI,
                                      DominatorTree *DT,
                                      PostDominatorTree *PDT) {
  LLVM_DEBUG(dbgs() << "---- Branch Probability Info : " << F.getName()
                    << " ----\n\n");
  LastF = &F; // Store the last function we ran on for printing.
  LI = &LoopI;

  SccI = std::make_unique<SccInfo>(F);

  assert(EstimatedBlockWeight.empty());
  assert(EstimatedLoopWeight.empty());

  std::unique_ptr<DominatorTree> DTPtr;
  std::unique_ptr<PostDominatorTree> PDTPtr;

  if (!DT) {
    DTPtr = std::make_unique<DominatorTree>(const_cast<Function &>(F));
    DT = DTPtr.get();
  }

  if (!PDT) {
    PDTPtr = std::make_unique<PostDominatorTree>(const_cast<Function &>(F));
    PDT = PDTPtr.get();
  }

  computeEestimateBlockWeight(F, DT, PDT);

  // Walk the basic blocks in post-order so that we can build up state about
  // the successors of a block iteratively.
  for (const auto *BB : post_order(&F.getEntryBlock())) {
    LLVM_DEBUG(dbgs() << "Computing probabilities for " << BB->getName()
                      << "\n");
    // If there are fewer than two successors, there is no sense in setting
    // a probability.
    if (BB->getTerminator()->getNumSuccessors() < 2)
      continue;
    if (calcMetadataWeights(BB))
      continue;
    if (calcEstimatedHeuristics(BB))
      continue;
    if (calcPointerHeuristics(BB))
      continue;
    if (calcZeroHeuristics(BB, TLI))
      continue;
    if (calcFloatingPointHeuristics(BB))
      continue;
  }

  EstimatedLoopWeight.clear();
  EstimatedBlockWeight.clear();
  SccI.reset();

  if (PrintBranchProb &&
      (PrintBranchProbFuncName.empty() ||
       F.getName().equals(PrintBranchProbFuncName))) {
    print(dbgs());
  }
}

void BranchProbabilityInfoWrapperPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  // We require DT so it's available when LI is available. The LI updating code
  // asserts that DT is also present so if we don't make sure that we have DT
  // here, that assert will trigger.
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<LoopInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addRequired<PostDominatorTreeWrapperPass>();
  AU.setPreservesAll();
}

bool BranchProbabilityInfoWrapperPass::runOnFunction(Function &F) {
  const LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  const TargetLibraryInfo &TLI =
      getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  PostDominatorTree &PDT =
      getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
  BPI.calculate(F, LI, &TLI, &DT, &PDT);
  return false;
}

void BranchProbabilityInfoWrapperPass::releaseMemory() { BPI.releaseMemory(); }

void BranchProbabilityInfoWrapperPass::print(raw_ostream &OS,
                                             const Module *) const {
  BPI.print(OS);
}

AnalysisKey BranchProbabilityAnalysis::Key;
BranchProbabilityInfo
BranchProbabilityAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  BranchProbabilityInfo BPI;
  BPI.calculate(F, AM.getResult<LoopAnalysis>(F),
                &AM.getResult<TargetLibraryAnalysis>(F),
                &AM.getResult<DominatorTreeAnalysis>(F),
                &AM.getResult<PostDominatorTreeAnalysis>(F));
  return BPI;
}

PreservedAnalyses
BranchProbabilityPrinterPass::run(Function &F, FunctionAnalysisManager &AM) {
  OS << "Printing analysis results of BPI for function "
     << "'" << F.getName() << "':"
     << "\n";
  AM.getResult<BranchProbabilityAnalysis>(F).print(OS);
  return PreservedAnalyses::all();
}