//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/LazyMachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "machine-combiner"

STATISTIC(NumInstCombined, "Number of machine instructions combined");

static cl::opt<unsigned>
inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
              cl::desc("Incremental depth computation will be used for basic "
                       "blocks with more instructions than this threshold."),
              cl::init(500));

static cl::opt<bool> dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
                                cl::desc("Dump all substituted instrs"),
                                cl::init(false));

#ifdef EXPENSIVE_CHECKS
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(true));
#else
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(false));
#endif

namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineLoopInfo *MLI; // Current MachineLoopInfo
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;
  MachineBlockFrequencyInfo *MBFI;
  ProfileSummaryInfo *PSI;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
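  /// Set from the IR function's optsize attribute in runOnMachineFunction;
  /// combineInstructions additionally consults block-level profile data.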
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize, bool OptForSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          SmallVectorImpl<MachineInstr *> &DelInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineCombinerPattern Pattern, bool SlackIsAccurate);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);

  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<MachineCombinerPattern, 16> &Patterns);
};
}

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}

/// Computes the depth of instructions in the vector \p InsInstrs.
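/// The depth of each instruction is the maximum over its virtual register
/// uses of (depth of the defining instruction + operand latency).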
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map from virtual register to the index
/// of the defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of the last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map the depth is looked up in
  // InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}

/// Computes instruction latency as the maximum latency of its defined
/// operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
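    // Non-register and physical register operands are skipped; only virtual
    // register dependencies contribute to the latency estimate below.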
    if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    if (RI == MRI->reg_end())
      continue;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}

/// The combiner's goal may differ based on which pattern it is attempting
/// to optimize.
enum class CombinerObjective {
  MustReduceDepth, // The data dependency chain must be improved.
  Default          // The critical path must not be lengthened.
};

static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
  // TODO: If C++ ever gets a real enum class, make this part of the
  // MachineCombinerPattern class.
  switch (P) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
  case MachineCombinerPattern::REASSOC_XY_AMM_BMM:
  case MachineCombinerPattern::REASSOC_XMM_AMM_BMM:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}

/// Estimate the latency of the new and original instruction sequences by
/// summing up the latencies of the inserted and deleted instructions. This
/// assumes that the inserted and deleted instructions are dependent
/// instruction chains, which might not hold in all cases.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the \p InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}

/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the new
/// sequence to replace the old sequence is that it cannot lengthen the critical
/// path. The definition of "improve" may be restricted by specifying that the
/// new path improves the data dependency chain (MustReduceDepth).
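///
/// For the Default objective, the comparison made below is, in cycles:
///   NewRootDepth + NewRootLatency <= RootDepth + RootLatency + RootSlack
/// For example (illustrative numbers, not taken from any real CPU model):
/// with NewRootDepth = 4, NewRootLatency = 3, RootDepth = 5, RootLatency = 3
/// and RootSlack = 0, the new sequence costs 7 cycles against 8 for the old
/// one, so the transform is accepted.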
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    MachineCombinerPattern Pattern,
    bool SlackIsAccurate) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // Get the depth and latency of NewRoot and Root.
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  LLVM_DEBUG(dbgs() << " Dependence data for " << *Root << "\tNewRootDepth: "
                    << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For a transform such as reassociation, the cost equation is
  // conservatively calculated so that we must improve the depth (data
  // dependency cycles) in the critical path to proceed with the transform.
  // Being conservative also protects against inaccuracies in the underlying
  // machine trace metrics and CPU models.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    LLVM_DEBUG(dbgs() << "\tIt MustReduceDepth ");
    LLVM_DEBUG(NewRootDepth < RootDepth
                   ? dbgs() << "\t and it does it\n"
                   : dbgs() << "\t but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // A more flexible cost calculation for the critical path includes the slack
  // of the original code sequence. This may allow the transform to proceed
  // even if the instruction depths (data dependency cycles) become worse.

  // Account for the latency of the inserted and deleted instructions.
  unsigned NewRootLatency, RootLatency;
  std::tie(NewRootLatency, RootLatency) =
      getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);

  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
  LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency
                    << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
                    << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
                    << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
                    << "\n\tRootDepth + RootLatency + RootSlack = "
                    << OldCycleCount;);
  LLVM_DEBUG(NewCycleCount <= OldCycleCount
dbgs() << "\n\t It IMPROVES PathLen because" 359 : dbgs() << "\n\t It DOES NOT improve PathLen because"); 360 LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount 361 << ", OldCycleCount = " << OldCycleCount << "\n"); 362 363 return NewCycleCount <= OldCycleCount; 364 } 365 366 /// helper routine to convert instructions into SC 367 void MachineCombiner::instr2instrSC( 368 SmallVectorImpl<MachineInstr *> &Instrs, 369 SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) { 370 for (auto *InstrPtr : Instrs) { 371 unsigned Opc = InstrPtr->getOpcode(); 372 unsigned Idx = TII->get(Opc).getSchedClass(); 373 const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx); 374 InstrsSC.push_back(SC); 375 } 376 } 377 378 /// True when the new instructions do not increase resource length 379 bool MachineCombiner::preservesResourceLen( 380 MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace, 381 SmallVectorImpl<MachineInstr *> &InsInstrs, 382 SmallVectorImpl<MachineInstr *> &DelInstrs) { 383 if (!TSchedModel.hasInstrSchedModel()) 384 return true; 385 386 // Compute current resource length 387 388 //ArrayRef<const MachineBasicBlock *> MBBarr(MBB); 389 SmallVector <const MachineBasicBlock *, 1> MBBarr; 390 MBBarr.push_back(MBB); 391 unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr); 392 393 // Deal with SC rather than Instructions. 394 SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC; 395 SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC; 396 397 instr2instrSC(InsInstrs, InsInstrsSC); 398 instr2instrSC(DelInstrs, DelInstrsSC); 399 400 ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC); 401 ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC); 402 403 // Compute new resource length. 404 unsigned ResLenAfterCombine = 405 BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr); 406 407 LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: " 408 << ResLenBeforeCombine 409 << " and after: " << ResLenAfterCombine << "\n";); 410 LLVM_DEBUG( 411 ResLenAfterCombine <= 412 ResLenBeforeCombine + TII->getExtendResourceLenLimit() 413 ? dbgs() << "\t\t As result it IMPROVES/PRESERVES Resource Length\n" 414 : dbgs() << "\t\t As result it DOES NOT improve/preserve Resource " 415 "Length\n"); 416 417 return ResLenAfterCombine <= 418 ResLenBeforeCombine + TII->getExtendResourceLenLimit(); 419 } 420 421 /// \returns true when new instruction sequence should be generated 422 /// independent if it lengthens critical path or not 423 bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize, 424 bool OptForSize) { 425 if (OptForSize && (NewSize < OldSize)) 426 return true; 427 if (!TSchedModel.hasInstrSchedModelOrItineraries()) 428 return true; 429 return false; 430 } 431 432 /// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction 433 /// depths if requested. 
///
/// \param MBB basic block to insert instructions in
/// \param MI current machine instruction
/// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
/// \param MinInstr is a pointer to the machine trace information
/// \param RegUnits set of live registers, needed to compute instruction depths
/// \param IncrementalUpdate if true, compute instruction depths incrementally,
///        otherwise invalidate the trace
static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVector<MachineInstr *, 16> InsInstrs,
                                     SmallVector<MachineInstr *, 16> DelInstrs,
                                     MachineTraceMetrics::Ensemble *MinInstr,
                                     SparseSet<LiveRegUnit> &RegUnits,
                                     bool IncrementalUpdate) {
  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
    // Erase all LiveRegs defined by the removed instruction.
    for (auto I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        I++;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    MinInstr->invalidate(MBB);

  NumInstCombined++;
}

// Check that the difference between original and new latency is decreasing for
// later patterns. This helps to discover sub-optimal pattern orderings.
void MachineCombiner::verifyPatternOrder(
    MachineBasicBlock *MBB, MachineInstr &Root,
    SmallVector<MachineCombinerPattern, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // Found a pattern, but did not generate an alternative sequence.
    // This can happen e.g. when an immediate could not be materialized
    // in a single instruction.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}

/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size, always combine when the
/// new sequence is shorter.
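///
/// Illustrative only (latencies are made up, not from any real CPU model): on
/// a target where MADD has latency 4 and ADD latency 1, rewriting
///   %t = MUL %a, %b
///   %r = ADD %t, %c
/// into
///   %r = MADD %a, %b, %c
/// removes an instruction, but if only the ADD lay on the critical path, that
/// path may now grow by the latency difference.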
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  bool OptForSize = OptSize || llvm::shouldOptimizeForSize(MBB, PSI, MBFI);

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<MachineCombinerPattern, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1 MUL_op2  Other
    //      \    /               \      |     /
    //      ADD/SUB      =>        MADD/MSUB
    //      (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaces MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size it unfortunately can hurt performance
    // when the ADD is on the critical path, but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in the form of MIs
    // is generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic. If
    // machine-combiner-verify-pattern-order is enabled, all patterns are
    // checked to ensure later patterns do not provide better latency savings.

    if (!TII->getMachineCombinerPatterns(MI, Patterns))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);

    for (auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      unsigned NewInstCount = InsInstrs.size();
      unsigned OldInstCount = DelInstrs.size();
      // Found a pattern, but did not generate an alternative sequence.
      // This can happen e.g. when an immediate could not be materialized
      // in a single instruction.
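      // (Bailing out here also keeps the non-empty-InsInstrs assertion in
      // getLatenciesForInstrSequences from firing below.)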
      if (!NewInstCount)
        continue;

      LLVM_DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
      });

      bool SubstituteAlways = false;
      if (ML && TII->isThroughputPattern(P))
        SubstituteAlways = true;

      if (IncrementalUpdate) {
        // Update depths since the last incremental update.
        MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }

      // Substitute when we optimize for code size and the new sequence has
      // fewer instructions OR
      // the new sequence neither lengthens the critical path nor increases
      // resource pressure.
      if (SubstituteAlways ||
          doSubstitute(NewInstCount, OldInstCount, OptForSize)) {
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                 RegUnits, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // For big basic blocks, we only compute the full trace the first time
        // we hit this. We do not invalidate the trace, but instead update the
        // instruction depths incrementally.
        // NOTE: Only the instruction depths up to MI are accurate. All other
        // trace information is not updated.
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above the
            // threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }

          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, IncrementalUpdate);

          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Clean up instructions of the alternative code sequence. There is no
        // use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->DeleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->invalidate(MBB);
  return Changed;
}

bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(STI);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  Traces = &getAnalysis<MachineTraceMetrics>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  MBFI = (PSI && PSI->hasProfileSummary())
659 &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI() : 660 nullptr; 661 MinInstr = nullptr; 662 OptSize = MF.getFunction().hasOptSize(); 663 664 LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n'); 665 if (!TII->useMachineCombiner()) { 666 LLVM_DEBUG( 667 dbgs() 668 << " Skipping pass: Target does not support machine combiner\n"); 669 return false; 670 } 671 672 bool Changed = false; 673 674 // Try to combine instructions. 675 for (auto &MBB : MF) 676 Changed |= combineInstructions(&MBB); 677 678 return Changed; 679 } 680