//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "machine-combiner"

STATISTIC(NumInstCombined, "Number of machine instructions combined");

static cl::opt<unsigned>
    inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
                  cl::desc("Incremental depth computation will be used for "
                           "basic blocks with more instructions than this "
                           "threshold."),
                  cl::init(500));

static cl::opt<bool> dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
                                cl::desc("Dump all substituted instrs"),
                                cl::init(false));

#ifdef EXPENSIVE_CHECKS
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(true));
#else
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(false));
#endif
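
// The options above are plain cl::opt flags, so they can be set directly on
// an llc command line (an illustrative invocation, not defined in this file):
//   llc -machine-combiner-inc-threshold=100 -machine-combiner-dump-subst-intrs foo.ll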

namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineLoopInfo *MLI; // Current MachineLoopInfo
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          SmallVectorImpl<MachineInstr *> &DelInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineCombinerPattern Pattern, bool SlackIsAccurate);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);

  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<MachineCombinerPattern, 16> &Patterns);
};
}

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}

/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map the depth is looked up in
  // InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          LatencyOp = TSchedModel.computeOperandLatency(
              DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
              InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}

/// Computes instruction latency as the maximum latency of the defined
/// operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute a more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && TargetRegisterInfo::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    if (RI == MRI->reg_end())
      continue;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}

/// The combiner's goal may differ based on which pattern it is attempting
/// to optimize.
enum class CombinerObjective {
  MustReduceDepth, // The data dependency chain must be improved.
  Default          // The critical path must not be lengthened.
};

static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
  // TODO: If C++ ever gets a real enum class, make this part of the
  // MachineCombinerPattern class.
  switch (P) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
    return CombinerObjective::MustReduceDepth;
  default:
    return CombinerObjective::Default;
  }
}

/// Estimate the latency of the new and original instruction sequence by
/// summing up the latencies of the inserted and deleted instructions. This
/// assumes that the inserted and deleted instructions are dependent
/// instruction chains, which might not hold in all cases.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the \p InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}

/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the
/// new sequence to replace the old sequence is that it does not lengthen the
/// critical path. The definition of "improve" may be restricted by specifying
/// that the new path improves the data dependency chain (MustReduceDepth).
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    MachineCombinerPattern Pattern,
    bool SlackIsAccurate) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // Get depth and latency of NewRoot and Root.
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  LLVM_DEBUG(dbgs() << " Dependence data for " << *Root << "\tNewRootDepth: "
                    << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For a transform such as reassociation, the cost equation is
  // conservatively calculated so that we must improve the depth (data
  // dependency cycles) in the critical path to proceed with the transform.
  // Being conservative also protects against inaccuracies in the underlying
  // machine trace metrics and CPU models.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    LLVM_DEBUG(dbgs() << "\tIt MustReduceDepth ");
    LLVM_DEBUG(NewRootDepth < RootDepth
                   ? dbgs() << "\t and it does it\n"
                   : dbgs() << "\t but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // A more flexible cost calculation for the critical path includes the slack
  // of the original code sequence. This may allow the transform to proceed
  // even if the instruction depths (data dependency cycles) become worse.

  // Account for the latency of the inserted and deleted instructions.
  unsigned NewRootLatency, RootLatency;
  std::tie(NewRootLatency, RootLatency) =
      getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);

  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
  LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency
                    << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
                    << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
                    << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
                    << "\n\tRootDepth + RootLatency + RootSlack = "
                    << OldCycleCount;);
  LLVM_DEBUG(NewCycleCount <= OldCycleCount
                 ? dbgs() << "\n\t It IMPROVES PathLen because"
                 : dbgs() << "\n\t It DOES NOT improve PathLen because");
  LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
                    << ", OldCycleCount = " << OldCycleCount << "\n");

  return NewCycleCount <= OldCycleCount;
}
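
// Worked example for the check above, with made-up cycle counts: suppose
// NewRootDepth = 9 and NewRootLatency = 2, giving NewCycleCount = 11; with
// RootDepth = 8, RootLatency = 3, RootSlack = 1 and SlackIsAccurate set,
// OldCycleCount = 8 + 3 + 1 = 12. The substitution is accepted (11 <= 12)
// even though the new sequence is deeper (9 > 8), because the latency win
// plus the slack compensates.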

/// Helper routine to convert instructions into their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}

/// True when the new instructions do not increase resource length
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute current resource length.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;

  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "
                    << ResLenBeforeCombine
                    << " and after: " << ResLenAfterCombine << "\n";);
  LLVM_DEBUG(
      ResLenAfterCombine <= ResLenBeforeCombine
          ? dbgs() << "\t\t As result it IMPROVES/PRESERVES Resource Length\n"
          : dbgs() << "\t\t As result it DOES NOT improve/preserve Resource "
                      "Length\n");

  return ResLenAfterCombine <= ResLenBeforeCombine;
}

/// \returns true when the new instruction sequence should be generated
/// regardless of whether it lengthens the critical path.
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize) {
  if (OptSize && (NewSize < OldSize))
    return true;
  if (!TSchedModel.hasInstrSchedModelOrItineraries())
    return true;
  return false;
}

/// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction
/// depths if requested.
///
/// \param MBB basic block to insert instructions in
/// \param MI current machine instruction
/// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
/// \param MinInstr is a pointer to the machine trace information
/// \param RegUnits set of live registers, needed to compute instruction depths
/// \param IncrementalUpdate if true, compute instruction depths incrementally,
///                          otherwise invalidate the trace
static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVector<MachineInstr *, 16> InsInstrs,
                                     SmallVector<MachineInstr *, 16> DelInstrs,
                                     MachineTraceMetrics::Ensemble *MinInstr,
                                     SparseSet<LiveRegUnit> &RegUnits,
                                     bool IncrementalUpdate) {
  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParentAndMarkDBGValuesForRemoval();
    // Erase all LiveRegs defined by the removed instruction.
    for (auto I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        I++;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    MinInstr->invalidate(MBB);

  NumInstCombined++;
}

// Check that the difference between original and new latency is decreasing for
// later patterns. This helps to discover sub-optimal pattern orderings.
void MachineCombiner::verifyPatternOrder(
    MachineBasicBlock *MBB, MachineInstr &Root,
    SmallVector<MachineCombinerPattern, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // Found pattern, but did not generate alternative sequence.
    // This can happen e.g. when an immediate could not be materialized
    // in a single instruction.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}

/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size always combine when the
/// new sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<MachineCombinerPattern, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1 MUL_op2  Other
    //      \    /               \      |    /
    //      ADD/SUB      =>        MADD/MSUB
    //      (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaces MUL + ADD/SUB by MADD. While this is
    // usually beneficial for code size it unfortunately can hurt performance
    // when the ADD is on the critical path, but the MUL is not. With the
    // substitution the MUL becomes part of the critical path (in form of the
    // MADD) and can lengthen it on architectures where the MADD latency is
    // longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic. If
    // machine-combiner-verify-pattern-order is enabled, all patterns are
    // checked to ensure later patterns do not provide better latency savings.

    if (!TII->getMachineCombinerPatterns(MI, Patterns))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);

    for (auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      unsigned NewInstCount = InsInstrs.size();
      unsigned OldInstCount = DelInstrs.size();
      // Found pattern, but did not generate alternative sequence.
      // This can happen e.g. when an immediate could not be materialized
      // in a single instruction.
      if (!NewInstCount)
        continue;

      LLVM_DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
      });

      bool SubstituteAlways = false;
      if (ML && TII->isThroughputPattern(P))
        SubstituteAlways = true;

      if (IncrementalUpdate) {
        // Update depths since the last incremental update.
        MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }

      // Substitute when we optimize for code size and the new sequence has
      // fewer instructions OR
      // the new sequence neither lengthens the critical path nor increases
      // resource pressure.
      if (SubstituteAlways || doSubstitute(NewInstCount, OldInstCount)) {
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                 RegUnits, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // For big basic blocks, we only compute the full trace the first time
        // we hit this. We do not invalidate the trace, but instead update the
        // instruction depths incrementally.
        // NOTE: Only the instruction depths up to MI are accurate. All other
        // trace information is not updated.
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }

          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, IncrementalUpdate);

          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Cleanup instructions of the alternative code sequence. There is no
        // use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->DeleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->invalidate(MBB);
  return Changed;
}

bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(STI);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  Traces = &getAnalysis<MachineTraceMetrics>();
  MinInstr = nullptr;
  OptSize = MF.getFunction().hasOptSize();

  LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    LLVM_DEBUG(
        dbgs()
        << "  Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}
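
// Note: targets opt in to this pass by returning true from
// TargetInstrInfo::useMachineCombiner() and by scheduling it in their
// TargetPassConfig, typically with a call of the form (illustrative):
//   addPass(&MachineCombinerID);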