//===---- MachineCombiner.cpp - Instcombining on SSA form machine code ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The machine combiner pass uses machine trace metrics to ensure the combined
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/LazyMachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "machine-combiner"

STATISTIC(NumInstCombined, "Number of machine instructions combined");

static cl::opt<unsigned>
inc_threshold("machine-combiner-inc-threshold", cl::Hidden,
              cl::desc("Incremental depth computation will be used for basic "
                       "blocks with more instructions than this threshold."),
              cl::init(500));

static cl::opt<bool> dump_intrs("machine-combiner-dump-subst-intrs", cl::Hidden,
                                cl::desc("Dump all substituted instrs"),
                                cl::init(false));

#ifdef EXPENSIVE_CHECKS
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(true));
#else
static cl::opt<bool> VerifyPatternOrder(
    "machine-combiner-verify-pattern-order", cl::Hidden,
    cl::desc(
        "Verify that the generated patterns are ordered by increasing latency"),
    cl::init(false));
#endif
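// Usage sketch (hypothetical invocation, not part of this file): the options
// above are ordinary cl::opts, so they can be set on the llc command line,
// e.g.
//   llc -machine-combiner-inc-threshold=1000 \
//       -machine-combiner-dump-subst-intrs input.ll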
namespace {
class MachineCombiner : public MachineFunctionPass {
  const TargetSubtargetInfo *STI;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MCSchedModel SchedModel;
  MachineRegisterInfo *MRI;
  MachineLoopInfo *MLI; // Current MachineLoopInfo
  MachineTraceMetrics *Traces;
  MachineTraceMetrics::Ensemble *MinInstr;
  MachineBlockFrequencyInfo *MBFI;
  ProfileSummaryInfo *PSI;
  RegisterClassInfo RegClassInfo;

  TargetSchedModel TSchedModel;

  /// True if optimizing for code size.
  bool OptSize;

public:
  static char ID;
  MachineCombiner() : MachineFunctionPass(ID) {
    initializeMachineCombinerPass(*PassRegistry::getPassRegistry());
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool runOnMachineFunction(MachineFunction &MF) override;
  StringRef getPassName() const override { return "Machine InstCombiner"; }

private:
  bool doSubstitute(unsigned NewSize, unsigned OldSize, bool OptForSize);
  bool combineInstructions(MachineBasicBlock *);
  MachineInstr *getOperandDef(const MachineOperand &MO);
  bool isTransientMI(const MachineInstr *MI);
  unsigned getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                    MachineTraceMetrics::Trace BlockTrace);
  unsigned getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                      MachineTraceMetrics::Trace BlockTrace);
  bool
  improvesCriticalPathLen(MachineBasicBlock *MBB, MachineInstr *Root,
                          MachineTraceMetrics::Trace BlockTrace,
                          SmallVectorImpl<MachineInstr *> &InsInstrs,
                          SmallVectorImpl<MachineInstr *> &DelInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineCombinerPattern Pattern, bool SlackIsAccurate);
  bool reduceRegisterPressure(MachineInstr &Root, MachineBasicBlock *MBB,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              SmallVectorImpl<MachineInstr *> &DelInstrs,
                              MachineCombinerPattern Pattern);
  bool preservesResourceLen(MachineBasicBlock *MBB,
                            MachineTraceMetrics::Trace BlockTrace,
                            SmallVectorImpl<MachineInstr *> &InsInstrs,
                            SmallVectorImpl<MachineInstr *> &DelInstrs);
  void instr2instrSC(SmallVectorImpl<MachineInstr *> &Instrs,
                     SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC);
  std::pair<unsigned, unsigned>
  getLatenciesForInstrSequences(MachineInstr &MI,
                                SmallVectorImpl<MachineInstr *> &InsInstrs,
                                SmallVectorImpl<MachineInstr *> &DelInstrs,
                                MachineTraceMetrics::Trace BlockTrace);

  void verifyPatternOrder(MachineBasicBlock *MBB, MachineInstr &Root,
                          SmallVector<MachineCombinerPattern, 16> &Patterns);
};
} // end anonymous namespace

char MachineCombiner::ID = 0;
char &llvm::MachineCombinerID = MachineCombiner::ID;

INITIALIZE_PASS_BEGIN(MachineCombiner, DEBUG_TYPE,
                      "Machine InstCombiner", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineTraceMetrics)
INITIALIZE_PASS_END(MachineCombiner, DEBUG_TYPE, "Machine InstCombiner",
                    false, false)

void MachineCombiner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<MachineTraceMetrics>();
  AU.addPreserved<MachineTraceMetrics>();
  AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
  AU.addRequired<ProfileSummaryInfoWrapperPass>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachineInstr *MachineCombiner::getOperandDef(const MachineOperand &MO) {
  MachineInstr *DefInstr = nullptr;
  // We need a virtual register definition.
  if (MO.isReg() && Register::isVirtualRegister(MO.getReg()))
    DefInstr = MRI->getUniqueVRegDef(MO.getReg());
  // PHIs have no depth, etc.
  if (DefInstr && DefInstr->isPHI())
    DefInstr = nullptr;
  return DefInstr;
}
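// Illustrative example for isTransientMI below (the register classes are
// hypothetical): a full COPY between virtual registers of compatible
// classes, such as
//   %1:gpr64 = COPY %0:gpr64
// is expected to be coalesced away, so it is treated as free, whereas a
// copy the coalescer cannot remove (e.g. into an unrelated class, or a
// physical-to-virtual copy into an incompatible class) is assumed to
// produce a real move instruction.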
/// Return true if MI is unlikely to generate an actual target instruction.
bool MachineCombiner::isTransientMI(const MachineInstr *MI) {
  if (!MI->isCopy())
    return MI->isTransient();

  // If MI is a COPY, check if its src and dst registers can be coalesced.
  Register Dst = MI->getOperand(0).getReg();
  Register Src = MI->getOperand(1).getReg();

  if (!MI->isFullCopy()) {
    // If src RC contains super registers of dst RC, it can also be coalesced.
    if (MI->getOperand(0).getSubReg() || Src.isPhysical() || Dst.isPhysical())
      return false;

    auto SrcSub = MI->getOperand(1).getSubReg();
    auto SrcRC = MRI->getRegClass(Src);
    auto DstRC = MRI->getRegClass(Dst);
    return TRI->getMatchingSuperRegClass(SrcRC, DstRC, SrcSub) != nullptr;
  }

  if (Src.isPhysical() && Dst.isPhysical())
    return Src == Dst;

  if (Src.isVirtual() && Dst.isVirtual()) {
    auto SrcRC = MRI->getRegClass(Src);
    auto DstRC = MRI->getRegClass(Dst);
    return SrcRC->hasSuperClassEq(DstRC) || SrcRC->hasSubClassEq(DstRC);
  }

  if (Src.isVirtual())
    std::swap(Src, Dst);

  // Now Src is a physical register and Dst is a virtual register.
  auto DstRC = MRI->getRegClass(Dst);
  return DstRC->contains(Src);
}
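// Worked example (illustrative depths and latencies) for getDepth below:
// given a new sequence
//   %a = MUL %x, %y   ; %x: depth 0, def latency 4; %y: depth 2, def latency 1
//   %b = ADD %a, %z   ; %z: depth 1, def latency 1; MUL latency 3
// the depth of %a is max(0 + 4, 2 + 1) = 4 and the depth of %b (the new
// root) is max(4 + 3, 1 + 1) = 7, i.e. each instruction's depth is the
// maximum over its used operands of (operand depth + def-to-use latency).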
/// Computes depth of instructions in vector \p InsInstrs.
///
/// \param InsInstrs is a vector of machine instructions
/// \param InstrIdxForVirtReg is a dense map of virtual register to index
/// of defining machine instruction in \p InsInstrs
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Depth of last instruction in \p InsInstrs ("NewRoot")
unsigned
MachineCombiner::getDepth(SmallVectorImpl<MachineInstr *> &InsInstrs,
                          DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          MachineTraceMetrics::Trace BlockTrace) {
  SmallVector<unsigned, 16> InstrDepth;
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // For each instruction in the new sequence compute the depth based on the
  // operands. Use the trace information when possible. For new operands which
  // are tracked in the InstrIdxForVirtReg map, the depth is looked up in
  // InstrDepth.
  for (auto *InstrPtr : InsInstrs) { // for each Use
    unsigned IDepth = 0;
    for (const MachineOperand &MO : InstrPtr->operands()) {
      // Check for virtual register operand.
      if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
        continue;
      if (!MO.isUse())
        continue;
      unsigned DepthOp = 0;
      unsigned LatencyOp = 0;
      DenseMap<unsigned, unsigned>::iterator II =
          InstrIdxForVirtReg.find(MO.getReg());
      if (II != InstrIdxForVirtReg.end()) {
        // Operand is a new virtual register not in the trace.
        assert(II->second < InstrDepth.size() && "Bad Index");
        MachineInstr *DefInstr = InsInstrs[II->second];
        assert(DefInstr &&
               "There must be a definition for a new virtual register");
        DepthOp = InstrDepth[II->second];
        int DefIdx = DefInstr->findRegisterDefOperandIdx(MO.getReg());
        int UseIdx = InstrPtr->findRegisterUseOperandIdx(MO.getReg());
        LatencyOp = TSchedModel.computeOperandLatency(DefInstr, DefIdx,
                                                      InstrPtr, UseIdx);
      } else {
        MachineInstr *DefInstr = getOperandDef(MO);
        if (DefInstr) {
          DepthOp = BlockTrace.getInstrCycles(*DefInstr).Depth;
          if (!isTransientMI(DefInstr))
            LatencyOp = TSchedModel.computeOperandLatency(
                DefInstr, DefInstr->findRegisterDefOperandIdx(MO.getReg()),
                InstrPtr, InstrPtr->findRegisterUseOperandIdx(MO.getReg()));
        }
      }
      IDepth = std::max(IDepth, DepthOp + LatencyOp);
    }
    InstrDepth.push_back(IDepth);
  }
  unsigned NewRootIdx = InsInstrs.size() - 1;
  return InstrDepth[NewRootIdx];
}

/// Computes instruction latency as the max of the latencies of the defined
/// operands.
///
/// \param Root is a machine instruction that could be replaced by NewRoot.
/// It is used to compute more accurate latency information for NewRoot in
/// case there is a dependent instruction in the same trace (\p BlockTrace)
/// \param NewRoot is the instruction for which the latency is computed
/// \param BlockTrace is a trace of machine instructions
///
/// \returns Latency of \p NewRoot
unsigned MachineCombiner::getLatency(MachineInstr *Root, MachineInstr *NewRoot,
                                     MachineTraceMetrics::Trace BlockTrace) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");

  // Check each definition in NewRoot and compute the latency.
  unsigned NewRootLatency = 0;

  for (const MachineOperand &MO : NewRoot->operands()) {
    // Check for virtual register operand.
    if (!(MO.isReg() && Register::isVirtualRegister(MO.getReg())))
      continue;
    if (!MO.isDef())
      continue;
    // Get the first instruction that uses MO.
    MachineRegisterInfo::reg_iterator RI = MRI->reg_begin(MO.getReg());
    RI++;
    if (RI == MRI->reg_end())
      continue;
    MachineInstr *UseMO = RI->getParent();
    unsigned LatencyOp = 0;
    if (UseMO && BlockTrace.isDepInTrace(*Root, *UseMO)) {
      LatencyOp = TSchedModel.computeOperandLatency(
          NewRoot, NewRoot->findRegisterDefOperandIdx(MO.getReg()), UseMO,
          UseMO->findRegisterUseOperandIdx(MO.getReg()));
    } else {
      LatencyOp = TSchedModel.computeInstrLatency(NewRoot);
    }
    NewRootLatency = std::max(NewRootLatency, LatencyOp);
  }
  return NewRootLatency;
}

/// The combiner's goal may differ based on which pattern it is attempting
/// to optimize.
enum class CombinerObjective {
  MustReduceDepth,            // The data dependency chain must be improved.
  MustReduceRegisterPressure, // The register pressure must be reduced.
  Default                     // The critical path must not be lengthened.
};
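// Example of the mapping below: reassociation patterns such as
// REASSOC_AX_BY exist solely to shorten the dependency chain, so they map to
// MustReduceDepth; target-specific throughput patterns fall through to
// Default and are accepted as long as the critical path does not grow.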
static CombinerObjective getCombinerObjective(MachineCombinerPattern P) {
  // TODO: If C++ ever gets a real enum class, make this part of the
  // MachineCombinerPattern class.
  switch (P) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_BY:
  case MachineCombinerPattern::REASSOC_XA_YB:
  case MachineCombinerPattern::REASSOC_XY_AMM_BMM:
  case MachineCombinerPattern::REASSOC_XMM_AMM_BMM:
  case MachineCombinerPattern::SUBADD_OP1:
  case MachineCombinerPattern::SUBADD_OP2:
    return CombinerObjective::MustReduceDepth;
  case MachineCombinerPattern::REASSOC_XY_BCA:
  case MachineCombinerPattern::REASSOC_XY_BAC:
    return CombinerObjective::MustReduceRegisterPressure;
  default:
    return CombinerObjective::Default;
  }
}

/// Estimate the latency of the new and original instruction sequence by
/// summing up the latencies of the inserted and deleted instructions. This
/// assumes that the inserted and deleted instructions are dependent
/// instruction chains, which might not hold in all cases.
std::pair<unsigned, unsigned> MachineCombiner::getLatenciesForInstrSequences(
    MachineInstr &MI, SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineTraceMetrics::Trace BlockTrace) {
  assert(!InsInstrs.empty() && "Only support sequences that insert instrs.");
  unsigned NewRootLatency = 0;
  // NewRoot is the last instruction in the \p InsInstrs vector.
  MachineInstr *NewRoot = InsInstrs.back();
  for (unsigned i = 0; i < InsInstrs.size() - 1; i++)
    NewRootLatency += TSchedModel.computeInstrLatency(InsInstrs[i]);
  NewRootLatency += getLatency(&MI, NewRoot, BlockTrace);

  unsigned RootLatency = 0;
  for (auto *I : DelInstrs)
    RootLatency += TSchedModel.computeInstrLatency(I);

  return {NewRootLatency, RootLatency};
}

bool MachineCombiner::reduceRegisterPressure(
    MachineInstr &Root, MachineBasicBlock *MBB,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    MachineCombinerPattern Pattern) {
  // FIXME: For now, we don't do any check for the register pressure patterns.
  // We treat them as always profitable. But we can do better if we make the
  // RegPressureTracker class aware of the TIE attribute. Then we can get an
  // accurate comparison of register pressure with DelInstrs or InsInstrs.
  return true;
}
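// Worked example (illustrative numbers) for the check below: with
// NewRootDepth = 6, NewRootLatency = 4, RootDepth = 5, RootLatency = 6 and
// RootSlack = 1, the transform is accepted because
//   NewCycleCount = 6 + 4 = 10 <= OldCycleCount = 5 + 6 + 1 = 12.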
/// The DAGCombine code sequence ends in MI (Machine Instruction) Root.
/// The new code sequence ends in MI NewRoot. A necessary condition for the
/// new sequence to replace the old sequence is that it cannot lengthen the
/// critical path. The definition of "improve" may be restricted by specifying
/// that the new path improves the data dependency chain (MustReduceDepth).
bool MachineCombiner::improvesCriticalPathLen(
    MachineBasicBlock *MBB, MachineInstr *Root,
    MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
    MachineCombinerPattern Pattern,
    bool SlackIsAccurate) {
  assert(TSchedModel.hasInstrSchedModelOrItineraries() &&
         "Missing machine model\n");
  // Get depth and latency of NewRoot and Root.
  unsigned NewRootDepth = getDepth(InsInstrs, InstrIdxForVirtReg, BlockTrace);
  unsigned RootDepth = BlockTrace.getInstrCycles(*Root).Depth;

  LLVM_DEBUG(dbgs() << "  Dependence data for " << *Root << "\tNewRootDepth: "
                    << NewRootDepth << "\tRootDepth: " << RootDepth);

  // For a transform such as reassociation, the cost equation is
  // conservatively calculated so that we must improve the depth (data
  // dependency cycles) in the critical path to proceed with the transform.
  // Being conservative also protects against inaccuracies in the underlying
  // machine trace metrics and CPU models.
  if (getCombinerObjective(Pattern) == CombinerObjective::MustReduceDepth) {
    LLVM_DEBUG(dbgs() << "\tIt MustReduceDepth ");
    LLVM_DEBUG(NewRootDepth < RootDepth
                   ? dbgs() << "\t  and it does it\n"
                   : dbgs() << "\t  but it does NOT do it\n");
    return NewRootDepth < RootDepth;
  }

  // A more flexible cost calculation for the critical path includes the slack
  // of the original code sequence. This may allow the transform to proceed
  // even if the instruction depths (data dependency cycles) become worse.

  // Account for the latency of the inserted and deleted instructions.
  unsigned NewRootLatency, RootLatency;
  std::tie(NewRootLatency, RootLatency) =
      getLatenciesForInstrSequences(*Root, InsInstrs, DelInstrs, BlockTrace);

  unsigned RootSlack = BlockTrace.getInstrSlack(*Root);
  unsigned NewCycleCount = NewRootDepth + NewRootLatency;
  unsigned OldCycleCount =
      RootDepth + RootLatency + (SlackIsAccurate ? RootSlack : 0);
  LLVM_DEBUG(dbgs() << "\n\tNewRootLatency: " << NewRootLatency
                    << "\tRootLatency: " << RootLatency << "\n\tRootSlack: "
                    << RootSlack << " SlackIsAccurate=" << SlackIsAccurate
                    << "\n\tNewRootDepth + NewRootLatency = " << NewCycleCount
                    << "\n\tRootDepth + RootLatency + RootSlack = "
                    << OldCycleCount;);
  LLVM_DEBUG(NewCycleCount <= OldCycleCount
                 ? dbgs() << "\n\t  It IMPROVES PathLen because"
                 : dbgs() << "\n\t  It DOES NOT improve PathLen because");
  LLVM_DEBUG(dbgs() << "\n\t\tNewCycleCount = " << NewCycleCount
                    << ", OldCycleCount = " << OldCycleCount << "\n");

  return NewCycleCount <= OldCycleCount;
}

/// Helper routine to convert instructions into their scheduling classes (SC).
void MachineCombiner::instr2instrSC(
    SmallVectorImpl<MachineInstr *> &Instrs,
    SmallVectorImpl<const MCSchedClassDesc *> &InstrsSC) {
  for (auto *InstrPtr : Instrs) {
    unsigned Opc = InstrPtr->getOpcode();
    unsigned Idx = TII->get(Opc).getSchedClass();
    const MCSchedClassDesc *SC = SchedModel.getSchedClassDesc(Idx);
    InstrsSC.push_back(SC);
  }
}
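// Illustrative note for the resource check below: the trace's resource
// length is roughly the cycle count implied by the most contended processor
// resource. E.g. on a machine with two integer ALU pipes, going from 10 to
// 11 ALU micro-ops raises that bound from ceil(10/2) = 5 to ceil(11/2) = 6
// cycles, which preservesResourceLen rejects unless the target permits some
// growth via TargetInstrInfo::getExtendResourceLenLimit().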
/// True when the new instructions do not increase resource length.
bool MachineCombiner::preservesResourceLen(
    MachineBasicBlock *MBB, MachineTraceMetrics::Trace BlockTrace,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs) {
  if (!TSchedModel.hasInstrSchedModel())
    return true;

  // Compute current resource length.
  SmallVector<const MachineBasicBlock *, 1> MBBarr;
  MBBarr.push_back(MBB);
  unsigned ResLenBeforeCombine = BlockTrace.getResourceLength(MBBarr);

  // Deal with SC rather than Instructions.
  SmallVector<const MCSchedClassDesc *, 16> InsInstrsSC;
  SmallVector<const MCSchedClassDesc *, 16> DelInstrsSC;

  instr2instrSC(InsInstrs, InsInstrsSC);
  instr2instrSC(DelInstrs, DelInstrsSC);

  ArrayRef<const MCSchedClassDesc *> MSCInsArr = makeArrayRef(InsInstrsSC);
  ArrayRef<const MCSchedClassDesc *> MSCDelArr = makeArrayRef(DelInstrsSC);

  // Compute new resource length.
  unsigned ResLenAfterCombine =
      BlockTrace.getResourceLength(MBBarr, MSCInsArr, MSCDelArr);

  LLVM_DEBUG(dbgs() << "\t\tResource length before replacement: "
                    << ResLenBeforeCombine
                    << " and after: " << ResLenAfterCombine << "\n";);
  LLVM_DEBUG(
      ResLenAfterCombine <=
              ResLenBeforeCombine + TII->getExtendResourceLenLimit()
          ? dbgs() << "\t\t  As result it IMPROVES/PRESERVES Resource Length\n"
          : dbgs() << "\t\t  As result it DOES NOT improve/preserve Resource "
                      "Length\n");

  return ResLenAfterCombine <=
         ResLenBeforeCombine + TII->getExtendResourceLenLimit();
}

/// \returns true when the new instruction sequence should be generated
/// regardless of whether it lengthens the critical path.
bool MachineCombiner::doSubstitute(unsigned NewSize, unsigned OldSize,
                                   bool OptForSize) {
  if (OptForSize && (NewSize < OldSize))
    return true;
  if (!TSchedModel.hasInstrSchedModelOrItineraries())
    return true;
  return false;
}
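// Example of the policy above: when optimizing for size, a two-instruction
// replacement for a three-instruction sequence is taken unconditionally;
// likewise, a target without a scheduling model or itineraries always
// substitutes, since no latency-based comparison is possible.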
/// Inserts InsInstrs and deletes DelInstrs. Incrementally updates instruction
/// depths if requested.
///
/// \param MBB basic block to insert instructions in
/// \param MI current machine instruction
/// \param InsInstrs new instructions to insert in \p MBB
/// \param DelInstrs instructions to delete from \p MBB
/// \param MinInstr is a pointer to the machine trace information
/// \param RegUnits set of live registers, needed to compute instruction depths
/// \param TII is target instruction info, used to call target hook
/// \param Pattern is used to call target hook finalizeInsInstrs
/// \param IncrementalUpdate if true, compute instruction depths incrementally,
/// otherwise invalidate the trace
static void insertDeleteInstructions(MachineBasicBlock *MBB, MachineInstr &MI,
                                     SmallVector<MachineInstr *, 16> InsInstrs,
                                     SmallVector<MachineInstr *, 16> DelInstrs,
                                     MachineTraceMetrics::Ensemble *MinInstr,
                                     SparseSet<LiveRegUnit> &RegUnits,
                                     const TargetInstrInfo *TII,
                                     MachineCombinerPattern Pattern,
                                     bool IncrementalUpdate) {
  // If we want to fix up some placeholder for some target, do it now.
  // We need this because at the point genAlternativeCodeSequence runs, we
  // have not yet decided whether InsInstrs is the better sequence, so we do
  // not want it to have side effects on the function. For example, the
  // creation of a constant pool entry for InsInstrs is delayed until here,
  // after InsInstrs has been selected as the better pattern; otherwise the
  // entry would persist even when InsInstrs is discarded.
  TII->finalizeInsInstrs(MI, Pattern, InsInstrs);

  for (auto *InstrPtr : InsInstrs)
    MBB->insert((MachineBasicBlock::iterator)&MI, InstrPtr);

  for (auto *InstrPtr : DelInstrs) {
    InstrPtr->eraseFromParent();
    // Erase all LiveRegs defined by the removed instruction.
    for (auto *I = RegUnits.begin(); I != RegUnits.end();) {
      if (I->MI == InstrPtr)
        I = RegUnits.erase(I);
      else
        I++;
    }
  }

  if (IncrementalUpdate)
    for (auto *InstrPtr : InsInstrs)
      MinInstr->updateDepth(MBB, *InstrPtr, RegUnits);
  else
    MinInstr->invalidate(MBB);

  NumInstCombined++;
}

// Check that the difference between original and new latency is decreasing for
// later patterns. This helps to discover sub-optimal pattern orderings.
void MachineCombiner::verifyPatternOrder(
    MachineBasicBlock *MBB, MachineInstr &Root,
    SmallVector<MachineCombinerPattern, 16> &Patterns) {
  long PrevLatencyDiff = std::numeric_limits<long>::max();
  (void)PrevLatencyDiff; // Variable is used in assert only.
  for (auto P : Patterns) {
    SmallVector<MachineInstr *, 16> InsInstrs;
    SmallVector<MachineInstr *, 16> DelInstrs;
    DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
    TII->genAlternativeCodeSequence(Root, P, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg);
    // Found pattern, but did not generate an alternative sequence.
    // This can happen e.g. when an immediate could not be materialized
    // in a single instruction.
    if (InsInstrs.empty() || !TSchedModel.hasInstrSchedModelOrItineraries())
      continue;

    unsigned NewRootLatency, RootLatency;
    std::tie(NewRootLatency, RootLatency) = getLatenciesForInstrSequences(
        Root, InsInstrs, DelInstrs, MinInstr->getTrace(MBB));
    long CurrentLatencyDiff = ((long)RootLatency) - ((long)NewRootLatency);
    assert(CurrentLatencyDiff <= PrevLatencyDiff &&
           "Current pattern is better than previous pattern.");
    PrevLatencyDiff = CurrentLatencyDiff;
  }
}
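// Example for verifyPatternOrder above: if getMachineCombinerPatterns returns
// {P1, P2} where P1 saves 3 cycles (RootLatency - NewRootLatency) and P2
// saves 4, the assertion fires, indicating the target should have ordered
// P2 before P1.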
/// Substitute a slow code sequence with a faster one by
/// evaluating instruction combining patterns.
/// The prototype of such a pattern is MUL + ADD -> MADD. Performs instruction
/// combining based on machine trace metrics. Only combine a sequence of
/// instructions when this neither lengthens the critical path nor increases
/// resource pressure. When optimizing for code size, always combine when the
/// new sequence is shorter.
bool MachineCombiner::combineInstructions(MachineBasicBlock *MBB) {
  bool Changed = false;
  LLVM_DEBUG(dbgs() << "Combining MBB " << MBB->getName() << "\n");

  bool IncrementalUpdate = false;
  auto BlockIter = MBB->begin();
  decltype(BlockIter) LastUpdate;
  // Check if the block is in a loop.
  const MachineLoop *ML = MLI->getLoopFor(MBB);
  if (!MinInstr)
    MinInstr = Traces->getEnsemble(MachineTraceMetrics::TS_MinInstrCount);

  SparseSet<LiveRegUnit> RegUnits;
  RegUnits.setUniverse(TRI->getNumRegUnits());

  bool OptForSize = OptSize || llvm::shouldOptimizeForSize(MBB, PSI, MBFI);

  bool DoRegPressureReduce =
      TII->shouldReduceRegisterPressure(MBB, &RegClassInfo);

  while (BlockIter != MBB->end()) {
    auto &MI = *BlockIter++;
    SmallVector<MachineCombinerPattern, 16> Patterns;
    // The motivating example is:
    //
    //     MUL  Other        MUL_op1  MUL_op2  Other
    //      \    /               \       |      /
    //      ADD/SUB      =>        MADD/MSUB
    //      (=Root)                (=NewRoot)
    //
    // The DAGCombine code always replaced MUL + ADD/SUB by MADD. While this
    // is usually beneficial for code size, it unfortunately can hurt
    // performance when the ADD is on the critical path but the MUL is not.
    // With the substitution the MUL becomes part of the critical path (in
    // form of the MADD) and can lengthen it on architectures where the MADD
    // latency is longer than the ADD latency.
    //
    // For each instruction we check if it can be the root of a combiner
    // pattern. Then for each pattern the new code sequence in form of MI is
    // generated and evaluated. When the efficiency criteria (don't lengthen
    // critical path, don't use more resources) are met, the new sequence gets
    // hooked up into the basic block before the old sequence is removed.
    //
    // The algorithm does not try to evaluate all patterns and pick the best.
    // This is only an artificial restriction though. In practice there is
    // mostly one pattern, and getMachineCombinerPatterns() can order patterns
    // based on an internal cost heuristic. If
    // machine-combiner-verify-pattern-order is enabled, all patterns are
    // checked to ensure later patterns do not provide better latency savings.

    if (!TII->getMachineCombinerPatterns(MI, Patterns, DoRegPressureReduce))
      continue;

    if (VerifyPatternOrder)
      verifyPatternOrder(MBB, MI, Patterns);

    for (auto P : Patterns) {
      SmallVector<MachineInstr *, 16> InsInstrs;
      SmallVector<MachineInstr *, 16> DelInstrs;
      DenseMap<unsigned, unsigned> InstrIdxForVirtReg;
      TII->genAlternativeCodeSequence(MI, P, InsInstrs, DelInstrs,
                                      InstrIdxForVirtReg);
      unsigned NewInstCount = InsInstrs.size();
      unsigned OldInstCount = DelInstrs.size();
      // Found pattern, but did not generate an alternative sequence.
      // This can happen e.g. when an immediate could not be materialized
      // in a single instruction.
      if (!NewInstCount)
        continue;

      LLVM_DEBUG(if (dump_intrs) {
        dbgs() << "\tFor the Pattern (" << (int)P
               << ") these instructions could be removed\n";
        for (auto const *InstrPtr : DelInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
        dbgs() << "\tThese instructions could replace the removed ones\n";
        for (auto const *InstrPtr : InsInstrs)
          InstrPtr->print(dbgs(), /*IsStandalone*/ false, /*SkipOpers*/ false,
                          /*SkipDebugLoc*/ false, /*AddNewLine*/ true, TII);
      });

      bool SubstituteAlways = false;
      if (ML && TII->isThroughputPattern(P))
        SubstituteAlways = true;

      if (IncrementalUpdate && LastUpdate != BlockIter) {
        // Update depths since the last incremental update.
        MinInstr->updateDepths(LastUpdate, BlockIter, RegUnits);
        LastUpdate = BlockIter;
      }
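      // Illustrative note: for a block larger than the default
      // -machine-combiner-inc-threshold of 500 instructions, recomputing the
      // whole trace after every combine would be prohibitively expensive, so
      // only the depths of instructions between LastUpdate and BlockIter are
      // refreshed above.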
      if (DoRegPressureReduce &&
          getCombinerObjective(P) ==
              CombinerObjective::MustReduceRegisterPressure) {
        if (MBB->size() > inc_threshold) {
          // Use incremental depth updates for basic blocks above threshold.
          IncrementalUpdate = true;
          LastUpdate = BlockIter;
        }
        if (reduceRegisterPressure(MI, MBB, InsInstrs, DelInstrs, P)) {
          // Replace DelInstrs with InsInstrs.
          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, TII, P, IncrementalUpdate);
          Changed |= true;

          // Go back to previous instruction as it may have an ILP
          // reassociation opportunity.
          BlockIter--;
          break;
        }
      }

      // Substitute when we optimize for code size and the new sequence has
      // fewer instructions OR
      // the new sequence neither lengthens the critical path nor increases
      // resource pressure.
      if (SubstituteAlways ||
          doSubstitute(NewInstCount, OldInstCount, OptForSize)) {
        insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                 RegUnits, TII, P, IncrementalUpdate);
        // Eagerly stop after the first pattern fires.
        Changed = true;
        break;
      } else {
        // For big basic blocks, we only compute the full trace the first time
        // we hit this. We do not invalidate the trace, but instead update the
        // instruction depths incrementally.
        // NOTE: Only the instruction depths up to MI are accurate. All other
        // trace information is not updated.
        MachineTraceMetrics::Trace BlockTrace = MinInstr->getTrace(MBB);
        Traces->verifyAnalysis();
        if (improvesCriticalPathLen(MBB, &MI, BlockTrace, InsInstrs, DelInstrs,
                                    InstrIdxForVirtReg, P,
                                    !IncrementalUpdate) &&
            preservesResourceLen(MBB, BlockTrace, InsInstrs, DelInstrs)) {
          if (MBB->size() > inc_threshold) {
            // Use incremental depth updates for basic blocks above threshold.
            IncrementalUpdate = true;
            LastUpdate = BlockIter;
          }

          insertDeleteInstructions(MBB, MI, InsInstrs, DelInstrs, MinInstr,
                                   RegUnits, TII, P, IncrementalUpdate);

          // Eagerly stop after the first pattern fires.
          Changed = true;
          break;
        }
        // Clean up the instructions of the alternative code sequence. There
        // is no use for them.
        MachineFunction *MF = MBB->getParent();
        for (auto *InstrPtr : InsInstrs)
          MF->deleteMachineInstr(InstrPtr);
      }
      InstrIdxForVirtReg.clear();
    }
  }

  if (Changed && IncrementalUpdate)
    Traces->invalidate(MBB);
  return Changed;
}
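// Usage note (illustrative, not part of this file): targets opt in via
// TargetInstrInfo::useMachineCombiner() and typically schedule the pass
// before register allocation, e.g. with addPass(&MachineCombinerID) in
// their TargetPassConfig::addILPOpts() override.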
bool MachineCombiner::runOnMachineFunction(MachineFunction &MF) {
  STI = &MF.getSubtarget();
  TII = STI->getInstrInfo();
  TRI = STI->getRegisterInfo();
  SchedModel = STI->getSchedModel();
  TSchedModel.init(STI);
  MRI = &MF.getRegInfo();
  MLI = &getAnalysis<MachineLoopInfo>();
  Traces = &getAnalysis<MachineTraceMetrics>();
  PSI = &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  MBFI = (PSI && PSI->hasProfileSummary())
             ? &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI()
             : nullptr;
  MinInstr = nullptr;
  OptSize = MF.getFunction().hasOptSize();
  RegClassInfo.runOnMachineFunction(MF);

  LLVM_DEBUG(dbgs() << getPassName() << ": " << MF.getName() << '\n');
  if (!TII->useMachineCombiner()) {
    LLVM_DEBUG(
        dbgs()
        << "  Skipping pass: Target does not support machine combiner\n");
    return false;
  }

  bool Changed = false;

  // Try to combine instructions.
  for (auto &MBB : MF)
    Changed |= combineInstructions(&MBB);

  return Changed;
}