//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachinePassRegistry.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <memory>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "machine-scheduler"

STATISTIC(NumClustered, "Number of load/store pairs clustered");

namespace llvm {

cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));

cl::opt<bool> VerifyScheduling(
    "verify-misched", cl::Hidden,
    cl::desc("Verify machine instrs before and after machine scheduling"));

#ifndef NDEBUG
cl::opt<bool> ViewMISchedDAGs(
    "view-misched-dags", cl::Hidden,
    cl::desc("Pop up a window to show MISched dags after they are processed"));
cl::opt<bool> PrintDAGs("misched-print-dags", cl::Hidden,
                        cl::desc("Print schedule DAGs"));
cl::opt<bool> MISchedDumpReservedCycles(
    "misched-dump-reserved-cycles", cl::Hidden, cl::init(false),
    cl::desc("Dump resource usage at schedule boundary."));
cl::opt<bool> MischedDetailResourceBooking(
    "misched-detail-resource-booking", cl::Hidden, cl::init(false),
    cl::desc("Show details of invoking getNextResourceCycle."));
#else
const bool ViewMISchedDAGs = false;
const bool PrintDAGs = false;
const bool MischedDetailResourceBooking = false;
#ifdef LLVM_ENABLE_DUMP
const bool MISchedDumpReservedCycles = false;
#endif // LLVM_ENABLE_DUMP
#endif // NDEBUG

} // end namespace llvm

#ifndef NDEBUG
/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
                                        cl::desc("Only schedule this MBB#"));
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));
static cl::opt<bool>
    ForceFastCluster("force-fast-cluster", cl::Hidden,
                     cl::desc("Switch to the fast cluster algorithm at the "
                              "cost of some fusion opportunities"),
                     cl::init(false));
static cl::opt<unsigned>
    FastClusterThreshold("fast-cluster-threshold", cl::Hidden,
                         cl::desc("The threshold for fast cluster"),
                         cl::init(1000));

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static cl::opt<bool> MISchedDumpScheduleTrace(
    "misched-dump-schedule-trace", cl::Hidden, cl::init(false),
    cl::desc("Dump resource usage at schedule boundary."));
static cl::opt<unsigned>
    HeaderColWidth("misched-dump-schedule-trace-col-header-width", cl::Hidden,
                   cl::desc("Set width of the columns with "
                            "the resources and schedule units"),
                   cl::init(19));
static cl::opt<unsigned>
    ColWidth("misched-dump-schedule-trace-col-width", cl::Hidden,
             cl::desc("Set width of the columns showing resource booking."),
             cl::init(5));
static cl::opt<bool> MISchedSortResourcesInTrace(
    "misched-sort-resources-in-trace", cl::Hidden, cl::init(true),
    cl::desc("Sort the resources printed in the dump trace"));
#endif

static cl::opt<unsigned>
    MIResourceCutOff("misched-resource-cutoff", cl::Hidden,
                     cl::desc("Number of intervals to track"), cl::init(10));
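
// Example (illustrative, not from this file): typical debugging invocations
// exercising the flags above. Availability of some flags depends on the build
// (several are only compiled in for !NDEBUG or LLVM_ENABLE_DUMP builds):
//   llc -misched-topdown -misched-limit=128 foo.ll
//   llc -misched-print-dags -misched-dump-schedule-trace foo.ll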

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}

void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext() {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {

/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};

} // end anonymous namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, DEBUG_TYPE,
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, DEBUG_TYPE,
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler() : MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS_BEGIN(PostMachineScheduler, "postmisched",
                      "PostRA Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(PostMachineScheduler, "postmisched",
                    "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler() : MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry<MachineSchedRegistry::ScheduleDAGCtor>
    MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry>>
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
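
// Illustrative sketch (not part of this file): a target or plugin can make its
// own scheduler selectable via -misched=<name> by defining a factory and a
// registry entry like the "default" one above. The names below are
// hypothetical.
//
//   static ScheduleDAGInstrs *createMyCustomSched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
//   }
//   static MachineSchedRegistry
//       MyCustomSchedRegistry("my-custom", "Run my custom scheduler.",
//                             createMyCustomSched);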

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugOrPseudoInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugOrPseudoInstr())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}
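
// Illustrative sketch (hypothetical target): a backend opts in by overriding
// the TargetPassConfig hook queried above, mirroring what
// createGenericSchedPostRA() produces by default.
//
//   ScheduleDAGInstrs *
//   MyTargetPassConfig::createPostMachineScheduler(MachineSchedContext *C) const {
//     return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
//                              /*RemoveKillFlags=*/true);
//   }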

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  LLVM_DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    LLVM_DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  LLVM_DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAMachineScheduler()) {
    LLVM_DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  LLVM_DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}
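
// For illustration (not from this file): a block containing a call is split
// into two regions around it; the boundary instruction itself belongs to no
// region's DAG:
//
//   %a = LOAD ...        \
//   %b = ADD %a, ...     /  region 1
//   CALL @fn                <- scheduling boundary
//   %c = MUL %b, ...     \
//   STORE %c, ...        /  region 2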

/// A region of an MBB for scheduling.
namespace {
struct SchedRegion {
  /// RegionBegin is the first instruction in the scheduling region, and
  /// RegionEnd is either MBB->end() or the scheduling boundary after the
  /// last instruction in the scheduling region. These iterators cannot refer
  /// to instructions outside of the identified scheduling region because
  /// those may be reordered before scheduling this region.
  MachineBasicBlock::iterator RegionBegin;
  MachineBasicBlock::iterator RegionEnd;
  unsigned NumRegionInstrs;

  SchedRegion(MachineBasicBlock::iterator B, MachineBasicBlock::iterator E,
              unsigned N) :
    RegionBegin(B), RegionEnd(E), NumRegionInstrs(N) {}
};
} // end anonymous namespace

using MBBRegionsVector = SmallVector<SchedRegion, 16>;

static void
getSchedRegions(MachineBasicBlock *MBB,
                MBBRegionsVector &Regions,
                bool RegionsTopDown) {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  MachineBasicBlock::iterator I = nullptr;
  for (MachineBasicBlock::iterator RegionEnd = MBB->end();
       RegionEnd != MBB->begin(); RegionEnd = I) {

    // Avoid decrementing RegionEnd for blocks with no terminator.
    if (RegionEnd != MBB->end() ||
        isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
      --RegionEnd;
    }

    // The next region starts above the previous region. Look backward in the
    // instruction stream until we find the nearest boundary.
    unsigned NumRegionInstrs = 0;
    I = RegionEnd;
    for (; I != MBB->begin(); --I) {
      MachineInstr &MI = *std::prev(I);
      if (isSchedBoundary(&MI, &*MBB, MF, TII))
        break;
      if (!MI.isDebugOrPseudoInstr()) {
        // MBB::size() uses instr_iterator to count. Here we need a bundle to
        // count as a single instruction.
        ++NumRegionInstrs;
      }
    }

    // It's possible we found a scheduling region that only has debug
    // instructions. Don't bother scheduling these.
    if (NumRegionInstrs != 0)
      Regions.push_back(SchedRegion(I, RegionEnd, NumRegionInstrs));
  }

  if (RegionsTopDown)
    std::reverse(Regions.begin(), Regions.end());
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd). RegionEnd
    // points to the scheduling boundary at the bottom of the region. The DAG
    // does not include RegionEnd, but the region does (i.e. the next
    // RegionEnd is above the previous RegionBegin). If the current block has
    // no terminator then RegionEnd == MBB->end() for the bottom region.
    //
    // All the regions of MBB are first found and stored in MBBRegions, which
    // are then processed top-down if the scheduler requests it via
    // doMBBSchedRegionsTopDown().
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls. Instructions must not be
    // added to other regions than the current one without updating MBBRegions.

    MBBRegionsVector MBBRegions;
    getSchedRegions(&*MBB, MBBRegions, Scheduler.doMBBSchedRegionsTopDown());
    for (const SchedRegion &R : MBBRegions) {
      MachineBasicBlock::iterator I = R.RegionBegin;
      MachineBasicBlock::iterator RegionEnd = R.RegionEnd;
      unsigned NumRegionInstrs = R.NumRegionInstrs;

      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF->getName() << ":" << printMBBReference(*MBB)
                        << " " << MBB->getName() << "\n  From: " << *I
                        << "    To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End\n";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":%bb. " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates the original region iterators.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(*MBB);
  }
  Scheduler.finalizeSchedule();
}
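
// Illustrative call sequence of the driver above (for orientation, not code):
// for each block it invokes
//   startBlock(MBB)
//     enterRegion(...) -> [schedule()] -> exitRegion()   (once per region)
//   finishBlock()
// and finally finalizeSchedule() once per function.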

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ReadyQueue::dump() const {
  dbgs() << "Queue " << Name << ": ";
  for (const SUnit *SU : Queue)
    dbgs() << SU->NodeNum << " ";
  dbgs() << "\n";
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() = default;

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*SuccSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SDep &Succ : SU->Succs)
    releaseSucc(SU, &Succ);
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    dumpNode(*PredSU);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SDep &Pred : SU->Preds)
    releasePred(SU, &Pred);
}

void ScheduleDAGMI::startBlock(MachineBasicBlock *bb) {
  ScheduleDAGInstrs::startBlock(bb);
  SchedImpl->enterMBB(bb);
}

void ScheduleDAGMI::finishBlock() {
  SchedImpl->leaveMBB();
  ScheduleDAGInstrs::finishBlock();
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                     MachineBasicBlock::iterator begin,
                                     MachineBasicBlock::iterator end,
                                     unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS && !defined(NDEBUG)
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  LLVM_DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  LLVM_DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  postProcessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  LLVM_DEBUG(dump());
  if (PrintDAGs) dump();
  if (ViewMISchedDAGs) viewGraph();

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    LLVM_DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  LLVM_DEBUG({
    dbgs() << "*** Final schedule for "
           << printMBBReference(*begin()->getParent()) << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}
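
// Illustrative sketch (hypothetical, not part of LLVM): the minimal
// MachineSchedStrategy surface the loop above relies on. A trivial top-down
// strategy could look like:
//
//   struct TrivialSchedStrategy : public MachineSchedStrategy {
//     std::vector<SUnit *> Ready;
//     void initialize(ScheduleDAGMI *DAG) override { Ready.clear(); }
//     SUnit *pickNode(bool &IsTopNode) override {
//       if (Ready.empty())
//         return nullptr;
//       IsTopNode = true;
//       SUnit *SU = Ready.back();
//       Ready.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//     void releaseTopNode(SUnit *SU) override { Ready.push_back(SU); }
//     void releaseBottomNode(SUnit *SU) override {}
//   };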

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postProcessDAG() {
  for (auto &m : Mutations)
    m->apply(this);
}

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (SUnit &SU : SUnits) {
    assert(!SU.isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU.biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!SU.NumPredsLeft)
      TopRoots.push_back(&SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!SU.NumSuccsLeft)
      BotRoots.push_back(&SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SUnit *SU : TopRoots)
    SchedImpl->releaseTopNode(SU);

  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *>>::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(std::next(OrigPrevMI), BB, DbgValue);
    if (RegionEnd != BB->end() && OrigPrevMI == &*RegionEnd)
      RegionEnd = DbgValue;
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static const char *scheduleTableLegend = "  i: issue\n  x: resource booked";

LLVM_DUMP_METHOD void ScheduleDAGMI::dumpScheduleTraceTopDown() const {
  // Bail out when there is no schedule model to query.
  if (!SchedModel.hasInstrSchedModel())
    return;

  // Nothing to show if there are fewer than two instructions.
  if (BB->size() < 2)
    return;

  dbgs() << " * Schedule table (TopDown):\n";
  dbgs() << scheduleTableLegend << "\n";
  const unsigned FirstCycle = getSUnit(&*(std::begin(*this)))->TopReadyCycle;
  unsigned LastCycle = getSUnit(&*(std::prev(std::end(*this))))->TopReadyCycle;
  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU)
      continue;
    const MCSchedClassDesc *SC = getSchedClass(SU);
    for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC),
                                       PE = SchedModel.getWriteProcResEnd(SC);
         PI != PE; ++PI) {
      if (SU->TopReadyCycle + PI->Cycles - 1 > LastCycle)
        LastCycle = SU->TopReadyCycle + PI->Cycles - 1;
    }
  }
  // Print the header with the cycles
  dbgs() << llvm::left_justify("Cycle", HeaderColWidth);
  for (unsigned C = FirstCycle; C <= LastCycle; ++C)
    dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth);
  dbgs() << "|\n";

  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU) {
      dbgs() << "Missing SUnit\n";
      continue;
    }
    std::string NodeName("SU(");
    NodeName += std::to_string(SU->NodeNum) + ")";
    dbgs() << llvm::left_justify(NodeName, HeaderColWidth);
    unsigned C = FirstCycle;
    for (; C <= LastCycle; ++C) {
      if (C == SU->TopReadyCycle)
        dbgs() << llvm::left_justify("| i", ColWidth);
      else
        dbgs() << llvm::left_justify("|", ColWidth);
    }
    dbgs() << "|\n";
    const MCSchedClassDesc *SC = getSchedClass(SU);

    SmallVector<MCWriteProcResEntry, 4> ResourcesIt(
        make_range(SchedModel.getWriteProcResBegin(SC),
                   SchedModel.getWriteProcResEnd(SC)));

    if (MISchedSortResourcesInTrace)
      llvm::stable_sort(ResourcesIt,
                        [](const MCWriteProcResEntry &LHS,
                           const MCWriteProcResEntry &RHS) -> bool {
                          return LHS.StartAtCycle < RHS.StartAtCycle ||
                                 (LHS.StartAtCycle == RHS.StartAtCycle &&
                                  LHS.Cycles < RHS.Cycles);
                        });
    for (const MCWriteProcResEntry &PI : ResourcesIt) {
      C = FirstCycle;
      const std::string ResName =
          SchedModel.getResourceName(PI.ProcResourceIdx);
      dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth);
      for (; C < SU->TopReadyCycle + PI.StartAtCycle; ++C) {
        dbgs() << llvm::left_justify("|", ColWidth);
      }
      for (unsigned I = 0, E = PI.Cycles - PI.StartAtCycle; I != E; ++I, ++C)
        dbgs() << llvm::left_justify("| x", ColWidth);
      while (C++ <= LastCycle)
        dbgs() << llvm::left_justify("|", ColWidth);
      // Place end char
      dbgs() << "| \n";
    }
  }
}

LLVM_DUMP_METHOD void ScheduleDAGMI::dumpScheduleTraceBottomUp() const {
  // Bail out when there is no schedule model to query.
  if (!SchedModel.hasInstrSchedModel())
    return;

  // Nothing to show if there are fewer than two instructions.
  if (BB->size() < 2)
    return;

  dbgs() << " * Schedule table (BottomUp):\n";
  dbgs() << scheduleTableLegend << "\n";

  const int FirstCycle = getSUnit(&*(std::begin(*this)))->BotReadyCycle;
  int LastCycle = getSUnit(&*(std::prev(std::end(*this))))->BotReadyCycle;
  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU)
      continue;
    const MCSchedClassDesc *SC = getSchedClass(SU);
    for (TargetSchedModel::ProcResIter PI = SchedModel.getWriteProcResBegin(SC),
                                       PE = SchedModel.getWriteProcResEnd(SC);
         PI != PE; ++PI) {
      if ((int)SU->BotReadyCycle - PI->Cycles + 1 < LastCycle)
        LastCycle = (int)SU->BotReadyCycle - PI->Cycles + 1;
    }
  }
  // Print the header with the cycles
  dbgs() << llvm::left_justify("Cycle", HeaderColWidth);
  for (int C = FirstCycle; C >= LastCycle; --C)
    dbgs() << llvm::left_justify("| " + std::to_string(C), ColWidth);
  dbgs() << "|\n";

  for (MachineInstr &MI : *this) {
    SUnit *SU = getSUnit(&MI);
    if (!SU) {
      dbgs() << "Missing SUnit\n";
      continue;
    }
    std::string NodeName("SU(");
    NodeName += std::to_string(SU->NodeNum) + ")";
    dbgs() << llvm::left_justify(NodeName, HeaderColWidth);
    int C = FirstCycle;
    for (; C >= LastCycle; --C) {
      if (C == (int)SU->BotReadyCycle)
        dbgs() << llvm::left_justify("| i", ColWidth);
      else
        dbgs() << llvm::left_justify("|", ColWidth);
    }
    dbgs() << "|\n";
    const MCSchedClassDesc *SC = getSchedClass(SU);
    SmallVector<MCWriteProcResEntry, 4> ResourcesIt(
        make_range(SchedModel.getWriteProcResBegin(SC),
                   SchedModel.getWriteProcResEnd(SC)));

    if (MISchedSortResourcesInTrace)
      llvm::stable_sort(ResourcesIt,
                        [](const MCWriteProcResEntry &LHS,
                           const MCWriteProcResEntry &RHS) -> bool {
                          return LHS.StartAtCycle < RHS.StartAtCycle ||
                                 (LHS.StartAtCycle == RHS.StartAtCycle &&
                                  LHS.Cycles < RHS.Cycles);
                        });
    for (const MCWriteProcResEntry &PI : ResourcesIt) {
      C = FirstCycle;
      const std::string ResName =
          SchedModel.getResourceName(PI.ProcResourceIdx);
      dbgs() << llvm::right_justify(ResName + " ", HeaderColWidth);
      for (; C > ((int)SU->BotReadyCycle - (int)PI.StartAtCycle); --C) {
        dbgs() << llvm::left_justify("|", ColWidth);
      }
      for (unsigned I = 0, E = PI.Cycles - PI.StartAtCycle; I != E; ++I, --C)
        dbgs() << llvm::left_justify("| x", ColWidth);
      while (C-- >= LastCycle)
        dbgs() << llvm::left_justify("|", ColWidth);
      // Place end char
      dbgs() << "| \n";
    }
  }
}
#endif

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScheduleDAGMI::dumpSchedule() const {
  if (MISchedDumpScheduleTrace) {
    if (ForceTopDown)
      dumpScheduleTraceTopDown();
    else if (ForceBottomUp)
      dumpScheduleTraceBottomUp();
    else {
      dbgs() << "* Schedule table (Bidirectional): not implemented\n";
    }
  }

  for (MachineInstr &MI : *this) {
    if (SUnit *SU = getSUnit(&MI))
      dumpNode(*SU);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    Register Reg = MO.getReg();
    if (!Reg.isVirtual())
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.all_defs()) {
        if (MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, LaneBitmask::getNone(), &SU));
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  LLVM_DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    LLVM_DEBUG(dbgs() << "Live Thru: ";
               dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }
1249 
1250   // For each live out vreg reduce the pressure change associated with other
1251   // uses of the same vreg below the live-out reaching def.
1252   updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);
1253 
1254   // Account for liveness generated by the region boundary.
1255   if (LiveRegionEnd != RegionEnd) {
1256     SmallVector<RegisterMaskPair, 8> LiveUses;
1257     BotRPTracker.recede(&LiveUses);
1258     updatePressureDiffs(LiveUses);
1259   }
1260 
1261   LLVM_DEBUG(dbgs() << "Top Pressure:\n";
1262              dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
1263              dbgs() << "Bottom Pressure:\n";
1264              dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI););
1265 
1266   assert((BotRPTracker.getPos() == RegionEnd ||
1267           (RegionEnd->isDebugInstr() &&
1268            BotRPTracker.getPos() == priorNonDebug(RegionEnd, RegionBegin))) &&
1269          "Can't find the region bottom");
1270 
1271   // Cache the list of excess pressure sets in this region. This will also track
1272   // the max pressure in the scheduled code for these sets.
1273   RegionCriticalPSets.clear();
1274   const std::vector<unsigned> &RegionPressure =
1275     RPTracker.getPressure().MaxSetPressure;
1276   for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
1277     unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
1278     if (RegionPressure[i] > Limit) {
1279       LLVM_DEBUG(dbgs() << TRI->getRegPressureSetName(i) << " Limit " << Limit
1280                         << " Actual " << RegionPressure[i] << "\n");
1281       RegionCriticalPSets.push_back(PressureChange(i));
1282     }
1283   }
1284   LLVM_DEBUG(dbgs() << "Excess PSets: ";
1285              for (const PressureChange &RCPS
1286                   : RegionCriticalPSets) dbgs()
1287              << TRI->getRegPressureSetName(RCPS.getPSet()) << " ";
1288              dbgs() << "\n");
1289 }
1290 
1291 void ScheduleDAGMILive::
1292 updateScheduledPressure(const SUnit *SU,
1293                         const std::vector<unsigned> &NewMaxPressure) {
1294   const PressureDiff &PDiff = getPressureDiff(SU);
1295   unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
1296   for (const PressureChange &PC : PDiff) {
1297     if (!PC.isValid())
1298       break;
1299     unsigned ID = PC.getPSet();
1300     while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
1301       ++CritIdx;
1302     if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
1303       if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
1304           && NewMaxPressure[ID] <= (unsigned)std::numeric_limits<int16_t>::max())
1305         RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
1306     }
1307     unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
1308     if (NewMaxPressure[ID] >= Limit - 2) {
1309       LLVM_DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
1310                         << NewMaxPressure[ID]
1311                         << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ")
1312                         << Limit << "(+ " << BotRPTracker.getLiveThru()[ID]
1313                         << " livethru)\n");
1314     }
1315   }
1316 }
1317 
1318 /// Update the PressureDiff array for liveness after scheduling this
1319 /// instruction.
1320 void ScheduleDAGMILive::updatePressureDiffs(
1321     ArrayRef<RegisterMaskPair> LiveUses) {
1322   for (const RegisterMaskPair &P : LiveUses) {
1323     Register Reg = P.RegUnit;
1324     /// FIXME: Currently assuming single-use physregs.
1325     if (!Reg.isVirtual())
1326       continue;
1327 
1328     if (ShouldTrackLaneMasks) {
1329       // If the register has just become live then other uses won't change
1330       // this fact anymore => decrement pressure.
1331       // If the register has just become dead then other uses make it come
1332       // back to life => increment pressure.
1333       bool Decrement = P.LaneMask.any();
1334 
1335       for (const VReg2SUnit &V2SU
1336            : make_range(VRegUses.find(Reg), VRegUses.end())) {
1337         SUnit &SU = *V2SU.SU;
1338         if (SU.isScheduled || &SU == &ExitSU)
1339           continue;
1340 
1341         PressureDiff &PDiff = getPressureDiff(&SU);
1342         PDiff.addPressureChange(Reg, Decrement, &MRI);
1343         LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
1344                           << printReg(Reg, TRI) << ':'
1345                           << PrintLaneMask(P.LaneMask) << ' ' << *SU.getInstr();
1346                    dbgs() << "              to "; PDiff.dump(*TRI););
1347       }
1348     } else {
1349       assert(P.LaneMask.any());
1350       LLVM_DEBUG(dbgs() << "  LiveReg: " << printVRegOrUnit(Reg, TRI) << "\n");
1351       // This may be called before CurrentBottom has been initialized. However,
1352       // BotRPTracker must have a valid position. We want the value live into the
1353       // instruction or live out of the block, so ask for the previous
1354       // instruction's live-out.
1355       const LiveInterval &LI = LIS->getInterval(Reg);
1356       VNInfo *VNI;
1357       MachineBasicBlock::const_iterator I =
1358         nextIfDebug(BotRPTracker.getPos(), BB->end());
1359       if (I == BB->end())
1360         VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1361       else {
1362         LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
1363         VNI = LRQ.valueIn();
1364       }
1365       // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
1366       assert(VNI && "No live value at use.");
1367       for (const VReg2SUnit &V2SU
1368            : make_range(VRegUses.find(Reg), VRegUses.end())) {
1369         SUnit *SU = V2SU.SU;
1370         // If this use comes before the reaching def, it cannot be a last use,
1371         // so decrease its pressure change.
1372         if (!SU->isScheduled && SU != &ExitSU) {
1373           LiveQueryResult LRQ =
1374               LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
1375           if (LRQ.valueIn() == VNI) {
1376             PressureDiff &PDiff = getPressureDiff(SU);
1377             PDiff.addPressureChange(Reg, true, &MRI);
1378             LLVM_DEBUG(dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
1379                               << *SU->getInstr();
1380                        dbgs() << "              to "; PDiff.dump(*TRI););
1381           }
1382         }
1383       }
1384     }
1385   }
1386 }
1387 
1388 void ScheduleDAGMILive::dump() const {
1389 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1390   if (EntrySU.getInstr() != nullptr)
1391     dumpNodeAll(EntrySU);
1392   for (const SUnit &SU : SUnits) {
1393     dumpNodeAll(SU);
1394     if (ShouldTrackPressure) {
1395       dbgs() << "  Pressure Diff      : ";
1396       getPressureDiff(&SU).dump(*TRI);
1397     }
1398     dbgs() << "  Single Issue       : ";
1399     if (SchedModel.mustBeginGroup(SU.getInstr()) &&
1400         SchedModel.mustEndGroup(SU.getInstr()))
1401       dbgs() << "true;";
1402     else
1403       dbgs() << "false;";
1404     dbgs() << '\n';
1405   }
1406   if (ExitSU.getInstr() != nullptr)
1407     dumpNodeAll(ExitSU);
1408 #endif
1409 }
1410 
1411 /// schedule - Called back from MachineScheduler::runOnMachineFunction
1412 /// after setting up the current scheduling region. [RegionBegin, RegionEnd)
1413 /// only includes instructions that have DAG nodes, not scheduling boundaries.
1414 ///
1415 /// This is a skeletal driver, with all the functionality pushed into helpers,
1416 /// so that it can be easily extended by experimental schedulers. Generally,
1417 /// implementing MachineSchedStrategy should be sufficient to implement a new
1418 /// scheduling algorithm. However, if a scheduler further subclasses
1419 /// ScheduleDAGMILive then it will want to override this virtual method in order
1420 /// to update any specialized state.
1421 void ScheduleDAGMILive::schedule() {
1422   LLVM_DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
1423   LLVM_DEBUG(SchedImpl->dumpPolicy());
1424   buildDAGWithRegPressure();
1425 
1426   postProcessDAG();
1427 
1428   SmallVector<SUnit*, 8> TopRoots, BotRoots;
1429   findRootsAndBiasEdges(TopRoots, BotRoots);
1430 
1431   // Initialize the strategy before modifying the DAG.
1432   // This may initialize a DFSResult to be used for queue priority.
1433   SchedImpl->initialize(this);
1434 
1435   LLVM_DEBUG(dump());
1436   if (PrintDAGs) dump();
1437   if (ViewMISchedDAGs) viewGraph();
1438 
1439   // Initialize ready queues now that the DAG and priority data are finalized.
1440   initQueues(TopRoots, BotRoots);
1441 
1442   bool IsTopNode = false;
1443   while (true) {
1444     LLVM_DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
1445     SUnit *SU = SchedImpl->pickNode(IsTopNode);
1446     if (!SU) break;
1447 
1448     assert(!SU->isScheduled && "Node already scheduled");
1449     if (!checkSchedLimit())
1450       break;
1451 
1452     scheduleMI(SU, IsTopNode);
1453 
1454     if (DFSResult) {
1455       unsigned SubtreeID = DFSResult->getSubtreeID(SU);
1456       if (!ScheduledTrees.test(SubtreeID)) {
1457         ScheduledTrees.set(SubtreeID);
1458         DFSResult->scheduleTree(SubtreeID);
1459         SchedImpl->scheduleTree(SubtreeID);
1460       }
1461     }
1462 
1463     // Notify the scheduling strategy after updating the DAG.
1464     SchedImpl->schedNode(SU, IsTopNode);
1465 
1466     updateQueues(SU, IsTopNode);
1467   }
1468   assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
1469 
1470   placeDebugValues();
1471 
1472   LLVM_DEBUG({
1473     dbgs() << "*** Final schedule for "
1474            << printMBBReference(*begin()->getParent()) << " ***\n";
1475     dumpSchedule();
1476     dbgs() << '\n';
1477   });
1478 }
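
// A minimal strategy sketch (illustrative only; not part of this file). The
// driver above only needs the MachineSchedStrategy hooks it calls:
// initialize(), pickNode(), schedNode() and the release callbacks. Roughly:
//
//   struct TrivialStrategy : MachineSchedStrategy {
//     std::vector<SUnit *> Ready;                  // naive ready list
//     void initialize(ScheduleDAGMI *DAG) override { Ready.clear(); }
//     void releaseTopNode(SUnit *SU) override { Ready.push_back(SU); }
//     void releaseBottomNode(SUnit *SU) override {}
//     SUnit *pickNode(bool &IsTopNode) override {
//       IsTopNode = true;                          // pure top-down
//       if (Ready.empty()) return nullptr;         // terminates the loop above
//       SUnit *SU = Ready.back(); Ready.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//   };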
1479 
1480 /// Build the DAG and set up three register pressure trackers.
1481 void ScheduleDAGMILive::buildDAGWithRegPressure() {
1482   if (!ShouldTrackPressure) {
1483     RPTracker.reset();
1484     RegionCriticalPSets.clear();
1485     buildSchedGraph(AA);
1486     return;
1487   }
1488 
1489   // Initialize the register pressure tracker used by buildSchedGraph.
1490   RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
1491                  ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);
1492 
1493   // Account for liveness generated by the region boundary.
1494   if (LiveRegionEnd != RegionEnd)
1495     RPTracker.recede();
1496 
1497   // Build the DAG, and compute current register pressure.
1498   buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);
1499 
1500   // Initialize top/bottom trackers after computing region pressure.
1501   initRegPressure();
1502 }
1503 
1504 void ScheduleDAGMILive::computeDFSResult() {
1505   if (!DFSResult)
1506     DFSResult = new SchedDFSResult(/*BottomUp=*/true, MinSubtreeSize);
1507   DFSResult->clear();
1508   ScheduledTrees.clear();
1509   DFSResult->resize(SUnits.size());
1510   DFSResult->compute(SUnits);
1511   ScheduledTrees.resize(DFSResult->getNumSubtrees());
1512 }
1513 
1514 /// Compute the max cyclic critical path through the DAG. The scheduling DAG
1515 /// only provides the critical path for single block loops. To handle loops that
1516 /// span blocks, we could use the vreg path latencies provided by
1517 /// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
1518 /// available for use in the scheduler.
1519 ///
1520 /// The cyclic path estimation identifies a def-use pair that crosses the back
1521 /// edge and considers the depth and height of the nodes. For example, consider
1522 /// the following instruction sequence where each instruction has unit latency
1523 /// and defines an eponymous virtual register:
1524 ///
1525 /// a->b(a,c)->c(b)->d(c)->exit
1526 ///
1527 /// The cyclic critical path is two cycles: b->c->b
1528 /// The acyclic critical path is four cycles: a->b->c->d->exit
1529 /// LiveOutHeight = height(c) = len(c->d->exit) = 2
1530 /// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
1531 /// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
1532 /// LiveInDepth = depth(b) = len(a->b) = 1
1533 ///
1534 /// LiveOutDepth - LiveInDepth = 3 - 1 = 2
1535 /// LiveInHeight - LiveOutHeight = 4 - 2 = 2
1536 /// CyclicCriticalPath = min(2, 2) = 2
1537 ///
1538 /// This could be relevant to PostRA scheduling, but is currently implemented
1539 /// assuming LiveIntervals.
1540 unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
1541   // This only applies to single-block loops.
1542   if (!BB->isSuccessor(BB))
1543     return 0;
1544 
1545   unsigned MaxCyclicLatency = 0;
1546   // Visit each live out vreg def to find def/use pairs that cross iterations.
1547   for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
1548     Register Reg = P.RegUnit;
1549     if (!Reg.isVirtual())
1550       continue;
1551     const LiveInterval &LI = LIS->getInterval(Reg);
1552     const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1553     if (!DefVNI)
1554       continue;
1555 
1556     MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
1557     const SUnit *DefSU = getSUnit(DefMI);
1558     if (!DefSU)
1559       continue;
1560 
1561     unsigned LiveOutHeight = DefSU->getHeight();
1562     unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
1563     // Visit all local users of the vreg def.
1564     for (const VReg2SUnit &V2SU
1565          : make_range(VRegUses.find(Reg), VRegUses.end())) {
1566       SUnit *SU = V2SU.SU;
1567       if (SU == &ExitSU)
1568         continue;
1569 
1570       // Only consider uses of the phi.
1571       LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
1572       if (!LRQ.valueIn()->isPHIDef())
1573         continue;
1574 
1575       // Assume that a path spanning two iterations is a cycle, which could
1576       // overestimate in strange cases. This allows cyclic latency to be
1577       // estimated as the minimum slack of the vreg's depth or height.
1578       unsigned CyclicLatency = 0;
1579       if (LiveOutDepth > SU->getDepth())
1580         CyclicLatency = LiveOutDepth - SU->getDepth();
1581 
1582       unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
1583       if (LiveInHeight > LiveOutHeight) {
1584         if (LiveInHeight - LiveOutHeight < CyclicLatency)
1585           CyclicLatency = LiveInHeight - LiveOutHeight;
1586       } else
1587         CyclicLatency = 0;
1588 
1589       LLVM_DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
1590                         << SU->NodeNum << ") = " << CyclicLatency << "c\n");
1591       if (CyclicLatency > MaxCyclicLatency)
1592         MaxCyclicLatency = CyclicLatency;
1593     }
1594   }
1595   LLVM_DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
1596   return MaxCyclicLatency;
1597 }
1598 
1599 /// Release ExitSU predecessors and set up scheduler queues. Re-position
1600 /// the Top RP tracker in case the region beginning has changed.
1601 void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
1602                                    ArrayRef<SUnit*> BotRoots) {
1603   ScheduleDAGMI::initQueues(TopRoots, BotRoots);
1604   if (ShouldTrackPressure) {
1605     assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1606     TopRPTracker.setPos(CurrentTop);
1607   }
1608 }
1609 
1610 /// Move an instruction and update register pressure.
1611 void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
1612   // Move the instruction to its new location in the instruction stream.
1613   MachineInstr *MI = SU->getInstr();
1614 
1615   if (IsTopNode) {
1616     assert(SU->isTopReady() && "node still has unscheduled dependencies");
1617     if (&*CurrentTop == MI)
1618       CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
1619     else {
1620       moveInstruction(MI, CurrentTop);
1621       TopRPTracker.setPos(MI);
1622     }
1623 
1624     if (ShouldTrackPressure) {
1625       // Update top scheduled pressure.
1626       RegisterOperands RegOpers;
1627       RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1628       if (ShouldTrackLaneMasks) {
1629         // Adjust liveness and add missing dead+read-undef flags.
1630         SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1631         RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1632       } else {
1633         // Adjust for missing dead-def flags.
1634         RegOpers.detectDeadDefs(*MI, *LIS);
1635       }
1636 
1637       TopRPTracker.advance(RegOpers);
1638       assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
1639       LLVM_DEBUG(dbgs() << "Top Pressure:\n"; dumpRegSetPressure(
1640                      TopRPTracker.getRegSetPressureAtPos(), TRI););
1641 
1642       updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
1643     }
1644   } else {
1645     assert(SU->isBottomReady() && "node still has unscheduled dependencies");
1646     MachineBasicBlock::iterator priorII =
1647       priorNonDebug(CurrentBottom, CurrentTop);
1648     if (&*priorII == MI)
1649       CurrentBottom = priorII;
1650     else {
1651       if (&*CurrentTop == MI) {
1652         CurrentTop = nextIfDebug(++CurrentTop, priorII);
1653         TopRPTracker.setPos(CurrentTop);
1654       }
1655       moveInstruction(MI, CurrentBottom);
1656       CurrentBottom = MI;
1657       BotRPTracker.setPos(CurrentBottom);
1658     }
1659     if (ShouldTrackPressure) {
1660       RegisterOperands RegOpers;
1661       RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1662       if (ShouldTrackLaneMasks) {
1663         // Adjust liveness and add missing dead+read-undef flags.
1664         SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
1665         RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1666       } else {
1667         // Adjust for missing dead-def flags.
1668         RegOpers.detectDeadDefs(*MI, *LIS);
1669       }
1670 
1671       if (BotRPTracker.getPos() != CurrentBottom)
1672         BotRPTracker.recedeSkipDebugValues();
1673       SmallVector<RegisterMaskPair, 8> LiveUses;
1674       BotRPTracker.recede(RegOpers, &LiveUses);
1675       assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
1676       LLVM_DEBUG(dbgs() << "Bottom Pressure:\n"; dumpRegSetPressure(
1677                      BotRPTracker.getRegSetPressureAtPos(), TRI););
1678 
1679       updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
1680       updatePressureDiffs(LiveUses);
1681     }
1682   }
1683 }
1684 
1685 //===----------------------------------------------------------------------===//
1686 // BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
1687 //===----------------------------------------------------------------------===//
1688 
1689 namespace {
1690 
1691 /// Post-process the DAG to create cluster edges between neighboring
1692 /// loads or between neighboring stores.
1693 class BaseMemOpClusterMutation : public ScheduleDAGMutation {
1694   struct MemOpInfo {
1695     SUnit *SU;
1696     SmallVector<const MachineOperand *, 4> BaseOps;
1697     int64_t Offset;
1698     unsigned Width;
1699 
1700     MemOpInfo(SUnit *SU, ArrayRef<const MachineOperand *> BaseOps,
1701               int64_t Offset, unsigned Width)
1702         : SU(SU), BaseOps(BaseOps.begin(), BaseOps.end()), Offset(Offset),
1703           Width(Width) {}
1704 
1705     static bool Compare(const MachineOperand *const &A,
1706                         const MachineOperand *const &B) {
1707       if (A->getType() != B->getType())
1708         return A->getType() < B->getType();
1709       if (A->isReg())
1710         return A->getReg() < B->getReg();
1711       if (A->isFI()) {
1712         const MachineFunction &MF = *A->getParent()->getParent()->getParent();
1713         const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
1714         bool StackGrowsDown = TFI.getStackGrowthDirection() ==
1715                               TargetFrameLowering::StackGrowsDown;
1716         return StackGrowsDown ? A->getIndex() > B->getIndex()
1717                               : A->getIndex() < B->getIndex();
1718       }
1719 
1720       llvm_unreachable("MemOpClusterMutation only supports register or frame "
1721                        "index bases.");
1722     }
1723 
1724     bool operator<(const MemOpInfo &RHS) const {
1725       // FIXME: Don't compare everything twice. Maybe use C++20 three way
1726       // comparison instead when it's available.
1727       if (std::lexicographical_compare(BaseOps.begin(), BaseOps.end(),
1728                                        RHS.BaseOps.begin(), RHS.BaseOps.end(),
1729                                        Compare))
1730         return true;
1731       if (std::lexicographical_compare(RHS.BaseOps.begin(), RHS.BaseOps.end(),
1732                                        BaseOps.begin(), BaseOps.end(), Compare))
1733         return false;
1734       if (Offset != RHS.Offset)
1735         return Offset < RHS.Offset;
1736       return SU->NodeNum < RHS.SU->NodeNum;
1737     }
1738   };
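
  // Example ordering (illustrative): for records sharing the same base
  // register, { (SU2, Offset 8), (SU0, Offset 0), (SU1, Offset 0) } sorts via
  // the operator< above to (SU0, 0), (SU1, 0), (SU2, 8): offsets first, then
  // NodeNum to keep the order deterministic.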
1739 
1740   const TargetInstrInfo *TII;
1741   const TargetRegisterInfo *TRI;
1742   bool IsLoad;
1743 
1744 public:
1745   BaseMemOpClusterMutation(const TargetInstrInfo *tii,
1746                            const TargetRegisterInfo *tri, bool IsLoad)
1747       : TII(tii), TRI(tri), IsLoad(IsLoad) {}
1748 
1749   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1750 
1751 protected:
1752   void clusterNeighboringMemOps(ArrayRef<MemOpInfo> MemOps, bool FastCluster,
1753                                 ScheduleDAGInstrs *DAG);
1754   void collectMemOpRecords(std::vector<SUnit> &SUnits,
1755                            SmallVectorImpl<MemOpInfo> &MemOpRecords);
1756   bool groupMemOps(ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
1757                    DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups);
1758 };
1759 
1760 class StoreClusterMutation : public BaseMemOpClusterMutation {
1761 public:
1762   StoreClusterMutation(const TargetInstrInfo *tii,
1763                        const TargetRegisterInfo *tri)
1764       : BaseMemOpClusterMutation(tii, tri, false) {}
1765 };
1766 
1767 class LoadClusterMutation : public BaseMemOpClusterMutation {
1768 public:
1769   LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
1770       : BaseMemOpClusterMutation(tii, tri, true) {}
1771 };
1772 
1773 } // end anonymous namespace
1774 
1775 namespace llvm {
1776 
1777 std::unique_ptr<ScheduleDAGMutation>
1778 createLoadClusterDAGMutation(const TargetInstrInfo *TII,
1779                              const TargetRegisterInfo *TRI) {
1780   return EnableMemOpCluster ? std::make_unique<LoadClusterMutation>(TII, TRI)
1781                             : nullptr;
1782 }
1783 
1784 std::unique_ptr<ScheduleDAGMutation>
1785 createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1786                               const TargetRegisterInfo *TRI) {
1787   return EnableMemOpCluster ? std::make_unique<StoreClusterMutation>(TII, TRI)
1788                             : nullptr;
1789 }
1790 
1791 } // end namespace llvm
1792 
1793 // Sort all the loads/stores first. Then, for each load/store, check the
1794 // following loads/stores one by one until reaching the first non-dependent
1795 // one, and call the target hook to see if they can be clustered.
1796 // If FastCluster is enabled, we assume that all the loads/stores have been
1797 // preprocessed and no longer have dependencies on each other.
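//
// Worked example (illustrative): given records (SUa: base %r1, offset 0,
// width 4) and (SUb: base %r1, offset 4, width 4) with neither reachable from
// the other, the loop below proposes ClusterLength = 2 and
// CurrentClusterBytes = 4 + 4 = 8 to shouldClusterMemOps(); if the target
// agrees, a Cluster edge is added from the lower-numbered SU to the higher.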
1798 void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1799     ArrayRef<MemOpInfo> MemOpRecords, bool FastCluster,
1800     ScheduleDAGInstrs *DAG) {
1801   // Keep track of the current cluster length and bytes for each SUnit.
1802   DenseMap<unsigned, std::pair<unsigned, unsigned>> SUnit2ClusterInfo;
1803 
1804   // At this point, the `MemOpRecords` array must hold at least two mem ops.
1805   // Try to cluster the mem ops collected within the `MemOpRecords` array.
1806   for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1807     // The decision to cluster mem ops is based on target-dependent logic.
1808     auto MemOpa = MemOpRecords[Idx];
1809 
1810     // Seek the next load/store to cluster with.
1811     unsigned NextIdx = Idx + 1;
1812     for (; NextIdx < End; ++NextIdx)
1813       // Skip if MemOpb has already been clustered or has a dependency on
1814       // MemOpa.
1815       if (!SUnit2ClusterInfo.count(MemOpRecords[NextIdx].SU->NodeNum) &&
1816           (FastCluster ||
1817            (!DAG->IsReachable(MemOpRecords[NextIdx].SU, MemOpa.SU) &&
1818             !DAG->IsReachable(MemOpa.SU, MemOpRecords[NextIdx].SU))))
1819         break;
1820     if (NextIdx == End)
1821       continue;
1822 
1823     auto MemOpb = MemOpRecords[NextIdx];
1824     unsigned ClusterLength = 2;
1825     unsigned CurrentClusterBytes = MemOpa.Width + MemOpb.Width;
1826     if (SUnit2ClusterInfo.count(MemOpa.SU->NodeNum)) {
1827       ClusterLength = SUnit2ClusterInfo[MemOpa.SU->NodeNum].first + 1;
1828       CurrentClusterBytes =
1829           SUnit2ClusterInfo[MemOpa.SU->NodeNum].second + MemOpb.Width;
1830     }
1831 
1832     if (!TII->shouldClusterMemOps(MemOpa.BaseOps, MemOpb.BaseOps, ClusterLength,
1833                                   CurrentClusterBytes))
1834       continue;
1835 
1836     SUnit *SUa = MemOpa.SU;
1837     SUnit *SUb = MemOpb.SU;
1838     if (SUa->NodeNum > SUb->NodeNum)
1839       std::swap(SUa, SUb);
1840 
1841     // FIXME: Is this check really required?
1842     if (!DAG->addEdge(SUb, SDep(SUa, SDep::Cluster)))
1843       continue;
1844 
1845     LLVM_DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
1846                       << SUb->NodeNum << ")\n");
1847     ++NumClustered;
1848 
1849     if (IsLoad) {
1850       // Copy successor edges from SUa to SUb. Interleaving computation
1851       // dependent on SUa can prevent load combining due to register reuse.
1852       // Predecessor edges do not need to be copied from SUb to SUa since
1853       // nearby loads should have effectively the same inputs.
1854       for (const SDep &Succ : SUa->Succs) {
1855         if (Succ.getSUnit() == SUb)
1856           continue;
1857         LLVM_DEBUG(dbgs() << "  Copy Succ SU(" << Succ.getSUnit()->NodeNum
1858                           << ")\n");
1859         DAG->addEdge(Succ.getSUnit(), SDep(SUb, SDep::Artificial));
1860       }
1861     } else {
1862       // Copy predecessor edges from SUb to SUa to avoid the SUnits that
1863       // SUb dependent on scheduled in-between SUb and SUa. Successor edges
1864       // do not need to be copied from SUa to SUb since no one will depend
1865       // on stores.
1866       // Notice that, we don't need to care about the memory dependency as
1867       // we won't try to cluster them if they have any memory dependency.
1868       for (const SDep &Pred : SUb->Preds) {
1869         if (Pred.getSUnit() == SUa)
1870           continue;
1871         LLVM_DEBUG(dbgs() << "  Copy Pred SU(" << Pred.getSUnit()->NodeNum
1872                           << ")\n");
1873         DAG->addEdge(SUa, SDep(Pred.getSUnit(), SDep::Artificial));
1874       }
1875     }
1876 
1877     SUnit2ClusterInfo[MemOpb.SU->NodeNum] = {ClusterLength,
1878                                              CurrentClusterBytes};
1879 
1880     LLVM_DEBUG(dbgs() << "  Curr cluster length: " << ClusterLength
1881                       << ", Curr cluster bytes: " << CurrentClusterBytes
1882                       << "\n");
1883   }
1884 }
1885 
1886 void BaseMemOpClusterMutation::collectMemOpRecords(
1887     std::vector<SUnit> &SUnits, SmallVectorImpl<MemOpInfo> &MemOpRecords) {
1888   for (auto &SU : SUnits) {
1889     if ((IsLoad && !SU.getInstr()->mayLoad()) ||
1890         (!IsLoad && !SU.getInstr()->mayStore()))
1891       continue;
1892 
1893     const MachineInstr &MI = *SU.getInstr();
1894     SmallVector<const MachineOperand *, 4> BaseOps;
1895     int64_t Offset;
1896     bool OffsetIsScalable;
1897     unsigned Width;
1898     if (TII->getMemOperandsWithOffsetWidth(MI, BaseOps, Offset,
1899                                            OffsetIsScalable, Width, TRI)) {
1900       MemOpRecords.push_back(MemOpInfo(&SU, BaseOps, Offset, Width));
1901 
1902       LLVM_DEBUG(dbgs() << "Num BaseOps: " << BaseOps.size() << ", Offset: "
1903                         << Offset << ", OffsetIsScalable: " << OffsetIsScalable
1904                         << ", Width: " << Width << "\n");
1905     }
1906 #ifndef NDEBUG
1907     for (const auto *Op : BaseOps)
1908       assert(Op);
1909 #endif
1910   }
1911 }
1912 
1913 bool BaseMemOpClusterMutation::groupMemOps(
1914     ArrayRef<MemOpInfo> MemOps, ScheduleDAGInstrs *DAG,
1915     DenseMap<unsigned, SmallVector<MemOpInfo, 32>> &Groups) {
1916   bool FastCluster =
1917       ForceFastCluster ||
1918       MemOps.size() * DAG->SUnits.size() / 1000 > FastClusterThreshold;
1919 
1920   for (const auto &MemOp : MemOps) {
1921     unsigned ChainPredID = DAG->SUnits.size();
1922     if (FastCluster) {
1923       for (const SDep &Pred : MemOp.SU->Preds) {
1924         // We only want to cluster mem ops that have the same ctrl (non-data)
1925         // pred, so that they have no ctrl dependency on each other. For store
1926         // instrs, we can still cluster them if the pred is a load instr.
1927         if ((Pred.isCtrl() &&
1928              (IsLoad ||
1929               (Pred.getSUnit() && Pred.getSUnit()->getInstr()->mayStore()))) &&
1930             !Pred.isArtificial()) {
1931           ChainPredID = Pred.getSUnit()->NodeNum;
1932           break;
1933         }
1934       }
1935     } else
1936       ChainPredID = 0;
1937 
1938     Groups[ChainPredID].push_back(MemOp);
1939   }
1940   return FastCluster;
1941 }
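
// Illustrative arithmetic for the FastCluster switch above: with 64 mem op
// records in a DAG of 500 SUnits, 64 * 500 / 1000 = 32, so fast clustering is
// used only when FastClusterThreshold is below 32; the expensive
// IsReachable() checks in clusterNeighboringMemOps() are then skipped in
// favor of grouping by ctrl predecessor.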
1942 
1943 /// Callback from DAG postProcessing to create cluster edges for loads/stores.
1944 void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAG) {
1945   // Collect all the clusterable loads/stores
1946   SmallVector<MemOpInfo, 32> MemOpRecords;
1947   collectMemOpRecords(DAG->SUnits, MemOpRecords);
1948 
1949   if (MemOpRecords.size() < 2)
1950     return;
1951 
1952   // If the DAG is too complex, use a heuristic to put independent
1953   // loads/stores into the same group, avoiding a compile-time blowup.
1954   // Note that some fusion pairs could be lost this way.
1955   DenseMap<unsigned, SmallVector<MemOpInfo, 32>> Groups;
1956   bool FastCluster = groupMemOps(MemOpRecords, DAG, Groups);
1957 
1958   for (auto &Group : Groups) {
1959     // Sort the loads/stores so that we can stop clustering as early as
1960     // possible.
1961     llvm::sort(Group.second);
1962 
1963     // Try to cluster all the neighboring loads/stores.
1964     clusterNeighboringMemOps(Group.second, FastCluster, DAG);
1965   }
1966 }
1967 
1968 //===----------------------------------------------------------------------===//
1969 // CopyConstrain - DAG post-processing to encourage copy elimination.
1970 //===----------------------------------------------------------------------===//
1971 
1972 namespace {
1973 
1974 /// Post-process the DAG to create weak edges from all uses of a copy to
1975 /// the one use that defines the copy's source vreg, most likely an induction
1976 /// variable increment.
1977 class CopyConstrain : public ScheduleDAGMutation {
1978   // Transient state.
1979   SlotIndex RegionBeginIdx;
1980 
1981   // RegionEndIdx is the slot index of the last non-debug instruction in the
1982   // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
1983   SlotIndex RegionEndIdx;
1984 
1985 public:
1986   CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1987 
1988   void apply(ScheduleDAGInstrs *DAGInstrs) override;
1989 
1990 protected:
1991   void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
1992 };
1993 
1994 } // end anonymous namespace
1995 
1996 namespace llvm {
1997 
1998 std::unique_ptr<ScheduleDAGMutation>
1999 createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
2000                                const TargetRegisterInfo *TRI) {
2001   return std::make_unique<CopyConstrain>(TII, TRI);
2002 }
2003 
2004 } // end namespace llvm
2005 
2006 /// constrainLocalCopy handles two possibilities:
2007 /// 1) Local src:
2008 /// I0:     = dst
2009 /// I1: src = ...
2010 /// I2:     = dst
2011 /// I3: dst = src (copy)
2012 /// (create pred->succ edges I0->I1, I2->I1)
2013 ///
2014 /// 2) Local copy:
2015 /// I0: dst = src (copy)
2016 /// I1:     = dst
2017 /// I2: src = ...
2018 /// I3:     = dst
2019 /// (create pred->succ edges I1->I2, I3->I2)
2020 ///
2021 /// Although the MachineScheduler is currently constrained to single blocks,
2022 /// this algorithm should handle extended blocks. An EBB is a set of
2023 /// contiguously numbered blocks such that the previous block in the EBB is
2024 /// always the single predecessor.
2025 void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
2026   LiveIntervals *LIS = DAG->getLIS();
2027   MachineInstr *Copy = CopySU->getInstr();
2028 
2029   // Check for pure vreg copies.
2030   const MachineOperand &SrcOp = Copy->getOperand(1);
2031   Register SrcReg = SrcOp.getReg();
2032   if (!SrcReg.isVirtual() || !SrcOp.readsReg())
2033     return;
2034 
2035   const MachineOperand &DstOp = Copy->getOperand(0);
2036   Register DstReg = DstOp.getReg();
2037   if (!DstReg.isVirtual() || DstOp.isDead())
2038     return;
2039 
2040   // Check if either the dest or source is local. If it's live across a back
2041   // edge, it's not local. Note that if both vregs are live across the back
2042   // edge, we cannot successfully constrain the copy without cyclic scheduling.
2043   // If both the copy's source and dest are local live intervals, then we
2044   // should treat the dest as the global for the purpose of adding
2045   // constraints. This adds edges from source's other uses to the copy.
2046   unsigned LocalReg = SrcReg;
2047   unsigned GlobalReg = DstReg;
2048   LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
2049   if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
2050     LocalReg = DstReg;
2051     GlobalReg = SrcReg;
2052     LocalLI = &LIS->getInterval(LocalReg);
2053     if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
2054       return;
2055   }
2056   LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
2057 
2058   // Find the global segment after the start of the local LI.
2059   LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
2060   // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
2061   // local live range. We could create edges from other global uses to the local
2062   // start, but the coalescer should have already eliminated these cases, so
2063   // don't bother dealing with them.
2064   if (GlobalSegment == GlobalLI->end())
2065     return;
2066 
2067   // If GlobalSegment is killed at the LocalLI->start, the call to find()
2068   // returned the next global segment. But if GlobalSegment overlaps with
2069   // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
2070   // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
2071   if (GlobalSegment->contains(LocalLI->beginIndex()))
2072     ++GlobalSegment;
2073 
2074   if (GlobalSegment == GlobalLI->end())
2075     return;
2076 
2077   // Check if GlobalLI contains a hole in the vicinity of LocalLI.
2078   if (GlobalSegment != GlobalLI->begin()) {
2079     // Two address defs have no hole.
2080     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
2081                                GlobalSegment->start)) {
2082       return;
2083     }
2084     // If the prior global segment may be defined by the same two-address
2085     // instruction that also defines LocalLI, then we can't make a hole here.
2086     if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
2087                                LocalLI->beginIndex())) {
2088       return;
2089     }
2090     // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
2091     // it would be a disconnected component in the live range.
2092     assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
2093            "Disconnected LRG within the scheduling region.");
2094   }
2095   MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
2096   if (!GlobalDef)
2097     return;
2098 
2099   SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
2100   if (!GlobalSU)
2101     return;
2102 
2103   // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
2104   // constraining the uses of the last local def to precede GlobalDef.
2105   SmallVector<SUnit*,8> LocalUses;
2106   const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
2107   MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
2108   SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
2109   for (const SDep &Succ : LastLocalSU->Succs) {
2110     if (Succ.getKind() != SDep::Data || Succ.getReg() != LocalReg)
2111       continue;
2112     if (Succ.getSUnit() == GlobalSU)
2113       continue;
2114     if (!DAG->canAddEdge(GlobalSU, Succ.getSUnit()))
2115       return;
2116     LocalUses.push_back(Succ.getSUnit());
2117   }
2118   // Open the top of the GlobalLI hole by constraining any earlier global uses
2119   // to precede the start of LocalLI.
2120   SmallVector<SUnit*,8> GlobalUses;
2121   MachineInstr *FirstLocalDef =
2122     LIS->getInstructionFromIndex(LocalLI->beginIndex());
2123   SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
2124   for (const SDep &Pred : GlobalSU->Preds) {
2125     if (Pred.getKind() != SDep::Anti || Pred.getReg() != GlobalReg)
2126       continue;
2127     if (Pred.getSUnit() == FirstLocalSU)
2128       continue;
2129     if (!DAG->canAddEdge(FirstLocalSU, Pred.getSUnit()))
2130       return;
2131     GlobalUses.push_back(Pred.getSUnit());
2132   }
2133   LLVM_DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
2134   // Add the weak edges.
2135   for (SUnit *LU : LocalUses) {
2136     LLVM_DEBUG(dbgs() << "  Local use SU(" << LU->NodeNum << ") -> SU("
2137                       << GlobalSU->NodeNum << ")\n");
2138     DAG->addEdge(GlobalSU, SDep(LU, SDep::Weak));
2139   }
2140   for (SUnit *GU : GlobalUses) {
2141     LLVM_DEBUG(dbgs() << "  Global use SU(" << GU->NodeNum << ") -> SU("
2142                       << FirstLocalSU->NodeNum << ")\n");
2143     DAG->addEdge(FirstLocalSU, SDep(GU, SDep::Weak));
2144   }
2145 }
2146 
2147 /// Callback from DAG postProcessing to create weak edges to encourage
2148 /// copy elimination.
2149 void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
2150   ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
2151   assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
2152 
2153   MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
2154   if (FirstPos == DAG->end())
2155     return;
2156   RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
2157   RegionEndIdx = DAG->getLIS()->getInstructionIndex(
2158       *priorNonDebug(DAG->end(), DAG->begin()));
2159 
2160   for (SUnit &SU : DAG->SUnits) {
2161     if (!SU.getInstr()->isCopy())
2162       continue;
2163 
2164     constrainLocalCopy(&SU, static_cast<ScheduleDAGMILive*>(DAG));
2165   }
2166 }
2167 
2168 //===----------------------------------------------------------------------===//
2169 // MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
2170 // and possibly other custom schedulers.
2171 //===----------------------------------------------------------------------===//
2172 
2173 static const unsigned InvalidCycle = ~0U;
2174 
2175 SchedBoundary::~SchedBoundary() { delete HazardRec; }
2176 
2177 /// Given a Count of resource usage and a Latency value, return true if a
2178 /// SchedBoundary becomes resource limited.
2179 /// If we are checking after scheduling a node, we should return true when
2180 /// we just reach the resource limit.
2181 static bool checkResourceLimit(unsigned LFactor, unsigned Count,
2182                                unsigned Latency, bool AfterSchedNode) {
2183   int ResCntFactor = (int)(Count - (Latency * LFactor));
2184   if (AfterSchedNode)
2185     return ResCntFactor >= (int)LFactor;
2186   else
2187     return ResCntFactor > (int)LFactor;
2188 }
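
// Worked example (illustrative): with LFactor = 2, Count = 10, Latency = 4,
// ResCntFactor = 10 - 4 * 2 = 2. After scheduling a node the zone is resource
// limited (2 >= 2), but when checked before scheduling it is not, since the
// comparison is then strict (2 > 2 is false).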
2189 
2190 void SchedBoundary::reset() {
2191   // A new HazardRec is created for each DAG and owned by SchedBoundary.
2192   // Destroying and reconstructing it is very expensive though, so we keep
2193   // invalid, placeholder HazardRecs.
2194   if (HazardRec && HazardRec->isEnabled()) {
2195     delete HazardRec;
2196     HazardRec = nullptr;
2197   }
2198   Available.clear();
2199   Pending.clear();
2200   CheckPending = false;
2201   CurrCycle = 0;
2202   CurrMOps = 0;
2203   MinReadyCycle = std::numeric_limits<unsigned>::max();
2204   ExpectedLatency = 0;
2205   DependentLatency = 0;
2206   RetiredMOps = 0;
2207   MaxExecutedResCount = 0;
2208   ZoneCritResIdx = 0;
2209   IsResourceLimited = false;
2210   ReservedCycles.clear();
2211   ReservedResourceSegments.clear();
2212   ReservedCyclesIndex.clear();
2213   ResourceGroupSubUnitMasks.clear();
2214 #if LLVM_ENABLE_ABI_BREAKING_CHECKS
2215   // Track the maximum number of stall cycles that could arise either from the
2216   // latency of a DAG edge or the number of cycles that a processor resource is
2217   // reserved (SchedBoundary::ReservedCycles).
2218   MaxObservedStall = 0;
2219 #endif
2220   // Reserve a zero-count for invalid CritResIdx.
2221   ExecutedResCounts.resize(1);
2222   assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
2223 }
2224 
2225 void SchedRemainder::
2226 init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
2227   reset();
2228   if (!SchedModel->hasInstrSchedModel())
2229     return;
2230   RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
2231   for (SUnit &SU : DAG->SUnits) {
2232     const MCSchedClassDesc *SC = DAG->getSchedClass(&SU);
2233     RemIssueCount += SchedModel->getNumMicroOps(SU.getInstr(), SC)
2234       * SchedModel->getMicroOpFactor();
2235     for (TargetSchedModel::ProcResIter
2236            PI = SchedModel->getWriteProcResBegin(SC),
2237            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2238       unsigned PIdx = PI->ProcResourceIdx;
2239       unsigned Factor = SchedModel->getResourceFactor(PIdx);
2240       assert(PI->Cycles >= PI->StartAtCycle);
2241       RemainingCounts[PIdx] += (Factor * (PI->Cycles - PI->StartAtCycle));
2242     }
2243   }
2244 }
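
// Illustrative arithmetic for the loop above: an SU with 2 micro-ops adds
// 2 * getMicroOpFactor() to RemIssueCount, and a write occupying a resource
// for Cycles = 3 starting at StartAtCycle = 1 adds Factor * (3 - 1) units to
// RemainingCounts[PIdx]; the factors scale everything into common units so
// micro-op and resource counts remain comparable.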
2245 
2246 void SchedBoundary::
2247 init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
2248   reset();
2249   DAG = dag;
2250   SchedModel = smodel;
2251   Rem = rem;
2252   if (SchedModel->hasInstrSchedModel()) {
2253     unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
2254     ReservedCyclesIndex.resize(ResourceCount);
2255     ExecutedResCounts.resize(ResourceCount);
2256     ResourceGroupSubUnitMasks.resize(ResourceCount, APInt(ResourceCount, 0));
2257     unsigned NumUnits = 0;
2258 
2259     for (unsigned i = 0; i < ResourceCount; ++i) {
2260       ReservedCyclesIndex[i] = NumUnits;
2261       NumUnits += SchedModel->getProcResource(i)->NumUnits;
2262       if (isUnbufferedGroup(i)) {
2263         auto SubUnits = SchedModel->getProcResource(i)->SubUnitsIdxBegin;
2264         for (unsigned U = 0, UE = SchedModel->getProcResource(i)->NumUnits;
2265              U != UE; ++U)
2266           ResourceGroupSubUnitMasks[i].setBit(SubUnits[U]);
2267       }
2268     }
2269 
2270     ReservedCycles.resize(NumUnits, InvalidCycle);
2271   }
2272 }
2273 
2274 /// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
2275 /// these "soft stalls" differently than the hard stall cycles based on CPU
2276 /// resources and computed by checkHazard(). A fully in-order model
2277 /// (MicroOpBufferSize==0) will not make use of this since instructions are not
2278 /// available for scheduling until they are ready. However, a weaker in-order
2279 /// model may use this for heuristics. For example, if a processor has in-order
2280 /// behavior when reading certain resources, this may come into play.
2281 unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
2282   if (!SU->isUnbuffered)
2283     return 0;
2284 
2285   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2286   if (ReadyCycle > CurrCycle)
2287     return ReadyCycle - CurrCycle;
2288   return 0;
2289 }
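
// Example (illustrative): an unbuffered SU that becomes ready at cycle 7
// while CurrCycle is 5 reports 2 soft-stall cycles; buffered SUs always
// report 0, since a nonzero micro-op buffer is assumed to hide the latency.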
2290 
2291 /// Compute the next cycle at which the given processor resource unit
2292 /// can be scheduled.
2293 unsigned SchedBoundary::getNextResourceCycleByInstance(unsigned InstanceIdx,
2294                                                        unsigned Cycles,
2295                                                        unsigned StartAtCycle) {
2296   if (SchedModel && SchedModel->enableIntervals()) {
2297     if (isTop())
2298       return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromTop(
2299           CurrCycle, StartAtCycle, Cycles);
2300 
2301     return ReservedResourceSegments[InstanceIdx].getFirstAvailableAtFromBottom(
2302         CurrCycle, StartAtCycle, Cycles);
2303   }
2304 
2305   unsigned NextUnreserved = ReservedCycles[InstanceIdx];
2306   // If this resource has never been used, always return cycle zero.
2307   if (NextUnreserved == InvalidCycle)
2308     return CurrCycle;
2309   // For bottom-up scheduling add the cycles needed for the current operation.
2310   if (!isTop())
2311     NextUnreserved = std::max(CurrCycle, NextUnreserved + Cycles);
2312   return NextUnreserved;
2313 }
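
// Worked example (illustrative) for the non-interval path above: bottom-up
// with CurrCycle = 10, ReservedCycles[InstanceIdx] = 4 and Cycles = 3 yields
// max(10, 4 + 3) = 10, i.e. the unit is free now; had it been reserved until
// cycle 9, max(10, 9 + 3) = 12 would delay the instruction by two cycles.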
2314 
2315 /// Compute the next cycle at which the given processor resource can be
2316 /// scheduled.  Returns the next cycle and the index of the processor resource
2317 /// instance in the reserved cycles vector.
2318 std::pair<unsigned, unsigned>
2319 SchedBoundary::getNextResourceCycle(const MCSchedClassDesc *SC, unsigned PIdx,
2320                                     unsigned Cycles, unsigned StartAtCycle) {
2321   if (MischedDetailResourceBooking) {
2322     LLVM_DEBUG(dbgs() << "  Resource booking (@" << CurrCycle << "c): \n");
2323     LLVM_DEBUG(dumpReservedCycles());
2324     LLVM_DEBUG(dbgs() << "  getNextResourceCycle (@" << CurrCycle << "c): \n");
2325   }
2326   unsigned MinNextUnreserved = InvalidCycle;
2327   unsigned InstanceIdx = 0;
2328   unsigned StartIndex = ReservedCyclesIndex[PIdx];
2329   unsigned NumberOfInstances = SchedModel->getProcResource(PIdx)->NumUnits;
2330   assert(NumberOfInstances > 0 &&
2331          "Cannot have zero instances of a ProcResource");
2332 
2333   if (isUnbufferedGroup(PIdx)) {
2334     // If any subunits are used by the instruction, report that the resource
2335     // group is available at 0, effectively removing the group record from
2336     // hazarding and basing the hazarding decisions on the subunit records.
2337     // Otherwise, choose the first available instance from among the subunits.
2338     // Specifications which assign cycles to both the subunits and the group or
2339     // which use an unbuffered group with buffered subunits will appear to
2340     // schedule strangely. In the first case, the additional cycles for the
2341     // group will be ignored.  In the second, the group will be ignored
2342     // entirely.
2343     for (const MCWriteProcResEntry &PE :
2344          make_range(SchedModel->getWriteProcResBegin(SC),
2345                     SchedModel->getWriteProcResEnd(SC)))
2346       if (ResourceGroupSubUnitMasks[PIdx][PE.ProcResourceIdx])
2347         return std::make_pair(0u, StartIndex);
2348 
2349     auto SubUnits = SchedModel->getProcResource(PIdx)->SubUnitsIdxBegin;
2350     for (unsigned I = 0, End = NumberOfInstances; I < End; ++I) {
2351       unsigned NextUnreserved, NextInstanceIdx;
2352       std::tie(NextUnreserved, NextInstanceIdx) =
2353           getNextResourceCycle(SC, SubUnits[I], Cycles, StartAtCycle);
2354       if (MinNextUnreserved > NextUnreserved) {
2355         InstanceIdx = NextInstanceIdx;
2356         MinNextUnreserved = NextUnreserved;
2357       }
2358     }
2359     return std::make_pair(MinNextUnreserved, InstanceIdx);
2360   }
2361 
2362   for (unsigned I = StartIndex, End = StartIndex + NumberOfInstances; I < End;
2363        ++I) {
2364     unsigned NextUnreserved =
2365         getNextResourceCycleByInstance(I, Cycles, StartAtCycle);
2366     if (MischedDetailResourceBooking)
2367       LLVM_DEBUG(dbgs() << "    Instance " << I - StartIndex << " available @"
2368                         << NextUnreserved << "c\n");
2369     if (MinNextUnreserved > NextUnreserved) {
2370       InstanceIdx = I;
2371       MinNextUnreserved = NextUnreserved;
2372     }
2373   }
2374   if (MischedDetailResourceBooking)
2375     LLVM_DEBUG(dbgs() << "    selecting " << SchedModel->getResourceName(PIdx)
2376                       << "[" << InstanceIdx - StartIndex << "]"
2377                       << " available @" << MinNextUnreserved << "c"
2378                       << "\n");
2379   return std::make_pair(MinNextUnreserved, InstanceIdx);
2380 }
2381 
2382 /// Does this SU have a hazard within the current instruction group?
2383 ///
2384 /// The scheduler supports two modes of hazard recognition. The first is the
2385 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
2386 /// supports highly complicated in-order reservation tables
2387 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
2388 ///
2389 /// The second is a streamlined mechanism that checks for hazards based on
2390 /// simple counters that the scheduler itself maintains. It explicitly checks
2391 /// for instruction dispatch limitations, including the number of micro-ops that
2392 /// can dispatch per cycle.
2393 ///
2394 /// TODO: Also check whether the SU must start a new group.
2395 bool SchedBoundary::checkHazard(SUnit *SU) {
2396   if (HazardRec->isEnabled()
2397       && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
2398     return true;
2399   }
2400 
2401   unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
2402   if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
2403     LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") uops="
2404                       << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
2405     return true;
2406   }
2407 
2408   if (CurrMOps > 0 &&
2409       ((isTop() && SchedModel->mustBeginGroup(SU->getInstr())) ||
2410        (!isTop() && SchedModel->mustEndGroup(SU->getInstr())))) {
2411     LLVM_DEBUG(dbgs() << "  hazard: SU(" << SU->NodeNum << ") must "
2412                       << (isTop() ? "begin" : "end") << " group\n");
2413     return true;
2414   }
2415 
2416   if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
2417     const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2418     for (const MCWriteProcResEntry &PE :
2419           make_range(SchedModel->getWriteProcResBegin(SC),
2420                      SchedModel->getWriteProcResEnd(SC))) {
2421       unsigned ResIdx = PE.ProcResourceIdx;
2422       unsigned Cycles = PE.Cycles;
2423       unsigned StartAtCycle = PE.StartAtCycle;
2424       unsigned NRCycle, InstanceIdx;
2425       std::tie(NRCycle, InstanceIdx) =
2426           getNextResourceCycle(SC, ResIdx, Cycles, StartAtCycle);
2427       if (NRCycle > CurrCycle) {
2428 #if LLVM_ENABLE_ABI_BREAKING_CHECKS
2429         MaxObservedStall = std::max(Cycles, MaxObservedStall);
2430 #endif
2431         LLVM_DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") "
2432                           << SchedModel->getResourceName(ResIdx)
2433                           << '[' << InstanceIdx - ReservedCyclesIndex[ResIdx]  << ']'
2434                           << "=" << NRCycle << "c\n");
2435         return true;
2436       }
2437     }
2438   }
2439   return false;
2440 }
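
// Example (illustrative): with an issue width of 4 and CurrMOps = 3, a 2-uop
// SU is a hazard (3 + 2 > 4) and stays pending until bumpCycle() starts a new
// instruction group; a 1-uop SU still fits (3 + 1 <= 4).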
2441 
2442 // Find the unscheduled node in ReadySUs with the highest latency.
2443 unsigned SchedBoundary::
2444 findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
2445   SUnit *LateSU = nullptr;
2446   unsigned RemLatency = 0;
2447   for (SUnit *SU : ReadySUs) {
2448     unsigned L = getUnscheduledLatency(SU);
2449     if (L > RemLatency) {
2450       RemLatency = L;
2451       LateSU = SU;
2452     }
2453   }
2454   if (LateSU) {
2455     LLVM_DEBUG(dbgs() << Available.getName() << " RemLatency SU("
2456                       << LateSU->NodeNum << ") " << RemLatency << "c\n");
2457   }
2458   return RemLatency;
2459 }
2460 
2461 // Count resources in this zone and the remaining unscheduled
2462 // instructions. Return the max count, scaled. Set OtherCritIdx to the critical
2463 // resource index, or zero if the zone is issue limited.
2464 unsigned SchedBoundary::
2465 getOtherResourceCount(unsigned &OtherCritIdx) {
2466   OtherCritIdx = 0;
2467   if (!SchedModel->hasInstrSchedModel())
2468     return 0;
2469 
2470   unsigned OtherCritCount = Rem->RemIssueCount
2471     + (RetiredMOps * SchedModel->getMicroOpFactor());
2472   LLVM_DEBUG(dbgs() << "  " << Available.getName() << " + Remain MOps: "
2473                     << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
2474   for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
2475        PIdx != PEnd; ++PIdx) {
2476     unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
2477     if (OtherCount > OtherCritCount) {
2478       OtherCritCount = OtherCount;
2479       OtherCritIdx = PIdx;
2480     }
2481   }
2482   if (OtherCritIdx) {
2483     LLVM_DEBUG(
2484         dbgs() << "  " << Available.getName() << " + Remain CritRes: "
2485                << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
2486                << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
2487   }
2488   return OtherCritCount;
2489 }
2490 
2491 void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle, bool InPQueue,
2492                                 unsigned Idx) {
2493   assert(SU->getInstr() && "Scheduled SUnit must have instr");
2494 
2495 #if LLVM_ENABLE_ABI_BREAKING_CHECKS
2496   // ReadyCycle had been bumped up to CurrCycle when this node was
2497   // scheduled, but CurrCycle may have been eagerly advanced immediately after
2498   // scheduling, so may now be greater than ReadyCycle.
2499   if (ReadyCycle > CurrCycle)
2500     MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
2501 #endif
2502 
2503   if (ReadyCycle < MinReadyCycle)
2504     MinReadyCycle = ReadyCycle;
2505 
2506   // Check for interlocks first. For the purpose of other heuristics, an
2507   // instruction that cannot issue appears as if it's not in the ReadyQueue.
2508   bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
2509   bool HazardDetected = (!IsBuffered && ReadyCycle > CurrCycle) ||
2510                         checkHazard(SU) || (Available.size() >= ReadyListLimit);
2511 
2512   if (!HazardDetected) {
2513     Available.push(SU);
2514 
2515     if (InPQueue)
2516       Pending.remove(Pending.begin() + Idx);
2517     return;
2518   }
2519 
2520   if (!InPQueue)
2521     Pending.push(SU);
2522 }
2523 
2524 /// Move the boundary of scheduled code by one cycle.
2525 void SchedBoundary::bumpCycle(unsigned NextCycle) {
2526   if (SchedModel->getMicroOpBufferSize() == 0) {
2527     assert(MinReadyCycle < std::numeric_limits<unsigned>::max() &&
2528            "MinReadyCycle uninitialized");
2529     if (MinReadyCycle > NextCycle)
2530       NextCycle = MinReadyCycle;
2531   }
2532   // Update the current micro-ops, which will issue in the next cycle.
2533   unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2534   CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2535 
2536   // Decrement DependentLatency based on the next cycle.
2537   if ((NextCycle - CurrCycle) > DependentLatency)
2538     DependentLatency = 0;
2539   else
2540     DependentLatency -= (NextCycle - CurrCycle);
2541 
2542   if (!HazardRec->isEnabled()) {
2543     // Bypass HazardRec virtual calls.
2544     CurrCycle = NextCycle;
2545   } else {
2546     // Bypass getHazardType calls in case of long latency.
2547     for (; CurrCycle != NextCycle; ++CurrCycle) {
2548       if (isTop())
2549         HazardRec->AdvanceCycle();
2550       else
2551         HazardRec->RecedeCycle();
2552     }
2553   }
2554   CheckPending = true;
2555   IsResourceLimited =
2556       checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2557                          getScheduledLatency(), true);
2558 
2559   LLVM_DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName()
2560                     << '\n');
2561 }
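
// Worked example (illustrative): advancing from CurrCycle = 3 to
// NextCycle = 5 with an issue width of 4 gives DecMOps = 4 * (5 - 3) = 8, so
// any CurrMOps <= 8 drops to 0, and a DependentLatency of 6 shrinks by the
// two skipped cycles to 4.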
2562 
2563 void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
2564   ExecutedResCounts[PIdx] += Count;
2565   if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2566     MaxExecutedResCount = ExecutedResCounts[PIdx];
2567 }
2568 
2569 /// Add the given processor resource to this scheduled zone.
2570 ///
2571 /// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2572 /// during which this resource is consumed.
2573 ///
2574 /// \return the next cycle at which the instruction may execute without
2575 /// oversubscribing resources.
2576 unsigned SchedBoundary::countResource(const MCSchedClassDesc *SC, unsigned PIdx,
2577                                       unsigned Cycles, unsigned NextCycle,
2578                                       unsigned StartAtCycle) {
2579   unsigned Factor = SchedModel->getResourceFactor(PIdx);
2580   unsigned Count = Factor * (Cycles - StartAtCycle);
2581   LLVM_DEBUG(dbgs() << "  " << SchedModel->getResourceName(PIdx) << " +"
2582                     << Cycles << "x" << Factor << "u\n");
2583 
2584   // Update Executed resources counts.
2585   incExecutedResources(PIdx, Count);
2586   assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2587   Rem->RemainingCounts[PIdx] -= Count;
2588 
2589   // Check if this resource exceeds the current critical resource. If so, it
2590   // becomes the critical resource.
2591   if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
2592     ZoneCritResIdx = PIdx;
2593     LLVM_DEBUG(dbgs() << "  *** Critical resource "
2594                       << SchedModel->getResourceName(PIdx) << ": "
2595                       << getResourceCount(PIdx) / SchedModel->getLatencyFactor()
2596                       << "c\n");
2597   }
2598   // For reserved resources, record the highest cycle using the resource.
2599   unsigned NextAvailable, InstanceIdx;
2600   std::tie(NextAvailable, InstanceIdx) =
2601       getNextResourceCycle(SC, PIdx, Cycles, StartAtCycle);
2602   if (NextAvailable > CurrCycle) {
2603     LLVM_DEBUG(dbgs() << "  Resource conflict: "
2604                       << SchedModel->getResourceName(PIdx)
2605                       << '[' << InstanceIdx - ReservedCyclesIndex[PIdx]  << ']'
2606                       << " reserved until @" << NextAvailable << "\n");
2607   }
2608   return NextAvailable;
2609 }
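
// Worked example (illustrative): a resource with Factor = 2 consumed for
// Cycles = 3 starting at StartAtCycle = 1 adds Count = 2 * (3 - 1) = 4
// executed units; if getResourceCount(PIdx) now exceeds getCriticalCount(),
// PIdx becomes the new ZoneCritResIdx reported in the debug output above.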
2610 
2611 /// Move the boundary of scheduled code by one SUnit.
2612 void SchedBoundary::bumpNode(SUnit *SU) {
2613   // Update the reservation table.
2614   if (HazardRec->isEnabled()) {
2615     if (!isTop() && SU->isCall) {
2616       // Calls are scheduled with their preceding instructions. For bottom-up
2617       // scheduling, clear the pipeline state before emitting.
2618       HazardRec->Reset();
2619     }
2620     HazardRec->EmitInstruction(SU);
2621     // Scheduling an instruction may have made pending instructions available.
2622     CheckPending = true;
2623   }
2624   // checkHazard should prevent scheduling multiple instructions per cycle that
2625   // exceed the issue width.
2626   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2627   unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
2628   assert(
2629       (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
2630       "Cannot schedule this instruction's MicroOps in the current cycle.");
2631 
2632   unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2633   LLVM_DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");
2634 
2635   unsigned NextCycle = CurrCycle;
2636   switch (SchedModel->getMicroOpBufferSize()) {
2637   case 0:
2638     assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2639     break;
2640   case 1:
2641     if (ReadyCycle > NextCycle) {
2642       NextCycle = ReadyCycle;
2643       LLVM_DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
2644     }
2645     break;
2646   default:
2647     // We don't currently model the OOO reorder buffer, so consider all
2648     // scheduled MOps to be "retired". We do loosely model in-order resource
2649     // latency. If this instruction uses an in-order resource, account for any
2650     // likely stall cycles.
2651     if (SU->isUnbuffered && ReadyCycle > NextCycle)
2652       NextCycle = ReadyCycle;
2653     break;
2654   }
2655   RetiredMOps += IncMOps;
2656 
2657   // Update resource counts and critical resource.
2658   if (SchedModel->hasInstrSchedModel()) {
2659     unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2660     assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2661     Rem->RemIssueCount -= DecRemIssue;
2662     if (ZoneCritResIdx) {
2663       // Scale scheduled micro-ops for comparing with the critical resource.
2664       unsigned ScaledMOps =
2665         RetiredMOps * SchedModel->getMicroOpFactor();
2666 
2667       // If scaled micro-ops are now more than the previous critical resource by
2668       // a full cycle, then micro-ops issue becomes critical.
2669       if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2670           >= (int)SchedModel->getLatencyFactor()) {
2671         ZoneCritResIdx = 0;
2672         LLVM_DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
2673                           << ScaledMOps / SchedModel->getLatencyFactor()
2674                           << "c\n");
2675       }
2676     }
2677     for (TargetSchedModel::ProcResIter
2678            PI = SchedModel->getWriteProcResBegin(SC),
2679            PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2680       unsigned RCycle = countResource(SC, PI->ProcResourceIdx, PI->Cycles,
2681                                       NextCycle, PI->StartAtCycle);
2682       if (RCycle > NextCycle)
2683         NextCycle = RCycle;
2684     }
2685     if (SU->hasReservedResource) {
2686       // For reserved resources, record the highest cycle using the resource.
2687       // For top-down scheduling, this is the cycle in which we schedule this
2688       // instruction plus the number of cycles the operation reserves the
2689       // resource. For bottom-up, it is simply the instruction's cycle.
2690       for (TargetSchedModel::ProcResIter
2691              PI = SchedModel->getWriteProcResBegin(SC),
2692              PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2693         unsigned PIdx = PI->ProcResourceIdx;
2694         if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
2695 
2696           if (SchedModel->enableIntervals()) {
2697             unsigned ReservedUntil, InstanceIdx;
2698             std::tie(ReservedUntil, InstanceIdx) =
2699                 getNextResourceCycle(SC, PIdx, PI->Cycles, PI->StartAtCycle);
2700             if (isTop()) {
2701               ReservedResourceSegments[InstanceIdx].add(
2702                   ResourceSegments::getResourceIntervalTop(
2703                       NextCycle, PI->StartAtCycle, PI->Cycles),
2704                   MIResourceCutOff);
2705             } else {
2706               ReservedResourceSegments[InstanceIdx].add(
2707                   ResourceSegments::getResourceIntervalBottom(
2708                       NextCycle, PI->StartAtCycle, PI->Cycles),
2709                   MIResourceCutOff);
2710             }
2711           } else {
2712 
2713             unsigned ReservedUntil, InstanceIdx;
2714             std::tie(ReservedUntil, InstanceIdx) =
2715                 getNextResourceCycle(SC, PIdx, PI->Cycles, PI->StartAtCycle);
2716             if (isTop()) {
2717               ReservedCycles[InstanceIdx] =
2718                   std::max(ReservedUntil, NextCycle + PI->Cycles);
2719             } else
2720               ReservedCycles[InstanceIdx] = NextCycle;
2721           }
2722         }
2723       }
2724     }
2725   }
2726   // Update ExpectedLatency and DependentLatency.
2727   unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2728   unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2729   if (SU->getDepth() > TopLatency) {
2730     TopLatency = SU->getDepth();
2731     LLVM_DEBUG(dbgs() << "  " << Available.getName() << " TopLatency SU("
2732                       << SU->NodeNum << ") " << TopLatency << "c\n");
2733   }
2734   if (SU->getHeight() > BotLatency) {
2735     BotLatency = SU->getHeight();
2736     LLVM_DEBUG(dbgs() << "  " << Available.getName() << " BotLatency SU("
2737                       << SU->NodeNum << ") " << BotLatency << "c\n");
2738   }
2739   // If we stall for any reason, bump the cycle.
2740   if (NextCycle > CurrCycle)
2741     bumpCycle(NextCycle);
2742   else
2743     // After updating ZoneCritResIdx and ExpectedLatency, check if we're
2744     // resource limited. If a stall occurred, bumpCycle does this.
2745     IsResourceLimited =
2746         checkResourceLimit(SchedModel->getLatencyFactor(), getCriticalCount(),
2747                            getScheduledLatency(), true);
2748 
2749   // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2750   // resets CurrMOps. Loop to handle instructions with more MOps than issue in
2751   // one cycle.  Since we commonly reach the max MOps here, opportunistically
2752   // bump the cycle to avoid uselessly checking everything in the readyQ.
2753   CurrMOps += IncMOps;
2754 
2755   // Bump the cycle count for issue group constraints.
2756   // This must be done after NextCycle has been adjusted for all other stalls.
2757   // Calling bumpCycle(X) will reduce CurrMOps by one issue group and set
2758   // CurrCycle to X.
2759   if ((isTop() && SchedModel->mustEndGroup(SU->getInstr())) ||
2760       (!isTop() && SchedModel->mustBeginGroup(SU->getInstr()))) {
2761     LLVM_DEBUG(dbgs() << "  Bump cycle to " << (isTop() ? "end" : "begin")
2762                       << " group\n");
2763     bumpCycle(++NextCycle);
2764   }
2765 
2766   while (CurrMOps >= SchedModel->getIssueWidth()) {
2767     LLVM_DEBUG(dbgs() << "  *** Max MOps " << CurrMOps << " at cycle "
2768                       << CurrCycle << '\n');
2769     bumpCycle(++NextCycle);
2770   }
2771   LLVM_DEBUG(dumpScheduledState());
2772 }
2773 
2774 /// Release pending ready nodes into the available queue. This makes them
2775 /// visible to heuristics.
2776 void SchedBoundary::releasePending() {
2777   // If the available queue is empty, it is safe to reset MinReadyCycle.
2778   if (Available.empty())
2779     MinReadyCycle = std::numeric_limits<unsigned>::max();
2780 
2781   // Check to see if any of the pending instructions are ready to issue.  If
2782   // so, add them to the available queue.
2783   for (unsigned I = 0, E = Pending.size(); I < E; ++I) {
2784     SUnit *SU = *(Pending.begin() + I);
2785     unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
2786 
2787     if (ReadyCycle < MinReadyCycle)
2788       MinReadyCycle = ReadyCycle;
2789 
2790     if (Available.size() >= ReadyListLimit)
2791       break;
2792 
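    // releaseNode may erase SU from Pending when it becomes available; the
    // index fix-up below re-visits the element that slid into this slot.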
2793     releaseNode(SU, ReadyCycle, true, I);
2794     if (E != Pending.size()) {
2795       --I;
2796       --E;
2797     }
2798   }
2799   CheckPending = false;
2800 }
2801 
2802 /// Remove SU from the ready set for this boundary.
2803 void SchedBoundary::removeReady(SUnit *SU) {
2804   if (Available.isInQueue(SU))
2805     Available.remove(Available.find(SU));
2806   else {
2807     assert(Pending.isInQueue(SU) && "bad ready count");
2808     Pending.remove(Pending.find(SU));
2809   }
2810 }
2811 
2812 /// If this queue only has one ready candidate, return it. As a side effect,
2813 /// defer any nodes that now hit a hazard, and advance the cycle until at least
2814 /// one node is ready. If multiple instructions are ready, return nullptr.
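/// For example, if only SU(3) remains in Available once hazards are
/// deferred, SU(3) is returned directly and the full heuristic comparison
/// in the strategy's pickNodeFromQueue is skipped.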
2815 SUnit *SchedBoundary::pickOnlyChoice() {
2816   if (CheckPending)
2817     releasePending();
2818 
2819   // Defer any ready instrs that now have a hazard.
2820   for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2821     if (checkHazard(*I)) {
2822       Pending.push(*I);
2823       I = Available.remove(I);
2824       continue;
2825     }
2826     ++I;
2827   }
2828   for (unsigned i = 0; Available.empty(); ++i) {
2829 //  FIXME: Re-enable assert once PR20057 is resolved.
2830 //    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2831 //           "permanent hazard");
2832     (void)i;
2833     bumpCycle(CurrCycle + 1);
2834     releasePending();
2835   }
2836 
2837   LLVM_DEBUG(Pending.dump());
2838   LLVM_DEBUG(Available.dump());
2839 
2840   if (Available.size() == 1)
2841     return *Available.begin();
2842   return nullptr;
2843 }
2844 
2845 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2846 
2847 /// Dump the content of the \ref ReservedCycles vector for the
2848 /// resources that are used in the basic block.
2849 ///
2850 LLVM_DUMP_METHOD void SchedBoundary::dumpReservedCycles() const {
2851   if (!SchedModel->hasInstrSchedModel())
2852     return;
2853 
2854   unsigned ResourceCount = SchedModel->getNumProcResourceKinds();
2855   unsigned StartIdx = 0;
2856 
2857   for (unsigned ResIdx = 0; ResIdx < ResourceCount; ++ResIdx) {
2858     const unsigned NumUnits = SchedModel->getProcResource(ResIdx)->NumUnits;
2859     std::string ResName = SchedModel->getResourceName(ResIdx);
2860     for (unsigned UnitIdx = 0; UnitIdx < NumUnits; ++UnitIdx) {
2861       dbgs() << ResName << "(" << UnitIdx << ") = ";
2862       if (SchedModel->enableIntervals()) {
2863         if (ReservedResourceSegments.count(StartIdx + UnitIdx))
2864           dbgs() << ReservedResourceSegments.at(StartIdx + UnitIdx);
2865         else
2866           dbgs() << "{ }\n";
2867       } else
2868         dbgs() << ReservedCycles[StartIdx + UnitIdx] << "\n";
2869     }
2870     StartIdx += NumUnits;
2871   }
2872 }
2873 
2874 // This is useful information to dump after bumpNode.
2875 // Note that the Queue contents are more useful before pickNodeFromQueue.
2876 LLVM_DUMP_METHOD void SchedBoundary::dumpScheduledState() const {
2877   unsigned ResFactor;
2878   unsigned ResCount;
2879   if (ZoneCritResIdx) {
2880     ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2881     ResCount = getResourceCount(ZoneCritResIdx);
2882   } else {
2883     ResFactor = SchedModel->getMicroOpFactor();
2884     ResCount = RetiredMOps * ResFactor;
2885   }
2886   unsigned LFactor = SchedModel->getLatencyFactor();
2887   dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2888          << "  Retired: " << RetiredMOps;
2889   dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
2890   dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
2891          << ResCount / ResFactor << " "
2892          << SchedModel->getResourceName(ZoneCritResIdx)
2893          << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
2894          << (IsResourceLimited ? "  - Resource" : "  - Latency")
2895          << " limited.\n";
2896   if (MISchedDumpReservedCycles)
2897     dumpReservedCycles();
2898 }
2899 #endif
2900 
2901 //===----------------------------------------------------------------------===//
2902 // GenericScheduler - Generic implementation of MachineSchedStrategy.
2903 //===----------------------------------------------------------------------===//
2904 
2905 void GenericSchedulerBase::SchedCandidate::
2906 initResourceDelta(const ScheduleDAGMI *DAG,
2907                   const TargetSchedModel *SchedModel) {
2908   if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2909     return;
2910 
2911   const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2912   for (TargetSchedModel::ProcResIter
2913          PI = SchedModel->getWriteProcResBegin(SC),
2914          PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2915     if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2916       ResDelta.CritResources += PI->Cycles;
2917     if (PI->ProcResourceIdx == Policy.DemandResIdx)
2918       ResDelta.DemandedResources += PI->Cycles;
2919   }
2920 }
2921 
2922 /// Compute remaining latency. We need this both to determine whether the
2923 /// overall schedule has become latency-limited and whether the instructions
2924 /// outside this zone are resource or latency limited.
2925 ///
2926 /// The "dependent" latency is updated incrementally during scheduling as the
2927 /// max height/depth of scheduled nodes minus the cycles since it was
2928 /// scheduled:
2929 ///   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2930 ///
2931 /// The "independent" latency is the max ready queue depth:
2932 ///   ILat = max N.depth for N in Available|Pending
2933 ///
2934 /// RemainingLatency is the greater of independent and dependent latency.
2935 ///
2936 /// These computations are expensive, especially in DAGs with many edges, so
2937 /// only do them if necessary.
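///
/// E.g. (hypothetical values): with a dependent latency of 12c and a node of
/// depth 15 still sitting in Pending, RemLatency = max(12, 15) = 15c.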
2938 static unsigned computeRemLatency(SchedBoundary &CurrZone) {
2939   unsigned RemLatency = CurrZone.getDependentLatency();
2940   RemLatency = std::max(RemLatency,
2941                         CurrZone.findMaxLatency(CurrZone.Available.elements()));
2942   RemLatency = std::max(RemLatency,
2943                         CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2944   return RemLatency;
2945 }
2946 
2947 /// Returns true if the current cycle plus remaining latency is greater than
2948 /// the critical path in the scheduling region.
2949 bool GenericSchedulerBase::shouldReduceLatency(const CandPolicy &Policy,
2950                                                SchedBoundary &CurrZone,
2951                                                bool ComputeRemLatency,
2952                                                unsigned &RemLatency) const {
2953   // The current cycle is already greater than the critical path, so we are
2954   // already latency limited and don't need to compute the remaining latency.
2955   if (CurrZone.getCurrCycle() > Rem.CriticalPath)
2956     return true;
2957 
2958   // If we haven't scheduled anything yet, then we aren't latency limited.
2959   if (CurrZone.getCurrCycle() == 0)
2960     return false;
2961 
2962   if (ComputeRemLatency)
2963     RemLatency = computeRemLatency(CurrZone);
2964 
2965   return RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath;
2966 }
2967 
2968 /// Set the CandPolicy for a scheduling zone given the current resources and
2969 /// latencies inside and outside the zone.
2970 void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
2971                                      SchedBoundary &CurrZone,
2972                                      SchedBoundary *OtherZone) {
2973   // Apply preemptive heuristics based on the total latency and resources
2974   // inside and outside this zone. Potential stalls should be considered before
2975   // following this policy.
2976 
2977   // Compute the critical resource outside the zone.
2978   unsigned OtherCritIdx = 0;
2979   unsigned OtherCount =
2980     OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2981 
2982   bool OtherResLimited = false;
2983   unsigned RemLatency = 0;
2984   bool RemLatencyComputed = false;
2985   if (SchedModel->hasInstrSchedModel() && OtherCount != 0) {
2986     RemLatency = computeRemLatency(CurrZone);
2987     RemLatencyComputed = true;
2988     OtherResLimited = checkResourceLimit(SchedModel->getLatencyFactor(),
2989                                          OtherCount, RemLatency, false);
2990   }
2991 
2992   // Schedule aggressively for latency in PostRA mode. We don't check for
2993   // acyclic latency during PostRA, and highly out-of-order processors will
2994   // skip PostRA scheduling.
2995   if (!OtherResLimited &&
2996       (IsPostRA || shouldReduceLatency(Policy, CurrZone, !RemLatencyComputed,
2997                                        RemLatency))) {
2998     Policy.ReduceLatency |= true;
2999     LLVM_DEBUG(dbgs() << "  " << CurrZone.Available.getName()
3000                       << " RemainingLatency " << RemLatency << " + "
3001                       << CurrZone.getCurrCycle() << "c > CritPath "
3002                       << Rem.CriticalPath << "\n");
3003   }
3004   // If the same resource is limiting inside and outside the zone, do nothing.
3005   if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
3006     return;
3007 
3008   LLVM_DEBUG(
3009       if (CurrZone.isResourceLimited()) dbgs()
3010           << "  " << CurrZone.Available.getName() << " ResourceLimited: "
3011           << SchedModel->getResourceName(CurrZone.getZoneCritResIdx()) << "\n";
3012       if (OtherResLimited) dbgs() << "  RemainingLimit: "
3013           << SchedModel->getResourceName(OtherCritIdx) << "\n";
3014       if (!CurrZone.isResourceLimited() && !OtherResLimited) dbgs()
3015           << "  Latency limited both directions.\n");
3016 
3017   if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
3018     Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
3019 
3020   if (OtherResLimited)
3021     Policy.DemandResIdx = OtherCritIdx;
3022 }
3023 
3024 #ifndef NDEBUG
3025 const char *GenericSchedulerBase::getReasonStr(
3026   GenericSchedulerBase::CandReason Reason) {
3027   switch (Reason) {
3028   case NoCand:         return "NOCAND    ";
3029   case Only1:          return "ONLY1     ";
3030   case PhysReg:        return "PHYS-REG  ";
3031   case RegExcess:      return "REG-EXCESS";
3032   case RegCritical:    return "REG-CRIT  ";
3033   case Stall:          return "STALL     ";
3034   case Cluster:        return "CLUSTER   ";
3035   case Weak:           return "WEAK      ";
3036   case RegMax:         return "REG-MAX   ";
3037   case ResourceReduce: return "RES-REDUCE";
3038   case ResourceDemand: return "RES-DEMAND";
3039   case TopDepthReduce: return "TOP-DEPTH ";
3040   case TopPathReduce:  return "TOP-PATH  ";
3041   case BotHeightReduce:return "BOT-HEIGHT";
3042   case BotPathReduce:  return "BOT-PATH  ";
3043   case NextDefUse:     return "DEF-USE   ";
3044   case NodeOrder:      return "ORDER     ";
3045   }
3046   llvm_unreachable("Unknown reason!");
3047 }
3048 
3049 void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
3050   PressureChange P;
3051   unsigned ResIdx = 0;
3052   unsigned Latency = 0;
3053   switch (Cand.Reason) {
3054   default:
3055     break;
3056   case RegExcess:
3057     P = Cand.RPDelta.Excess;
3058     break;
3059   case RegCritical:
3060     P = Cand.RPDelta.CriticalMax;
3061     break;
3062   case RegMax:
3063     P = Cand.RPDelta.CurrentMax;
3064     break;
3065   case ResourceReduce:
3066     ResIdx = Cand.Policy.ReduceResIdx;
3067     break;
3068   case ResourceDemand:
3069     ResIdx = Cand.Policy.DemandResIdx;
3070     break;
3071   case TopDepthReduce:
3072     Latency = Cand.SU->getDepth();
3073     break;
3074   case TopPathReduce:
3075     Latency = Cand.SU->getHeight();
3076     break;
3077   case BotHeightReduce:
3078     Latency = Cand.SU->getHeight();
3079     break;
3080   case BotPathReduce:
3081     Latency = Cand.SU->getDepth();
3082     break;
3083   }
3084   dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
3085   if (P.isValid())
3086     dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
3087            << ":" << P.getUnitInc() << " ";
3088   else
3089     dbgs() << "      ";
3090   if (ResIdx)
3091     dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
3092   else
3093     dbgs() << "         ";
3094   if (Latency)
3095     dbgs() << " " << Latency << " cycles ";
3096   else
3097     dbgs() << "          ";
3098   dbgs() << '\n';
3099 }
3100 #endif
3101 
3102 namespace llvm {
3103 /// Return true if this heuristic determines order.
3104 /// TODO: Consider refactoring the return type of these functions to an integer
3105 /// or enum, as we may need to differentiate whether TryCand is better than Cand.
3106 bool tryLess(int TryVal, int CandVal,
3107              GenericSchedulerBase::SchedCandidate &TryCand,
3108              GenericSchedulerBase::SchedCandidate &Cand,
3109              GenericSchedulerBase::CandReason Reason) {
3110   if (TryVal < CandVal) {
3111     TryCand.Reason = Reason;
3112     return true;
3113   }
3114   if (TryVal > CandVal) {
3115     if (Cand.Reason > Reason)
3116       Cand.Reason = Reason;
3117     return true;
3118   }
3119   return false;
3120 }
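// Usage sketch (illustrative values): tryLess(2, 5, TryCand, Cand, Stall)
// marks TryCand as the winner with reason Stall and returns true; equal
// values return false, deferring to the next heuristic in the cascade.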
3121 
3122 bool tryGreater(int TryVal, int CandVal,
3123                 GenericSchedulerBase::SchedCandidate &TryCand,
3124                 GenericSchedulerBase::SchedCandidate &Cand,
3125                 GenericSchedulerBase::CandReason Reason) {
3126   if (TryVal > CandVal) {
3127     TryCand.Reason = Reason;
3128     return true;
3129   }
3130   if (TryVal < CandVal) {
3131     if (Cand.Reason > Reason)
3132       Cand.Reason = Reason;
3133     return true;
3134   }
3135   return false;
3136 }
3137 
3138 bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
3139                 GenericSchedulerBase::SchedCandidate &Cand,
3140                 SchedBoundary &Zone) {
3141   if (Zone.isTop()) {
3142     // Prefer the candidate with the lesser depth, but only if one of them has
3143     // depth greater than the total latency scheduled so far, otherwise either
3144     // of them could be scheduled now with no stall.
3145     if (std::max(TryCand.SU->getDepth(), Cand.SU->getDepth()) >
3146         Zone.getScheduledLatency()) {
3147       if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
3148                   TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
3149         return true;
3150     }
3151     if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
3152                    TryCand, Cand, GenericSchedulerBase::TopPathReduce))
3153       return true;
3154   } else {
3155     // Prefer the candidate with the lesser height, but only if one of them has
3156     // height greater than the total latency scheduled so far, otherwise either
3157     // of them could be scheduled now with no stall.
3158     if (std::max(TryCand.SU->getHeight(), Cand.SU->getHeight()) >
3159         Zone.getScheduledLatency()) {
3160       if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
3161                   TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
3162         return true;
3163     }
3164     if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
3165                    TryCand, Cand, GenericSchedulerBase::BotPathReduce))
3166       return true;
3167   }
3168   return false;
3169 }
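// E.g. (hypothetical): at the top boundary with ScheduledLatency 10c,
// candidates of depth 12 and 8 trigger the depth test (12 > 10) and the
// depth-8 node wins as TopDepthReduce; if neither depth exceeded 10, the
// larger height (remaining critical path) would decide via TopPathReduce.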
3170 } // end namespace llvm
3171 
3172 static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
3173   LLVM_DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
3174                     << GenericSchedulerBase::getReasonStr(Reason) << '\n');
3175 }
3176 
3177 static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
3178   tracePick(Cand.Reason, Cand.AtTop);
3179 }
3180 
3181 void GenericScheduler::initialize(ScheduleDAGMI *dag) {
3182   assert(dag->hasVRegLiveness() &&
3183          "(PreRA)GenericScheduler needs vreg liveness");
3184   DAG = static_cast<ScheduleDAGMILive*>(dag);
3185   SchedModel = DAG->getSchedModel();
3186   TRI = DAG->TRI;
3187 
3188   if (RegionPolicy.ComputeDFSResult)
3189     DAG->computeDFSResult();
3190 
3191   Rem.init(DAG, SchedModel);
3192   Top.init(DAG, SchedModel, &Rem);
3193   Bot.init(DAG, SchedModel, &Rem);
3194 
3195   // Initialize resource counts.
3196 
3197   // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
3198   // are disabled, then these HazardRecs will be disabled.
3199   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3200   if (!Top.HazardRec) {
3201     Top.HazardRec =
3202         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3203             Itin, DAG);
3204   }
3205   if (!Bot.HazardRec) {
3206     Bot.HazardRec =
3207         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3208             Itin, DAG);
3209   }
3210   TopCand.SU = nullptr;
3211   BotCand.SU = nullptr;
3212 }
3213 
3214 /// Initialize the per-region scheduling policy.
3215 void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
3216                                   MachineBasicBlock::iterator End,
3217                                   unsigned NumRegionInstrs) {
3218   const MachineFunction &MF = *Begin->getMF();
3219   const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
3220 
3221   // Avoid setting up the register pressure tracker for small regions to save
3222   // compile time. As a rough heuristic, only track pressure when the number of
3223   // schedulable instructions exceeds half the integer register file.
3224   RegionPolicy.ShouldTrackPressure = true;
3225   for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
3226     MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
3227     if (TLI->isTypeLegal(LegalIntVT)) {
3228       unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
3229         TLI->getRegClassFor(LegalIntVT));
3230       RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
3231     }
3232   }
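  // E.g. (illustrative): with 32 allocatable integer registers, pressure is
  // tracked only for regions of more than 16 schedulable instructions.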
3233 
3234   // For generic targets, we default to bottom-up, because it's simpler and more
3235   // compile-time optimizations have been implemented in that direction.
3236   RegionPolicy.OnlyBottomUp = true;
3237 
3238   // Allow the subtarget to override default policy.
3239   MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
3240 
3241   // After subtarget overrides, apply command line options.
3242   if (!EnableRegPressure) {
3243     RegionPolicy.ShouldTrackPressure = false;
3244     RegionPolicy.ShouldTrackLaneMasks = false;
3245   }
3246 
3247   // -misched-topdown/bottomup can force or unforce the scheduling direction;
3248   // e.g. -misched-bottomup=false allows scheduling in both directions.
3249   assert((!ForceTopDown || !ForceBottomUp) &&
3250          "-misched-topdown incompatible with -misched-bottomup");
3251   if (ForceBottomUp.getNumOccurrences() > 0) {
3252     RegionPolicy.OnlyBottomUp = ForceBottomUp;
3253     if (RegionPolicy.OnlyBottomUp)
3254       RegionPolicy.OnlyTopDown = false;
3255   }
3256   if (ForceTopDown.getNumOccurrences() > 0) {
3257     RegionPolicy.OnlyTopDown = ForceTopDown;
3258     if (RegionPolicy.OnlyTopDown)
3259       RegionPolicy.OnlyBottomUp = false;
3260   }
3261 }
3262 
3263 void GenericScheduler::dumpPolicy() const {
3264   // Cannot completely remove virtual function even in release mode.
3265 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
3266   dbgs() << "GenericScheduler RegionPolicy: "
3267          << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
3268          << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
3269          << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
3270          << "\n";
3271 #endif
3272 }
3273 
3274 /// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
3275 /// critical path by more cycles than it takes to drain the instruction buffer.
3276 /// We estimate an upper bound on in-flight instructions as:
3277 ///
3278 /// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
3279 /// InFlightIterations = AcyclicPath / CyclesPerIteration
3280 /// InFlightResources = InFlightIterations * LoopResources
3281 ///
3282 /// TODO: Check execution resources in addition to IssueCount.
3283 void GenericScheduler::checkAcyclicLatency() {
3284   if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
3285     return;
3286 
3287   // Scaled number of cycles per loop iteration.
3288   unsigned IterCount =
3289     std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
3290              Rem.RemIssueCount);
3291   // Scaled acyclic critical path.
3292   unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
3293   // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
3294   unsigned InFlightCount =
3295     (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
3296   unsigned BufferLimit =
3297     SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
3298 
3299   Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
3300 
3301   LLVM_DEBUG(
3302       dbgs() << "IssueCycles="
3303              << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
3304              << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
3305              << "c NumIters=" << (AcyclicCount + IterCount - 1) / IterCount
3306              << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
3307              << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
3308       if (Rem.IsAcyclicLatencyLimited) dbgs() << "  ACYCLIC LATENCY LIMIT\n");
3309 }
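// Worked example (hypothetical machine, latency and micro-op factors of 1):
// CyclicCritPath = 10c, CriticalPath = 40c, RemIssueCount = 20 micro-ops.
// Then IterCount = max(10, 20) = 20, AcyclicCount = 40, and InFlightCount =
// ceil(40 * 20 / 20) = 40, so any micro-op buffer smaller than 40 marks the
// region acyclic-latency limited.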
3310 
3311 void GenericScheduler::registerRoots() {
3312   Rem.CriticalPath = DAG->ExitSU.getDepth();
3313 
3314   // Some roots may not feed into ExitSU. Check all of them just in case.
3315   for (const SUnit *SU : Bot.Available) {
3316     if (SU->getDepth() > Rem.CriticalPath)
3317       Rem.CriticalPath = SU->getDepth();
3318   }
3319   LLVM_DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
3320   if (DumpCriticalPathLength) {
3321     errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
3322   }
3323 
3324   if (EnableCyclicPath && SchedModel->getMicroOpBufferSize() > 0) {
3325     Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
3326     checkAcyclicLatency();
3327   }
3328 }
3329 
3330 namespace llvm {
3331 bool tryPressure(const PressureChange &TryP,
3332                  const PressureChange &CandP,
3333                  GenericSchedulerBase::SchedCandidate &TryCand,
3334                  GenericSchedulerBase::SchedCandidate &Cand,
3335                  GenericSchedulerBase::CandReason Reason,
3336                  const TargetRegisterInfo *TRI,
3337                  const MachineFunction &MF) {
3338   // If one candidate decreases and the other increases, go with it.
3339   // Invalid candidates have UnitInc==0.
3340   if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
3341                  Reason)) {
3342     return true;
3343   }
3344   // Do not compare the magnitude of pressure changes between top and bottom
3345   // boundary.
3346   if (Cand.AtTop != TryCand.AtTop)
3347     return false;
3348 
3349   // If both candidates affect the same set in the same boundary, go with the
3350   // smallest increase.
3351   unsigned TryPSet = TryP.getPSetOrMax();
3352   unsigned CandPSet = CandP.getPSetOrMax();
3353   if (TryPSet == CandPSet) {
3354     return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
3355                    Reason);
3356   }
3357 
3358   int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
3359                                  std::numeric_limits<int>::max();
3360 
3361   int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
3362                                    std::numeric_limits<int>::max();
3363 
3364   // If the candidates are decreasing pressure, reverse priority.
3365   if (TryP.getUnitInc() < 0)
3366     std::swap(TryRank, CandRank);
3367   return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
3368 }
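// E.g. (illustrative): a candidate whose pressure delta is negative (it
// shrinks a live range) beats one that grows it; when both grow the same
// pressure set at the same boundary, the smaller increase wins.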
3369 
3370 unsigned getWeakLeft(const SUnit *SU, bool isTop) {
3371   return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
3372 }
3373 
3374 /// Minimize physical register live ranges. Regalloc wants them adjacent to
3375 /// their physreg def/use.
3376 ///
3377 /// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
3378 /// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
3379 /// with the operation that produces or consumes the physreg. We'll do this when
3380 /// regalloc has support for parallel copies.
3381 int biasPhysReg(const SUnit *SU, bool isTop) {
3382   const MachineInstr *MI = SU->getInstr();
3383 
3384   if (MI->isCopy()) {
3385     unsigned ScheduledOper = isTop ? 1 : 0;
3386     unsigned UnscheduledOper = isTop ? 0 : 1;
3387     // If we have already scheduled the physreg producer/consumer, immediately
3388     // schedule the copy.
3389     if (MI->getOperand(ScheduledOper).getReg().isPhysical())
3390       return 1;
3391     // If the physreg is at the boundary, defer it. Otherwise schedule it
3392     // immediately to free the dependent. We can hoist the copy later.
3393     bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
3394     if (MI->getOperand(UnscheduledOper).getReg().isPhysical())
3395       return AtBoundary ? -1 : 1;
3396   }
3397 
3398   if (MI->isMoveImmediate()) {
3399     // If we have a move immediate and all successors have been assigned, bias
3400     // towards scheduling this later. Make sure all register defs are to
3401     // physical registers.
3402     bool DoBias = true;
3403     for (const MachineOperand &Op : MI->defs()) {
3404       if (Op.isReg() && !Op.getReg().isPhysical()) {
3405         DoBias = false;
3406         break;
3407       }
3408     }
3409 
3410     if (DoBias)
3411       return isTop ? -1 : 1;
3412   }
3413 
3414   return 0;
3415 }
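// E.g. (illustrative): scheduling top-down, a COPY that reads a physreg has
// its source (operand 1) on the already-scheduled side, so it returns 1 and
// is issued immediately, keeping the copy adjacent to the physreg def.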
3416 } // end namespace llvm
3417 
3418 void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
3419                                      bool AtTop,
3420                                      const RegPressureTracker &RPTracker,
3421                                      RegPressureTracker &TempTracker) {
3422   Cand.SU = SU;
3423   Cand.AtTop = AtTop;
3424   if (DAG->isTrackingPressure()) {
3425     if (AtTop) {
3426       TempTracker.getMaxDownwardPressureDelta(
3427         Cand.SU->getInstr(),
3428         Cand.RPDelta,
3429         DAG->getRegionCriticalPSets(),
3430         DAG->getRegPressure().MaxSetPressure);
3431     } else {
3432       if (VerifyScheduling) {
3433         TempTracker.getMaxUpwardPressureDelta(
3434           Cand.SU->getInstr(),
3435           &DAG->getPressureDiff(Cand.SU),
3436           Cand.RPDelta,
3437           DAG->getRegionCriticalPSets(),
3438           DAG->getRegPressure().MaxSetPressure);
3439       } else {
3440         RPTracker.getUpwardPressureDelta(
3441           Cand.SU->getInstr(),
3442           DAG->getPressureDiff(Cand.SU),
3443           Cand.RPDelta,
3444           DAG->getRegionCriticalPSets(),
3445           DAG->getRegPressure().MaxSetPressure);
3446       }
3447     }
3448   }
3449   LLVM_DEBUG(if (Cand.RPDelta.Excess.isValid()) dbgs()
3450              << "  Try  SU(" << Cand.SU->NodeNum << ") "
3451              << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet()) << ":"
3452              << Cand.RPDelta.Excess.getUnitInc() << "\n");
3453 }
3454 
3455 /// Apply a set of heuristics to a new candidate. Heuristics are currently
3456 /// hierarchical. This may be more efficient than a graduated cost model because
3457 /// we don't need to evaluate all aspects of the model for each node in the
3458 /// queue. But it's really done to make the heuristics easier to debug and
3459 /// statistically analyze.
3460 ///
3461 /// \param Cand provides the policy and current best candidate.
3462 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3463 /// \param Zone describes the scheduled zone that we are extending, or nullptr
3464 ///             if Cand is from a different zone than TryCand.
3465 /// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
3466 bool GenericScheduler::tryCandidate(SchedCandidate &Cand,
3467                                     SchedCandidate &TryCand,
3468                                     SchedBoundary *Zone) const {
3469   // Initialize the candidate if needed.
3470   if (!Cand.isValid()) {
3471     TryCand.Reason = NodeOrder;
3472     return true;
3473   }
3474 
3475   // Bias PhysReg Defs and copies to their uses and definitions, respectively.
3476   if (tryGreater(biasPhysReg(TryCand.SU, TryCand.AtTop),
3477                  biasPhysReg(Cand.SU, Cand.AtTop), TryCand, Cand, PhysReg))
3478     return TryCand.Reason != NoCand;
3479 
3480   // Avoid exceeding the target's limit.
3481   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
3482                                                Cand.RPDelta.Excess,
3483                                                TryCand, Cand, RegExcess, TRI,
3484                                                DAG->MF))
3485     return TryCand.Reason != NoCand;
3486 
3487   // Avoid increasing the max critical pressure in the scheduled region.
3488   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
3489                                                Cand.RPDelta.CriticalMax,
3490                                                TryCand, Cand, RegCritical, TRI,
3491                                                DAG->MF))
3492     return TryCand.Reason != NoCand;
3493 
3494   // We only compare a subset of features when comparing nodes between
3495   // Top and Bottom boundary. Some properties are simply incomparable, in many
3496   // other instances we should only override the other boundary if something
3497   // is a clear good pick on one boundary. Skip heuristics that are more
3498   // "tie-breaking" in nature.
3499   bool SameBoundary = Zone != nullptr;
3500   if (SameBoundary) {
3501     // For loops that are acyclic path limited, aggressively schedule for
3502     // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
3503     // heuristics to take precedence.
3504     if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
3505         tryLatency(TryCand, Cand, *Zone))
3506       return TryCand.Reason != NoCand;
3507 
3508     // Prioritize instructions that read unbuffered resources by stall cycles.
3509     if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
3510                 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3511       return TryCand.Reason != NoCand;
3512   }
3513 
3514   // Keep clustered nodes together to encourage downstream peephole
3515   // optimizations which may reduce resource requirements.
3516   //
3517   // This is a best effort to set things up for a post-RA pass. Optimizations
3518   // like generating loads of multiple registers should ideally be done within
3519   // the scheduler pass by combining the loads during DAG postprocessing.
3520   const SUnit *CandNextClusterSU =
3521     Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3522   const SUnit *TryCandNextClusterSU =
3523     TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
3524   if (tryGreater(TryCand.SU == TryCandNextClusterSU,
3525                  Cand.SU == CandNextClusterSU,
3526                  TryCand, Cand, Cluster))
3527     return TryCand.Reason != NoCand;
3528 
3529   if (SameBoundary) {
3530     // Weak edges are for clustering and other constraints.
3531     if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
3532                 getWeakLeft(Cand.SU, Cand.AtTop),
3533                 TryCand, Cand, Weak))
3534       return TryCand.Reason != NoCand;
3535   }
3536 
3537   // Avoid increasing the max pressure of the entire region.
3538   if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
3539                                                Cand.RPDelta.CurrentMax,
3540                                                TryCand, Cand, RegMax, TRI,
3541                                                DAG->MF))
3542     return TryCand.Reason != NoCand;
3543 
3544   if (SameBoundary) {
3545     // Avoid critical resource consumption and balance the schedule.
3546     TryCand.initResourceDelta(DAG, SchedModel);
3547     if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3548                 TryCand, Cand, ResourceReduce))
3549       return TryCand.Reason != NoCand;
3550     if (tryGreater(TryCand.ResDelta.DemandedResources,
3551                    Cand.ResDelta.DemandedResources,
3552                    TryCand, Cand, ResourceDemand))
3553       return TryCand.Reason != NoCand;
3554 
3555     // Avoid serializing long latency dependence chains.
3556     // For acyclic path limited loops, latency was already checked above.
3557     if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
3558         !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
3559       return TryCand.Reason != NoCand;
3560 
3561     // Fall through to original instruction order.
3562     if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
3563         || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
3564       TryCand.Reason = NodeOrder;
3565       return true;
3566     }
3567   }
3568 
3569   return false;
3570 }
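// The cascade above roughly mirrors the CandReason enum order: physreg
// bias, then excess/critical register pressure, stalls, clustering, weak
// edges, overall pressure, resource balance, latency, and finally the
// original instruction order as the last resort.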
3571 
3572 /// Pick the best candidate from the queue.
3573 ///
3574 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
3575 /// DAG building. To adjust for the current scheduling location we need to
3576 /// maintain the number of vreg uses remaining to be top-scheduled.
3577 void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
3578                                          const CandPolicy &ZonePolicy,
3579                                          const RegPressureTracker &RPTracker,
3580                                          SchedCandidate &Cand) {
3581   // getMaxPressureDelta temporarily modifies the tracker.
3582   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
3583 
3584   ReadyQueue &Q = Zone.Available;
3585   for (SUnit *SU : Q) {
3586 
3587     SchedCandidate TryCand(ZonePolicy);
3588     initCandidate(TryCand, SU, Zone.isTop(), RPTracker, TempTracker);
3589     // Pass SchedBoundary only when comparing nodes from the same boundary.
3590     SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
3591     if (tryCandidate(Cand, TryCand, ZoneArg)) {
3592       // Initialize resource delta if needed in case future heuristics query it.
3593       if (TryCand.ResDelta == SchedResourceDelta())
3594         TryCand.initResourceDelta(DAG, SchedModel);
3595       Cand.setBest(TryCand);
3596       LLVM_DEBUG(traceCandidate(Cand));
3597     }
3598   }
3599 }
3600 
3601 /// Pick the best candidate node from either the top or bottom queue.
3602 SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
3603   // Schedule as far as possible in the direction of no choice. This is most
3604   // efficient, but also provides the best heuristics for CriticalPSets.
3605   if (SUnit *SU = Bot.pickOnlyChoice()) {
3606     IsTopNode = false;
3607     tracePick(Only1, false);
3608     return SU;
3609   }
3610   if (SUnit *SU = Top.pickOnlyChoice()) {
3611     IsTopNode = true;
3612     tracePick(Only1, true);
3613     return SU;
3614   }
3615   // Set the bottom-up policy based on the state of the current bottom zone and
3616   // the instructions outside the zone, including the top zone.
3617   CandPolicy BotPolicy;
3618   setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
3619   // Set the top-down policy based on the state of the current top zone and
3620   // the instructions outside the zone, including the bottom zone.
3621   CandPolicy TopPolicy;
3622   setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
3623 
3624   // See if BotCand is still valid (because we previously scheduled from Top).
3625   LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
3626   if (!BotCand.isValid() || BotCand.SU->isScheduled ||
3627       BotCand.Policy != BotPolicy) {
3628     BotCand.reset(CandPolicy());
3629     pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
3630     assert(BotCand.Reason != NoCand && "failed to find the first candidate");
3631   } else {
3632     LLVM_DEBUG(traceCandidate(BotCand));
3633 #ifndef NDEBUG
3634     if (VerifyScheduling) {
3635       SchedCandidate TCand;
3636       TCand.reset(CandPolicy());
3637       pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
3638       assert(TCand.SU == BotCand.SU &&
3639              "Last pick result should correspond to re-picking right now");
3640     }
3641 #endif
3642   }
3643 
3644   // Check if the top Q has a better candidate.
3645   LLVM_DEBUG(dbgs() << "Picking from Top:\n");
3646   if (!TopCand.isValid() || TopCand.SU->isScheduled ||
3647       TopCand.Policy != TopPolicy) {
3648     TopCand.reset(CandPolicy());
3649     pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
3650     assert(TopCand.Reason != NoCand && "failed to find the first candidate");
3651   } else {
3652     LLVM_DEBUG(traceCandidate(TopCand));
3653 #ifndef NDEBUG
3654     if (VerifyScheduling) {
3655       SchedCandidate TCand;
3656       TCand.reset(CandPolicy());
3657       pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
3658       assert(TCand.SU == TopCand.SU &&
3659            "Last pick result should correspond to re-picking right now");
3660     }
3661 #endif
3662   }
3663 
3664   // Pick best from BotCand and TopCand.
3665   assert(BotCand.isValid());
3666   assert(TopCand.isValid());
3667   SchedCandidate Cand = BotCand;
3668   TopCand.Reason = NoCand;
3669   if (tryCandidate(Cand, TopCand, nullptr)) {
3670     Cand.setBest(TopCand);
3671     LLVM_DEBUG(traceCandidate(Cand));
3672   }
3673 
3674   IsTopNode = Cand.AtTop;
3675   tracePick(Cand);
3676   return Cand.SU;
3677 }
3678 
3679 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
3680 SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
3681   if (DAG->top() == DAG->bottom()) {
3682     assert(Top.Available.empty() && Top.Pending.empty() &&
3683            Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
3684     return nullptr;
3685   }
3686   SUnit *SU;
3687   do {
3688     if (RegionPolicy.OnlyTopDown) {
3689       SU = Top.pickOnlyChoice();
3690       if (!SU) {
3691         CandPolicy NoPolicy;
3692         TopCand.reset(NoPolicy);
3693         pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
3694         assert(TopCand.Reason != NoCand && "failed to find a candidate");
3695         tracePick(TopCand);
3696         SU = TopCand.SU;
3697       }
3698       IsTopNode = true;
3699     } else if (RegionPolicy.OnlyBottomUp) {
3700       SU = Bot.pickOnlyChoice();
3701       if (!SU) {
3702         CandPolicy NoPolicy;
3703         BotCand.reset(NoPolicy);
3704         pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
3705         assert(BotCand.Reason != NoCand && "failed to find a candidate");
3706         tracePick(BotCand);
3707         SU = BotCand.SU;
3708       }
3709       IsTopNode = false;
3710     } else {
3711       SU = pickNodeBidirectional(IsTopNode);
3712     }
3713   } while (SU->isScheduled);
3714 
3715   if (SU->isTopReady())
3716     Top.removeReady(SU);
3717   if (SU->isBottomReady())
3718     Bot.removeReady(SU);
3719 
3720   LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
3721                     << *SU->getInstr());
3722   return SU;
3723 }
3724 
3725 void GenericScheduler::reschedulePhysReg(SUnit *SU, bool isTop) {
3726   MachineBasicBlock::iterator InsertPos = SU->getInstr();
3727   if (!isTop)
3728     ++InsertPos;
3729   SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3730 
3731   // Find already scheduled copies with a single physreg dependence and move
3732   // them just above the scheduled instruction.
3733   for (SDep &Dep : Deps) {
3734     if (Dep.getKind() != SDep::Data ||
3735         !Register::isPhysicalRegister(Dep.getReg()))
3736       continue;
3737     SUnit *DepSU = Dep.getSUnit();
3738     if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3739       continue;
3740     MachineInstr *Copy = DepSU->getInstr();
3741     if (!Copy->isCopy() && !Copy->isMoveImmediate())
3742       continue;
3743     LLVM_DEBUG(dbgs() << "  Rescheduling physreg copy ";
3744                DAG->dumpNode(*Dep.getSUnit()));
3745     DAG->moveInstruction(Copy, InsertPos);
3746   }
3747 }
3748 
3749 /// Update the scheduler's state after scheduling a node. This is the same node
3750 /// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3751 /// update it's state based on the current cycle before MachineSchedStrategy
3752 /// does.
3753 ///
3754 /// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3755 /// them here. See comments in biasPhysReg.
3756 void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3757   if (IsTopNode) {
3758     SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3759     Top.bumpNode(SU);
3760     if (SU->hasPhysRegUses)
3761       reschedulePhysReg(SU, true);
3762   } else {
3763     SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
3764     Bot.bumpNode(SU);
3765     if (SU->hasPhysRegDefs)
3766       reschedulePhysReg(SU, false);
3767   }
3768 }
3769 
3770 /// Create the standard converging machine scheduler. This will be used as the
3771 /// default scheduler if the target does not set a default.
3772 ScheduleDAGMILive *llvm::createGenericSchedLive(MachineSchedContext *C) {
3773   ScheduleDAGMILive *DAG =
3774       new ScheduleDAGMILive(C, std::make_unique<GenericScheduler>(C));
3775   // Register DAG post-processors.
3776   //
3777   // FIXME: extend the mutation API to allow earlier mutations to instantiate
3778   // data and pass it to later mutations. Have a single mutation that gathers
3779   // the interesting nodes in one pass.
3780   DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
3781   return DAG;
3782 }
3783 
3784 static ScheduleDAGInstrs *createConvergingSched(MachineSchedContext *C) {
3785   return createGenericSchedLive(C);
3786 }
3787 
3788 static MachineSchedRegistry
3789 GenericSchedRegistry("converge", "Standard converging scheduler.",
3790                      createConvergingSched);
3791 
3792 //===----------------------------------------------------------------------===//
3793 // PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3794 //===----------------------------------------------------------------------===//
3795 
3796 void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3797   DAG = Dag;
3798   SchedModel = DAG->getSchedModel();
3799   TRI = DAG->TRI;
3800 
3801   Rem.init(DAG, SchedModel);
3802   Top.init(DAG, SchedModel, &Rem);
3803   BotRoots.clear();
3804 
3805   // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3806   // or are disabled, then these HazardRecs will be disabled.
3807   const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
3808   if (!Top.HazardRec) {
3809     Top.HazardRec =
3810         DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
3811             Itin, DAG);
3812   }
3813 }
3814 
3815 void PostGenericScheduler::registerRoots() {
3816   Rem.CriticalPath = DAG->ExitSU.getDepth();
3817 
3818   // Some roots may not feed into ExitSU. Check all of them just in case.
3819   for (const SUnit *SU : BotRoots) {
3820     if (SU->getDepth() > Rem.CriticalPath)
3821       Rem.CriticalPath = SU->getDepth();
3822   }
3823   LLVM_DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
3824   if (DumpCriticalPathLength) {
3825     errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3826   }
3827 }
3828 
3829 /// Apply a set of heuristics to a new candidate for PostRA scheduling.
3830 ///
3831 /// \param Cand provides the policy and current best candidate.
3832 /// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3833 /// \return \c true if TryCand is better than Cand (Reason is NOT NoCand)
3834 bool PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3835                                         SchedCandidate &TryCand) {
3836   // Initialize the candidate if needed.
3837   if (!Cand.isValid()) {
3838     TryCand.Reason = NodeOrder;
3839     return true;
3840   }
3841 
3842   // Prioritize instructions that read unbuffered resources by stall cycles.
3843   if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3844               Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3845     return TryCand.Reason != NoCand;
3846 
3847   // Keep clustered nodes together.
3848   if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
3849                  Cand.SU == DAG->getNextClusterSucc(),
3850                  TryCand, Cand, Cluster))
3851     return TryCand.Reason != NoCand;
3852 
3853   // Avoid critical resource consumption and balance the schedule.
3854   if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3855               TryCand, Cand, ResourceReduce))
3856     return TryCand.Reason != NoCand;
3857   if (tryGreater(TryCand.ResDelta.DemandedResources,
3858                  Cand.ResDelta.DemandedResources,
3859                  TryCand, Cand, ResourceDemand))
3860     return TryCand.Reason != NoCand;
3861 
3862   // Avoid serializing long latency dependence chains.
3863   if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3864     return TryCand.Reason != NoCand;
3865   }
3866 
3867   // Fall through to original instruction order.
3868   if (TryCand.SU->NodeNum < Cand.SU->NodeNum) {
3869     TryCand.Reason = NodeOrder;
3870     return true;
3871   }
3872 
3873   return false;
3874 }
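// Unlike the pre-RA cascade in GenericScheduler::tryCandidate, no register
// pressure heuristics appear here: registers are already allocated post-RA,
// so only stalls, clustering, resource balance, latency, and source order
// remain relevant.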
3875 
3876 void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3877   ReadyQueue &Q = Top.Available;
3878   for (SUnit *SU : Q) {
3879     SchedCandidate TryCand(Cand.Policy);
3880     TryCand.SU = SU;
3881     TryCand.AtTop = true;
3882     TryCand.initResourceDelta(DAG, SchedModel);
3883     if (tryCandidate(Cand, TryCand)) {
3884       Cand.setBest(TryCand);
3885       LLVM_DEBUG(traceCandidate(Cand));
3886     }
3887   }
3888 }
3889 
3890 /// Pick the next node to schedule.
3891 SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3892   if (DAG->top() == DAG->bottom()) {
3893     assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
3894     return nullptr;
3895   }
3896   SUnit *SU;
3897   do {
3898     SU = Top.pickOnlyChoice();
3899     if (SU) {
3900       tracePick(Only1, true);
3901     } else {
3902       CandPolicy NoPolicy;
3903       SchedCandidate TopCand(NoPolicy);
3904       // Set the top-down policy based on the state of the current top zone and
3905       // the instructions outside the zone, including the bottom zone.
3906       setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
3907       pickNodeFromQueue(TopCand);
3908       assert(TopCand.Reason != NoCand && "failed to find a candidate");
3909       tracePick(TopCand);
3910       SU = TopCand.SU;
3911     }
3912   } while (SU->isScheduled);
3913 
3914   IsTopNode = true;
3915   Top.removeReady(SU);
3916 
3917   LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
3918                     << *SU->getInstr());
3919   return SU;
3920 }
3921 
3922 /// Called after ScheduleDAGMI has scheduled an instruction and updated
3923 /// scheduled/remaining flags in the DAG nodes.
3924 void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3925   SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3926   Top.bumpNode(SU);
3927 }
3928 
3929 ScheduleDAGMI *llvm::createGenericSchedPostRA(MachineSchedContext *C) {
3930   return new ScheduleDAGMI(C, std::make_unique<PostGenericScheduler>(C),
3931                            /*RemoveKillFlags=*/true);
3932 }
3933 
3934 //===----------------------------------------------------------------------===//
3935 // ILP Scheduler. Currently for experimental analysis of heuristics.
3936 //===----------------------------------------------------------------------===//
3937 
3938 namespace {
3939 
3940 /// Order nodes by the ILP metric.
3941 struct ILPOrder {
3942   const SchedDFSResult *DFSResult = nullptr;
3943   const BitVector *ScheduledTrees = nullptr;
3944   bool MaximizeILP;
3945 
3946   ILPOrder(bool MaxILP) : MaximizeILP(MaxILP) {}
3947 
3948   /// Apply a less-than relation on node priority.
3949   ///
3950   /// (Return true if A comes after B in the Q.)
3951   bool operator()(const SUnit *A, const SUnit *B) const {
3952     unsigned SchedTreeA = DFSResult->getSubtreeID(A);
3953     unsigned SchedTreeB = DFSResult->getSubtreeID(B);
3954     if (SchedTreeA != SchedTreeB) {
3955       // Unscheduled trees have lower priority.
3956       if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
3957         return ScheduledTrees->test(SchedTreeB);
3958 
3959       // Trees with shallower connections have lower priority.
3960       if (DFSResult->getSubtreeLevel(SchedTreeA)
3961           != DFSResult->getSubtreeLevel(SchedTreeB)) {
3962         return DFSResult->getSubtreeLevel(SchedTreeA)
3963           < DFSResult->getSubtreeLevel(SchedTreeB);
3964       }
3965     }
3966     if (MaximizeILP)
3967       return DFSResult->getILP(A) < DFSResult->getILP(B);
3968     else
3969       return DFSResult->getILP(A) > DFSResult->getILP(B);
3970   }
3971 };
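// E.g. (illustrative): nodes in already-scheduled subtrees are preferred;
// between two unscheduled subtrees, the deeper (higher-level) one is drawn
// from first; within a subtree, MaximizeILP pops the larger ILP value first.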
3972 
3973 /// Schedule based on the ILP metric.
3974 class ILPScheduler : public MachineSchedStrategy {
3975   ScheduleDAGMILive *DAG = nullptr;
3976   ILPOrder Cmp;
3977 
3978   std::vector<SUnit*> ReadyQ;
3979 
3980 public:
3981   ILPScheduler(bool MaximizeILP) : Cmp(MaximizeILP) {}
3982 
3983   void initialize(ScheduleDAGMI *dag) override {
3984     assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
3985     DAG = static_cast<ScheduleDAGMILive*>(dag);
3986     DAG->computeDFSResult();
3987     Cmp.DFSResult = DAG->getDFSResult();
3988     Cmp.ScheduledTrees = &DAG->getScheduledTrees();
3989     ReadyQ.clear();
3990   }
3991 
3992   void registerRoots() override {
3993     // Restore the heap in ReadyQ with the updated DFS results.
3994     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
3995   }
3996 
3997   /// Implement MachineSchedStrategy interface.
3998   /// -----------------------------------------
3999 
4000   /// Callback to select the highest priority node from the ready Q.
4001   SUnit *pickNode(bool &IsTopNode) override {
4002     if (ReadyQ.empty()) return nullptr;
4003     std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
4004     SUnit *SU = ReadyQ.back();
4005     ReadyQ.pop_back();
4006     IsTopNode = false;
4007     LLVM_DEBUG(dbgs() << "Pick node "
4008                       << "SU(" << SU->NodeNum << ") "
4009                       << " ILP: " << DAG->getDFSResult()->getILP(SU)
4010                       << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU)
4011                       << " @"
4012                       << DAG->getDFSResult()->getSubtreeLevel(
4013                              DAG->getDFSResult()->getSubtreeID(SU))
4014                       << '\n'
4015                       << "Scheduling " << *SU->getInstr());
4016     return SU;
4017   }
4018 
4019   /// Scheduler callback to notify that a new subtree is scheduled.
4020   void scheduleTree(unsigned SubtreeID) override {
4021     std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
4022   }
4023 
4024   /// Callback after a node is scheduled. The tree marking and heap
4025   /// re-sorting happen in scheduleTree(); here we only assert bottom-up order.
4026   void schedNode(SUnit *SU, bool IsTopNode) override {
4027     assert(!IsTopNode && "SchedDFSResult needs bottom-up");
4028   }
4029 
4030   void releaseTopNode(SUnit *) override { /*only called for top roots*/ }
4031 
4032   void releaseBottomNode(SUnit *SU) override {
4033     ReadyQ.push_back(SU);
4034     std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
4035   }
4036 };
4037 
4038 } // end anonymous namespace
4039 
4040 static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
4041   return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(true));
4042 }
4043 static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
4044   return new ScheduleDAGMILive(C, std::make_unique<ILPScheduler>(false));
4045 }
4046 
4047 static MachineSchedRegistry ILPMaxRegistry(
4048   "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
4049 static MachineSchedRegistry ILPMinRegistry(
4050   "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);
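// Both strategies can be selected from the command line through the
// machine-scheduler registry, e.g. (assuming an llc invocation with
// machine scheduling enabled):
//   llc -enable-misched -misched=ilpmax input.ll
//   llc -enable-misched -misched=ilpmin input.ll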
4051 
4052 //===----------------------------------------------------------------------===//
4053 // Machine Instruction Shuffler for Correctness Testing
4054 //===----------------------------------------------------------------------===//
4055 
4056 #ifndef NDEBUG
4057 namespace {
4058 
4059 /// Apply a less-than relation on the node order, which corresponds to the
4060 /// instruction order prior to scheduling. IsReverse implements greater-than.
4061 template<bool IsReverse>
4062 struct SUnitOrder {
4063   bool operator()(SUnit *A, SUnit *B) const {
4064     if (IsReverse)
4065       return A->NodeNum > B->NodeNum;
4066     else
4067       return A->NodeNum < B->NodeNum;
4068   }
4069 };
4070 
4071 /// Reorder instructions as much as possible.
4072 class InstructionShuffler : public MachineSchedStrategy {
4073   bool IsAlternating;
4074   bool IsTopDown;
4075 
4076   // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
4077   // gives nodes with a higher number higher priority, causing the latest
4078   // instructions to be scheduled first.
4079   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false>>
4080     TopQ;
4081 
4082   // When scheduling bottom-up, use greater-than as the queue priority.
4083   PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true>>
4084     BottomQ;
4085 
4086 public:
4087   InstructionShuffler(bool alternate, bool topdown)
4088     : IsAlternating(alternate), IsTopDown(topdown) {}
4089 
4090   void initialize(ScheduleDAGMI*) override {
4091     TopQ.clear();
4092     BottomQ.clear();
4093   }
4094 
4095   /// Implement MachineSchedStrategy interface.
4096   /// -----------------------------------------
4097 
4098   SUnit *pickNode(bool &IsTopNode) override {
4099     SUnit *SU;
4100     if (IsTopDown) {
4101       do {
4102         if (TopQ.empty()) return nullptr;
4103         SU = TopQ.top();
4104         TopQ.pop();
4105       } while (SU->isScheduled);
4106       IsTopNode = true;
4107     } else {
4108       do {
4109         if (BottomQ.empty()) return nullptr;
4110         SU = BottomQ.top();
4111         BottomQ.pop();
4112       } while (SU->isScheduled);
4113       IsTopNode = false;
4114     }
4115     if (IsAlternating)
4116       IsTopDown = !IsTopDown;
4117     return SU;
4118   }
4119 
4120   void schedNode(SUnit *SU, bool IsTopNode) override {}
4121 
4122   void releaseTopNode(SUnit *SU) override {
4123     TopQ.push(SU);
4124   }
4125   void releaseBottomNode(SUnit *SU) override {
4126     BottomQ.push(SU);
4127   }
4128 };
4129 
4130 } // end anonymous namespace
4131 
4132 static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
4133   bool Alternate = !ForceTopDown && !ForceBottomUp;
4134   bool TopDown = !ForceBottomUp;
4135   assert((TopDown || !ForceTopDown) &&
4136          "-misched-topdown incompatible with -misched-bottomup");
4137   return new ScheduleDAGMILive(
4138       C, std::make_unique<InstructionShuffler>(Alternate, TopDown));
4139 }
4140 
4141 static MachineSchedRegistry ShufflerRegistry(
4142   "shuffle", "Shuffle machine instructions alternating directions",
4143   createInstructionShuffler);
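// As with the ILP strategies, the shuffler is reachable through the
// registry, e.g. llc -misched=shuffle. Note that this whole block is
// compiled only when assertions are enabled (#ifndef NDEBUG).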
4144 #endif // !NDEBUG
4145 
4146 //===----------------------------------------------------------------------===//
4147 // GraphWriter support for ScheduleDAGMILive.
4148 //===----------------------------------------------------------------------===//
4149 
4150 #ifndef NDEBUG
4151 namespace llvm {
4152 
4153 template<> struct GraphTraits<
4154   ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};
4155 
4156 template<>
4157 struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {
4158   DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
4159 
4160   static std::string getGraphName(const ScheduleDAG *G) {
4161     return std::string(G->MF.getName());
4162   }
4163 
4164   static bool renderGraphFromBottomUp() {
4165     return true;
4166   }
4167 
4168   static bool isNodeHidden(const SUnit *Node, const ScheduleDAG *G) {
4169     if (ViewMISchedCutoff == 0)
4170       return false;
4171     return (Node->Preds.size() > ViewMISchedCutoff
4172          || Node->Succs.size() > ViewMISchedCutoff);
4173   }
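  // ViewMISchedCutoff corresponds to the -view-misched-cutoff command-line
  // option declared earlier in this file; nodes with more predecessors or
  // successors than the cutoff are hidden to keep large graphs readable.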
4174 
4175   /// If you want to override the dot attributes printed for a particular
4176   /// edge, override this method.
4177   static std::string getEdgeAttributes(const SUnit *Node,
4178                                        SUnitIterator EI,
4179                                        const ScheduleDAG *Graph) {
4180     if (EI.isArtificialDep())
4181       return "color=cyan,style=dashed";
4182     if (EI.isCtrlDep())
4183       return "color=blue,style=dashed";
4184     return "";
4185   }
4186 
4187   static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
4188     std::string Str;
4189     raw_string_ostream SS(Str);
4190     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
4191     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
4192       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
4193     SS << "SU:" << SU->NodeNum;
4194     if (DFS)
4195       SS << " I:" << DFS->getNumInstrs(SU);
4196     return SS.str();
4197   }
4198 
4199   static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
4200     return G->getGraphNodeLabel(SU);
4201   }
4202 
4203   static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
4204     std::string Str("shape=Mrecord");
4205     const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
4206     const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
4207       static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
4208     if (DFS) {
4209       Str += ",style=filled,fillcolor=\"#";
4210       Str += DOT::getColorString(DFS->getSubtreeID(N));
4211       Str += '"';
4212     }
4213     return Str;
4214   }
4215 };
4216 
4217 } // end namespace llvm
4218 #endif // NDEBUG
4219 
4220 /// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
4221 /// rendered using 'dot'.
4222 void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
4223 #ifndef NDEBUG
4224   ViewGraph(this, Name, false, Title);
4225 #else
4226   errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
4227          << "systems with Graphviz or gv!\n";
4228 #endif  // NDEBUG
4229 }
4230 
4231 /// Out-of-line implementation with no arguments is handy for gdb.
4232 void ScheduleDAGMI::viewGraph() {
4233   viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
4234 }
4235 
4236 /// Sort predicate for the intervals stored in an instance of
4237 /// ResourceSegments. Intervals are always disjoint (no intersection
4238 /// for any pair of intervals), therefore the whole collection can be
4239 /// sorted by comparing only the left boundaries.
4240 static bool sortIntervals(const ResourceSegments::IntervalTy &A,
4241                           const ResourceSegments::IntervalTy &B) {
4242   return A.first < B.first;
4243 }
4244 
4245 unsigned ResourceSegments::getFirstAvailableAt(
4246     unsigned CurrCycle, unsigned StartAtCycle, unsigned Cycle,
4247     std::function<ResourceSegments::IntervalTy(unsigned, unsigned, unsigned)>
4248         IntervalBuilder) const {
4249   assert(std::is_sorted(std::begin(_Intervals), std::end(_Intervals),
4250                         sortIntervals) &&
4251          "Cannot execute on an un-sorted set of intervals.");
4252   unsigned RetCycle = CurrCycle;
4253   ResourceSegments::IntervalTy NewInterval =
4254       IntervalBuilder(RetCycle, StartAtCycle, Cycle);
4255   for (auto &Interval : _Intervals) {
4256     if (!intersects(NewInterval, Interval))
4257       continue;
4258 
4259     // Move the candidate interval so that it starts right after the
4260     // end of the interval it intersects.
4261     assert(Interval.second > NewInterval.first &&
4262            "Invalid intervals configuration.");
4263     RetCycle += (unsigned)Interval.second - (unsigned)NewInterval.first;
4264     NewInterval = IntervalBuilder(RetCycle, StartAtCycle, Cycle);
4265   }
4266   return RetCycle;
4267 }
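// Worked example (hypothetical values, assuming an IntervalBuilder that
// maps a candidate cycle C to the half-open interval [C, C + Cycle)):
// with _Intervals = {[1,3), [4,7)}, CurrCycle = 2 and Cycle = 2, the
// candidate [2,4) intersects [1,3) and is bumped by 3 - 2 = 1 to [3,5);
// [3,5) then intersects [4,7) and is bumped by 7 - 3 = 4 to [7,9), so the
// function returns 7.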
4268 
4269 void ResourceSegments::add(ResourceSegments::IntervalTy A,
4270                            const unsigned CutOff) {
4271   assert(A.first < A.second && "Cannot add empty resource usage");
4272   assert(CutOff > 0 && "0-size interval history has no use.");
4273   assert(all_of(_Intervals,
4274                 [&A](const ResourceSegments::IntervalTy &Interval) -> bool {
4275                   return !intersects(A, Interval);
4276                 }) &&
4277          "A resource is being overwritten");
4278   _Intervals.push_back(A);
4279 
4280   sortAndMerge();
4281 
4282   // Do not keep the full history of the intervals, just the
4283   // latest #CutOff.
4284   while (_Intervals.size() > CutOff)
4285     _Intervals.pop_front();
4286 }
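// Example trace (hypothetical values): starting from {[1,3)} with
// CutOff = 2, add([3,5)) merges the touching segments into {[1,5)};
// add([8,9)) gives {[1,5), [8,9)}; a further add([10,12)) exceeds CutOff
// and drops the oldest segment, leaving {[8,9), [10,12)}.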
4287 
4288 bool ResourceSegments::intersects(ResourceSegments::IntervalTy A,
4289                                   ResourceSegments::IntervalTy B) {
4290   assert(A.first <= A.second && "Invalid interval");
4291   assert(B.first <= B.second && "Invalid interval");
4292 
4293   // Share one boundary.
4294   if ((A.first == B.first) || (A.second == B.second))
4295     return true;
4296 
4297   // full intersect: [    ***     )  B
4298   //                     [***)       A
4299   if ((A.first > B.first) && (A.second < B.second))
4300     return true;
4301 
4302   // right intersect: [     ***)        B
4303   //                       [***      )  A
4304   if ((A.first > B.first) && (A.first < B.second) && (A.second > B.second))
4305     return true;
4306 
4307   // left intersect:      [***      )  B
4308   //                 [     ***)        A
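  // Note that (B.second > B.first) holds for any non-empty B, so this
  // branch also returns true when A fully contains B.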
4309   if ((A.first < B.first) && (B.first < A.second) && (B.second > B.first))
4310     return true;
4311 
4312   return false;
4313 }
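// For example: intersects([1,3), [3,5)) is false, because half-open
// intervals that merely touch do not overlap; intersects([1,4), [3,5)) is
// true (left intersect); and intersects([1,3), [1,5)) is true (shared left
// boundary).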
4314 
4315 void ResourceSegments::sortAndMerge() {
4316   if (_Intervals.size() <= 1)
4317     return;
4318 
4319   // First sort the collection.
4320   _Intervals.sort(sortIntervals);
4321 
4322   // We can use std::next here because the list holds at least two elements.
4323   auto next = std::next(std::begin(_Intervals));
4324   auto E = std::end(_Intervals);
4325   for (; next != E; ++next) {
4326     if (std::prev(next)->second >= next->first) {
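      // Since add() rejects overlapping segments, prev can at most touch
      // next here (prev->second == next->first), so extending next's left
      // boundary is enough; no max over the right boundaries is needed.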
4327       next->first = std::prev(next)->first;
4328       _Intervals.erase(std::prev(next));
4329       continue;
4330     }
4331   }
4332 }
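// For example (hypothetical contents): {[3,5), [1,3), [7,8)} is first
// sorted to {[1,3), [3,5), [7,8)} and then merged to {[1,5), [7,8)}.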
4333