xref: /freebsd/contrib/llvm-project/llvm/lib/CodeGen/ScheduleDAG.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
1 //===- ScheduleDAG.cpp - Implement the ScheduleDAG class ------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file Implements the ScheduleDAG class, which is a base class used by
10 /// scheduling implementation classes.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/CodeGen/ScheduleDAG.h"
15 #include "llvm/ADT/STLExtras.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/ADT/Statistic.h"
18 #include "llvm/CodeGen/MachineFunction.h"
19 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
20 #include "llvm/CodeGen/SelectionDAGNodes.h"
21 #include "llvm/CodeGen/TargetInstrInfo.h"
22 #include "llvm/CodeGen/TargetRegisterInfo.h"
23 #include "llvm/CodeGen/TargetSubtargetInfo.h"
24 #include "llvm/Config/llvm-config.h"
25 #include "llvm/Support/CommandLine.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/raw_ostream.h"
29 #include <algorithm>
30 #include <cassert>
31 #include <iterator>
32 #include <limits>
33 #include <utility>
34 #include <vector>
35 
36 using namespace llvm;
37 
38 #define DEBUG_TYPE "pre-RA-sched"
39 
40 STATISTIC(NumNewPredsAdded, "Number of times a  single predecessor was added");
41 STATISTIC(NumTopoInits,
42           "Number of times the topological order has been recomputed");
43 
44 #ifndef NDEBUG
45 static cl::opt<bool> StressSchedOpt(
46   "stress-sched", cl::Hidden, cl::init(false),
47   cl::desc("Stress test instruction scheduling"));
48 #endif
49 
// Out-of-line virtual method anchor: pins this class's vtable to this file.
void SchedulingPriorityQueue::anchor() {}
51 
/// Construct a ScheduleDAG for the given function, caching the target hooks
/// (instruction info, register info) used throughout scheduling.
ScheduleDAG::ScheduleDAG(MachineFunction &mf)
    : TM(mf.getTarget()), TII(mf.getSubtarget().getInstrInfo()),
      TRI(mf.getSubtarget().getRegisterInfo()), MF(mf),
      MRI(mf.getRegInfo()) {
#ifndef NDEBUG
  // Latch the command-line stress flag once per DAG.
  StressSched = StressSchedOpt;
#endif
}

ScheduleDAG::~ScheduleDAG() = default;
62 
/// Reset the DAG to an empty state so it can be rebuilt for a new region.
void ScheduleDAG::clearDAG() {
  SUnits.clear();
  EntrySU = SUnit();
  ExitSU = SUnit();
}
68 
getNodeDesc(const SDNode * Node) const69 const MCInstrDesc *ScheduleDAG::getNodeDesc(const SDNode *Node) const {
70   if (!Node || !Node->isMachineOpcode()) return nullptr;
71   return &TII->get(Node->getMachineOpcode());
72 }
73 
/// Print this dependence edge: its kind, latency, and kind-specific detail
/// (assigned register for data deps, ordering class for order deps).
LLVM_DUMP_METHOD void SDep::dump(const TargetRegisterInfo *TRI) const {
  // First the kind tag...
  switch (getKind()) {
  case Data:   dbgs() << "Data"; break;
  case Anti:   dbgs() << "Anti"; break;
  case Output: dbgs() << "Out "; break;
  case Order:  dbgs() << "Ord "; break;
  }

  // ...then the kind-specific payload.
  switch (getKind()) {
  case Data:
    dbgs() << " Latency=" << getLatency();
    // Only print a register once one has actually been assigned.
    if (TRI && isAssignedRegDep())
      dbgs() << " Reg=" << printReg(getReg(), TRI);
    break;
  case Anti:
  case Output:
    dbgs() << " Latency=" << getLatency();
    break;
  case Order:
    dbgs() << " Latency=" << getLatency();
    switch(Contents.OrdKind) {
    case Barrier:      dbgs() << " Barrier"; break;
    case MayAliasMem:
    case MustAliasMem: dbgs() << " Memory"; break;
    case Artificial:   dbgs() << " Artificial"; break;
    case Weak:         dbgs() << " Weak"; break;
    case Cluster:      dbgs() << " Cluster"; break;
    }
    break;
  }
}
105 
/// Add the edge \p D as a predecessor of this node, and mirror it as a
/// successor edge on D's SUnit. Returns true if a new edge was added,
/// false if an overlapping edge already existed (in which case only the
/// existing edge's latency may be widened).
bool SUnit::addPred(const SDep &D, bool Required) {
  // If this node already has this dependence, don't add a redundant one.
  for (SDep &PredDep : Preds) {
    // Zero-latency weak edges may be added purely for heuristic ordering. Don't
    // add them if another kind of edge already exists.
    if (!Required && PredDep.getSUnit() == D.getSUnit())
      return false;
    if (PredDep.overlaps(D)) {
      // Extend the latency if needed. Equivalent to
      // removePred(PredDep) + addPred(D).
      if (PredDep.getLatency() < D.getLatency()) {
        SUnit *PredSU = PredDep.getSUnit();
        // Find the corresponding successor in N.
        SDep ForwardD = PredDep;
        ForwardD.setSUnit(this);
        for (SDep &SuccDep : PredSU->Succs) {
          if (SuccDep == ForwardD) {
            SuccDep.setLatency(D.getLatency());
            break;
          }
        }
        PredDep.setLatency(D.getLatency());
      }
      return false;
    }
  }
  // Now add a corresponding succ to N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  // Update the bookkeeping.
  if (D.getKind() == SDep::Data) {
    assert(NumPreds < std::numeric_limits<unsigned>::max() &&
           "NumPreds will overflow!");
    assert(N->NumSuccs < std::numeric_limits<unsigned>::max() &&
           "NumSuccs will overflow!");
    ++NumPreds;
    ++N->NumSuccs;
  }
  // Weak edges are counted separately from strong ones so schedulers can
  // ignore them cheaply when deciding readiness.
  if (!N->isScheduled) {
    if (D.isWeak()) {
      ++WeakPredsLeft;
    }
    else {
      assert(NumPredsLeft < std::numeric_limits<unsigned>::max() &&
             "NumPredsLeft will overflow!");
      ++NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak()) {
      ++N->WeakSuccsLeft;
    }
    else {
      assert(N->NumSuccsLeft < std::numeric_limits<unsigned>::max() &&
             "NumSuccsLeft will overflow!");
      ++N->NumSuccsLeft;
    }
  }
  Preds.push_back(D);
  N->Succs.push_back(P);
  // A nonzero-latency edge may change cached depth/height values.
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
  return true;
}
173 
/// Remove the edge \p D from this node's pred list and its mirror from the
/// predecessor's succ list, undoing addPred's bookkeeping. Does nothing if
/// the edge is not present.
void SUnit::removePred(const SDep &D) {
  // Find the matching predecessor.
  SmallVectorImpl<SDep>::iterator I = llvm::find(Preds, D);
  if (I == Preds.end())
    return;
  // Find the corresponding successor in N.
  SDep P = D;
  P.setSUnit(this);
  SUnit *N = D.getSUnit();
  SmallVectorImpl<SDep>::iterator Succ = llvm::find(N->Succs, P);
  assert(Succ != N->Succs.end() && "Mismatching preds / succs lists!");
  // Update the bookkeeping (mirror image of addPred).
  if (P.getKind() == SDep::Data) {
    assert(NumPreds > 0 && "NumPreds will underflow!");
    assert(N->NumSuccs > 0 && "NumSuccs will underflow!");
    --NumPreds;
    --N->NumSuccs;
  }
  if (!N->isScheduled) {
    if (D.isWeak()) {
      assert(WeakPredsLeft > 0 && "WeakPredsLeft will underflow!");
      --WeakPredsLeft;
    } else {
      assert(NumPredsLeft > 0 && "NumPredsLeft will underflow!");
      --NumPredsLeft;
    }
  }
  if (!isScheduled) {
    if (D.isWeak()) {
      assert(N->WeakSuccsLeft > 0 && "WeakSuccsLeft will underflow!");
      --N->WeakSuccsLeft;
    } else {
      assert(N->NumSuccsLeft > 0 && "NumSuccsLeft will underflow!");
      --N->NumSuccsLeft;
    }
  }
  N->Succs.erase(Succ);
  Preds.erase(I);
  // Removing a nonzero-latency edge may shrink cached depth/height values.
  if (P.getLatency() != 0) {
    this->setDepthDirty();
    N->setHeightDirty();
  }
}
217 
setDepthDirty()218 void SUnit::setDepthDirty() {
219   if (!isDepthCurrent) return;
220   SmallVector<SUnit*, 8> WorkList;
221   WorkList.push_back(this);
222   do {
223     SUnit *SU = WorkList.pop_back_val();
224     SU->isDepthCurrent = false;
225     for (SDep &SuccDep : SU->Succs) {
226       SUnit *SuccSU = SuccDep.getSUnit();
227       if (SuccSU->isDepthCurrent)
228         WorkList.push_back(SuccSU);
229     }
230   } while (!WorkList.empty());
231 }
232 
setHeightDirty()233 void SUnit::setHeightDirty() {
234   if (!isHeightCurrent) return;
235   SmallVector<SUnit*, 8> WorkList;
236   WorkList.push_back(this);
237   do {
238     SUnit *SU = WorkList.pop_back_val();
239     SU->isHeightCurrent = false;
240     for (SDep &PredDep : SU->Preds) {
241       SUnit *PredSU = PredDep.getSUnit();
242       if (PredSU->isHeightCurrent)
243         WorkList.push_back(PredSU);
244     }
245   } while (!WorkList.empty());
246 }
247 
setDepthToAtLeast(unsigned NewDepth)248 void SUnit::setDepthToAtLeast(unsigned NewDepth) {
249   if (NewDepth <= getDepth())
250     return;
251   setDepthDirty();
252   Depth = NewDepth;
253   isDepthCurrent = true;
254 }
255 
setHeightToAtLeast(unsigned NewHeight)256 void SUnit::setHeightToAtLeast(unsigned NewHeight) {
257   if (NewHeight <= getHeight())
258     return;
259   setHeightDirty();
260   Height = NewHeight;
261   isHeightCurrent = true;
262 }
263 
/// Compute this node's depth: the longest path (by edge latency) from a DAG
/// root down to this node, taken as the max over predecessor depths plus the
/// connecting edge's latency.
void SUnit::ComputeDepth() {
  // Iterative evaluation: a node's depth is finalized only once all of its
  // predecessors have a current cached depth.
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxPredDepth = 0;
    for (const SDep &PredDep : Cur->Preds) {
      SUnit *PredSU = PredDep.getSUnit();
      if (PredSU->isDepthCurrent)
        MaxPredDepth = std::max(MaxPredDepth,
                                PredSU->Depth + PredDep.getLatency());
      else {
        // Leave Cur on the list; it is revisited after this pred resolves.
        Done = false;
        WorkList.push_back(PredSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxPredDepth != Cur->Depth) {
        // Depth changed: successors cached off the old value are stale.
        Cur->setDepthDirty();
        Cur->Depth = MaxPredDepth;
      }
      Cur->isDepthCurrent = true;
    }
  } while (!WorkList.empty());
}
294 
/// Compute this node's height: the longest path (by edge latency) from this
/// node down to a DAG leaf/exit, taken as the max over successor heights
/// plus the connecting edge's latency.
void SUnit::ComputeHeight() {
  // Iterative evaluation: a node's height is finalized only once all of its
  // successors have a current cached height.
  SmallVector<SUnit*, 8> WorkList;
  WorkList.push_back(this);
  do {
    SUnit *Cur = WorkList.back();

    bool Done = true;
    unsigned MaxSuccHeight = 0;
    for (const SDep &SuccDep : Cur->Succs) {
      SUnit *SuccSU = SuccDep.getSUnit();
      if (SuccSU->isHeightCurrent)
        MaxSuccHeight = std::max(MaxSuccHeight,
                                 SuccSU->Height + SuccDep.getLatency());
      else {
        // Leave Cur on the list; it is revisited after this succ resolves.
        Done = false;
        WorkList.push_back(SuccSU);
      }
    }

    if (Done) {
      WorkList.pop_back();
      if (MaxSuccHeight != Cur->Height) {
        // Height changed: predecessors cached off the old value are stale.
        Cur->setHeightDirty();
        Cur->Height = MaxSuccHeight;
      }
      Cur->isHeightCurrent = true;
    }
  } while (!WorkList.empty());
}
325 
/// Move the deepest data-dependence predecessor edge to the front of the
/// pred list, so heuristics that walk Preds in order see the likely
/// critical-path edge first.
void SUnit::biasCriticalPath() {
  if (NumPreds < 2)
    return;

  SUnit::pred_iterator BestI = Preds.begin();
  // NOTE(review): MaxDepth is seeded from the first pred even when that edge
  // is not a data dep; only data deps can displace it below — confirm this
  // asymmetry is intended.
  unsigned MaxDepth = BestI->getSUnit()->getDepth();
  for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
       ++I) {
    if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) {
      MaxDepth = I->getSUnit()->getDepth();
      BestI = I;
    }
  }
  // Swap the winning edge into the first slot.
  if (BestI != Preds.begin())
    std::swap(*Preds.begin(), *BestI);
}
342 
343 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// Dump the scheduling bookkeeping counters for this node (weak counts are
/// only shown when nonzero).
LLVM_DUMP_METHOD void SUnit::dumpAttributes() const {
  dbgs() << "  # preds left       : " << NumPredsLeft << "\n";
  dbgs() << "  # succs left       : " << NumSuccsLeft << "\n";
  if (WeakPredsLeft)
    dbgs() << "  # weak preds left  : " << WeakPredsLeft << "\n";
  if (WeakSuccsLeft)
    dbgs() << "  # weak succs left  : " << WeakSuccsLeft << "\n";
  dbgs() << "  # rdefs left       : " << NumRegDefsLeft << "\n";
  dbgs() << "  Latency            : " << Latency << "\n";
  dbgs() << "  Depth              : " << getDepth() << "\n";
  dbgs() << "  Height             : " << getHeight() << "\n";
}
356 
/// Print a short name for \p SU: the special entry/exit markers, or
/// "SU(<node number>)" for ordinary nodes.
LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeName(const SUnit &SU) const {
  if (&SU == &EntrySU)
    dbgs() << "EntrySU";
  else if (&SU == &ExitSU)
    dbgs() << "ExitSU";
  else
    dbgs() << "SU(" << SU.NodeNum << ")";
}
365 
dumpNodeAll(const SUnit & SU) const366 LLVM_DUMP_METHOD void ScheduleDAG::dumpNodeAll(const SUnit &SU) const {
367   dumpNode(SU);
368   SU.dumpAttributes();
369   if (SU.Preds.size() > 0) {
370     dbgs() << "  Predecessors:\n";
371     for (const SDep &Dep : SU.Preds) {
372       dbgs() << "    ";
373       dumpNodeName(*Dep.getSUnit());
374       dbgs() << ": ";
375       Dep.dump(TRI);
376       dbgs() << '\n';
377     }
378   }
379   if (SU.Succs.size() > 0) {
380     dbgs() << "  Successors:\n";
381     for (const SDep &Dep : SU.Succs) {
382       dbgs() << "    ";
383       dumpNodeName(*Dep.getSUnit());
384       dbgs() << ": ";
385       Dep.dump(TRI);
386       dbgs() << '\n';
387     }
388   }
389 }
390 #endif
391 
392 #ifndef NDEBUG
/// Verify a fully scheduled DAG: every node with at least one edge must have
/// been scheduled, its height/depth must fit in a signed int, and it must
/// have no strong deps left in the direction being checked. Dumps offenders
/// and asserts on failure. Returns the number of live (non-edge-less) nodes.
unsigned ScheduleDAG::VerifyScheduledDAG(bool isBottomUp) {
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  for (const SUnit &SUnit : SUnits) {
    if (!SUnit.isScheduled) {
      // Nodes with no edges at all are allowed to remain unscheduled.
      if (SUnit.NumPreds == 0 && SUnit.NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SUnit);
      dbgs() << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnit.isScheduled &&
        (isBottomUp ? SUnit.getHeight() : SUnit.getDepth()) >
          unsigned(std::numeric_limits<int>::max())) {
      if (!AnyNotSched)
        dbgs() << "*** Scheduling failed! ***\n";
      dumpNode(SUnit);
      dbgs() << "has an unexpected "
           << (isBottomUp ? "Height" : "Depth") << " value!\n";
      AnyNotSched = true;
    }
    // Bottom-up scheduling retires successor counts; top-down retires
    // predecessor counts. Whichever applies must have reached zero.
    if (isBottomUp) {
      if (SUnit.NumSuccsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SUnit);
        dbgs() << "has successors left!\n";
        AnyNotSched = true;
      }
    } else {
      if (SUnit.NumPredsLeft != 0) {
        if (!AnyNotSched)
          dbgs() << "*** Scheduling failed! ***\n";
        dumpNode(SUnit);
        dbgs() << "has predecessors left!\n";
        AnyNotSched = true;
      }
    }
  }
  assert(!AnyNotSched);
  return SUnits.size() - DeadNodes;
}
439 #endif
440 
/// Recompute the topological ordering of the whole DAG from scratch.
void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() {
  // The idea of the algorithm is taken from
  // "Online algorithms for managing the topological order of
  // a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
  // This is the MNR algorithm, which was first introduced by
  // A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
  // "Maintaining a topological order under edge insertions".
  //
  // Short description of the algorithm:
  //
  // Topological ordering, ord, of a DAG maps each node to a topological
  // index so that for all edges X->Y it is the case that ord(X) < ord(Y).
  //
  // This means that if there is a path from the node X to the node Z,
  // then ord(X) < ord(Z).
  //
  // This property can be used to check for reachability of nodes:
  // if Z is reachable from X, then an insertion of the edge Z->X would
  // create a cycle.
  //
  // The algorithm first computes a topological ordering for the DAG by
  // initializing the Index2Node and Node2Index arrays and then tries to keep
  // the ordering up-to-date after edge insertions by reordering the DAG.
  //
  // On insertion of the edge X->Y, the algorithm first marks by calling DFS
  // the nodes reachable from Y, and then shifts them using Shift to lie
  // immediately after X in Index2Node.

  // Cancel pending updates, mark as valid.
  Dirty = false;
  Updates.clear();

  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);

  // Initialize the data structures.
  if (ExitSU)
    WorkList.push_back(ExitSU);
  for (SUnit &SU : SUnits) {
    int NodeNum = SU.NodeNum;
    unsigned Degree = SU.Succs.size();
    // Temporarily use the Node2Index array as scratch space for degree counts.
    Node2Index[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU.Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(&SU);
    }
  }

  // Kahn's algorithm over successor edges, handing out indexes from
  // DAGSize-1 down to 0 so that every pred ends up before its succs.
  int Id = DAGSize;
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    // Boundary nodes (e.g. ExitSU) carry out-of-range NodeNums and are not
    // given a topological index.
    if (SU->NodeNum < DAGSize)
      Allocate(SU->NodeNum, --Id);
    for (const SDep &PredDep : SU->Preds) {
      SUnit *SU = PredDep.getSUnit(); // Note: intentionally shadows outer SU.
      if (SU->NodeNum < DAGSize && !--Node2Index[SU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(SU);
    }
  }

  Visited.resize(DAGSize);
  NumTopoInits++;

#ifndef NDEBUG
  // Check correctness of the ordering
  for (SUnit &SU : SUnits)  {
    for (const SDep &PD : SU.Preds) {
      assert(Node2Index[SU.NodeNum] > Node2Index[PD.getSUnit()->NodeNum] &&
      "Wrong topological sorting");
    }
  }
#endif
}
525 
FixOrder()526 void ScheduleDAGTopologicalSort::FixOrder() {
527   // Recompute from scratch after new nodes have been added.
528   if (Dirty) {
529     InitDAGTopologicalSorting();
530     return;
531   }
532 
533   // Otherwise apply updates one-by-one.
534   for (auto &U : Updates)
535     AddPred(U.first, U.second);
536   Updates.clear();
537 }
538 
AddPredQueued(SUnit * Y,SUnit * X)539 void ScheduleDAGTopologicalSort::AddPredQueued(SUnit *Y, SUnit *X) {
540   // Recomputing the order from scratch is likely more efficient than applying
541   // updates one-by-one for too many updates. The current cut-off is arbitrarily
542   // chosen.
543   Dirty = Dirty || Updates.size() > 10;
544 
545   if (Dirty)
546     return;
547 
548   Updates.emplace_back(Y, X);
549 }
550 
/// Update the topological ordering to accommodate a new edge from \p X
/// (predecessor) to \p Y (successor).
void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(X) < Ord(Y) ?
  if (LowerBound < UpperBound) {
    // Update the topological order: mark everything reachable from Y within
    // the affected index range...
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // ...and shift the marked nodes so they sit after X. Recompute
    // topological indexes for the affected range.
    Shift(Visited, LowerBound, UpperBound);
  }

  NumNewPredsAdded++;
}
568 
/// Notification that the edge N->M has been removed. Deliberately a no-op:
/// deleting an edge only relaxes constraints, so the existing topological
/// order remains valid (merely over-constrained) and needs no repair.
void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) {
  // InitDAGTopologicalSorting();
}
572 
/// Iterative DFS over successor edges starting at \p SU, confined to nodes
/// whose topological index is below \p UpperBound. Marks every reached node
/// in Visited. Sets \p HasLoop and returns early if a node at exactly
/// UpperBound is reached (i.e. the candidate edge would close a cycle).
void ScheduleDAGTopologicalSort::DFS(const SUnit *SU, int UpperBound,
                                     bool &HasLoop) {
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  do {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (const SDep &SuccDep : llvm::reverse(SU->Succs)) {
      unsigned s = SuccDep.getSUnit()->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (s >= Node2Index.size())
        continue;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SuccDep.getSUnit());
      }
    }
  } while (!WorkList.empty());
}
599 
/// Return the node numbers strictly between \p StartSU and \p TargetSU that
/// lie on some StartSU->TargetSU path, computed as the intersection of a
/// forward pass from StartSU and a backward pass from TargetSU. \p Success
/// is set to false if no such path exists.
std::vector<int> ScheduleDAGTopologicalSort::GetSubGraph(const SUnit &StartSU,
                                                         const SUnit &TargetSU,
                                                         bool &Success) {
  std::vector<const SUnit*> WorkList;
  int LowerBound = Node2Index[StartSU.NodeNum];
  int UpperBound = Node2Index[TargetSU.NodeNum];
  bool Found = false;
  BitVector VisitedBack;
  std::vector<int> Nodes;

  // If Ord(StartSU) > Ord(TargetSU), TargetSU cannot be reachable.
  if (LowerBound > UpperBound) {
    Success = false;
    return Nodes;
  }

  WorkList.reserve(SUnits.size());
  Visited.reset();

  // Starting from StartSU, visit all successors up
  // to UpperBound.
  WorkList.push_back(&StartSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (const SDep &SD : llvm::reverse(SU->Succs)) {
      const SUnit *Succ = SD.getSUnit();
      unsigned s = Succ->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. ExitSU).
      if (Succ->isBoundaryNode())
        continue;
      if (Node2Index[s] == UpperBound) {
        // Reached TargetSU itself; record that a path exists but do not
        // include the endpoint in the subgraph.
        Found = true;
        continue;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        Visited.set(s);
        WorkList.push_back(Succ);
      }
    }
  } while (!WorkList.empty());

  if (!Found) {
    Success = false;
    return Nodes;
  }

  WorkList.clear();
  VisitedBack.resize(SUnits.size());
  Found = false;

  // Starting from TargetSU, visit all predecessors up
  // to LowerBound. SUs that are visited by the two
  // passes are added to Nodes.
  WorkList.push_back(&TargetSU);
  do {
    const SUnit *SU = WorkList.back();
    WorkList.pop_back();
    for (const SDep &SD : llvm::reverse(SU->Preds)) {
      const SUnit *Pred = SD.getSUnit();
      unsigned s = Pred->NodeNum;
      // Edges to non-SUnits are allowed but ignored (e.g. EntrySU).
      if (Pred->isBoundaryNode())
        continue;
      if (Node2Index[s] == LowerBound) {
        // Reached StartSU; endpoints are excluded from the result.
        Found = true;
        continue;
      }
      // Only keep nodes also reached by the forward pass.
      if (!VisitedBack.test(s) && Visited.test(s)) {
        VisitedBack.set(s);
        WorkList.push_back(Pred);
        Nodes.push_back(s);
      }
    }
  } while (!WorkList.empty());

  assert(Found && "Error in SUnit Graph!");
  Success = true;
  return Nodes;
}
680 
/// Reassign topological indexes inside [LowerBound, UpperBound]: unvisited
/// nodes are compacted toward the bottom of the range, and the visited nodes
/// (those reachable from the new edge's target) are moved to the freed slots
/// at the top, preserving their relative order.
void ScheduleDAGTopologicalSort::Shift(BitVector& Visited, int LowerBound,
                                       int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      // Slide unvisited nodes down by the number of visited nodes seen.
      Allocate(w, i - shift);
    }
  }

  // Place the deferred (visited) nodes into the `shift` slots freed at the
  // top of the range; i is now UpperBound + 1.
  for (unsigned LI : L) {
    Allocate(LI, i - shift);
    i = i + 1;
  }
}
705 
WillCreateCycle(SUnit * TargetSU,SUnit * SU)706 bool ScheduleDAGTopologicalSort::WillCreateCycle(SUnit *TargetSU, SUnit *SU) {
707   FixOrder();
708   // Is SU reachable from TargetSU via successor edges?
709   if (IsReachable(SU, TargetSU))
710     return true;
711   for (const SDep &PredDep : TargetSU->Preds)
712     if (PredDep.isAssignedRegDep() &&
713         IsReachable(SU, PredDep.getSUnit()))
714       return true;
715   return false;
716 }
717 
AddSUnitWithoutPredecessors(const SUnit * SU)718 void ScheduleDAGTopologicalSort::AddSUnitWithoutPredecessors(const SUnit *SU) {
719   assert(SU->NodeNum == Index2Node.size() && "Node cannot be added at the end");
720   assert(SU->NumPreds == 0 && "Can only add SU's with no predecessors");
721   Node2Index.push_back(Index2Node.size());
722   Index2Node.push_back(SU->NodeNum);
723   Visited.resize(Node2Index.size());
724 }
725 
/// Return true if \p SU is reachable from \p TargetSU via successor edges,
/// i.e. inserting the edge SU->TargetSU would create a cycle.
bool ScheduleDAGTopologicalSort::IsReachable(const SUnit *SU,
                                             const SUnit *TargetSU) {
  assert(TargetSU != nullptr && "Invalid target SUnit");
  assert(SU != nullptr && "Invalid SUnit");
  // Apply any pending order updates before querying.
  FixOrder();
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU) ? (Otherwise reachability is impossible.)
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}
745 
Allocate(int n,int index)746 void ScheduleDAGTopologicalSort::Allocate(int n, int index) {
747   Node2Index[n] = index;
748   Index2Node[index] = n;
749 }
750 
/// The topological sorter keeps references to the scheduler's SUnit array
/// and (optional) exit node; it does not own them.
ScheduleDAGTopologicalSort::
ScheduleDAGTopologicalSort(std::vector<SUnit> &sunits, SUnit *exitsu)
  : SUnits(sunits), ExitSU(exitsu) {}

ScheduleHazardRecognizer::~ScheduleHazardRecognizer() = default;
756