//===---- LatencyPriorityQueue.cpp - A latency-oriented priority queue ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the LatencyPriorityQueue class, which is a
// SchedulingPriorityQueue that schedules using latency information to
// reduce the length of the critical path through the basic block.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

#define DEBUG_TYPE "scheduler"

bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;
  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more other nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return RHSNum < LHSNum;
}

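// Note: this comparator orders nodes from lowest to highest scheduling
// priority; returning true means LHS should rank below RHS. pop() below scans
// the queue for the element that is maximal under this ordering, so the node
// with the larger getLatency() value (and, on a tie, the one that solely
// blocks more successors) is issued first.
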
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = nullptr;
  for (const SDep &P : SU->Preds) {
    SUnit &Pred = *P.getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor.  If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return nullptr;
      OnlyAvailablePred = &Pred;
    }
  }

  return OnlyAvailablePred;
}

void LatencyPriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node.  Count the number of nodes
  // that this node is the sole unscheduled node for.
  unsigned NumNodesBlocking = 0;
  for (const SDep &Succ : SU->Succs)
    if (getSingleUnscheduledPred(Succ.getSUnit()) == SU)
      ++NumNodesBlocking;
  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;

  Queue.push_back(SU);
}

// scheduledNode - As nodes are scheduled, we look to see if there are any
// successor nodes that have a single unscheduled predecessor.  If so, that
// single predecessor has a higher priority, since scheduling it will make
// the node available.
void LatencyPriorityQueue::scheduledNode(SUnit *SU) {
  for (const SDep &Succ : SU->Succs)
    AdjustPriorityOfUnscheduledPreds(Succ.getSUnit());
}

/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled.  If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet.  If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable) return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue.  First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}

SUnit *LatencyPriorityQueue::pop() {
  if (empty()) return nullptr;
  std::vector<SUnit *>::iterator Best = Queue.begin();
  for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
       E = Queue.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != std::prev(Queue.end()))
    std::swap(*Best, Queue.back());
  Queue.pop_back();
  return V;
}

void LatencyPriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = find(Queue, SU);
  assert(I != Queue.end() && "Queue doesn't contain the SU being removed!");
  if (I != std::prev(Queue.end()))
    std::swap(*I, Queue.back());
  Queue.pop_back();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void LatencyPriorityQueue::dump(ScheduleDAG *DAG) const {
  dbgs() << "Latency Priority Queue\n";
  dbgs() << "  Number of Queue Entries: " << Queue.size() << "\n";
  for (const SUnit *SU : Queue) {
    dbgs() << "    ";
    DAG->dumpNode(*SU);
  }
}
#endif