/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/ |
SIMachineScheduler.cpp
  177  NodeNum2Index[SU->NodeNum] = SUnits.size();   in addUnit()
  178  SUnits.push_back(SU);   in addUnit()
  272  for (SUnit* SU : SUnits) {   in fastSchedule()
  390  for (SUnit* SU : SUnits) {   in schedule()
  408  assert(SUnits.size() == ScheduledSUnits.size() &&   in schedule()
  410  for (SUnit* SU : SUnits) {   in schedule()
  420  for (SUnit* SU : SUnits) {   in undoSchedule()
  427  HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);   in undoSchedule()
  466  if (SuccSU->NodeNum >= DAG->SUnits.size())   in releaseSuccessors()
  492  HasLowLatencyNonWaitedParent.assign(SUnits.size(), 0);   in nodeScheduled()
  [all …]
|
GCNMinRegStrategy.cpp
  60   void initNumPreds(const decltype(ScheduleDAG::SUnits) &SUnits);
  80   void GCNMinRegScheduler::initNumPreds(const decltype(ScheduleDAG::SUnits) &SUnits) {   in initNumPreds() argument
  81   NumPreds.resize(SUnits.size());   in initNumPreds()
  82   for (unsigned I = 0; I < SUnits.size(); ++I)   in initNumPreds()
  83   NumPreds[I] = SUnits[I].NumPredsLeft;   in initNumPreds()
  227  const auto &SUnits = DAG.SUnits;   in schedule() local
  229  Schedule.reserve(SUnits.size());   in schedule()
  231  initNumPreds(SUnits);   in schedule()
  264  assert(SUnits.size() == Schedule.size());   in schedule()
|
GCNILPSched.cpp
  290  auto &SUnits = const_cast<ScheduleDAG&>(DAG).SUnits;   in schedule() local
  293  SUSavedCopy.resize(SUnits.size());   in schedule()
  297  for (const SUnit &SU : SUnits)   in schedule()
  300  SUNumbers.assign(SUnits.size(), 0);   in schedule()
  301  for (const SUnit &SU : SUnits)   in schedule()
  311  Schedule.reserve(SUnits.size());   in schedule()
  343  assert(SUnits.size() == Schedule.size());   in schedule()
  348  for (auto &SU : SUnits)   in schedule()
|
GCNVOPDUtils.cpp
  166  for (ISUI = DAG->SUnits.begin(); ISUI != DAG->SUnits.end(); ++ISUI) {   in apply()
  173  for (JSUI = ISUI + 1; JSUI != DAG->SUnits.end(); ++JSUI) {   in apply()
|
AMDGPUIGroupLP.cpp
  948   auto I = DAG->SUnits.rbegin();   in apply()
  949   auto E = DAG->SUnits.rend();   in apply()
  985   auto I = DAG->SUnits.begin();   in apply()
  986   auto E = DAG->SUnits.end();   in apply()
  1321  for (auto &SU : DAG->SUnits)   in apply()
  1382  for (SUnit &SU : DAG->SUnits) {   in analyzeDAG()
  1865  for (auto &Elt : SyncPipe[0].DAG->SUnits) {   in apply()
  2081  for (auto &SU : DAG->SUnits) {   in applyIGLPStrategy()
  2517  for (auto &SU : DAG->SUnits) {   in initSchedGroup()
  2529  for (auto E = DAG->SUnits.rend(); RIter != E; ++RIter) {   in initSchedGroup()
  [all …]
|
SIMachineScheduler.h
  62   std::vector<SUnit*> SUnits;   variable
  135  int getCost() { return SUnits.size(); }   in getCost()
|
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/ |
LatencyPriorityQueue.h
  34  std::vector<SUnit> *SUnits = nullptr;   variable
  53  SUnits = &sunits;   in initNodes()
  54  NumNodesSolelyBlocking.resize(SUnits->size(), 0);   in initNodes()
  58  NumNodesSolelyBlocking.resize(SUnits->size(), 0);   in addNode()
  65  SUnits = nullptr;   in releaseState()
  69  assert(NodeNum < (*SUnits).size());   in getLatency()
  70  return (*SUnits)[NodeNum].getHeight();   in getLatency()
|
ResourcePriorityQueue.h
  39  std::vector<SUnit> *SUnits;   variable
  84  NumNodesSolelyBlocking.resize(SUnits->size(), 0);   in addNode()
  90  SUnits = nullptr;   in releaseState()
  94  assert(NodeNum < (*SUnits).size());   in getLatency()
  95  return (*SUnits)[NodeNum].getHeight();   in getLatency()
|
ScheduleDAGInstrs.h
  388  const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];   in newSUnit()
  390  SUnits.emplace_back(MI, (unsigned)SUnits.size());   in newSUnit()
  391  assert((Addr == nullptr || Addr == &SUnits[0]) &&   in newSUnit()
  393  return &SUnits.back();   in newSUnit()
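The newSUnit() hits above show why the SUnits vector is reserved up front (see the SUnits.reserve(NumRegionInstrs) hit in ScheduleDAGInstrs.cpp below): SUnit* pointers already handed out must stay valid, so emplace_back may never reallocate, and the Addr assert catches it if it does. A minimal standalone analogue of that reserve-then-emplace pattern; SUnitLike and the count of 64 are made up for illustration, this is not LLVM code:

    #include <cassert>
    #include <vector>

    struct SUnitLike {
      unsigned NodeNum;
      explicit SUnitLike(unsigned N) : NodeNum(N) {}
    };

    int main() {
      std::vector<SUnitLike> SUnits;
      SUnits.reserve(64); // analogue of SUnits.reserve(NumRegionInstrs) in initSUnits()

      const SUnitLike *Addr = nullptr;
      for (unsigned I = 0; I != 64; ++I) {
        if (!SUnits.empty())
          Addr = &SUnits[0];          // remember the old base address
        SUnits.emplace_back(I);
        // As in newSUnit(): growing past the reservation would reallocate and
        // invalidate every pointer already handed out, so assert it never happens.
        assert(Addr == nullptr || Addr == &SUnits[0]);
      }
      return 0;
    }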
|
ScheduleDAG.h
  527  virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
  579  std::vector<SUnit> SUnits;  ///< The scheduling units.   variable
  709  return nodes_iterator(G->SUnits.begin());
  712  return nodes_iterator(G->SUnits.end());
  722  std::vector<SUnit> &SUnits;
  756  ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
|
MachinePipeliner.h
  169  std::vector<SUnit> &SUnits;   variable
  181  : SUnits(SUs), Blocked(SUs.size()), B(SUs.size()), AdjK(SUs.size()) {   in Circuits()
  195  B.assign(SUnits.size(), SmallPtrSet<SUnit *, 4>());   in reset()
  214  Topo(SUnits, &ExitSU) {   in SwingSchedulerDAG()
|
AntiDepBreaker.h
  43  virtual unsigned BreakAntiDependencies(const std::vector<SUnit> &SUnits,
|
/freebsd/contrib/llvm-project/llvm/lib/Target/Hexagon/ |
HexagonSubtarget.cpp
  247  for (SUnit &SU : DAG->SUnits) {   in apply()
  260  for (SUnit &SU : DAG->SUnits) {   in apply()
  323  for (unsigned su = 0, e = DAG->SUnits.size(); su != e; ++su) {   in apply()
  325  if (DAG->SUnits[su].getInstr()->isCall())   in apply()
  326  LastSequentialCall = &DAG->SUnits[su];   in apply()
  328  else if (DAG->SUnits[su].getInstr()->isCompare() && LastSequentialCall)   in apply()
  329  DAG->addEdge(&DAG->SUnits[su], SDep(LastSequentialCall, SDep::Barrier));   in apply()
  332  shouldTFRICallBind(HII, DAG->SUnits[su], DAG->SUnits[su+1]))   in apply()
  333  DAG->addEdge(&DAG->SUnits[su], SDep(&DAG->SUnits[su-1], SDep::Barrier));   in apply()
  349  const MachineInstr *MI = DAG->SUnits[su].getInstr();   in apply()
  [all …]
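Read together, the hits at 323-333 are a ScheduleDAGMutation that walks DAG->SUnits in instruction order, remembers the last call, and pins later compares behind it with artificial Barrier edges. A condensed sketch of that pattern; the class name CallCompareBarrier is invented here, while ScheduleDAGMutation, ScheduleDAGInstrs::addEdge and SDep::Barrier are the APIs visible in the hits:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/ScheduleDAGInstrs.h"
    #include "llvm/CodeGen/ScheduleDAGMutation.h"

    namespace {
    // Sketch of a DAG mutation in the style of the HexagonSubtarget.cpp hits above.
    class CallCompareBarrier : public llvm::ScheduleDAGMutation {
      void apply(llvm::ScheduleDAGInstrs *DAG) override {
        llvm::SUnit *LastSequentialCall = nullptr;
        for (llvm::SUnit &SU : DAG->SUnits) {
          const llvm::MachineInstr *MI = SU.getInstr();
          if (MI->isCall())
            LastSequentialCall = &SU;   // remember the most recent call
          else if (MI->isCompare() && LastSequentialCall)
            // Keep the compare ordered after that call with a barrier edge.
            DAG->addEdge(&SU, llvm::SDep(LastSequentialCall, llvm::SDep::Barrier));
        }
      }
    };
    } // end anonymous namespace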
|
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/ |
ScheduleDAG.cpp
  64   SUnits.clear();   in clearDAG()
  396  for (const SUnit &SUnit : SUnits) {   in VerifyScheduledDAG()
  437  return SUnits.size() - DeadNodes;   in VerifyScheduledDAG()
  473  unsigned DAGSize = SUnits.size();   in InitDAGTopologicalSorting()
  483  for (SUnit &SU : SUnits) {   in InitDAGTopologicalSorting()
  517  for (SUnit &SU : SUnits) {   in InitDAGTopologicalSorting()
  576  WorkList.reserve(SUnits.size());   in DFS()
  615  WorkList.reserve(SUnits.size());   in GetSubGraph()
  648  VisitedBack.resize(SUnits.size());   in GetSubGraph()
  753  : SUnits(sunits), ExitSU(exitsu) {}   in ScheduleDAGTopologicalSort()
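The InitDAGTopologicalSorting()/DFS()/GetSubGraph() hits are the ScheduleDAGTopologicalSort machinery that schedulers build over their SUnits vector (the Topo(SUnits, &ExitSU) hits in MachinePipeliner.h, ScheduleDAGInstrs.cpp and ScheduleDAGRRList.cpp). A hedged usage sketch; canAddEdgeWithoutCycle is a made-up helper, and in real schedulers Topo is a long-lived member rather than a local:

    #include "llvm/CodeGen/ScheduleDAG.h"
    #include <vector>

    // Sketch: consult the topological order before adding a new Pred -> Succ dependence.
    static bool canAddEdgeWithoutCycle(std::vector<llvm::SUnit> &SUnits,
                                       llvm::SUnit &ExitSU,
                                       llvm::SUnit *Pred, llvm::SUnit *Succ) {
      llvm::ScheduleDAGTopologicalSort Topo(SUnits, &ExitSU);
      Topo.InitDAGTopologicalSorting();   // assign the initial topological indices
      // Making Succ depend on Pred closes a cycle exactly when Pred is already
      // reachable from Succ, which is what IsReachable(Pred, Succ) reports.
      return !Topo.IsReachable(Pred, Succ);
    }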
|
MachinePipeliner.cpp
  842   for (auto &SU : SUnits) {   in addLoopCarriedDependences()
  939   for (SUnit &I : SUnits) {   in updatePhiDependences()
  1021  for (SUnit &I : SUnits) {   in changeDependences()
  1630  static void swapAntiDependences(std::vector<SUnit> &SUnits) {   in swapAntiDependences() argument
  1632  for (SUnit &SU : SUnits) {   in swapAntiDependences()
  1654  BitVector Added(SUnits.size());   in createAdjacencyStructure()
  1656  for (int i = 0, e = SUnits.size(); i != e; ++i) {   in createAdjacencyStructure()
  1659  for (auto &SI : SUnits[i].Succs) {   in createAdjacencyStructure()
  1685  for (auto &PI : SUnits[i].Preds) {   in createAdjacencyStructure()
  1686  if (!SUnits[i].getInstr()->mayStore() ||   in createAdjacencyStructure()
  [all …]
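The createAdjacencyStructure() hits (1654-1686) build a plain integer adjacency list keyed by SUnit::NodeNum so the circuit search in the Circuits class (MachinePipeliner.h above) can work on indices instead of SUnit pointers. A stripped-down sketch of that indexing step, using only the public SUnit/SDep API; buildAdjacency is a made-up name, and the real code also folds in anti-dependence and memory-ordering special cases:

    #include "llvm/ADT/BitVector.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/ScheduleDAG.h"
    #include <vector>

    // Sketch: AdjK[i] lists the NodeNums reachable in one hop from SUnits[i];
    // the BitVector avoids duplicate entries per source node.
    static std::vector<llvm::SmallVector<int, 4>>
    buildAdjacency(const std::vector<llvm::SUnit> &SUnits) {
      std::vector<llvm::SmallVector<int, 4>> AdjK(SUnits.size());
      for (unsigned I = 0, E = SUnits.size(); I != E; ++I) {
        llvm::BitVector Added(SUnits.size());
        for (const llvm::SDep &SI : SUnits[I].Succs) {
          unsigned N = SI.getSUnit()->NodeNum;
          if (N < SUnits.size() && !Added.test(N)) { // skip boundary nodes, duplicates
            AdjK[I].push_back(N);
            Added.set(N);
          }
        }
      }
      return AdjK;
    }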
|
MacroFusion.cpp
  124  for (SUnit &SU : DAG.SUnits) {   in fuseInstructionPair()
  170  // For each of the SUnits in the scheduling block, try to fuse the instr in   in scheduleAdjacentImpl()
  172  for (SUnit &ISU : DAG->SUnits)   in scheduleAdjacentImpl()
|
PostRASchedulerList.cpp
  392  AntiDepBreak->BreakAntiDependencies(SUnits, RegionBegin, RegionEnd,   in schedule()
  414  AvailableQueue.initNodes(SUnits);   in schedule()
  531  for (SUnit &SUnit : SUnits) {   in ListScheduleTopDown()
  546  Sequence.reserve(SUnits.size());   in ListScheduleTopDown()
|
ScheduleDAGInstrs.cpp
  119   … Type::getVoidTy(mf.getFunction().getContext()))), Topo(SUnits, &ExitSU) {   in ScheduleDAGInstrs()
  581   SUnits.reserve(NumRegionInstrs);   in initSUnits()
  759   PDiffs->init(SUnits.size());   in buildSchedGraph()
  1082  SUnit *newBarrierChain = &SUnits[*(NodeNums.end() - N)];   in reduceHugeMemNodeMaps()
  1193  for (const SUnit &SU : SUnits)   in dump()
  1469  void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {   in compute() argument
  1474  for (const SUnit &SU : SUnits) {   in compute()
|
CriticalAntiDepBreaker.h
  82  unsigned BreakAntiDependencies(const std::vector<SUnit> &SUnits,
|
CriticalAntiDepBreaker.cpp
  437  BreakAntiDependencies(const std::vector<SUnit> &SUnits,   in BreakAntiDependencies() argument
  444  if (SUnits.empty()) return 0;   in BreakAntiDependencies()
  454  for (const SUnit &SU : SUnits) {   in BreakAntiDependencies()
|
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ |
ScheduleDAGSDNodes.cpp
  72   if (!SUnits.empty())   in newSUnit()
  73   Addr = &SUnits[0];   in newSUnit()
  75   SUnits.emplace_back(N, (unsigned)SUnits.size());   in newSUnit()
  76   assert((Addr == nullptr || Addr == &SUnits[0]) &&   in newSUnit()
  78   SUnits.back().OrigNode = &SUnits.back();   in newSUnit()
  79   SUnit *SU = &SUnits.back();   in newSUnit()
  344  SUnits.reserve(NumNodes * 2);   in BuildSchedUnits()
  436  SUnit *SrcSU = &SUnits[SrcN->getNodeId()];   in BuildSchedUnits()
  449  for (SUnit &SU : SUnits) {   in AddSchedEdges()
  481  SUnit *OpSU = &SUnits[OpN->getNodeId()];   in AddSchedEdges()
  [all …]
|
ScheduleDAGVLIW.cpp
  49   /// AvailableQueue - The priority queue to use for the available SUnits.
  96   AvailableQueue->initNodes(SUnits);   in Schedule()
  168  for (SUnit &SU : SUnits) {   in listScheduleTopDown()
  179  Sequence.reserve(SUnits.size());   in listScheduleTopDown()
|
ScheduleDAGRRList.cpp
  193   AvailableQueue(availqueue), Topo(SUnits, nullptr) {   in ScheduleDAGRRList()
  277   unsigned NumSUnits = SUnits.size();   in CreateNewSUnit()
  287   unsigned NumSUnits = SUnits.size();   in CreateClone()
  378   AvailableQueue->initNodes(SUnits);   in Schedule()
  589   SUnit *Def = &SUnits[N->getNodeId()];   in ReleasePredecessors()
  1002  LoadSU = &SUnits[LoadNode->getNodeId()];   in TryUnfoldSU()
  1020  NewSU = &SUnits[N->getNodeId()];   in TryUnfoldSU()
  1609  if (!SUnits.empty()) {   in ListScheduleBottomUp()
  1610  SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];   in ListScheduleBottomUp()
  1618  Sequence.reserve(SUnits.size());   in ListScheduleBottomUp()
  [all …]
|
ScheduleDAGFast.cpp
  266  LoadSU = &SUnits[LoadNode->getNodeId()];   in CopyAndMoveSuccessors()
  541  if (!SUnits.empty()) {   in ListScheduleBottomUp()
  542  SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];   in ListScheduleBottomUp()
  552  Sequence.reserve(SUnits.size());   in ListScheduleBottomUp()
|
ResourcePriorityQueue.cpp
  164  SUnits = &sunits;   in initNodes()
  165  NumNodesSolelyBlocking.resize(SUnits->size(), 0);   in initNodes()
  167  for (SUnit &SU : *SUnits) {   in initNodes()
|