//===- ResourcePriorityQueue.cpp - A DFA-oriented priority queue -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the ResourcePriorityQueue class, which is a
// SchedulingPriorityQueue that prioritizes instructions using DFA state to
// reduce the length of the critical path through the basic block
// on VLIW platforms.
// The scheduler is basically a top-down adaptable list scheduler with DFA
// resource tracking added to the cost function.
// The DFA is queried as a state machine to model "packets/bundles" during
// scheduling. Currently packets/bundles are discarded at the end of
// scheduling, affecting only the order of instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/ResourcePriorityQueue.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "scheduler"

static cl::opt<bool> DisableDFASched("disable-dfa-sched", cl::Hidden,
                                     cl::ZeroOrMore, cl::init(false),
                                     cl::desc("Disable use of DFA during scheduling"));

static cl::opt<int> RegPressureThreshold(
    "dfa-sched-reg-pressure-threshold", cl::Hidden, cl::ZeroOrMore, cl::init(5),
    cl::desc("Track reg pressure and switch priority to in-depth"));

ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
    : Picker(this), InstrItins(IS->MF->getSubtarget().getInstrItineraryData()) {
  const TargetSubtargetInfo &STI = IS->MF->getSubtarget();
  TRI = STI.getRegisterInfo();
  TLI = IS->TLI;
  TII = STI.getInstrInfo();
  ResourcesModel.reset(TII->CreateTargetScheduleState(STI));
  // This hard requirement could be relaxed, but for now
  // do not let it proceed.
  assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");

  unsigned NumRC = TRI->getNumRegClasses();
  RegLimit.resize(NumRC);
  RegPressure.resize(NumRC);
  std::fill(RegLimit.begin(), RegLimit.end(), 0);
  std::fill(RegPressure.begin(), RegPressure.end(), 0);
  for (const TargetRegisterClass *RC : TRI->regclasses())
    RegLimit[RC->getID()] = TRI->getRegPressureLimit(RC, *IS->MF);

  ParallelLiveRanges = 0;
  HorizontalVerticalBalance = 0;
}

unsigned
ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
  unsigned NumberDeps = 0;
  for (SDep &Pred : SU->Preds) {
    if (Pred.isCtrl())
      continue;

    SUnit *PredSU = Pred.getSUnit();
    const SDNode *ScegN = PredSU->getNode();

    if (!ScegN)
      continue;

    // If the value comes in via CopyFromReg, it is probably
    // live into this BB from outside.
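    // Non-machine nodes are classified by opcode in the switch below; machine
    // nodes are counted when any of their result values belongs to register
    // class RCId.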
    switch (ScegN->getOpcode()) {
    default:                  break;
    case ISD::TokenFactor:    break;
    case ISD::CopyFromReg:    NumberDeps++;  break;
    case ISD::CopyToReg:      break;
    case ISD::INLINEASM:      break;
    case ISD::INLINEASM_BR:   break;
    }
    if (!ScegN->isMachineOpcode())
      continue;

    for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
      MVT VT = ScegN->getSimpleValueType(i);
      if (TLI->isTypeLegal(VT)
          && (TLI->getRegClassFor(VT)->getID() == RCId)) {
        NumberDeps++;
        break;
      }
    }
  }
  return NumberDeps;
}

unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
                                                    unsigned RCId) {
  unsigned NumberDeps = 0;
  for (const SDep &Succ : SU->Succs) {
    if (Succ.isCtrl())
      continue;

    SUnit *SuccSU = Succ.getSUnit();
    const SDNode *ScegN = SuccSU->getNode();
    if (!ScegN)
      continue;

    // If value is passed to CopyToReg, it is probably
    // live outside BB.
    switch (ScegN->getOpcode()) {
    default:                  break;
    case ISD::TokenFactor:    break;
    case ISD::CopyFromReg:    break;
    case ISD::CopyToReg:      NumberDeps++;  break;
    case ISD::INLINEASM:      break;
    case ISD::INLINEASM_BR:   break;
    }
    if (!ScegN->isMachineOpcode())
      continue;

    for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
      const SDValue &Op = ScegN->getOperand(i);
      MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
      if (TLI->isTypeLegal(VT)
          && (TLI->getRegClassFor(VT)->getID() == RCId)) {
        NumberDeps++;
        break;
      }
    }
  }
  return NumberDeps;
}

static unsigned numberCtrlDepsInSU(SUnit *SU) {
  unsigned NumberDeps = 0;
  for (const SDep &Succ : SU->Succs)
    if (Succ.isCtrl())
      NumberDeps++;

  return NumberDeps;
}

static unsigned numberCtrlPredInSU(SUnit *SU) {
  unsigned NumberDeps = 0;
  for (SDep &Pred : SU->Preds)
    if (Pred.isCtrl())
      NumberDeps++;

  return NumberDeps;
}

///
/// Initialize nodes.
///
void ResourcePriorityQueue::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  NumNodesSolelyBlocking.resize(SUnits->size(), 0);

  for (SUnit &SU : *SUnits) {
    initNumRegDefsLeft(&SU);
    SU.NodeQueueId = 0;
  }
}

/// This heuristic is used if DFA scheduling is not desired
/// for some VLIW platform.
bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;

  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more other nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return LHSNum < RHSNum;
}


/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = nullptr;
  for (const SDep &Pred : SU->Preds) {
    SUnit &PredSU = *Pred.getSUnit();
    if (!PredSU.isScheduled) {
      // We found an available, but not scheduled, predecessor.  If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &PredSU)
        return nullptr;
      OnlyAvailablePred = &PredSU;
    }
  }
  return OnlyAvailablePred;
}

void ResourcePriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node.  Count the number of nodes
  // that this node is the sole unscheduled predecessor for.
  unsigned NumNodesBlocking = 0;
  for (const SDep &Succ : SU->Succs)
    if (getSingleUnscheduledPred(Succ.getSUnit()) == SU)
      ++NumNodesBlocking;

  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
  Queue.push_back(SU);
}

/// Check if scheduling of this SU is possible
/// in the current packet.
bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
  if (!SU || !SU->getNode())
    return false;

  // If this is a compound instruction,
  // it is likely to be a call. Do not delay it.
  if (SU->getNode()->getGluedNode())
    return true;

  // First see if the pipeline could receive this instruction
  // in the current cycle.
  if (SU->getNode()->isMachineOpcode())
    switch (SU->getNode()->getMachineOpcode()) {
    default:
      if (!ResourcesModel->canReserveResources(&TII->get(
          SU->getNode()->getMachineOpcode())))
        return false;
      break;
    case TargetOpcode::EXTRACT_SUBREG:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
    case TargetOpcode::IMPLICIT_DEF:
      break;
    }

  // Now check that this SU has no dependencies on instructions
  // already in the packet.
  for (const SUnit *S : Packet)
    for (const SDep &Succ : S->Succs) {
      // Since we do not add pseudos to packets, might as well
      // ignore order deps.
      if (Succ.isCtrl())
        continue;

      if (Succ.getSUnit() == SU)
        return false;
    }

  return true;
}

/// Keep track of available resources.
void ResourcePriorityQueue::reserveResources(SUnit *SU) {
  // If this SU does not fit in the packet,
  // start a new one.
  if (!isResourceAvailable(SU) || SU->getNode()->getGluedNode()) {
    ResourcesModel->clearResources();
    Packet.clear();
  }

  if (SU->getNode() && SU->getNode()->isMachineOpcode()) {
    switch (SU->getNode()->getMachineOpcode()) {
    default:
      ResourcesModel->reserveResources(&TII->get(
          SU->getNode()->getMachineOpcode()));
      break;
    case TargetOpcode::EXTRACT_SUBREG:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
    case TargetOpcode::IMPLICIT_DEF:
      break;
    }
    Packet.push_back(SU);
  }
  // Forcefully end packet for PseudoOps.
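  // (Nodes with no machine opcode have no DFA resources to reserve, so clear
  // the in-flight packet and restart resource tracking.)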
  else {
    ResourcesModel->clearResources();
    Packet.clear();
  }

  // If the packet is now full, reset the state so in the next cycle
  // we start fresh.
  if (Packet.size() >= InstrItins->SchedModel.IssueWidth) {
    ResourcesModel->clearResources();
    Packet.clear();
  }
}

int ResourcePriorityQueue::rawRegPressureDelta(SUnit *SU, unsigned RCId) {
  int RegBalance = 0;

  if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
    return RegBalance;

  // Gen estimate.
  for (unsigned i = 0, e = SU->getNode()->getNumValues(); i != e; ++i) {
    MVT VT = SU->getNode()->getSimpleValueType(i);
    if (TLI->isTypeLegal(VT)
        && TLI->getRegClassFor(VT)
        && TLI->getRegClassFor(VT)->getID() == RCId)
      RegBalance += numberRCValSuccInSU(SU, RCId);
  }
  // Kill estimate.
  for (unsigned i = 0, e = SU->getNode()->getNumOperands(); i != e; ++i) {
    const SDValue &Op = SU->getNode()->getOperand(i);
    MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
    if (isa<ConstantSDNode>(Op.getNode()))
      continue;

    if (TLI->isTypeLegal(VT) && TLI->getRegClassFor(VT)
        && TLI->getRegClassFor(VT)->getID() == RCId)
      RegBalance -= numberRCValPredInSU(SU, RCId);
  }
  return RegBalance;
}

/// Estimates the change in reg pressure from this SU.
/// It is achieved by trivial tracking of defined
/// and used vregs in dependent instructions.
/// The RawPressure flag makes this function ignore
/// existing reg file sizes and report the raw def/use
/// balance.
int ResourcePriorityQueue::regPressureDelta(SUnit *SU, bool RawPressure) {
  int RegBalance = 0;

  if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
    return RegBalance;

  if (RawPressure) {
    for (const TargetRegisterClass *RC : TRI->regclasses())
      RegBalance += rawRegPressureDelta(SU, RC->getID());
  }
  else {
    for (const TargetRegisterClass *RC : TRI->regclasses()) {
      if ((RegPressure[RC->getID()] +
           rawRegPressureDelta(SU, RC->getID()) > 0) &&
          (RegPressure[RC->getID()] +
           rawRegPressureDelta(SU, RC->getID()) >= RegLimit[RC->getID()]))
        RegBalance += rawRegPressureDelta(SU, RC->getID());
    }
  }

  return RegBalance;
}

// Constants used to denote the relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;
static const unsigned PriorityTwo = 50;
static const unsigned PriorityThree = 15;
static const unsigned PriorityFour = 5;
static const unsigned ScaleOne = 20;
static const unsigned ScaleTwo = 10;
static const unsigned ScaleThree = 5;
static const unsigned FactorOne = 2;

/// Returns a single number reflecting the benefit of scheduling SU
/// in the current cycle.
int ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
  // Initial trivial priority.
  int ResCount = 1;

  // Do not waste time on a node that is already scheduled.
  if (SU->isScheduled)
    return ResCount;

  // Forced priority is high.
  if (SU->isScheduleHigh)
    ResCount += PriorityOne;

  // Adaptable scheduling:
  // a small, but very parallel
  // region, where reg pressure is an issue.
  if (HorizontalVerticalBalance > RegPressureThreshold) {
    // Critical path first.
    ResCount += (SU->getHeight() * ScaleTwo);
    // If resources are available for it, multiply the
    // chance of scheduling.
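    // (FactorOne is 2, so this shifts the accumulated cost left by two
    // bits, i.e. a 4x boost.)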
    if (isResourceAvailable(SU))
      ResCount <<= FactorOne;

    // Consider the change to reg pressure from scheduling
    // this SU.
    ResCount -= (regPressureDelta(SU, true) * ScaleOne);
  }
  // Default heuristic, greedy and
  // critical path driven.
  else {
    // Critical path first.
    ResCount += (SU->getHeight() * ScaleTwo);
    // Now see how many instructions are blocked by this SU.
    ResCount += (NumNodesSolelyBlocking[SU->NodeNum] * ScaleTwo);
    // If resources are available for it, multiply the
    // chance of scheduling.
    if (isResourceAvailable(SU))
      ResCount <<= FactorOne;

    ResCount -= (regPressureDelta(SU) * ScaleTwo);
  }

  // These are platform-specific things.
  // They will need to go into the back end
  // and be accessed from here via a hook.
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
    if (N->isMachineOpcode()) {
      const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
      if (TID.isCall())
        ResCount += (PriorityTwo + (ScaleThree * N->getNumValues()));
    }
    else
      switch (N->getOpcode()) {
      default:  break;
      case ISD::TokenFactor:
      case ISD::CopyFromReg:
      case ISD::CopyToReg:
        ResCount += PriorityFour;
        break;

      case ISD::INLINEASM:
      case ISD::INLINEASM_BR:
        ResCount += PriorityThree;
        break;
      }
  }
  return ResCount;
}


/// Main resource tracking point.
void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
  // Use a NULL entry as an event marker to reset
  // the DFA state.
  if (!SU) {
    ResourcesModel->clearResources();
    Packet.clear();
    return;
  }

  const SDNode *ScegN = SU->getNode();
  // Update reg pressure tracking.
  // First update the current node.
  if (ScegN->isMachineOpcode()) {
    // Estimate generated regs.
    for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
      MVT VT = ScegN->getSimpleValueType(i);

      if (TLI->isTypeLegal(VT)) {
        const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
        if (RC)
          RegPressure[RC->getID()] += numberRCValSuccInSU(SU, RC->getID());
      }
    }
    // Estimate killed regs.
    for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
      const SDValue &Op = ScegN->getOperand(i);
      MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());

      if (TLI->isTypeLegal(VT)) {
        const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
        if (RC) {
          if (RegPressure[RC->getID()] >
              (numberRCValPredInSU(SU, RC->getID())))
            RegPressure[RC->getID()] -= numberRCValPredInSU(SU, RC->getID());
          else RegPressure[RC->getID()] = 0;
        }
      }
    }
    for (SDep &Pred : SU->Preds) {
      if (Pred.isCtrl() || (Pred.getSUnit()->NumRegDefsLeft == 0))
        continue;
      --Pred.getSUnit()->NumRegDefsLeft;
    }
  }

  // Reserve resources for this SU.
  reserveResources(SU);

  // Adjust the number of parallel live ranges.
  // The heuristic is simple: a node with no data successors reduces
  // the number of live ranges; all others increase it.
  unsigned NumberNonControlDeps = 0;

  for (const SDep &Succ : SU->Succs) {
    adjustPriorityOfUnscheduledPreds(Succ.getSUnit());
    if (!Succ.isCtrl())
      NumberNonControlDeps++;
  }

  if (!NumberNonControlDeps) {
    if (ParallelLiveRanges >= SU->NumPreds)
      ParallelLiveRanges -= SU->NumPreds;
    else
      ParallelLiveRanges = 0;
  }
  else
    ParallelLiveRanges += SU->NumRegDefsLeft;

  // Track parallel live chains.
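  // Each data successor opens a new live value (horizontal growth), while
  // each data predecessor consumed here closes one (vertical growth). Once
  // this balance exceeds RegPressureThreshold, SUSchedulingCost switches to
  // the pressure-aware heuristic.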
  HorizontalVerticalBalance += (SU->Succs.size() - numberCtrlDepsInSU(SU));
  HorizontalVerticalBalance -= (SU->Preds.size() - numberCtrlPredInSU(SU));
}

void ResourcePriorityQueue::initNumRegDefsLeft(SUnit *SU) {
  unsigned NodeNumDefs = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
    if (N->isMachineOpcode()) {
      const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
      // No register need be allocated for this.
      if (N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
        NodeNumDefs = 0;
        break;
      }
      NodeNumDefs = std::min(N->getNumValues(), TID.getNumDefs());
    }
    else
      switch (N->getOpcode()) {
      default:  break;
      case ISD::CopyFromReg:
        NodeNumDefs++;
        break;
      case ISD::INLINEASM:
      case ISD::INLINEASM_BR:
        NodeNumDefs++;
        break;
      }

  SU->NumRegDefsLeft = NodeNumDefs;
}

/// adjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled.  If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet.  If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void ResourcePriorityQueue::adjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable)
    return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue.  First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}


/// Main access point - returns the next instruction
/// to be placed in the scheduling sequence.
SUnit *ResourcePriorityQueue::pop() {
  if (empty())
    return nullptr;

  std::vector<SUnit *>::iterator Best = Queue.begin();
  if (!DisableDFASched) {
    int BestCost = SUSchedulingCost(*Best);
    for (auto I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I) {
      if (SUSchedulingCost(*I) > BestCost) {
        BestCost = SUSchedulingCost(*I);
        Best = I;
      }
    }
  }
  // Use the default TD scheduling mechanism.
  else {
    for (auto I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I)
      if (Picker(*Best, *I))
        Best = I;
  }

  SUnit *V = *Best;
  if (Best != std::prev(Queue.end()))
    std::swap(*Best, Queue.back());

  Queue.pop_back();

  return V;
}


void ResourcePriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = find(Queue, SU);
  if (I != std::prev(Queue.end()))
    std::swap(*I, Queue.back());

  Queue.pop_back();
}