xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/GCNSchedStrategy.cpp (revision 5ffd83dbcc34f10e07f6d3e968ae6365869615f4)
10b57cec5SDimitry Andric //===-- GCNSchedStrategy.cpp - GCN Scheduler Strategy ---------------------===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric /// \file
100b57cec5SDimitry Andric /// This contains a MachineSchedStrategy implementation for maximizing wave
110b57cec5SDimitry Andric /// occupancy on GCN hardware.
120b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
130b57cec5SDimitry Andric 
140b57cec5SDimitry Andric #include "GCNSchedStrategy.h"
150b57cec5SDimitry Andric #include "AMDGPUSubtarget.h"
160b57cec5SDimitry Andric #include "SIInstrInfo.h"
170b57cec5SDimitry Andric #include "SIMachineFunctionInfo.h"
180b57cec5SDimitry Andric #include "SIRegisterInfo.h"
19480093f4SDimitry Andric #include "Utils/AMDGPUBaseInfo.h"
200b57cec5SDimitry Andric #include "llvm/CodeGen/RegisterClassInfo.h"
210b57cec5SDimitry Andric #include "llvm/Support/MathExtras.h"
220b57cec5SDimitry Andric 
230b57cec5SDimitry Andric #define DEBUG_TYPE "machine-scheduler"
240b57cec5SDimitry Andric 
250b57cec5SDimitry Andric using namespace llvm;
260b57cec5SDimitry Andric 
270b57cec5SDimitry Andric GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
280b57cec5SDimitry Andric     const MachineSchedContext *C) :
290b57cec5SDimitry Andric     GenericScheduler(C), TargetOccupancy(0), MF(nullptr) { }
300b57cec5SDimitry Andric 
310b57cec5SDimitry Andric void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
320b57cec5SDimitry Andric   GenericScheduler::initialize(DAG);
330b57cec5SDimitry Andric 
340b57cec5SDimitry Andric   const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
350b57cec5SDimitry Andric 
360b57cec5SDimitry Andric   MF = &DAG->MF;
370b57cec5SDimitry Andric 
380b57cec5SDimitry Andric   const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
390b57cec5SDimitry Andric 
400b57cec5SDimitry Andric   // FIXME: This is also necessary, because some passes that run after
410b57cec5SDimitry Andric   // scheduling and before regalloc increase register pressure.
420b57cec5SDimitry Andric   const int ErrorMargin = 3;
430b57cec5SDimitry Andric 
440b57cec5SDimitry Andric   SGPRExcessLimit = Context->RegClassInfo
450b57cec5SDimitry Andric     ->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass) - ErrorMargin;
460b57cec5SDimitry Andric   VGPRExcessLimit = Context->RegClassInfo
470b57cec5SDimitry Andric     ->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass) - ErrorMargin;
480b57cec5SDimitry Andric   if (TargetOccupancy) {
490b57cec5SDimitry Andric     SGPRCriticalLimit = ST.getMaxNumSGPRs(TargetOccupancy, true);
500b57cec5SDimitry Andric     VGPRCriticalLimit = ST.getMaxNumVGPRs(TargetOccupancy);
510b57cec5SDimitry Andric   } else {
520b57cec5SDimitry Andric     SGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
53*5ffd83dbSDimitry Andric         AMDGPU::RegisterPressureSets::SReg_32);
540b57cec5SDimitry Andric     VGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
55*5ffd83dbSDimitry Andric         AMDGPU::RegisterPressureSets::VGPR_32);
560b57cec5SDimitry Andric   }
570b57cec5SDimitry Andric 
580b57cec5SDimitry Andric   SGPRCriticalLimit -= ErrorMargin;
590b57cec5SDimitry Andric   VGPRCriticalLimit -= ErrorMargin;
600b57cec5SDimitry Andric }
610b57cec5SDimitry Andric 
620b57cec5SDimitry Andric void GCNMaxOccupancySchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
630b57cec5SDimitry Andric                                      bool AtTop, const RegPressureTracker &RPTracker,
640b57cec5SDimitry Andric                                      const SIRegisterInfo *SRI,
650b57cec5SDimitry Andric                                      unsigned SGPRPressure,
660b57cec5SDimitry Andric                                      unsigned VGPRPressure) {
670b57cec5SDimitry Andric 
680b57cec5SDimitry Andric   Cand.SU = SU;
690b57cec5SDimitry Andric   Cand.AtTop = AtTop;
700b57cec5SDimitry Andric 
710b57cec5SDimitry Andric   // getDownwardPressure() and getUpwardPressure() make temporary changes to
720b57cec5SDimitry Andric   // the tracker, so we need to pass those function a non-const copy.
730b57cec5SDimitry Andric   RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
740b57cec5SDimitry Andric 
758bcb0991SDimitry Andric   Pressure.clear();
768bcb0991SDimitry Andric   MaxPressure.clear();
770b57cec5SDimitry Andric 
780b57cec5SDimitry Andric   if (AtTop)
790b57cec5SDimitry Andric     TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
800b57cec5SDimitry Andric   else {
810b57cec5SDimitry Andric     // FIXME: I think for bottom up scheduling, the register pressure is cached
820b57cec5SDimitry Andric     // and can be retrieved by DAG->getPressureDif(SU).
830b57cec5SDimitry Andric     TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
840b57cec5SDimitry Andric   }
850b57cec5SDimitry Andric 
86*5ffd83dbSDimitry Andric   unsigned NewSGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
87*5ffd83dbSDimitry Andric   unsigned NewVGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
880b57cec5SDimitry Andric 
890b57cec5SDimitry Andric   // If two instructions increase the pressure of different register sets
900b57cec5SDimitry Andric   // by the same amount, the generic scheduler will prefer to schedule the
910b57cec5SDimitry Andric   // instruction that increases the set with the least amount of registers,
920b57cec5SDimitry Andric   // which in our case would be SGPRs.  This is rarely what we want, so
930b57cec5SDimitry Andric   // when we report excess/critical register pressure, we do it either
940b57cec5SDimitry Andric   // only for VGPRs or only for SGPRs.
950b57cec5SDimitry Andric 
960b57cec5SDimitry Andric   // FIXME: Better heuristics to determine whether to prefer SGPRs or VGPRs.
970b57cec5SDimitry Andric   const unsigned MaxVGPRPressureInc = 16;
980b57cec5SDimitry Andric   bool ShouldTrackVGPRs = VGPRPressure + MaxVGPRPressureInc >= VGPRExcessLimit;
990b57cec5SDimitry Andric   bool ShouldTrackSGPRs = !ShouldTrackVGPRs && SGPRPressure >= SGPRExcessLimit;
1000b57cec5SDimitry Andric 
1010b57cec5SDimitry Andric 
1020b57cec5SDimitry Andric   // FIXME: We have to enter REG-EXCESS before we reach the actual threshold
1030b57cec5SDimitry Andric   // to increase the likelihood we don't go over the limits.  We should improve
1040b57cec5SDimitry Andric   // the analysis to look through dependencies to find the path with the least
1050b57cec5SDimitry Andric   // register pressure.
1060b57cec5SDimitry Andric 
1078bcb0991SDimitry Andric   // We only need to update the RPDelta for instructions that increase register
1088bcb0991SDimitry Andric   // pressure. Instructions that decrease or keep reg pressure the same will be
1098bcb0991SDimitry Andric   // marked as RegExcess in tryCandidate() when they are compared with
1108bcb0991SDimitry Andric   // instructions that increase the register pressure.
1110b57cec5SDimitry Andric   if (ShouldTrackVGPRs && NewVGPRPressure >= VGPRExcessLimit) {
112*5ffd83dbSDimitry Andric     Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::VGPR_32);
1130b57cec5SDimitry Andric     Cand.RPDelta.Excess.setUnitInc(NewVGPRPressure - VGPRExcessLimit);
1140b57cec5SDimitry Andric   }
1150b57cec5SDimitry Andric 
1160b57cec5SDimitry Andric   if (ShouldTrackSGPRs && NewSGPRPressure >= SGPRExcessLimit) {
117*5ffd83dbSDimitry Andric     Cand.RPDelta.Excess = PressureChange(AMDGPU::RegisterPressureSets::SReg_32);
1180b57cec5SDimitry Andric     Cand.RPDelta.Excess.setUnitInc(NewSGPRPressure - SGPRExcessLimit);
1190b57cec5SDimitry Andric   }
1200b57cec5SDimitry Andric 
1210b57cec5SDimitry Andric   // Register pressure is considered 'CRITICAL' if it is approaching a value
1220b57cec5SDimitry Andric   // that would reduce the wave occupancy for the execution unit.  When
1230b57cec5SDimitry Andric   // register pressure is 'CRITICAL', increading SGPR and VGPR pressure both
1240b57cec5SDimitry Andric   // has the same cost, so we don't need to prefer one over the other.
1250b57cec5SDimitry Andric 
1260b57cec5SDimitry Andric   int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit;
1270b57cec5SDimitry Andric   int VGPRDelta = NewVGPRPressure - VGPRCriticalLimit;
1280b57cec5SDimitry Andric 
1290b57cec5SDimitry Andric   if (SGPRDelta >= 0 || VGPRDelta >= 0) {
1300b57cec5SDimitry Andric     if (SGPRDelta > VGPRDelta) {
131*5ffd83dbSDimitry Andric       Cand.RPDelta.CriticalMax =
132*5ffd83dbSDimitry Andric         PressureChange(AMDGPU::RegisterPressureSets::SReg_32);
1330b57cec5SDimitry Andric       Cand.RPDelta.CriticalMax.setUnitInc(SGPRDelta);
1340b57cec5SDimitry Andric     } else {
135*5ffd83dbSDimitry Andric       Cand.RPDelta.CriticalMax =
136*5ffd83dbSDimitry Andric         PressureChange(AMDGPU::RegisterPressureSets::VGPR_32);
1370b57cec5SDimitry Andric       Cand.RPDelta.CriticalMax.setUnitInc(VGPRDelta);
1380b57cec5SDimitry Andric     }
1390b57cec5SDimitry Andric   }
1400b57cec5SDimitry Andric }
1410b57cec5SDimitry Andric 
1420b57cec5SDimitry Andric // This function is mostly cut and pasted from
1430b57cec5SDimitry Andric // GenericScheduler::pickNodeFromQueue()
1440b57cec5SDimitry Andric void GCNMaxOccupancySchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
1450b57cec5SDimitry Andric                                          const CandPolicy &ZonePolicy,
1460b57cec5SDimitry Andric                                          const RegPressureTracker &RPTracker,
1470b57cec5SDimitry Andric                                          SchedCandidate &Cand) {
1480b57cec5SDimitry Andric   const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
1490b57cec5SDimitry Andric   ArrayRef<unsigned> Pressure = RPTracker.getRegSetPressureAtPos();
150*5ffd83dbSDimitry Andric   unsigned SGPRPressure = Pressure[AMDGPU::RegisterPressureSets::SReg_32];
151*5ffd83dbSDimitry Andric   unsigned VGPRPressure = Pressure[AMDGPU::RegisterPressureSets::VGPR_32];
1520b57cec5SDimitry Andric   ReadyQueue &Q = Zone.Available;
1530b57cec5SDimitry Andric   for (SUnit *SU : Q) {
1540b57cec5SDimitry Andric 
1550b57cec5SDimitry Andric     SchedCandidate TryCand(ZonePolicy);
1560b57cec5SDimitry Andric     initCandidate(TryCand, SU, Zone.isTop(), RPTracker, SRI,
1570b57cec5SDimitry Andric                   SGPRPressure, VGPRPressure);
1580b57cec5SDimitry Andric     // Pass SchedBoundary only when comparing nodes from the same boundary.
1590b57cec5SDimitry Andric     SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
1600b57cec5SDimitry Andric     GenericScheduler::tryCandidate(Cand, TryCand, ZoneArg);
1610b57cec5SDimitry Andric     if (TryCand.Reason != NoCand) {
1620b57cec5SDimitry Andric       // Initialize resource delta if needed in case future heuristics query it.
1630b57cec5SDimitry Andric       if (TryCand.ResDelta == SchedResourceDelta())
1640b57cec5SDimitry Andric         TryCand.initResourceDelta(Zone.DAG, SchedModel);
1650b57cec5SDimitry Andric       Cand.setBest(TryCand);
1668bcb0991SDimitry Andric       LLVM_DEBUG(traceCandidate(Cand));
1670b57cec5SDimitry Andric     }
1680b57cec5SDimitry Andric   }
1690b57cec5SDimitry Andric }
1700b57cec5SDimitry Andric 
// This function is mostly cut and pasted from
// GenericScheduler::pickNodeBidirectional().
// Picks the better of the cached bottom-up and top-down candidates,
// refreshing a cached candidate when its policy went stale or it was
// already scheduled.
SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    // Under -verify-misched, re-run the pick and check that the cached
    // candidate is the one a fresh pick would choose.
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    // Same staleness verification as for the bottom candidate above.
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
           "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  LLVM_DEBUG(dbgs() << "Top Cand: "; traceCandidate(TopCand);
             dbgs() << "Bot Cand: "; traceCandidate(BotCand););
  // Start from the bottom candidate; tryCandidate() sets TopCand.Reason only
  // when the top candidate wins the comparison.
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  GenericScheduler::tryCandidate(Cand, TopCand, nullptr);
  if (TopCand.Reason != NoCand) {
    Cand.setBest(TopCand);
  }
  LLVM_DEBUG(dbgs() << "Picking: "; traceCandidate(Cand););

  IsTopNode = Cand.AtTop;
  return Cand.SU;
}
2470b57cec5SDimitry Andric 
// This function is mostly cut and pasted from
// GenericScheduler::pickNode().
// Returns the next SUnit to schedule (nullptr when the region is done) and
// reports via IsTopNode which zone it was taken from.
SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    // Region exhausted: both zones must have drained their queues.
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  // Loop until a not-yet-scheduled unit is found (a cached candidate may
  // already have been scheduled).
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  // The unit may still sit in one or both ready queues; remove it.
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}
2920b57cec5SDimitry Andric 
2930b57cec5SDimitry Andric GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
2940b57cec5SDimitry Andric                         std::unique_ptr<MachineSchedStrategy> S) :
2950b57cec5SDimitry Andric   ScheduleDAGMILive(C, std::move(S)),
2960b57cec5SDimitry Andric   ST(MF.getSubtarget<GCNSubtarget>()),
2970b57cec5SDimitry Andric   MFI(*MF.getInfo<SIMachineFunctionInfo>()),
2980b57cec5SDimitry Andric   StartingOccupancy(MFI.getOccupancy()),
299*5ffd83dbSDimitry Andric   MinOccupancy(StartingOccupancy), Stage(Collect), RegionIdx(0) {
3000b57cec5SDimitry Andric 
3010b57cec5SDimitry Andric   LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
3020b57cec5SDimitry Andric }
3030b57cec5SDimitry Andric 
// Schedule the current region, then evaluate the resulting register pressure
// and occupancy. If the new schedule lowers achievable occupancy (and the
// function is not memory bound enough to tolerate it), the original
// instruction order is restored and the region is flagged for rescheduling.
void GCNScheduleDAGMILive::schedule() {
  if (Stage == Collect) {
    // Just record regions at the first pass; actual scheduling happens in
    // later stages.
    Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
    return;
  }

  // Snapshot the current instruction order so the schedule can be reverted.
  std::vector<MachineInstr*> Unsched;
  Unsched.reserve(NumRegionInstrs);
  for (auto &I : *this) {
    Unsched.push_back(&I);
  }

  GCNRegPressure PressureBefore;
  if (LIS) {
    // Pressure for this region was cached (see computeBlockPressure()).
    PressureBefore = Pressure[RegionIdx];

    LLVM_DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
               GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
               dbgs() << "Region live-in pressure:  ";
               llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
               dbgs() << "Region register pressure: ";
               PressureBefore.print(dbgs()));
  }

  ScheduleDAGMILive::schedule();
  // Scheduling may have moved the region boundaries; re-record them and
  // tentatively clear the reschedule flag for this region.
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);
  RescheduleRegions[RegionIdx] = false;

  if (!LIS)
    return;

  // Check the results of scheduling.
  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  auto PressureAfter = getRealRegPressure();

  LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";
             PressureAfter.print(dbgs()));

  if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
      PressureAfter.getVGPRNum() <= S.VGPRCriticalLimit) {
    // Below the critical limits: accept the schedule as-is.
    Pressure[RegionIdx] = PressureAfter;
    LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
    return;
  }
  // Occupancy achievable before/after this region, clamped to the
  // function-wide occupancy recorded in MFI.
  unsigned Occ = MFI.getOccupancy();
  unsigned WavesAfter = std::min(Occ, PressureAfter.getOccupancy(ST));
  unsigned WavesBefore = std::min(Occ, PressureBefore.getOccupancy(ST));
  LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
                    << ", after " << WavesAfter << ".\n");

  // We could not keep current target occupancy because of the just scheduled
  // region. Record new occupancy for next scheduling cycle.
  unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
  // Allow memory bound functions to drop to 4 waves if not limited by an
  // attribute.
  if (WavesAfter < WavesBefore && WavesAfter < MinOccupancy &&
      WavesAfter >= MFI.getMinAllowedOccupancy()) {
    LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "
                      << MFI.getMinAllowedOccupancy() << " waves\n");
    NewOccupancy = WavesAfter;
  }
  if (NewOccupancy < MinOccupancy) {
    MinOccupancy = NewOccupancy;
    MFI.limitOccupancy(MinOccupancy);
    LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
                      << MinOccupancy << ".\n");
  }

  // Exceeding the absolute register budget means the region will spill;
  // queue it for another scheduling attempt.
  unsigned MaxVGPRs = ST.getMaxNumVGPRs(MF);
  unsigned MaxSGPRs = ST.getMaxNumSGPRs(MF);
  if (PressureAfter.getVGPRNum() > MaxVGPRs ||
      PressureAfter.getSGPRNum() > MaxSGPRs)
    RescheduleRegions[RegionIdx] = true;

  if (WavesAfter >= MinOccupancy) {
    if (Stage == UnclusteredReschedule &&
        !PressureAfter.less(ST, PressureBefore)) {
      // The unclustered pass didn't reduce pressure; fall through to revert.
      LLVM_DEBUG(dbgs() << "Unclustered reschedule did not help.\n");
    } else if (WavesAfter > MFI.getMinWavesPerEU() ||
        PressureAfter.less(ST, PressureBefore) ||
        !RescheduleRegions[RegionIdx]) {
      // Occupancy is acceptable and pressure did not worsen: keep the
      // new schedule.
      Pressure[RegionIdx] = PressureAfter;
      return;
    } else {
      LLVM_DEBUG(dbgs() << "New pressure will result in more spilling.\n");
    }
  }

  // Restore the original instruction order captured in Unsched, updating
  // LiveIntervals and register flags as each instruction is moved back.
  LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
  RescheduleRegions[RegionIdx] = true;
  RegionEnd = RegionBegin;
  for (MachineInstr *MI : Unsched) {
    if (MI->isDebugInstr())
      continue;

    if (MI->getIterator() != RegionEnd) {
      BB->remove(MI);
      BB->insert(RegionEnd, MI);
      if (!MI->isDebugInstr()) // NOTE(review): always true -- debug
                               // instructions were skipped above.
        LIS->handleMove(*MI, true);
    }
    // Reset read-undef flags and update them later.
    for (auto &Op : MI->operands())
      if (Op.isReg() && Op.isDef())
        Op.setIsUndef(false);
    RegisterOperands RegOpers;
    RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
    if (!MI->isDebugInstr()) {
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }
    }
    RegionEnd = MI->getIterator();
    ++RegionEnd;
    LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
  }
  RegionBegin = Unsched.front()->getIterator();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);

  placeDebugValues();
}
4310b57cec5SDimitry Andric 
4320b57cec5SDimitry Andric GCNRegPressure GCNScheduleDAGMILive::getRealRegPressure() const {
4330b57cec5SDimitry Andric   GCNDownwardRPTracker RPTracker(*LIS);
4340b57cec5SDimitry Andric   RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
4350b57cec5SDimitry Andric   return RPTracker.moveMaxPressure();
4360b57cec5SDimitry Andric }
4370b57cec5SDimitry Andric 
// Compute live-in sets and max pressure for every region of MBB with a
// single downward walk of the block, instead of one walk per region.
void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
  GCNDownwardRPTracker RPTracker(*LIS);

  // If the block has the only successor then live-ins of that successor are
  // live-outs of the current block. We can reuse calculated live set if the
  // successor will be sent to scheduling past current block.
  const MachineBasicBlock *OnlySucc = nullptr;
  if (MBB->succ_size() == 1 && !(*MBB->succ_begin())->empty()) {
    SlotIndexes *Ind = LIS->getSlotIndexes();
    if (Ind->getMBBStartIdx(MBB) < Ind->getMBBStartIdx(*MBB->succ_begin()))
      OnlySucc = *MBB->succ_begin();
  }

  // Scheduler sends regions from the end of the block upwards, so find the
  // last region index (counting up from RegionIdx) that still belongs to MBB.
  size_t CurRegion = RegionIdx;
  for (size_t E = Regions.size(); CurRegion != E; ++CurRegion)
    if (Regions[CurRegion].first->getParent() != MBB)
      break;
  --CurRegion;

  // Seed the tracker either from a live set a predecessor block cached in
  // MBBLiveIns, or from the precomputed BBLiveInMap entry for this block.
  auto I = MBB->begin();
  auto LiveInIt = MBBLiveIns.find(MBB);
  if (LiveInIt != MBBLiveIns.end()) {
    auto LiveIn = std::move(LiveInIt->second);
    RPTracker.reset(*MBB->begin(), &LiveIn);
    MBBLiveIns.erase(LiveInIt);
  } else {
    auto &Rgn = Regions[CurRegion];
    I = Rgn.first;
    auto *NonDbgMI = &*skipDebugInstructionsForward(Rgn.first, Rgn.second);
    auto LRS = BBLiveInMap.lookup(NonDbgMI);
    assert(isEqual(getLiveRegsBefore(*NonDbgMI, *LIS), LRS));
    RPTracker.reset(*I, &LRS);
  }

  // Walk downward: record each region's live-ins at its first instruction
  // and its max pressure at its end, stopping once the region at RegionIdx
  // has been processed.
  for ( ; ; ) {
    I = RPTracker.getNext();

    if (Regions[CurRegion].first == I) {
      LiveIns[CurRegion] = RPTracker.getLiveRegs();
      RPTracker.clearMaxPressure();
    }

    if (Regions[CurRegion].second == I) {
      Pressure[CurRegion] = RPTracker.moveMaxPressure();
      if (CurRegion-- == RegionIdx)
        break;
    }
    RPTracker.advanceToNext();
    RPTracker.advanceBeforeNext();
  }

  // Hand the computed live-outs to an only-successor block that is still to
  // be scheduled, so it can skip the BBLiveInMap lookup above.
  if (OnlySucc) {
    if (I != MBB->end()) {
      RPTracker.advanceToNext();
      RPTracker.advance(MBB->end());
    }
    RPTracker.reset(*OnlySucc->begin(), &RPTracker.getLiveRegs());
    RPTracker.advanceBeforeNext();
    MBBLiveIns[OnlySucc] = RPTracker.moveLiveRegs();
  }
}
5000b57cec5SDimitry Andric 
5010b57cec5SDimitry Andric DenseMap<MachineInstr *, GCNRPTracker::LiveRegSet>
5020b57cec5SDimitry Andric GCNScheduleDAGMILive::getBBLiveInMap() const {
5030b57cec5SDimitry Andric   assert(!Regions.empty());
5040b57cec5SDimitry Andric   std::vector<MachineInstr *> BBStarters;
5050b57cec5SDimitry Andric   BBStarters.reserve(Regions.size());
5060b57cec5SDimitry Andric   auto I = Regions.rbegin(), E = Regions.rend();
5070b57cec5SDimitry Andric   auto *BB = I->first->getParent();
5080b57cec5SDimitry Andric   do {
5090b57cec5SDimitry Andric     auto *MI = &*skipDebugInstructionsForward(I->first, I->second);
5100b57cec5SDimitry Andric     BBStarters.push_back(MI);
5110b57cec5SDimitry Andric     do {
5120b57cec5SDimitry Andric       ++I;
5130b57cec5SDimitry Andric     } while (I != E && I->first->getParent() == BB);
5140b57cec5SDimitry Andric   } while (I != E);
5150b57cec5SDimitry Andric   return getLiveRegMap(BBStarters, false /*After*/, *LIS);
5160b57cec5SDimitry Andric }
5170b57cec5SDimitry Andric 
5180b57cec5SDimitry Andric void GCNScheduleDAGMILive::finalizeSchedule() {
5190b57cec5SDimitry Andric   GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
5200b57cec5SDimitry Andric   LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");
5210b57cec5SDimitry Andric 
5220b57cec5SDimitry Andric   LiveIns.resize(Regions.size());
5230b57cec5SDimitry Andric   Pressure.resize(Regions.size());
524*5ffd83dbSDimitry Andric   RescheduleRegions.resize(Regions.size());
525*5ffd83dbSDimitry Andric   RescheduleRegions.set();
5260b57cec5SDimitry Andric 
5270b57cec5SDimitry Andric   if (!Regions.empty())
5280b57cec5SDimitry Andric     BBLiveInMap = getBBLiveInMap();
5290b57cec5SDimitry Andric 
530*5ffd83dbSDimitry Andric   std::vector<std::unique_ptr<ScheduleDAGMutation>> SavedMutations;
531*5ffd83dbSDimitry Andric 
5320b57cec5SDimitry Andric   do {
5330b57cec5SDimitry Andric     Stage++;
5340b57cec5SDimitry Andric     RegionIdx = 0;
5350b57cec5SDimitry Andric     MachineBasicBlock *MBB = nullptr;
5360b57cec5SDimitry Andric 
537*5ffd83dbSDimitry Andric     if (Stage > InitialSchedule) {
538*5ffd83dbSDimitry Andric       if (!LIS)
539*5ffd83dbSDimitry Andric         break;
540*5ffd83dbSDimitry Andric 
5410b57cec5SDimitry Andric       // Retry function scheduling if we found resulting occupancy and it is
5420b57cec5SDimitry Andric       // lower than used for first pass scheduling. This will give more freedom
5430b57cec5SDimitry Andric       // to schedule low register pressure blocks.
5440b57cec5SDimitry Andric       // Code is partially copied from MachineSchedulerBase::scheduleRegions().
5450b57cec5SDimitry Andric 
546*5ffd83dbSDimitry Andric       if (Stage == UnclusteredReschedule) {
547*5ffd83dbSDimitry Andric         if (RescheduleRegions.none())
548*5ffd83dbSDimitry Andric           continue;
549*5ffd83dbSDimitry Andric         LLVM_DEBUG(dbgs() <<
550*5ffd83dbSDimitry Andric           "Retrying function scheduling without clustering.\n");
551*5ffd83dbSDimitry Andric       }
552*5ffd83dbSDimitry Andric 
553*5ffd83dbSDimitry Andric       if (Stage == ClusteredLowOccupancyReschedule) {
554*5ffd83dbSDimitry Andric         if (StartingOccupancy <= MinOccupancy)
5550b57cec5SDimitry Andric           break;
5560b57cec5SDimitry Andric 
5570b57cec5SDimitry Andric         LLVM_DEBUG(
5580b57cec5SDimitry Andric             dbgs()
5590b57cec5SDimitry Andric             << "Retrying function scheduling with lowest recorded occupancy "
5600b57cec5SDimitry Andric             << MinOccupancy << ".\n");
5610b57cec5SDimitry Andric 
5620b57cec5SDimitry Andric         S.setTargetOccupancy(MinOccupancy);
5630b57cec5SDimitry Andric       }
564*5ffd83dbSDimitry Andric     }
565*5ffd83dbSDimitry Andric 
566*5ffd83dbSDimitry Andric     if (Stage == UnclusteredReschedule)
567*5ffd83dbSDimitry Andric       SavedMutations.swap(Mutations);
5680b57cec5SDimitry Andric 
5690b57cec5SDimitry Andric     for (auto Region : Regions) {
570*5ffd83dbSDimitry Andric       if (Stage == UnclusteredReschedule && !RescheduleRegions[RegionIdx])
571*5ffd83dbSDimitry Andric         continue;
572*5ffd83dbSDimitry Andric 
5730b57cec5SDimitry Andric       RegionBegin = Region.first;
5740b57cec5SDimitry Andric       RegionEnd = Region.second;
5750b57cec5SDimitry Andric 
5760b57cec5SDimitry Andric       if (RegionBegin->getParent() != MBB) {
5770b57cec5SDimitry Andric         if (MBB) finishBlock();
5780b57cec5SDimitry Andric         MBB = RegionBegin->getParent();
5790b57cec5SDimitry Andric         startBlock(MBB);
580*5ffd83dbSDimitry Andric         if (Stage == InitialSchedule)
5810b57cec5SDimitry Andric           computeBlockPressure(MBB);
5820b57cec5SDimitry Andric       }
5830b57cec5SDimitry Andric 
5840b57cec5SDimitry Andric       unsigned NumRegionInstrs = std::distance(begin(), end());
5850b57cec5SDimitry Andric       enterRegion(MBB, begin(), end(), NumRegionInstrs);
5860b57cec5SDimitry Andric 
5870b57cec5SDimitry Andric       // Skip empty scheduling regions (0 or 1 schedulable instructions).
5880b57cec5SDimitry Andric       if (begin() == end() || begin() == std::prev(end())) {
5890b57cec5SDimitry Andric         exitRegion();
5900b57cec5SDimitry Andric         continue;
5910b57cec5SDimitry Andric       }
5920b57cec5SDimitry Andric 
5930b57cec5SDimitry Andric       LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
5940b57cec5SDimitry Andric       LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
5950b57cec5SDimitry Andric                         << MBB->getName() << "\n  From: " << *begin()
5960b57cec5SDimitry Andric                         << "    To: ";
5970b57cec5SDimitry Andric                  if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
5980b57cec5SDimitry Andric                  else dbgs() << "End";
5990b57cec5SDimitry Andric                  dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
6000b57cec5SDimitry Andric 
6010b57cec5SDimitry Andric       schedule();
6020b57cec5SDimitry Andric 
6030b57cec5SDimitry Andric       exitRegion();
6040b57cec5SDimitry Andric       ++RegionIdx;
6050b57cec5SDimitry Andric     }
6060b57cec5SDimitry Andric     finishBlock();
6070b57cec5SDimitry Andric 
608*5ffd83dbSDimitry Andric     if (Stage == UnclusteredReschedule)
609*5ffd83dbSDimitry Andric       SavedMutations.swap(Mutations);
610*5ffd83dbSDimitry Andric   } while (Stage != LastStage);
6110b57cec5SDimitry Andric }
612