xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp (revision 06c3fb2749bda94cb5201f81ffdb8fa6c3161b2e)
1 //===- SIInsertWaitcnts.cpp - Insert Wait Instructions --------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// Insert wait instructions for memory reads and writes.
11 ///
12 /// Memory reads and writes are issued asynchronously, so we need to insert
13 /// S_WAITCNT instructions when we want to access any of their results or
14 /// overwrite any register that's used asynchronously.
15 ///
16 /// TODO: This pass currently keeps one timeline per hardware counter. A more
17 /// finely-grained approach that keeps one timeline per event type could
18 /// sometimes get away with generating weaker s_waitcnt instructions. For
19 /// example, when both SMEM and LDS are in flight and we need to wait for
20 /// the i-th-last LDS instruction, then an lgkmcnt(i) is actually sufficient,
21 /// but the pass will currently generate a conservative lgkmcnt(0) because
22 /// multiple event types are in flight.
23 //
24 //===----------------------------------------------------------------------===//
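// For illustration only (a sketch; registers are arbitrary): given the input
//
//   buffer_load_dword v0, v1, s[0:3], 0 offen
//   v_add_f32 v2, v0, v0
//
// the add reads v0 while the VMEM load is still outstanding, so this pass
// inserts
//
//   s_waitcnt vmcnt(0)
//
// between the two instructions so that v0 holds the loaded value.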
25 
26 #include "AMDGPU.h"
27 #include "GCNSubtarget.h"
28 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
29 #include "SIMachineFunctionInfo.h"
30 #include "Utils/AMDGPUBaseInfo.h"
31 #include "llvm/ADT/MapVector.h"
32 #include "llvm/ADT/PostOrderIterator.h"
33 #include "llvm/ADT/Sequence.h"
34 #include "llvm/CodeGen/MachineLoopInfo.h"
35 #include "llvm/CodeGen/MachinePostDominators.h"
36 #include "llvm/InitializePasses.h"
37 #include "llvm/Support/DebugCounter.h"
38 #include "llvm/TargetParser/TargetParser.h"
39 using namespace llvm;
40 
41 #define DEBUG_TYPE "si-insert-waitcnts"
42 
43 DEBUG_COUNTER(ForceExpCounter, DEBUG_TYPE"-forceexp",
44               "Force emit s_waitcnt expcnt(0) instrs");
45 DEBUG_COUNTER(ForceLgkmCounter, DEBUG_TYPE"-forcelgkm",
46               "Force emit s_waitcnt lgkmcnt(0) instrs");
47 DEBUG_COUNTER(ForceVMCounter, DEBUG_TYPE"-forcevm",
48               "Force emit s_waitcnt vmcnt(0) instrs");
49 
50 static cl::opt<bool> ForceEmitZeroFlag(
51   "amdgpu-waitcnt-forcezero",
52   cl::desc("Force all waitcnt instrs to be emitted as s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)"),
53   cl::init(false), cl::Hidden);
54 
55 namespace {
56 // Class of object that encapsulates the latest instruction counter score
57 // associated with an operand.  Used to determine whether an
58 // s_waitcnt instruction needs to be emitted.
59 
60 enum InstCounterType { VM_CNT = 0, LGKM_CNT, EXP_CNT, VS_CNT, NUM_INST_CNTS };
61 } // namespace
62 
63 namespace llvm {
64 template <> struct enum_iteration_traits<InstCounterType> {
65   static constexpr bool is_iterable = true;
66 };
67 } // namespace llvm
68 
69 namespace {
70 auto inst_counter_types() { return enum_seq(VM_CNT, NUM_INST_CNTS); }
71 
72 using RegInterval = std::pair<int, int>;
73 
74 struct HardwareLimits {
75   unsigned VmcntMax;
76   unsigned ExpcntMax;
77   unsigned LgkmcntMax;
78   unsigned VscntMax;
79 };
80 
81 struct RegisterEncoding {
82   unsigned VGPR0;
83   unsigned VGPRL;
84   unsigned SGPR0;
85   unsigned SGPRL;
86 };
87 
88 enum WaitEventType {
89   VMEM_ACCESS,          // vector-memory read & write
90   VMEM_READ_ACCESS,     // vector-memory read
91   VMEM_WRITE_ACCESS,    // vector-memory write that is not scratch
92   SCRATCH_WRITE_ACCESS, // vector-memory write that may be scratch
93   LDS_ACCESS,           // lds read & write
94   GDS_ACCESS,           // gds read & write
95   SQ_MESSAGE,           // send message
96   SMEM_ACCESS,          // scalar-memory read & write
97   EXP_GPR_LOCK,         // export holding on its data src
98   GDS_GPR_LOCK,         // GDS holding on its data and addr src
99   EXP_POS_ACCESS,       // write to export position
100   EXP_PARAM_ACCESS,     // write to export parameter
101   VMW_GPR_LOCK,         // vector-memory write holding on its data src
102   EXP_LDS_ACCESS,       // read by ldsdir counting as export
103   NUM_WAIT_EVENTS,
104 };
105 
106 static const unsigned WaitEventMaskForInst[NUM_INST_CNTS] = {
107     (1 << VMEM_ACCESS) | (1 << VMEM_READ_ACCESS),
108     (1 << SMEM_ACCESS) | (1 << LDS_ACCESS) | (1 << GDS_ACCESS) |
109         (1 << SQ_MESSAGE),
110     (1 << EXP_GPR_LOCK) | (1 << GDS_GPR_LOCK) | (1 << VMW_GPR_LOCK) |
111         (1 << EXP_PARAM_ACCESS) | (1 << EXP_POS_ACCESS) | (1 << EXP_LDS_ACCESS),
112     (1 << VMEM_WRITE_ACCESS) | (1 << SCRATCH_WRITE_ACCESS)};
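// For illustration: the LGKM_CNT mask above covers SMEM, LDS, GDS and message
// events, so an outstanding s_load_dwordx2 and an outstanding ds_write_b32
// both keep LGKM_CNT pending, and eventCounter(SMEM_ACCESS) below resolves to
// LGKM_CNT.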
113 
114 // The mapping is:
115 //  0                .. SQ_MAX_PGM_VGPRS-1               real VGPRs
116 //  SQ_MAX_PGM_VGPRS .. NUM_ALL_VGPRS-1                  extra VGPR-like slots
117 //  NUM_ALL_VGPRS    .. NUM_ALL_VGPRS+SQ_MAX_PGM_SGPRS-1 real SGPRs
118 // We reserve a fixed number of VGPR slots in the scoring tables for
119 // special tokens like SCMEM_LDS (needed for buffer load to LDS).
120 enum RegisterMapping {
121   SQ_MAX_PGM_VGPRS = 512, // Maximum programmable VGPRs across all targets.
122   AGPR_OFFSET = 256,      // Maximum programmable ArchVGPRs across all targets.
123   SQ_MAX_PGM_SGPRS = 256, // Maximum programmable SGPRs across all targets.
124   NUM_EXTRA_VGPRS = 1,    // A reserved slot for DS.
125   EXTRA_VGPR_LDS = 0,     // An artificial register to track LDS writes.
126   NUM_ALL_VGPRS = SQ_MAX_PGM_VGPRS + NUM_EXTRA_VGPRS, // Where SGPR starts.
127 };
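
// For illustration (a sketch; register numbers are arbitrary): with this
// mapping, v5 scores into slot 5, the AGPR a3 into slot AGPR_OFFSET + 3 = 259,
// the artificial LDS slot is SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS = 512, and s10
// scores into slot NUM_ALL_VGPRS + 10 = 523.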
128 
129 // Enumerate different types of result-returning VMEM operations. Although
130 // s_waitcnt orders them all with a single vmcnt counter, in the absence of
131 // s_waitcnt only instructions of the same VmemType are guaranteed to write
132 // their results in order -- so there is no need to insert an s_waitcnt between
133 // two instructions of the same type that write the same vgpr.
134 enum VmemType {
135   // BUF instructions and MIMG instructions without a sampler.
136   VMEM_NOSAMPLER,
137   // MIMG instructions with a sampler.
138   VMEM_SAMPLER,
139   // BVH instructions
140   VMEM_BVH
141 };
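
// For illustration (arbitrary registers): two BUF loads that both write v0,
//
//   buffer_load_dword v0, v1, s[0:3], 0 offen
//   buffer_load_dword v0, v2, s[0:3], 0 offen
//
// return their results in order, so no s_waitcnt is needed for the WAW on v0.
// If the second write came from an MIMG sample (VMEM_SAMPLER) instead, the
// results could land out of order and a vmcnt wait would be required.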
142 
143 static bool updateVMCntOnly(const MachineInstr &Inst) {
144   return SIInstrInfo::isVMEM(Inst) || SIInstrInfo::isFLATGlobal(Inst) ||
145          SIInstrInfo::isFLATScratch(Inst);
146 }
147 
148 VmemType getVmemType(const MachineInstr &Inst) {
149   assert(updateVMCntOnly(Inst));
150   if (!SIInstrInfo::isMIMG(Inst))
151     return VMEM_NOSAMPLER;
152   const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Inst.getOpcode());
153   const AMDGPU::MIMGBaseOpcodeInfo *BaseInfo =
154       AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
155   return BaseInfo->BVH ? VMEM_BVH
156                        : BaseInfo->Sampler ? VMEM_SAMPLER : VMEM_NOSAMPLER;
157 }
158 
159 void addWait(AMDGPU::Waitcnt &Wait, InstCounterType T, unsigned Count) {
160   switch (T) {
161   case VM_CNT:
162     Wait.VmCnt = std::min(Wait.VmCnt, Count);
163     break;
164   case EXP_CNT:
165     Wait.ExpCnt = std::min(Wait.ExpCnt, Count);
166     break;
167   case LGKM_CNT:
168     Wait.LgkmCnt = std::min(Wait.LgkmCnt, Count);
169     break;
170   case VS_CNT:
171     Wait.VsCnt = std::min(Wait.VsCnt, Count);
172     break;
173   default:
174     llvm_unreachable("bad InstCounterType");
175   }
176 }
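
// A minimal usage sketch (counter values are arbitrary):
//   AMDGPU::Waitcnt W;       // every field defaults to ~0u, i.e. "no wait"
//   addWait(W, VM_CNT, 2);   // require vmcnt(2)
//   addWait(W, VM_CNT, 1);   // tightened to vmcnt(1); a smaller count is stricter
// The resulting W is later encoded into an S_WAITCNT immediate via
// AMDGPU::encodeWaitcnt (see generateWaitcnt below).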
177 
178 // This object maintains the current score brackets of each wait counter, and
179 // a per-register scoreboard for each wait counter.
180 //
181 // We also maintain the latest score for every event type that can change the
182 // waitcnt in order to know if there are multiple types of events within
183 // the brackets. When multiple event types are pending within a bracket, the
184 // wait count may get decremented out of order, so we need to put in an
185 // "s_waitcnt 0" before any use.
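//
// A worked sketch (scores are hypothetical): if three VMEM loads have been
// issued since the last wait, the VM_CNT bracket is LB = 0, UB = 3 and the
// loads carry scores 1, 2 and 3. Using the result of the first load needs
// vmcnt(UB - score) = vmcnt(2), i.e. at most two loads may still be
// outstanding; using the result of the third load needs vmcnt(0).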
186 class WaitcntBrackets {
187 public:
188   WaitcntBrackets(const GCNSubtarget *SubTarget, HardwareLimits Limits,
189                   RegisterEncoding Encoding)
190       : ST(SubTarget), Limits(Limits), Encoding(Encoding) {}
191 
192   unsigned getWaitCountMax(InstCounterType T) const {
193     switch (T) {
194     case VM_CNT:
195       return Limits.VmcntMax;
196     case LGKM_CNT:
197       return Limits.LgkmcntMax;
198     case EXP_CNT:
199       return Limits.ExpcntMax;
200     case VS_CNT:
201       return Limits.VscntMax;
202     default:
203       break;
204     }
205     return 0;
206   }
207 
208   unsigned getScoreLB(InstCounterType T) const {
209     assert(T < NUM_INST_CNTS);
210     return ScoreLBs[T];
211   }
212 
213   unsigned getScoreUB(InstCounterType T) const {
214     assert(T < NUM_INST_CNTS);
215     return ScoreUBs[T];
216   }
217 
218   unsigned getScoreRange(InstCounterType T) const {
219     return getScoreUB(T) - getScoreLB(T);
220   }
221 
222   // Mapping from event to counter.
223   InstCounterType eventCounter(WaitEventType E) const {
224     for (auto T : inst_counter_types()) {
225       if (WaitEventMaskForInst[T] & (1 << E))
226         return T;
227     }
228     llvm_unreachable("event type has no associated counter");
229   }
230 
231   unsigned getRegScore(int GprNo, InstCounterType T) const {
232     if (GprNo < NUM_ALL_VGPRS) {
233       return VgprScores[T][GprNo];
234     }
235     assert(T == LGKM_CNT);
236     return SgprScores[GprNo - NUM_ALL_VGPRS];
237   }
238 
239   bool merge(const WaitcntBrackets &Other);
240 
241   RegInterval getRegInterval(const MachineInstr *MI, const SIInstrInfo *TII,
242                              const MachineRegisterInfo *MRI,
243                              const SIRegisterInfo *TRI, unsigned OpNo) const;
244 
245   bool counterOutOfOrder(InstCounterType T) const;
246   void simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const;
247   void simplifyWaitcnt(InstCounterType T, unsigned &Count) const;
248   void determineWait(InstCounterType T, int RegNo, AMDGPU::Waitcnt &Wait) const;
249   void applyWaitcnt(const AMDGPU::Waitcnt &Wait);
250   void applyWaitcnt(InstCounterType T, unsigned Count);
251   void updateByEvent(const SIInstrInfo *TII, const SIRegisterInfo *TRI,
252                      const MachineRegisterInfo *MRI, WaitEventType E,
253                      MachineInstr &MI);
254 
255   unsigned hasPendingEvent() const { return PendingEvents; }
256   unsigned hasPendingEvent(WaitEventType E) const {
257     return PendingEvents & (1 << E);
258   }
259   unsigned hasPendingEvent(InstCounterType T) const {
260     unsigned HasPending = PendingEvents & WaitEventMaskForInst[T];
261     assert((HasPending != 0) == (getScoreRange(T) != 0));
262     return HasPending;
263   }
264 
265   bool hasMixedPendingEvents(InstCounterType T) const {
266     unsigned Events = hasPendingEvent(T);
267     // Return true if more than one bit is set in Events.
268     return Events & (Events - 1);
269   }
270 
271   bool hasPendingFlat() const {
272     return ((LastFlat[LGKM_CNT] > ScoreLBs[LGKM_CNT] &&
273              LastFlat[LGKM_CNT] <= ScoreUBs[LGKM_CNT]) ||
274             (LastFlat[VM_CNT] > ScoreLBs[VM_CNT] &&
275              LastFlat[VM_CNT] <= ScoreUBs[VM_CNT]));
276   }
277 
278   void setPendingFlat() {
279     LastFlat[VM_CNT] = ScoreUBs[VM_CNT];
280     LastFlat[LGKM_CNT] = ScoreUBs[LGKM_CNT];
281   }
282 
283   // Return true if there might be pending writes to the specified vgpr by VMEM
284   // instructions with types different from V.
285   bool hasOtherPendingVmemTypes(int GprNo, VmemType V) const {
286     assert(GprNo < NUM_ALL_VGPRS);
287     return VgprVmemTypes[GprNo] & ~(1 << V);
288   }
289 
290   void clearVgprVmemTypes(int GprNo) {
291     assert(GprNo < NUM_ALL_VGPRS);
292     VgprVmemTypes[GprNo] = 0;
293   }
294 
295   void print(raw_ostream &);
296   void dump() { print(dbgs()); }
297 
298 private:
299   struct MergeInfo {
300     unsigned OldLB;
301     unsigned OtherLB;
302     unsigned MyShift;
303     unsigned OtherShift;
304   };
305   static bool mergeScore(const MergeInfo &M, unsigned &Score,
306                          unsigned OtherScore);
307 
308   void setScoreLB(InstCounterType T, unsigned Val) {
309     assert(T < NUM_INST_CNTS);
310     ScoreLBs[T] = Val;
311   }
312 
313   void setScoreUB(InstCounterType T, unsigned Val) {
314     assert(T < NUM_INST_CNTS);
315     ScoreUBs[T] = Val;
316 
317     if (T != EXP_CNT)
318       return;
319 
320     if (getScoreRange(EXP_CNT) > getWaitCountMax(EXP_CNT))
321       ScoreLBs[EXP_CNT] = ScoreUBs[EXP_CNT] - getWaitCountMax(EXP_CNT);
322   }
323 
324   void setRegScore(int GprNo, InstCounterType T, unsigned Val) {
325     if (GprNo < NUM_ALL_VGPRS) {
326       VgprUB = std::max(VgprUB, GprNo);
327       VgprScores[T][GprNo] = Val;
328     } else {
329       assert(T == LGKM_CNT);
330       SgprUB = std::max(SgprUB, GprNo - NUM_ALL_VGPRS);
331       SgprScores[GprNo - NUM_ALL_VGPRS] = Val;
332     }
333   }
334 
335   void setExpScore(const MachineInstr *MI, const SIInstrInfo *TII,
336                    const SIRegisterInfo *TRI, const MachineRegisterInfo *MRI,
337                    unsigned OpNo, unsigned Val);
338 
339   const GCNSubtarget *ST = nullptr;
340   HardwareLimits Limits = {};
341   RegisterEncoding Encoding = {};
342   unsigned ScoreLBs[NUM_INST_CNTS] = {0};
343   unsigned ScoreUBs[NUM_INST_CNTS] = {0};
344   unsigned PendingEvents = 0;
345   // Remember the last flat memory operation.
346   unsigned LastFlat[NUM_INST_CNTS] = {0};
347   // wait_cnt scores for every vgpr.
348   // Keep track of the VgprUB and SgprUB to make merge at join efficient.
349   int VgprUB = -1;
350   int SgprUB = -1;
351   unsigned VgprScores[NUM_INST_CNTS][NUM_ALL_VGPRS] = {{0}};
352   // Wait cnt scores for every sgpr, only lgkmcnt is relevant.
353   unsigned SgprScores[SQ_MAX_PGM_SGPRS] = {0};
354   // Bitmask of the VmemTypes of VMEM instructions that might have a pending
355   // write to each vgpr.
356   unsigned char VgprVmemTypes[NUM_ALL_VGPRS] = {0};
357 };
358 
359 class SIInsertWaitcnts : public MachineFunctionPass {
360 private:
361   const GCNSubtarget *ST = nullptr;
362   const SIInstrInfo *TII = nullptr;
363   const SIRegisterInfo *TRI = nullptr;
364   const MachineRegisterInfo *MRI = nullptr;
365   AMDGPU::IsaVersion IV;
366 
367   DenseSet<MachineInstr *> TrackedWaitcntSet;
368   DenseMap<const Value *, MachineBasicBlock *> SLoadAddresses;
369   DenseMap<MachineBasicBlock *, bool> PreheadersToFlush;
370   MachineLoopInfo *MLI;
371   MachinePostDominatorTree *PDT;
372 
373   struct BlockInfo {
374     std::unique_ptr<WaitcntBrackets> Incoming;
375     bool Dirty = true;
376   };
377 
378   MapVector<MachineBasicBlock *, BlockInfo> BlockInfos;
379 
380   // ForceEmitZeroWaitcnts: force all waitcnt instructions to be emitted as
381   // s_waitcnt 0 because of the amdgpu-waitcnt-forcezero flag.
382   bool ForceEmitZeroWaitcnts;
383   bool ForceEmitWaitcnt[NUM_INST_CNTS];
384 
385   // S_ENDPGM instructions before which we should insert a DEALLOC_VGPRS
386   // message.
387   DenseSet<MachineInstr *> ReleaseVGPRInsts;
388 
389 public:
390   static char ID;
391 
392   SIInsertWaitcnts() : MachineFunctionPass(ID) {
393     (void)ForceExpCounter;
394     (void)ForceLgkmCounter;
395     (void)ForceVMCounter;
396   }
397 
398   bool shouldFlushVmCnt(MachineLoop *ML, WaitcntBrackets &Brackets);
399   bool isPreheaderToFlush(MachineBasicBlock &MBB,
400                           WaitcntBrackets &ScoreBrackets);
401   bool isVMEMOrFlatVMEM(const MachineInstr &MI) const;
402   bool runOnMachineFunction(MachineFunction &MF) override;
403 
404   StringRef getPassName() const override {
405     return "SI insert wait instructions";
406   }
407 
408   void getAnalysisUsage(AnalysisUsage &AU) const override {
409     AU.setPreservesCFG();
410     AU.addRequired<MachineLoopInfo>();
411     AU.addRequired<MachinePostDominatorTree>();
412     MachineFunctionPass::getAnalysisUsage(AU);
413   }
414 
415   bool isForceEmitWaitcnt() const {
416     for (auto T : inst_counter_types())
417       if (ForceEmitWaitcnt[T])
418         return true;
419     return false;
420   }
421 
422   void setForceEmitWaitcnt() {
423 // For non-debug builds, ForceEmitWaitcnt has been initialized to false;
424 // For debug builds, get the debug counter info and adjust if need be
425 #ifndef NDEBUG
426     if (DebugCounter::isCounterSet(ForceExpCounter) &&
427         DebugCounter::shouldExecute(ForceExpCounter)) {
428       ForceEmitWaitcnt[EXP_CNT] = true;
429     } else {
430       ForceEmitWaitcnt[EXP_CNT] = false;
431     }
432 
433     if (DebugCounter::isCounterSet(ForceLgkmCounter) &&
434         DebugCounter::shouldExecute(ForceLgkmCounter)) {
435       ForceEmitWaitcnt[LGKM_CNT] = true;
436     } else {
437       ForceEmitWaitcnt[LGKM_CNT] = false;
438     }
439 
440     if (DebugCounter::isCounterSet(ForceVMCounter) &&
441         DebugCounter::shouldExecute(ForceVMCounter)) {
442       ForceEmitWaitcnt[VM_CNT] = true;
443     } else {
444       ForceEmitWaitcnt[VM_CNT] = false;
445     }
446 #endif // NDEBUG
447   }
448 
449   // Return the appropriate VMEM_*_ACCESS type for Inst, which must be a VMEM or
450   // FLAT instruction.
451   WaitEventType getVmemWaitEventType(const MachineInstr &Inst) const {
452     assert(SIInstrInfo::isVMEM(Inst) || SIInstrInfo::isFLAT(Inst));
453     if (!ST->hasVscnt())
454       return VMEM_ACCESS;
455     if (Inst.mayStore() && !SIInstrInfo::isAtomicRet(Inst)) {
456       // FLAT and SCRATCH instructions may access scratch. Other VMEM
457       // instructions do not.
458       if (SIInstrInfo::isFLAT(Inst) && mayAccessScratchThroughFlat(Inst))
459         return SCRATCH_WRITE_ACCESS;
460       return VMEM_WRITE_ACCESS;
461     }
462     return VMEM_READ_ACCESS;
463   }
464 
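
  // For illustration: on a target with VSCNT, global_store_dword maps to
  // VMEM_WRITE_ACCESS, scratch_store_dword (or a FLAT store that may hit
  // scratch) maps to SCRATCH_WRITE_ACCESS, and global_load_dword maps to
  // VMEM_READ_ACCESS; without VSCNT every such access maps to VMEM_ACCESS.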
465   bool mayAccessVMEMThroughFlat(const MachineInstr &MI) const;
466   bool mayAccessLDSThroughFlat(const MachineInstr &MI) const;
467   bool mayAccessScratchThroughFlat(const MachineInstr &MI) const;
468   bool generateWaitcntInstBefore(MachineInstr &MI,
469                                  WaitcntBrackets &ScoreBrackets,
470                                  MachineInstr *OldWaitcntInstr,
471                                  bool FlushVmCnt);
472   bool generateWaitcntBlockEnd(MachineBasicBlock &Block,
473                                WaitcntBrackets &ScoreBrackets,
474                                MachineInstr *OldWaitcntInstr);
475   bool generateWaitcnt(AMDGPU::Waitcnt Wait,
476                        MachineBasicBlock::instr_iterator It,
477                        MachineBasicBlock &Block, WaitcntBrackets &ScoreBrackets,
478                        MachineInstr *OldWaitcntInstr);
479   void updateEventWaitcntAfter(MachineInstr &Inst,
480                                WaitcntBrackets *ScoreBrackets);
481   bool insertWaitcntInBlock(MachineFunction &MF, MachineBasicBlock &Block,
482                             WaitcntBrackets &ScoreBrackets);
483   bool applyPreexistingWaitcnt(WaitcntBrackets &ScoreBrackets,
484                                MachineInstr &OldWaitcntInstr,
485                                AMDGPU::Waitcnt &Wait,
486                                MachineBasicBlock::instr_iterator It) const;
487 };
488 
489 } // end anonymous namespace
490 
491 RegInterval WaitcntBrackets::getRegInterval(const MachineInstr *MI,
492                                             const SIInstrInfo *TII,
493                                             const MachineRegisterInfo *MRI,
494                                             const SIRegisterInfo *TRI,
495                                             unsigned OpNo) const {
496   const MachineOperand &Op = MI->getOperand(OpNo);
497   if (!TRI->isInAllocatableClass(Op.getReg()))
498     return {-1, -1};
499 
500   // A use via a partial-write (PW) operand does not need a waitcnt.
501   // A partial write is not a WAW.
502   assert(!Op.getSubReg() || !Op.isUndef());
503 
504   RegInterval Result;
505 
506   unsigned Reg = TRI->getEncodingValue(AMDGPU::getMCReg(Op.getReg(), *ST));
507 
508   if (TRI->isVectorRegister(*MRI, Op.getReg())) {
509     assert(Reg >= Encoding.VGPR0 && Reg <= Encoding.VGPRL);
510     Result.first = Reg - Encoding.VGPR0;
511     if (TRI->isAGPR(*MRI, Op.getReg()))
512       Result.first += AGPR_OFFSET;
513     assert(Result.first >= 0 && Result.first < SQ_MAX_PGM_VGPRS);
514   } else if (TRI->isSGPRReg(*MRI, Op.getReg())) {
515     assert(Reg >= Encoding.SGPR0 && Reg < SQ_MAX_PGM_SGPRS);
516     Result.first = Reg - Encoding.SGPR0 + NUM_ALL_VGPRS;
517     assert(Result.first >= NUM_ALL_VGPRS &&
518            Result.first < SQ_MAX_PGM_SGPRS + NUM_ALL_VGPRS);
519   }
520   // TODO: Handle TTMP
521   // else if (TRI->isTTMP(*MRI, Reg.getReg())) ...
522   else
523     return {-1, -1};
524 
525   const TargetRegisterClass *RC = TII->getOpRegClass(*MI, OpNo);
526   unsigned Size = TRI->getRegSizeInBits(*RC);
527   Result.second = Result.first + ((Size + 16) / 32);
528 
529   return Result;
530 }
531 
532 void WaitcntBrackets::setExpScore(const MachineInstr *MI,
533                                   const SIInstrInfo *TII,
534                                   const SIRegisterInfo *TRI,
535                                   const MachineRegisterInfo *MRI, unsigned OpNo,
536                                   unsigned Val) {
537   RegInterval Interval = getRegInterval(MI, TII, MRI, TRI, OpNo);
538   assert(TRI->isVectorRegister(*MRI, MI->getOperand(OpNo).getReg()));
539   for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
540     setRegScore(RegNo, EXP_CNT, Val);
541   }
542 }
543 
544 // MUBUF and FLAT LDS DMA operations need a wait on vmcnt before the data
545 // written to LDS can be accessed. A load from LDS to VMEM does not need a wait.
546 static bool mayWriteLDSThroughDMA(const MachineInstr &MI) {
547   return SIInstrInfo::isVALU(MI) &&
548          (SIInstrInfo::isMUBUF(MI) || SIInstrInfo::isFLAT(MI)) &&
549          MI.getOpcode() != AMDGPU::BUFFER_STORE_LDS_DWORD;
550 }
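
// For illustration: a MUBUF load with the "lds" modifier (buffer_load_dword
// ... lds) is an LDS DMA operation; a later ds_read_b32 from the written LDS
// address must wait on vmcnt until the DMA has completed. The artificial
// EXTRA_VGPR_LDS slot is what tracks this.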
551 
552 void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
553                                     const SIRegisterInfo *TRI,
554                                     const MachineRegisterInfo *MRI,
555                                     WaitEventType E, MachineInstr &Inst) {
556   InstCounterType T = eventCounter(E);
557   unsigned CurrScore = getScoreUB(T) + 1;
558   if (CurrScore == 0)
559     report_fatal_error("InsertWaitcnt score wraparound");
560   // PendingEvents and ScoreUB need to be updated regardless of whether this
561   // event changes the score of a register or not. Examples include vm_cnt for
562   // a buffer store or lgkm_cnt for a message send.
563   PendingEvents |= 1 << E;
564   setScoreUB(T, CurrScore);
565 
566   if (T == EXP_CNT) {
567     // Put score on the source vgprs. If this is a store, just use those
568     // specific register(s).
569     if (TII->isDS(Inst) && (Inst.mayStore() || Inst.mayLoad())) {
570       int AddrOpIdx =
571           AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::addr);
572       // All GDS operations must protect their address register (same as
573       // export.)
574       if (AddrOpIdx != -1) {
575         setExpScore(&Inst, TII, TRI, MRI, AddrOpIdx, CurrScore);
576       }
577 
578       if (Inst.mayStore()) {
579         if (AMDGPU::hasNamedOperand(Inst.getOpcode(), AMDGPU::OpName::data0)) {
580           setExpScore(
581               &Inst, TII, TRI, MRI,
582               AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data0),
583               CurrScore);
584         }
585         if (AMDGPU::hasNamedOperand(Inst.getOpcode(), AMDGPU::OpName::data1)) {
586           setExpScore(&Inst, TII, TRI, MRI,
587                       AMDGPU::getNamedOperandIdx(Inst.getOpcode(),
588                                                  AMDGPU::OpName::data1),
589                       CurrScore);
590         }
591       } else if (SIInstrInfo::isAtomicRet(Inst) &&
592                  Inst.getOpcode() != AMDGPU::DS_GWS_INIT &&
593                  Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_V &&
594                  Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_BR &&
595                  Inst.getOpcode() != AMDGPU::DS_GWS_SEMA_P &&
596                  Inst.getOpcode() != AMDGPU::DS_GWS_BARRIER &&
597                  Inst.getOpcode() != AMDGPU::DS_APPEND &&
598                  Inst.getOpcode() != AMDGPU::DS_CONSUME &&
599                  Inst.getOpcode() != AMDGPU::DS_ORDERED_COUNT) {
600         for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
601           const MachineOperand &Op = Inst.getOperand(I);
602           if (Op.isReg() && !Op.isDef() &&
603               TRI->isVectorRegister(*MRI, Op.getReg())) {
604             setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
605           }
606         }
607       }
608     } else if (TII->isFLAT(Inst)) {
609       if (Inst.mayStore()) {
610         setExpScore(
611             &Inst, TII, TRI, MRI,
612             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
613             CurrScore);
614       } else if (SIInstrInfo::isAtomicRet(Inst)) {
615         setExpScore(
616             &Inst, TII, TRI, MRI,
617             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
618             CurrScore);
619       }
620     } else if (TII->isMIMG(Inst)) {
621       if (Inst.mayStore()) {
622         setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
623       } else if (SIInstrInfo::isAtomicRet(Inst)) {
624         setExpScore(
625             &Inst, TII, TRI, MRI,
626             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
627             CurrScore);
628       }
629     } else if (TII->isMTBUF(Inst)) {
630       if (Inst.mayStore()) {
631         setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
632       }
633     } else if (TII->isMUBUF(Inst)) {
634       if (Inst.mayStore()) {
635         setExpScore(&Inst, TII, TRI, MRI, 0, CurrScore);
636       } else if (SIInstrInfo::isAtomicRet(Inst)) {
637         setExpScore(
638             &Inst, TII, TRI, MRI,
639             AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::data),
640             CurrScore);
641       }
642     } else if (TII->isLDSDIR(Inst)) {
643       // LDSDIR instructions attach the score to the destination.
644       setExpScore(
645           &Inst, TII, TRI, MRI,
646           AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::vdst),
647           CurrScore);
648     } else {
649       if (TII->isEXP(Inst)) {
650         // For export the destination registers are really temps that
651         // can be used as the actual source after export patching, so
652         // we need to treat them like sources and set the EXP_CNT
653         // score.
654         for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
655           MachineOperand &DefMO = Inst.getOperand(I);
656           if (DefMO.isReg() && DefMO.isDef() &&
657               TRI->isVGPR(*MRI, DefMO.getReg())) {
658             setRegScore(
659                 TRI->getEncodingValue(AMDGPU::getMCReg(DefMO.getReg(), *ST)),
660                 EXP_CNT, CurrScore);
661           }
662         }
663       }
664       for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
665         MachineOperand &MO = Inst.getOperand(I);
666         if (MO.isReg() && !MO.isDef() &&
667             TRI->isVectorRegister(*MRI, MO.getReg())) {
668           setExpScore(&Inst, TII, TRI, MRI, I, CurrScore);
669         }
670       }
671     }
672 #if 0 // TODO: check if this is handled by MUBUF code above.
673   } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
674        Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
675        Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
676     MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
677     unsigned OpNo;//TODO: find the OpNo for this operand;
678     RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, OpNo);
679     for (int RegNo = Interval.first; RegNo < Interval.second;
680     ++RegNo) {
681       setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
682     }
683 #endif
684   } else {
685     // Match the score to the destination registers.
686     for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
687       auto &Op = Inst.getOperand(I);
688       if (!Op.isReg() || !Op.isDef())
689         continue;
690       RegInterval Interval = getRegInterval(&Inst, TII, MRI, TRI, I);
691       if (T == VM_CNT) {
692         if (Interval.first >= NUM_ALL_VGPRS)
693           continue;
694         if (updateVMCntOnly(Inst)) {
695           VmemType V = getVmemType(Inst);
696           for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo)
697             VgprVmemTypes[RegNo] |= 1 << V;
698         }
699       }
700       for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
701         setRegScore(RegNo, T, CurrScore);
702       }
703     }
704     if (Inst.mayStore() && (TII->isDS(Inst) || mayWriteLDSThroughDMA(Inst))) {
705       setRegScore(SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS, T, CurrScore);
706     }
707   }
708 }
709 
710 void WaitcntBrackets::print(raw_ostream &OS) {
711   OS << '\n';
712   for (auto T : inst_counter_types()) {
713     unsigned SR = getScoreRange(T);
714 
715     switch (T) {
716     case VM_CNT:
717       OS << "    VM_CNT(" << SR << "): ";
718       break;
719     case LGKM_CNT:
720       OS << "    LGKM_CNT(" << SR << "): ";
721       break;
722     case EXP_CNT:
723       OS << "    EXP_CNT(" << SR << "): ";
724       break;
725     case VS_CNT:
726       OS << "    VS_CNT(" << SR << "): ";
727       break;
728     default:
729       OS << "    UNKNOWN(" << SR << "): ";
730       break;
731     }
732 
733     if (SR != 0) {
734       // Print vgpr scores.
735       unsigned LB = getScoreLB(T);
736 
737       for (int J = 0; J <= VgprUB; J++) {
738         unsigned RegScore = getRegScore(J, T);
739         if (RegScore <= LB)
740           continue;
741         unsigned RelScore = RegScore - LB - 1;
742         if (J < SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS) {
743           OS << RelScore << ":v" << J << " ";
744         } else {
745           OS << RelScore << ":ds ";
746         }
747       }
748       // Also need to print sgpr scores for lgkm_cnt.
749       if (T == LGKM_CNT) {
750         for (int J = 0; J <= SgprUB; J++) {
751           unsigned RegScore = getRegScore(J + NUM_ALL_VGPRS, LGKM_CNT);
752           if (RegScore <= LB)
753             continue;
754           unsigned RelScore = RegScore - LB - 1;
755           OS << RelScore << ":s" << J << " ";
756         }
757       }
758     }
759     OS << '\n';
760   }
761   OS << '\n';
762 }
763 
764 /// Simplify the waitcnt, in the sense of removing redundant counts. Counts
765 /// that are found to be redundant are reset to ~0u, meaning no wait is needed.
766 void WaitcntBrackets::simplifyWaitcnt(AMDGPU::Waitcnt &Wait) const {
767   simplifyWaitcnt(VM_CNT, Wait.VmCnt);
768   simplifyWaitcnt(EXP_CNT, Wait.ExpCnt);
769   simplifyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
770   simplifyWaitcnt(VS_CNT, Wait.VsCnt);
771 }
772 
773 void WaitcntBrackets::simplifyWaitcnt(InstCounterType T,
774                                       unsigned &Count) const {
775   // The number of outstanding events for this type, T, can be calculated
776   // as (UB - LB). If the current Count is greater than or equal to the number
777   // of outstanding events, then the wait for this counter is redundant.
778   if (Count >= getScoreRange(T))
779     Count = ~0u;
780 }
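
// A worked sketch (counts are hypothetical): with two outstanding LGKM events
// (UB - LB == 2), a requested lgkmcnt(3) is already satisfied, since at most
// two events can still be pending, so it is discarded (set to ~0u); a
// requested lgkmcnt(1) is kept.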
781 
782 void WaitcntBrackets::determineWait(InstCounterType T, int RegNo,
783                                     AMDGPU::Waitcnt &Wait) const {
784   unsigned ScoreToWait = getRegScore(RegNo, T);
785 
786   // If the score of src_operand falls within the bracket, we need an
787   // s_waitcnt instruction.
788   const unsigned LB = getScoreLB(T);
789   const unsigned UB = getScoreUB(T);
790   if ((UB >= ScoreToWait) && (ScoreToWait > LB)) {
791     if ((T == VM_CNT || T == LGKM_CNT) &&
792         hasPendingFlat() &&
793         !ST->hasFlatLgkmVMemCountInOrder()) {
794       // If there is a pending FLAT operation, and this is a VMem or LGKM
795       // waitcnt and the target can report early completion, then we need
796       // to force a waitcnt 0.
797       addWait(Wait, T, 0);
798     } else if (counterOutOfOrder(T)) {
799       // The counter can get decremented out of order when there are multiple
800       // event types in the bracket. Also emit an s_waitcnt with a conservative
801       // value of 0 for the counter.
802       addWait(Wait, T, 0);
803     } else {
804       // If a counter has been maxed out avoid overflow by waiting for
805       // MAX(CounterType) - 1 instead.
806       unsigned NeededWait = std::min(UB - ScoreToWait, getWaitCountMax(T) - 1);
807       addWait(Wait, T, NeededWait);
808     }
809   }
810 }
811 
812 void WaitcntBrackets::applyWaitcnt(const AMDGPU::Waitcnt &Wait) {
813   applyWaitcnt(VM_CNT, Wait.VmCnt);
814   applyWaitcnt(EXP_CNT, Wait.ExpCnt);
815   applyWaitcnt(LGKM_CNT, Wait.LgkmCnt);
816   applyWaitcnt(VS_CNT, Wait.VsCnt);
817 }
818 
819 void WaitcntBrackets::applyWaitcnt(InstCounterType T, unsigned Count) {
820   const unsigned UB = getScoreUB(T);
821   if (Count >= UB)
822     return;
823   if (Count != 0) {
824     if (counterOutOfOrder(T))
825       return;
826     setScoreLB(T, std::max(getScoreLB(T), UB - Count));
827   } else {
828     setScoreLB(T, UB);
829     PendingEvents &= ~WaitEventMaskForInst[T];
830   }
831 }
832 
833 // Where there are multiple types of event in the bracket of a counter,
834 // the decrement may go out of order.
835 bool WaitcntBrackets::counterOutOfOrder(InstCounterType T) const {
836   // Scalar memory reads can always go out of order.
837   if (T == LGKM_CNT && hasPendingEvent(SMEM_ACCESS))
838     return true;
839   return hasMixedPendingEvents(T);
840 }
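
// For illustration: if both an s_load_dword (SMEM) and a ds_read_b32 (LDS) are
// outstanding, lgkmcnt may decrement out of order, so a precise lgkmcnt(1)
// cannot be trusted and the pass falls back to a conservative lgkmcnt(0).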
841 
842 INITIALIZE_PASS_BEGIN(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
843                       false)
844 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
845 INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
846 INITIALIZE_PASS_END(SIInsertWaitcnts, DEBUG_TYPE, "SI Insert Waitcnts", false,
847                     false)
848 
849 char SIInsertWaitcnts::ID = 0;
850 
851 char &llvm::SIInsertWaitcntsID = SIInsertWaitcnts::ID;
852 
853 FunctionPass *llvm::createSIInsertWaitcntsPass() {
854   return new SIInsertWaitcnts();
855 }
856 
857 static bool updateOperandIfDifferent(MachineInstr &MI, uint16_t OpName,
858                                      unsigned NewEnc) {
859   int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
860   assert(OpIdx >= 0);
861 
862   MachineOperand &MO = MI.getOperand(OpIdx);
863 
864   if (NewEnc == MO.getImm())
865     return false;
866 
867   MO.setImm(NewEnc);
868   return true;
869 }
870 
871 /// Combine consecutive waitcnt instructions that precede \p It and follow
872 /// \p OldWaitcntInstr, and apply any extra waits from waitcnts that were added
873 /// by previous passes. Currently this pass conservatively assumes that these
874 /// preexisting waitcnts are required for correctness.
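///
/// For illustration (encodings are hypothetical): two back-to-back waitcnts
///   s_waitcnt vmcnt(1)
///   s_waitcnt lgkmcnt(0)
/// are merged into a single "s_waitcnt vmcnt(1) lgkmcnt(0)", which is then
/// tightened further by whatever wait this pass itself requires.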
875 bool SIInsertWaitcnts::applyPreexistingWaitcnt(
876     WaitcntBrackets &ScoreBrackets, MachineInstr &OldWaitcntInstr,
877     AMDGPU::Waitcnt &Wait, MachineBasicBlock::instr_iterator It) const {
878   bool Modified = false;
879   MachineInstr *WaitcntInstr = nullptr;
880   MachineInstr *WaitcntVsCntInstr = nullptr;
881 
882   for (auto &II :
883        make_early_inc_range(make_range(OldWaitcntInstr.getIterator(), It))) {
884     if (II.isMetaInstruction())
885       continue;
886 
887     if (II.getOpcode() == AMDGPU::S_WAITCNT) {
888       // Conservatively update required wait if this waitcnt was added in an
889       // earlier pass. In this case it will not exist in the tracked waitcnt
890       // set.
891       if (!TrackedWaitcntSet.count(&II)) {
892         unsigned IEnc = II.getOperand(0).getImm();
893         AMDGPU::Waitcnt OldWait = AMDGPU::decodeWaitcnt(IV, IEnc);
894         Wait = Wait.combined(OldWait);
895       }
896 
897       // Merge consecutive waitcnt of the same type by erasing multiples.
898       if (!WaitcntInstr) {
899         WaitcntInstr = &II;
900       } else {
901         II.eraseFromParent();
902         Modified = true;
903       }
904 
905     } else {
906       assert(II.getOpcode() == AMDGPU::S_WAITCNT_VSCNT);
907       assert(II.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
908       if (!TrackedWaitcntSet.count(&II)) {
909         unsigned OldVSCnt =
910             TII->getNamedOperand(II, AMDGPU::OpName::simm16)->getImm();
911         Wait.VsCnt = std::min(Wait.VsCnt, OldVSCnt);
912       }
913 
914       if (!WaitcntVsCntInstr) {
915         WaitcntVsCntInstr = &II;
916       } else {
917         II.eraseFromParent();
918         Modified = true;
919       }
920     }
921   }
922 
923   // Update the encoding of the merged waitcnt with the required wait.
924   if (WaitcntInstr) {
925     if (Wait.hasWaitExceptVsCnt()) {
926       Modified |=
927           updateOperandIfDifferent(*WaitcntInstr, AMDGPU::OpName::simm16,
928                                    AMDGPU::encodeWaitcnt(IV, Wait));
929       ScoreBrackets.applyWaitcnt(Wait);
930       Wait.VmCnt = ~0u;
931       Wait.LgkmCnt = ~0u;
932       Wait.ExpCnt = ~0u;
933 
934       LLVM_DEBUG(It == OldWaitcntInstr.getParent()->end()
935                      ? dbgs() << "applyPreexistingWaitcnt\n"
936                               << "New Instr at block end: " << *WaitcntInstr
937                               << '\n'
938                      : dbgs() << "applyPreexistingWaitcnt\n"
939                               << "Old Instr: " << *It
940                               << "New Instr: " << *WaitcntInstr << '\n');
941 
942     } else {
943       WaitcntInstr->eraseFromParent();
944       Modified = true;
945     }
946   }
947 
948   if (WaitcntVsCntInstr) {
949     if (Wait.hasWaitVsCnt()) {
950       assert(ST->hasVscnt());
951       Modified |= updateOperandIfDifferent(*WaitcntVsCntInstr,
952                                            AMDGPU::OpName::simm16, Wait.VsCnt);
953       ScoreBrackets.applyWaitcnt(Wait);
954       Wait.VsCnt = ~0u;
955 
956       LLVM_DEBUG(It == OldWaitcntInstr.getParent()->end()
957                      ? dbgs() << "applyPreexistingWaitcnt\n"
958                               << "New Instr at block end: "
959                               << *WaitcntVsCntInstr << '\n'
960                      : dbgs() << "applyPreexistingWaitcnt\n"
961                               << "Old Instr: " << *It
962                               << "New Instr: " << *WaitcntVsCntInstr << '\n');
963     } else {
964       WaitcntVsCntInstr->eraseFromParent();
965       Modified = true;
966     }
967   }
968 
969   return Modified;
970 }
971 
972 static bool readsVCCZ(const MachineInstr &MI) {
973   unsigned Opc = MI.getOpcode();
974   return (Opc == AMDGPU::S_CBRANCH_VCCNZ || Opc == AMDGPU::S_CBRANCH_VCCZ) &&
975          !MI.getOperand(1).isUndef();
976 }
977 
978 /// \returns true if the callee inserts an s_waitcnt 0 on function entry.
979 static bool callWaitsOnFunctionEntry(const MachineInstr &MI) {
980   // Currently all conventions wait, but this may not always be the case.
981   //
982   // TODO: If IPRA is enabled, and the callee is isSafeForNoCSROpt, it may make
983   // sense to omit the wait and do it in the caller.
984   return true;
985 }
986 
987 /// \returns true if the callee is expected to wait for any outstanding waits
988 /// before returning.
989 static bool callWaitsOnFunctionReturn(const MachineInstr &MI) {
990   return true;
991 }
992 
993 ///  Generate an s_waitcnt instruction to be placed before \p MI.
994 ///  Instructions of a given type are returned in order,
995 ///  but instructions of different types can complete out of order.
996 ///  We rely on this in-order completion
997 ///  and simply assign a score to the memory access instructions.
998 ///  whether a memory access requires an s_waitcnt
999 ///  and, if so, what the value of each counter should be.
1000 ///  and if so what the value of each counter is.
1001 ///  The "score bracket" is bound by the lower bound and upper bound
1002 ///  scores (*_score_LB and *_score_ub respectively).
1003 ///  If FlushVmCnt is true, we also want to generate an s_waitcnt to
1004 ///  flush the vmcnt counter here.
1005 bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
1006                                                  WaitcntBrackets &ScoreBrackets,
1007                                                  MachineInstr *OldWaitcntInstr,
1008                                                  bool FlushVmCnt) {
1009   setForceEmitWaitcnt();
1010 
1011   if (MI.isMetaInstruction())
1012     return false;
1013 
1014   AMDGPU::Waitcnt Wait;
1015 
1016   // FIXME: This should have already been handled by the memory legalizer.
1017   // Removing this currently doesn't affect any lit tests, but we need to
1018   // verify that nothing was relying on this. The number of buffer invalidates
1019   // being handled here should not be expanded.
1020   if (MI.getOpcode() == AMDGPU::BUFFER_WBINVL1 ||
1021       MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_SC ||
1022       MI.getOpcode() == AMDGPU::BUFFER_WBINVL1_VOL ||
1023       MI.getOpcode() == AMDGPU::BUFFER_GL0_INV ||
1024       MI.getOpcode() == AMDGPU::BUFFER_GL1_INV) {
1025     Wait.VmCnt = 0;
1026   }
1027 
1028   // All waits must be resolved at call return.
1029   // NOTE: this could be improved with knowledge of all call sites or
1030   //   with knowledge of the called routines.
1031   if (MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG ||
1032       MI.getOpcode() == AMDGPU::SI_RETURN ||
1033       MI.getOpcode() == AMDGPU::S_SETPC_B64_return ||
1034       (MI.isReturn() && MI.isCall() && !callWaitsOnFunctionEntry(MI))) {
1035     Wait = Wait.combined(AMDGPU::Waitcnt::allZeroExceptVsCnt());
1036   }
1037   // Identify S_ENDPGM instructions which may have to wait for outstanding VMEM
1038   // stores. In this case it can be useful to send a message to explicitly
1039   // release all VGPRs before the stores have completed, but it is only safe to
1040   // do this if there are no outstanding scratch stores.
1041   else if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
1042            MI.getOpcode() == AMDGPU::S_ENDPGM_SAVED) {
1043     if (ST->getGeneration() >= AMDGPUSubtarget::GFX11 &&
1044         ScoreBrackets.getScoreRange(VS_CNT) != 0 &&
1045         !ScoreBrackets.hasPendingEvent(SCRATCH_WRITE_ACCESS))
1046       ReleaseVGPRInsts.insert(&MI);
1047   }
1048   // Resolve vm waits before gs-done.
1049   else if ((MI.getOpcode() == AMDGPU::S_SENDMSG ||
1050             MI.getOpcode() == AMDGPU::S_SENDMSGHALT) &&
1051            ST->hasLegacyGeometry() &&
1052            ((MI.getOperand(0).getImm() & AMDGPU::SendMsg::ID_MASK_PreGFX11_) ==
1053             AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) {
1054     Wait.VmCnt = 0;
1055   }
1056 #if 0 // TODO: the following blocks of logic when we have fence.
1057   else if (MI.getOpcode() == SC_FENCE) {
1058     const unsigned int group_size =
1059       context->shader_info->GetMaxThreadGroupSize();
1060     // group_size == 0 means thread group size is unknown at compile time
1061     const bool group_is_multi_wave =
1062       (group_size == 0 || group_size > target_info->GetWaveFrontSize());
1063     const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();
1064 
1065     for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
1066       SCRegType src_type = Inst->GetSrcType(i);
1067       switch (src_type) {
1068         case SCMEM_LDS:
1069           if (group_is_multi_wave ||
1070             context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
1071             EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
1072                                ScoreBrackets->getScoreUB(LGKM_CNT));
1073             // LDS may have to wait for VM_CNT after buffer load to LDS
1074             if (target_info->HasBufferLoadToLDS()) {
1075               EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
1076                                  ScoreBrackets->getScoreUB(VM_CNT));
1077             }
1078           }
1079           break;
1080 
1081         case SCMEM_GDS:
1082           if (group_is_multi_wave || fence_is_global) {
1083             EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
1084               ScoreBrackets->getScoreUB(EXP_CNT));
1085             EmitWaitcnt |= ScoreBrackets->updateByWait(LGKM_CNT,
1086               ScoreBrackets->getScoreUB(LGKM_CNT));
1087           }
1088           break;
1089 
1090         case SCMEM_UAV:
1091         case SCMEM_TFBUF:
1092         case SCMEM_RING:
1093         case SCMEM_SCATTER:
1094           if (group_is_multi_wave || fence_is_global) {
1095             EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
1096               ScoreBrackets->getScoreUB(EXP_CNT));
1097             EmitWaitcnt |= ScoreBrackets->updateByWait(VM_CNT,
1098               ScoreBrackets->getScoreUB(VM_CNT));
1099           }
1100           break;
1101 
1102         case SCMEM_SCRATCH:
1103         default:
1104           break;
1105       }
1106     }
1107   }
1108 #endif
1109 
1110   // Export & GDS instructions do not read the EXEC mask until after the export
1111   // is granted (which can occur well after the instruction is issued).
1112   // The shader program must flush all EXP operations on the export-count
1113   // before overwriting the EXEC mask.
1114   else {
1115     if (MI.modifiesRegister(AMDGPU::EXEC, TRI)) {
1116       // Export and GDS are tracked individually, either may trigger a waitcnt
1117       // for EXEC.
1118       if (ScoreBrackets.hasPendingEvent(EXP_GPR_LOCK) ||
1119           ScoreBrackets.hasPendingEvent(EXP_PARAM_ACCESS) ||
1120           ScoreBrackets.hasPendingEvent(EXP_POS_ACCESS) ||
1121           ScoreBrackets.hasPendingEvent(GDS_GPR_LOCK)) {
1122         Wait.ExpCnt = 0;
1123       }
1124     }
1125 
1126     if (MI.isCall() && callWaitsOnFunctionEntry(MI)) {
1127       // The function is going to insert a wait on everything in its prolog.
1128       // This still needs to be careful if the call target is a load (e.g. a GOT
1129       // load). We also need to check WAW dependency with saved PC.
1130       Wait = AMDGPU::Waitcnt();
1131 
1132       int CallAddrOpIdx =
1133           AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::src0);
1134 
1135       if (MI.getOperand(CallAddrOpIdx).isReg()) {
1136         RegInterval CallAddrOpInterval =
1137           ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, CallAddrOpIdx);
1138 
1139         for (int RegNo = CallAddrOpInterval.first;
1140              RegNo < CallAddrOpInterval.second; ++RegNo)
1141           ScoreBrackets.determineWait(LGKM_CNT, RegNo, Wait);
1142 
1143         int RtnAddrOpIdx =
1144           AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::dst);
1145         if (RtnAddrOpIdx != -1) {
1146           RegInterval RtnAddrOpInterval =
1147             ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, RtnAddrOpIdx);
1148 
1149           for (int RegNo = RtnAddrOpInterval.first;
1150                RegNo < RtnAddrOpInterval.second; ++RegNo)
1151             ScoreBrackets.determineWait(LGKM_CNT, RegNo, Wait);
1152         }
1153       }
1154     } else {
1155       // FIXME: Should not be relying on memoperands.
1156       // Look at the source operands of every instruction to see if
1157       // any of them results from a previous memory operation that affects
1158       // its current usage. If so, an s_waitcnt instruction needs to be
1159       // emitted.
1160       // If the source operand was defined by a load, add the s_waitcnt
1161       // instruction.
1162       //
1163       // Two cases are handled for destination operands:
1164       // 1) If the destination operand was defined by a load, add the s_waitcnt
1165       // instruction to guarantee the right WAW order.
1166       // 2) If a destination operand was used by a recent export/store instruction,
1167       // add an s_waitcnt on exp_cnt to guarantee the WAR order.
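      //
      // For illustration (arbitrary registers):
      //   RAW: buffer_load_dword v0, ...  then  v_mov_b32 v1, v0  -> vmcnt wait
      //   WAW: s_load_dword s0, ...       then  s_mov_b32 s0, 0   -> lgkmcnt wait
      //   WAR: exp pos0 v0, v1, v2, v3    then  v_mov_b32 v0, 1   -> expcnt wait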
1168       for (const MachineMemOperand *Memop : MI.memoperands()) {
1169         const Value *Ptr = Memop->getValue();
1170         if (Memop->isStore() && SLoadAddresses.count(Ptr)) {
1171           addWait(Wait, LGKM_CNT, 0);
1172           if (PDT->dominates(MI.getParent(), SLoadAddresses.find(Ptr)->second))
1173             SLoadAddresses.erase(Ptr);
1174         }
1175         unsigned AS = Memop->getAddrSpace();
1176         if (AS != AMDGPUAS::LOCAL_ADDRESS && AS != AMDGPUAS::FLAT_ADDRESS)
1177           continue;
1178         // No need to wait before load from VMEM to LDS.
1179         if (mayWriteLDSThroughDMA(MI))
1180           continue;
1181         unsigned RegNo = SQ_MAX_PGM_VGPRS + EXTRA_VGPR_LDS;
1182         // VM_CNT is only relevant to vgpr or LDS.
1183         ScoreBrackets.determineWait(VM_CNT, RegNo, Wait);
1184         if (Memop->isStore()) {
1185           ScoreBrackets.determineWait(EXP_CNT, RegNo, Wait);
1186         }
1187       }
1188 
1189       // Loop over use and def operands.
1190       for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
1191         MachineOperand &Op = MI.getOperand(I);
1192         if (!Op.isReg())
1193           continue;
1194 
1195         // If the instruction does not read tied source, skip the operand.
1196         if (Op.isTied() && Op.isUse() && TII->doesNotReadTiedSource(MI))
1197           continue;
1198 
1199         RegInterval Interval =
1200             ScoreBrackets.getRegInterval(&MI, TII, MRI, TRI, I);
1201 
1202         const bool IsVGPR = TRI->isVectorRegister(*MRI, Op.getReg());
1203         for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
1204           if (IsVGPR) {
1205             // RAW always needs an s_waitcnt. WAW needs an s_waitcnt unless the
1206             // previous write and this write are the same type of VMEM
1207             // instruction, in which case they're guaranteed to write their
1208             // results in order anyway.
1209             if (Op.isUse() || !updateVMCntOnly(MI) ||
1210                 ScoreBrackets.hasOtherPendingVmemTypes(RegNo,
1211                                                        getVmemType(MI))) {
1212               ScoreBrackets.determineWait(VM_CNT, RegNo, Wait);
1213               ScoreBrackets.clearVgprVmemTypes(RegNo);
1214             }
1215             if (Op.isDef() || ScoreBrackets.hasPendingEvent(EXP_LDS_ACCESS)) {
1216               ScoreBrackets.determineWait(EXP_CNT, RegNo, Wait);
1217             }
1218           }
1219           ScoreBrackets.determineWait(LGKM_CNT, RegNo, Wait);
1220         }
1221       }
1222     }
1223   }
1224 
1225   // The subtarget may have an implicit S_WAITCNT 0 before barriers. If it does
1226   // not, we need to ensure the subtarget is capable of backing off barrier
1227   // instructions in case there are any outstanding memory operations that may
1228   // cause an exception. Otherwise, insert an explicit S_WAITCNT 0 here.
1229   if (MI.getOpcode() == AMDGPU::S_BARRIER &&
1230       !ST->hasAutoWaitcntBeforeBarrier() && !ST->supportsBackOffBarrier()) {
1231     Wait = Wait.combined(AMDGPU::Waitcnt::allZero(ST->hasVscnt()));
1232   }
1233 
1234   // TODO: Remove this work-around, enable the assert for Bug 457939
1235   //       after fixing the scheduler. Also, the Shader Compiler code is
1236   //       independent of target.
1237   if (readsVCCZ(MI) && ST->hasReadVCCZBug()) {
1238     if (ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
1239       Wait.LgkmCnt = 0;
1240     }
1241   }
1242 
1243   // Verify that the wait is actually needed.
1244   ScoreBrackets.simplifyWaitcnt(Wait);
1245 
1246   if (ForceEmitZeroWaitcnts)
1247     Wait = AMDGPU::Waitcnt::allZeroExceptVsCnt();
1248 
1249   if (ForceEmitWaitcnt[VM_CNT])
1250     Wait.VmCnt = 0;
1251   if (ForceEmitWaitcnt[EXP_CNT])
1252     Wait.ExpCnt = 0;
1253   if (ForceEmitWaitcnt[LGKM_CNT])
1254     Wait.LgkmCnt = 0;
1255 
1256   if (FlushVmCnt) {
1257     if (ScoreBrackets.hasPendingEvent(VM_CNT))
1258       Wait.VmCnt = 0;
1259   }
1260 
1261   return generateWaitcnt(Wait, MI.getIterator(), *MI.getParent(), ScoreBrackets,
1262                          OldWaitcntInstr);
1263 }
1264 
1265 // Add a waitcnt to flush the vmcnt counter at the end of the given block if
1266 // needed.
1267 bool SIInsertWaitcnts::generateWaitcntBlockEnd(MachineBasicBlock &Block,
1268                                                WaitcntBrackets &ScoreBrackets,
1269                                                MachineInstr *OldWaitcntInstr) {
1270   AMDGPU::Waitcnt Wait;
1271 
1272   if (!ScoreBrackets.hasPendingEvent(VM_CNT))
1273     return false;
1274 
1275   Wait.VmCnt = 0;
1276 
1277   return generateWaitcnt(Wait, Block.instr_end(), Block, ScoreBrackets,
1278                          OldWaitcntInstr);
1279 }
1280 
1281 bool SIInsertWaitcnts::generateWaitcnt(AMDGPU::Waitcnt Wait,
1282                                        MachineBasicBlock::instr_iterator It,
1283                                        MachineBasicBlock &Block,
1284                                        WaitcntBrackets &ScoreBrackets,
1285                                        MachineInstr *OldWaitcntInstr) {
1286   bool Modified = false;
1287   const DebugLoc &DL = Block.findDebugLoc(It);
1288 
1289   if (OldWaitcntInstr)
1290     // Try to merge the required wait with preexisting waitcnt instructions.
1291     // Also erase redundant waitcnt.
1292     Modified =
1293         applyPreexistingWaitcnt(ScoreBrackets, *OldWaitcntInstr, Wait, It);
1294   else
1295     ScoreBrackets.applyWaitcnt(Wait);
1296 
1297   // ExpCnt can be merged into VINTERP.
1298   if (Wait.ExpCnt != ~0u && It != Block.instr_end() &&
1299       SIInstrInfo::isVINTERP(*It)) {
1300     MachineOperand *WaitExp =
1301         TII->getNamedOperand(*It, AMDGPU::OpName::waitexp);
1302     if (Wait.ExpCnt < WaitExp->getImm()) {
1303       WaitExp->setImm(Wait.ExpCnt);
1304       Modified = true;
1305     }
1306     Wait.ExpCnt = ~0u;
1307 
1308     LLVM_DEBUG(dbgs() << "generateWaitcntInstBefore\n"
1309                       << "Update Instr: " << *It);
1310   }
1311 
1312   // Build new waitcnt instructions unless no wait is needed or the old waitcnt
1313   // instruction was modified to handle the required wait.
1314   if (Wait.hasWaitExceptVsCnt()) {
1315     unsigned Enc = AMDGPU::encodeWaitcnt(IV, Wait);
1316     auto SWaitInst =
1317         BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT)).addImm(Enc);
1318     TrackedWaitcntSet.insert(SWaitInst);
1319     Modified = true;
1320 
1321     LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
1322                if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
1323                dbgs() << "New Instr: " << *SWaitInst << '\n');
1324   }
1325 
1326   if (Wait.hasWaitVsCnt()) {
1327     assert(ST->hasVscnt());
1328 
1329     auto SWaitInst = BuildMI(Block, It, DL, TII->get(AMDGPU::S_WAITCNT_VSCNT))
1330                          .addReg(AMDGPU::SGPR_NULL, RegState::Undef)
1331                          .addImm(Wait.VsCnt);
1332     TrackedWaitcntSet.insert(SWaitInst);
1333     Modified = true;
1334 
1335     LLVM_DEBUG(dbgs() << "generateWaitcnt\n";
1336                if (It != Block.instr_end()) dbgs() << "Old Instr: " << *It;
1337                dbgs() << "New Instr: " << *SWaitInst << '\n');
1338   }
1339   return Modified;
1340 }
1341 
1342 // This is a flat memory operation. Check to see if it has memory tokens other
1343 // than LDS. Other address spaces supported by flat memory operations involve
1344 // global memory.
1345 bool SIInsertWaitcnts::mayAccessVMEMThroughFlat(const MachineInstr &MI) const {
1346   assert(TII->isFLAT(MI));
1347 
1348   // All flat instructions use the VMEM counter.
1349   assert(TII->usesVM_CNT(MI));
1350 
1351   // If there are no memory operands then conservatively assume the flat
1352   // operation may access VMEM.
1353   if (MI.memoperands_empty())
1354     return true;
1355 
1356   // See if any memory operand specifies an address space that involves VMEM.
1357   // Flat operations only support FLAT, LOCAL (LDS), or address spaces
1358   // involving VMEM such as GLOBAL, CONSTANT, PRIVATE (SCRATCH), etc. The REGION
1359   // (GDS) address space is not supported by flat operations. Therefore, simply
1360   // return true unless only the LDS address space is found.
1361   for (const MachineMemOperand *Memop : MI.memoperands()) {
1362     unsigned AS = Memop->getAddrSpace();
1363     assert(AS != AMDGPUAS::REGION_ADDRESS);
1364     if (AS != AMDGPUAS::LOCAL_ADDRESS)
1365       return true;
1366   }
1367 
1368   return false;
1369 }
1370 
1371 // This is a flat memory operation. Check to see if it has memory tokens for
1372 // either LDS or FLAT.
1373 bool SIInsertWaitcnts::mayAccessLDSThroughFlat(const MachineInstr &MI) const {
1374   assert(TII->isFLAT(MI));
1375 
1376   // Flat instructions such as SCRATCH and GLOBAL do not use the lgkm counter.
1377   if (!TII->usesLGKM_CNT(MI))
1378     return false;
1379 
1380   // If in tgsplit mode then there can be no use of LDS.
1381   if (ST->isTgSplitEnabled())
1382     return false;
1383 
1384   // If there are no memory operands then conservatively assume the flat
1385   // operation may access LDS.
1386   if (MI.memoperands_empty())
1387     return true;
1388 
1389   // See if any memory operand specifies an address space that involves LDS.
1390   for (const MachineMemOperand *Memop : MI.memoperands()) {
1391     unsigned AS = Memop->getAddrSpace();
1392     if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS)
1393       return true;
1394   }
1395 
1396   return false;
1397 }
1398 
1399 // This is a flat memory operation. Check to see if it has memory tokens for
1400 // either scratch or FLAT.
1401 bool SIInsertWaitcnts::mayAccessScratchThroughFlat(
1402     const MachineInstr &MI) const {
1403   assert(TII->isFLAT(MI));
1404 
1405   // SCRATCH instructions always access scratch.
1406   if (TII->isFLATScratch(MI))
1407     return true;
1408 
1409   // GLOBAL instructions never access scratch.
1410   if (TII->isFLATGlobal(MI))
1411     return false;
1412 
1413   // If there are no memory operands then conservatively assume the flat
1414   // operation may access scratch.
1415   if (MI.memoperands_empty())
1416     return true;
1417 
1418   // See if any memory operand specifies an address space that involves scratch.
1419   return any_of(MI.memoperands(), [](const MachineMemOperand *Memop) {
1420     unsigned AS = Memop->getAddrSpace();
1421     return AS == AMDGPUAS::PRIVATE_ADDRESS || AS == AMDGPUAS::FLAT_ADDRESS;
1422   });
1423 }
1424 
1425 void SIInsertWaitcnts::updateEventWaitcntAfter(MachineInstr &Inst,
1426                                                WaitcntBrackets *ScoreBrackets) {
1427   // Now look at the instruction opcode. If it is a memory access
1428   // instruction, update the upper-bound of the appropriate counter's
1429   // bracket and the destination operand scores.
1430   // TODO: Use the (TSFlags & SIInstrFlags::LGKM_CNT) property everywhere.
1431   if (TII->isDS(Inst) && TII->usesLGKM_CNT(Inst)) {
1432     if (TII->isAlwaysGDS(Inst.getOpcode()) ||
1433         TII->hasModifiersSet(Inst, AMDGPU::OpName::gds)) {
1434       ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_ACCESS, Inst);
1435       ScoreBrackets->updateByEvent(TII, TRI, MRI, GDS_GPR_LOCK, Inst);
1436     } else {
1437       ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1438     }
1439   } else if (TII->isFLAT(Inst)) {
1440     assert(Inst.mayLoadOrStore());
1441 
1442     int FlatASCount = 0;
1443 
1444     if (mayAccessVMEMThroughFlat(Inst)) {
1445       ++FlatASCount;
1446       ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),
1447                                    Inst);
1448     }
1449 
1450     if (mayAccessLDSThroughFlat(Inst)) {
1451       ++FlatASCount;
1452       ScoreBrackets->updateByEvent(TII, TRI, MRI, LDS_ACCESS, Inst);
1453     }
1454 
1455     // A Flat memory operation must access at least one address space.
1456     assert(FlatASCount);
1457 
1458     // This is a flat memory operation that accesses both VMEM and LDS, so note
1459     // it: it will require both the VM and LGKM counters to be flushed to zero
1460     // if it is still pending when a VM or LGKM dependency occurs.
1461     if (FlatASCount > 1)
1462       ScoreBrackets->setPendingFlat();
1463   } else if (SIInstrInfo::isVMEM(Inst) &&
1464              !llvm::AMDGPU::getMUBUFIsBufferInv(Inst.getOpcode())) {
1465     ScoreBrackets->updateByEvent(TII, TRI, MRI, getVmemWaitEventType(Inst),
1466                                  Inst);
1467 
1468     if (ST->vmemWriteNeedsExpWaitcnt() &&
1469         (Inst.mayStore() || SIInstrInfo::isAtomicRet(Inst))) {
1470       ScoreBrackets->updateByEvent(TII, TRI, MRI, VMW_GPR_LOCK, Inst);
1471     }
1472   } else if (TII->isSMRD(Inst)) {
1473     ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1474   } else if (Inst.isCall()) {
1475     if (callWaitsOnFunctionReturn(Inst)) {
1476       // Act as a wait on everything
1477       ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt::allZeroExceptVsCnt());
1478     } else {
1479       // May need to wait for anything.
1480       ScoreBrackets->applyWaitcnt(AMDGPU::Waitcnt());
1481     }
1482   } else if (SIInstrInfo::isLDSDIR(Inst)) {
1483     ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_LDS_ACCESS, Inst);
1484   } else if (TII->isVINTERP(Inst)) {
1485     int64_t Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::waitexp)->getImm();
1486     ScoreBrackets->applyWaitcnt(EXP_CNT, Imm);
1487   } else if (SIInstrInfo::isEXP(Inst)) {
1488     unsigned Imm = TII->getNamedOperand(Inst, AMDGPU::OpName::tgt)->getImm();
1489     if (Imm >= AMDGPU::Exp::ET_PARAM0 && Imm <= AMDGPU::Exp::ET_PARAM31)
1490       ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_PARAM_ACCESS, Inst);
1491     else if (Imm >= AMDGPU::Exp::ET_POS0 && Imm <= AMDGPU::Exp::ET_POS_LAST)
1492       ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_POS_ACCESS, Inst);
1493     else
1494       ScoreBrackets->updateByEvent(TII, TRI, MRI, EXP_GPR_LOCK, Inst);
1495   } else {
1496     switch (Inst.getOpcode()) {
1497     case AMDGPU::S_SENDMSG:
1498     case AMDGPU::S_SENDMSG_RTN_B32:
1499     case AMDGPU::S_SENDMSG_RTN_B64:
1500     case AMDGPU::S_SENDMSGHALT:
1501       ScoreBrackets->updateByEvent(TII, TRI, MRI, SQ_MESSAGE, Inst);
1502       break;
1503     case AMDGPU::S_MEMTIME:
1504     case AMDGPU::S_MEMREALTIME:
1505       ScoreBrackets->updateByEvent(TII, TRI, MRI, SMEM_ACCESS, Inst);
1506       break;
1507     }
1508   }
1509 }
1510 
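     // Rebase a score from its own bracket's timeline onto the merged timeline:
     // scores at or below the old lower bound are already satisfied and collapse
     // to zero, while still-pending scores are shifted so that both inputs share
     // the new common upper bound. Returns true if Other's entry becomes the
     // stricter one.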
1511 bool WaitcntBrackets::mergeScore(const MergeInfo &M, unsigned &Score,
1512                                  unsigned OtherScore) {
1513   unsigned MyShifted = Score <= M.OldLB ? 0 : Score + M.MyShift;
1514   unsigned OtherShifted =
1515       OtherScore <= M.OtherLB ? 0 : OtherScore + M.OtherShift;
1516   Score = std::max(MyShifted, OtherShifted);
1517   return OtherShifted > MyShifted;
1518 }
1519 
1520 /// Merge the pending events and associated score brackets of \p Other into
1521 /// this bracket's status.
1522 ///
1523 /// Returns whether the merge resulted in a change that requires tighter waits
1524 /// (i.e. the merged brackets strictly dominate the original brackets).
1525 bool WaitcntBrackets::merge(const WaitcntBrackets &Other) {
1526   bool StrictDom = false;
1527 
1528   VgprUB = std::max(VgprUB, Other.VgprUB);
1529   SgprUB = std::max(SgprUB, Other.SgprUB);
1530 
1531   for (auto T : inst_counter_types()) {
1532     // Merge event flags for this counter
1533     const unsigned OldEvents = PendingEvents & WaitEventMaskForInst[T];
1534     const unsigned OtherEvents = Other.PendingEvents & WaitEventMaskForInst[T];
1535     if (OtherEvents & ~OldEvents)
1536       StrictDom = true;
1537     PendingEvents |= OtherEvents;
1538 
1539     // Merge scores for this counter
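         // The pending count for a counter is UB - LB; the merged upper bound
         // must leave room for the larger of the two pending counts.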
1540     const unsigned MyPending = ScoreUBs[T] - ScoreLBs[T];
1541     const unsigned OtherPending = Other.ScoreUBs[T] - Other.ScoreLBs[T];
1542     const unsigned NewUB = ScoreLBs[T] + std::max(MyPending, OtherPending);
1543     if (NewUB < ScoreLBs[T])
1544       report_fatal_error("waitcnt score overflow");
1545 
1546     MergeInfo M;
1547     M.OldLB = ScoreLBs[T];
1548     M.OtherLB = Other.ScoreLBs[T];
1549     M.MyShift = NewUB - ScoreUBs[T];
1550     M.OtherShift = NewUB - Other.ScoreUBs[T];
1551 
1552     ScoreUBs[T] = NewUB;
1553 
1554     StrictDom |= mergeScore(M, LastFlat[T], Other.LastFlat[T]);
1555 
1556     for (int J = 0; J <= VgprUB; J++)
1557       StrictDom |= mergeScore(M, VgprScores[T][J], Other.VgprScores[T][J]);
1558 
1559     if (T == LGKM_CNT) {
1560       for (int J = 0; J <= SgprUB; J++)
1561         StrictDom |= mergeScore(M, SgprScores[J], Other.SgprScores[J]);
1562     }
1563   }
1564 
1565   for (int J = 0; J <= VgprUB; J++) {
1566     unsigned char NewVmemTypes = VgprVmemTypes[J] | Other.VgprVmemTypes[J];
1567     StrictDom |= NewVmemTypes != VgprVmemTypes[J];
1568     VgprVmemTypes[J] = NewVmemTypes;
1569   }
1570 
1571   return StrictDom;
1572 }
1573 
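     // Waitcnt instructions that this pass may merge with or rewrite: a plain
     // S_WAITCNT, or an S_WAITCNT_VSCNT whose source operand is the null
     // register (the form emitted by this pass and by the memory legalizer).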
1574 static bool isWaitInstr(MachineInstr &Inst) {
1575   return Inst.getOpcode() == AMDGPU::S_WAITCNT ||
1576          (Inst.getOpcode() == AMDGPU::S_WAITCNT_VSCNT &&
1577           Inst.getOperand(0).isReg() &&
1578           Inst.getOperand(0).getReg() == AMDGPU::SGPR_NULL);
1579 }
1580 
1581 // Generate s_waitcnt instructions where needed.
1582 bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
1583                                             MachineBasicBlock &Block,
1584                                             WaitcntBrackets &ScoreBrackets) {
1585   bool Modified = false;
1586 
1587   LLVM_DEBUG({
1588     dbgs() << "*** Block" << Block.getNumber() << " ***";
1589     ScoreBrackets.dump();
1590   });
1591 
1592   // Track the correctness of vccz through this basic block. There are two
1593   // reasons why it might be incorrect; see ST->hasReadVCCZBug() and
1594   // ST->partialVCCWritesUpdateVCCZ().
1595   bool VCCZCorrect = true;
1596   if (ST->hasReadVCCZBug()) {
1597     // vccz could be incorrect at a basic block boundary if a predecessor wrote
1598     // to vcc and then issued an smem load.
1599     VCCZCorrect = false;
1600   } else if (!ST->partialVCCWritesUpdateVCCZ()) {
1601     // vccz could be incorrect at a basic block boundary if a predecessor wrote
1602     // to vcc_lo or vcc_hi.
1603     VCCZCorrect = false;
1604   }
1605 
1606   // Walk over the instructions.
1607   MachineInstr *OldWaitcntInstr = nullptr;
1608 
1609   for (MachineBasicBlock::instr_iterator Iter = Block.instr_begin(),
1610                                          E = Block.instr_end();
1611        Iter != E;) {
1612     MachineInstr &Inst = *Iter;
1613 
1614     // Track pre-existing waitcnts that were added in earlier iterations or by
1615     // the memory legalizer.
1616     if (isWaitInstr(Inst)) {
1617       if (!OldWaitcntInstr)
1618         OldWaitcntInstr = &Inst;
1619       ++Iter;
1620       continue;
1621     }
1622 
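         // Request a vmcnt flush at the first terminator of a block acting as the
         // preheader of a loop selected by shouldFlushVmCnt.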
1623     bool FlushVmCnt = Block.getFirstTerminator() == Inst &&
1624                       isPreheaderToFlush(Block, ScoreBrackets);
1625 
1626     // Generate an s_waitcnt instruction to be placed before Inst, if needed.
1627     Modified |= generateWaitcntInstBefore(Inst, ScoreBrackets, OldWaitcntInstr,
1628                                           FlushVmCnt);
1629     OldWaitcntInstr = nullptr;
1630 
1631     // Restore vccz if it's not known to be correct already.
1632     bool RestoreVCCZ = !VCCZCorrect && readsVCCZ(Inst);
1633 
1634     // Don't examine operands unless we need to track vccz correctness.
1635     if (ST->hasReadVCCZBug() || !ST->partialVCCWritesUpdateVCCZ()) {
1636       if (Inst.definesRegister(AMDGPU::VCC_LO) ||
1637           Inst.definesRegister(AMDGPU::VCC_HI)) {
1638         // Up to gfx9, writes to vcc_lo and vcc_hi don't update vccz.
1639         if (!ST->partialVCCWritesUpdateVCCZ())
1640           VCCZCorrect = false;
1641       } else if (Inst.definesRegister(AMDGPU::VCC)) {
1642         // There is a hardware bug on CI/SI where SMRD instruction may corrupt
1643         // vccz bit, so when we detect that an instruction may read from a
1644         // corrupt vccz bit, we need to:
1645         // 1. Insert s_waitcnt lgkmcnt(0) to wait for all outstanding SMRD
1646         //    operations to complete.
1647         // 2. Restore the correct value of vccz by writing the current value
1648         //    of vcc back to vcc.
1649         if (ST->hasReadVCCZBug() &&
1650             ScoreBrackets.hasPendingEvent(SMEM_ACCESS)) {
1651           // Writes to vcc while there's an outstanding smem read may get
1652           // clobbered as soon as any read completes.
1653           VCCZCorrect = false;
1654         } else {
1655           // Writes to vcc will fix any incorrect value in vccz.
1656           VCCZCorrect = true;
1657         }
1658       }
1659     }
1660 
1661     if (TII->isSMRD(Inst)) {
1662       for (const MachineMemOperand *Memop : Inst.memoperands()) {
1663         // No need to handle invariant loads when avoiding WAR conflicts, as
1664         // there cannot be a vector store to the same memory location.
1665         if (!Memop->isInvariant()) {
1666           const Value *Ptr = Memop->getValue();
1667           SLoadAddresses.insert(std::pair(Ptr, Inst.getParent()));
1668         }
1669       }
1670       if (ST->hasReadVCCZBug()) {
1671         // This smem read could complete and clobber vccz at any time.
1672         VCCZCorrect = false;
1673       }
1674     }
1675 
1676     updateEventWaitcntAfter(Inst, &ScoreBrackets);
1677 
1678 #if 0 // TODO: implement resource type check controlled by options with ub = LB.
1679     // If this instruction generates a S_SETVSKIP because it is an
1680     // indexed resource, and we are on Tahiti, then it will also force
1681     // an S_WAITCNT vmcnt(0)
1682     if (RequireCheckResourceType(Inst, context)) {
1683       // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
1684       ScoreBrackets->setScoreLB(VM_CNT,
1685       ScoreBrackets->getScoreUB(VM_CNT));
1686     }
1687 #endif
1688 
1689     LLVM_DEBUG({
1690       Inst.print(dbgs());
1691       ScoreBrackets.dump();
1692     });
1693 
1694     // TODO: Remove this work-around after fixing the scheduler and enable the
1695     // assert above.
1696     if (RestoreVCCZ) {
1697       // Restore the vccz bit.  Any time a value is written to vcc, the vcc
1698       // bit is updated, so we can restore the bit by reading the value of
1699       // vcc and then writing it back to the register.
1700       BuildMI(Block, Inst, Inst.getDebugLoc(),
1701               TII->get(ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64),
1702               TRI->getVCC())
1703           .addReg(TRI->getVCC());
1704       VCCZCorrect = true;
1705       Modified = true;
1706     }
1707 
1708     ++Iter;
1709   }
1710 
1711   if (Block.getFirstTerminator() == Block.end() &&
1712       isPreheaderToFlush(Block, ScoreBrackets))
1713     Modified |= generateWaitcntBlockEnd(Block, ScoreBrackets, OldWaitcntInstr);
1714 
1715   return Modified;
1716 }
1717 
1718 // Return true if the given machine basic block is a preheader of a loop in
1719 // which we want to flush the vmcnt counter, and false otherwise.
1720 bool SIInsertWaitcnts::isPreheaderToFlush(MachineBasicBlock &MBB,
1721                                           WaitcntBrackets &ScoreBrackets) {
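       // Memoize the answer per preheader so repeated queries are cheap.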
1722   if (auto It = PreheadersToFlush.find(&MBB); It != PreheadersToFlush.end())
1723     return It->second;
1724 
1725   auto UpdateCache = [&](bool val) {
1726     PreheadersToFlush[&MBB] = val;
1727     return val;
1728   };
1729 
1730   MachineBasicBlock *Succ = MBB.getSingleSuccessor();
1731   if (!Succ)
1732     return UpdateCache(false);
1733 
1734   MachineLoop *Loop = MLI->getLoopFor(Succ);
1735   if (!Loop)
1736     return UpdateCache(false);
1737 
1738   if (Loop->getLoopPreheader() == &MBB && shouldFlushVmCnt(Loop, ScoreBrackets))
1739     return UpdateCache(true);
1740 
1741   return UpdateCache(false);
1742 }
1743 
1744 bool SIInsertWaitcnts::isVMEMOrFlatVMEM(const MachineInstr &MI) const {
1745   return SIInstrInfo::isVMEM(MI) ||
1746          (SIInstrInfo::isFLAT(MI) && mayAccessVMEMThroughFlat(MI));
1747 }
1748 
1749 // Return true if it is better to flush the vmcnt counter in the preheader of
1750 // the given loop. We currently decide to flush in two situations:
1751 // 1. The loop contains vmem store(s), no vmem load and at least one use of a
1752 //    vgpr containing a value that is loaded outside of the loop. (Only on
1753 //    targets with no vscnt counter).
1754 // 2. The loop contains vmem load(s), but the loaded values are not used in the
1755 //    loop, and at least one use of a vgpr containing a value that is loaded
1756 //    outside of the loop.
1757 bool SIInsertWaitcnts::shouldFlushVmCnt(MachineLoop *ML,
1758                                         WaitcntBrackets &Brackets) {
1759   bool HasVMemLoad = false;
1760   bool HasVMemStore = false;
1761   bool UsesVgprLoadedOutside = false;
1762   DenseSet<Register> VgprUse;
1763   DenseSet<Register> VgprDef;
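       // VgprUse records vector registers read anywhere in the loop; VgprDef
       // records vector registers written by VMEM loads inside the loop. Any
       // overlap means the loop reloads the values it consumes, so neither case
       // 1. nor 2. above applies.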
1764 
1765   for (MachineBasicBlock *MBB : ML->blocks()) {
1766     for (MachineInstr &MI : *MBB) {
1767       if (isVMEMOrFlatVMEM(MI)) {
1768         if (MI.mayLoad())
1769           HasVMemLoad = true;
1770         if (MI.mayStore())
1771           HasVMemStore = true;
1772       }
1773       for (unsigned I = 0; I < MI.getNumOperands(); I++) {
1774         MachineOperand &Op = MI.getOperand(I);
1775         if (!Op.isReg() || !TRI->isVectorRegister(*MRI, Op.getReg()))
1776           continue;
1777         RegInterval Interval = Brackets.getRegInterval(&MI, TII, MRI, TRI, I);
1778         // Vgpr use
1779         if (Op.isUse()) {
1780           for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
1781             // If we find a register that is loaded inside the loop, 1. and 2.
1782             // are invalidated and we can exit.
1783             if (VgprDef.contains(RegNo))
1784               return false;
1785             VgprUse.insert(RegNo);
1786             // If at least one of Op's registers is in the score brackets, the
1787             // value is likely loaded outside of the loop.
1788             if (Brackets.getRegScore(RegNo, VM_CNT) > Brackets.getScoreLB(VM_CNT)) {
1789               UsesVgprLoadedOutside = true;
1790               break;
1791             }
1792           }
1793         }
1794         // VMem load vgpr def
1795         else if (isVMEMOrFlatVMEM(MI) && MI.mayLoad() && Op.isDef())
1796           for (int RegNo = Interval.first; RegNo < Interval.second; ++RegNo) {
1797             // If we find a register that is loaded inside the loop, 1. and 2.
1798             // are invalidated and we can exit.
1799             if (VgprUse.contains(RegNo))
1800               return false;
1801             VgprDef.insert(RegNo);
1802           }
1803       }
1804     }
1805   }
1806   if (!ST->hasVscnt() && HasVMemStore && !HasVMemLoad && UsesVgprLoadedOutside)
1807     return true;
1808   return HasVMemLoad && UsesVgprLoadedOutside;
1809 }
1810 
1811 bool SIInsertWaitcnts::runOnMachineFunction(MachineFunction &MF) {
1812   ST = &MF.getSubtarget<GCNSubtarget>();
1813   TII = ST->getInstrInfo();
1814   TRI = &TII->getRegisterInfo();
1815   MRI = &MF.getRegInfo();
1816   IV = AMDGPU::getIsaVersion(ST->getCPU());
1817   const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
1818   MLI = &getAnalysis<MachineLoopInfo>();
1819   PDT = &getAnalysis<MachinePostDominatorTree>();
1820 
1821   ForceEmitZeroWaitcnts = ForceEmitZeroFlag;
1822   for (auto T : inst_counter_types())
1823     ForceEmitWaitcnt[T] = false;
1824 
1825   HardwareLimits Limits = {};
1826   Limits.VmcntMax = AMDGPU::getVmcntBitMask(IV);
1827   Limits.ExpcntMax = AMDGPU::getExpcntBitMask(IV);
1828   Limits.LgkmcntMax = AMDGPU::getLgkmcntBitMask(IV);
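       // Targets with a separate vscnt counter can wait on values up to 63 (a
       // 6-bit field); targets without one get a maximum of 0.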
1829   Limits.VscntMax = ST->hasVscnt() ? 63 : 0;
1830 
1831   unsigned NumVGPRsMax = ST->getAddressableNumVGPRs();
1832   unsigned NumSGPRsMax = ST->getAddressableNumSGPRs();
1833   assert(NumVGPRsMax <= SQ_MAX_PGM_VGPRS);
1834   assert(NumSGPRsMax <= SQ_MAX_PGM_SGPRS);
1835 
1836   RegisterEncoding Encoding = {};
1837   Encoding.VGPR0 = TRI->getEncodingValue(AMDGPU::VGPR0);
1838   Encoding.VGPRL = Encoding.VGPR0 + NumVGPRsMax - 1;
1839   Encoding.SGPR0 = TRI->getEncodingValue(AMDGPU::SGPR0);
1840   Encoding.SGPRL = Encoding.SGPR0 + NumSGPRsMax - 1;
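       // These encodings delimit the ranges of physical VGPRs and SGPRs whose
       // scores the brackets track.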
1841 
1842   TrackedWaitcntSet.clear();
1843   BlockInfos.clear();
1844   bool Modified = false;
1845 
1846   if (!MFI->isEntryFunction()) {
1847     // Wait for any outstanding memory operations that the input registers may
1848     // depend on. We can't track them and it's better to do the wait after the
1849     // costly call sequence.
1850 
1851     // TODO: Could insert earlier and schedule more liberally with operations
1852     // that only use caller preserved registers.
1853     MachineBasicBlock &EntryBB = MF.front();
1854     MachineBasicBlock::iterator I = EntryBB.begin();
1855     for (MachineBasicBlock::iterator E = EntryBB.end();
1856          I != E && (I->isPHI() || I->isMetaInstruction()); ++I)
1857       ;
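         // An immediate of 0 encodes vmcnt(0) expcnt(0) lgkmcnt(0), i.e. wait
         // for everything outstanding before the incoming registers are used.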
1858     BuildMI(EntryBB, I, DebugLoc(), TII->get(AMDGPU::S_WAITCNT)).addImm(0);
1859 
1860     Modified = true;
1861   }
1862 
1863   // Keep iterating over the blocks in reverse post order, inserting and
1864   // updating s_waitcnt where needed, until a fixed point is reached.
1865   for (auto *MBB : ReversePostOrderTraversal<MachineFunction *>(&MF))
1866     BlockInfos.insert({MBB, BlockInfo()});
1867 
1868   std::unique_ptr<WaitcntBrackets> Brackets;
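       // A single WaitcntBrackets object is reused across blocks; when a block's
       // outgoing state becomes a successor's incoming state it is moved rather
       // than copied (see MoveBracketsToSucc below).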
1869   bool Repeat;
1870   do {
1871     Repeat = false;
1872 
1873     for (auto BII = BlockInfos.begin(), BIE = BlockInfos.end(); BII != BIE;
1874          ++BII) {
1875       MachineBasicBlock *MBB = BII->first;
1876       BlockInfo &BI = BII->second;
1877       if (!BI.Dirty)
1878         continue;
1879 
1880       if (BI.Incoming) {
1881         if (!Brackets)
1882           Brackets = std::make_unique<WaitcntBrackets>(*BI.Incoming);
1883         else
1884           *Brackets = *BI.Incoming;
1885       } else {
1886         if (!Brackets)
1887           Brackets = std::make_unique<WaitcntBrackets>(ST, Limits, Encoding);
1888         else
1889           *Brackets = WaitcntBrackets(ST, Limits, Encoding);
1890       }
1891 
1892       Modified |= insertWaitcntInBlock(MF, *MBB, *Brackets);
1893       BI.Dirty = false;
1894 
1895       if (Brackets->hasPendingEvent()) {
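             // Propagate the outgoing state to each successor. Another round of
             // the outer loop is only needed when a successor that appears
             // earlier in the reverse post order (reached via a back edge)
             // changes.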
1896         BlockInfo *MoveBracketsToSucc = nullptr;
1897         for (MachineBasicBlock *Succ : MBB->successors()) {
1898           auto SuccBII = BlockInfos.find(Succ);
1899           BlockInfo &SuccBI = SuccBII->second;
1900           if (!SuccBI.Incoming) {
1901             SuccBI.Dirty = true;
1902             if (SuccBII <= BII)
1903               Repeat = true;
1904             if (!MoveBracketsToSucc) {
1905               MoveBracketsToSucc = &SuccBI;
1906             } else {
1907               SuccBI.Incoming = std::make_unique<WaitcntBrackets>(*Brackets);
1908             }
1909           } else if (SuccBI.Incoming->merge(*Brackets)) {
1910             SuccBI.Dirty = true;
1911             if (SuccBII <= BII)
1912               Repeat = true;
1913           }
1914         }
1915         if (MoveBracketsToSucc)
1916           MoveBracketsToSucc->Incoming = std::move(Brackets);
1917       }
1918     }
1919   } while (Repeat);
1920 
1921   if (ST->hasScalarStores()) {
1922     SmallVector<MachineBasicBlock *, 4> EndPgmBlocks;
1923     bool HaveScalarStores = false;
1924 
1925     for (MachineBasicBlock &MBB : MF) {
1926       for (MachineInstr &MI : MBB) {
1927         if (!HaveScalarStores && TII->isScalarStore(MI))
1928           HaveScalarStores = true;
1929 
1930         if (MI.getOpcode() == AMDGPU::S_ENDPGM ||
1931             MI.getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG)
1932           EndPgmBlocks.push_back(&MBB);
1933       }
1934     }
1935 
1936     if (HaveScalarStores) {
1937       // If scalar writes are used, the cache must be flushed or else the next
1938       // wave to reuse the same scratch memory can be clobbered.
1939       //
1940       // Insert s_dcache_wb at wave termination points if there were any scalar
1941       // stores, and only if the cache hasn't already been flushed. This could
1942       // be improved by looking across blocks for flushes in postdominating
1943       // blocks from the stores but an explicitly requested flush is probably
1944       // very rare.
1945       for (MachineBasicBlock *MBB : EndPgmBlocks) {
1946         bool SeenDCacheWB = false;
1947 
1948         for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
1949              I != E; ++I) {
1950           if (I->getOpcode() == AMDGPU::S_DCACHE_WB)
1951             SeenDCacheWB = true;
1952           else if (TII->isScalarStore(*I))
1953             SeenDCacheWB = false;
1954 
1955           // FIXME: It would be better to insert this before a waitcnt if any.
1956           if ((I->getOpcode() == AMDGPU::S_ENDPGM ||
1957                I->getOpcode() == AMDGPU::SI_RETURN_TO_EPILOG) &&
1958               !SeenDCacheWB) {
1959             Modified = true;
1960             BuildMI(*MBB, I, I->getDebugLoc(), TII->get(AMDGPU::S_DCACHE_WB));
1961           }
1962         }
1963       }
1964     }
1965   }
1966 
1967   // Insert DEALLOC_VGPRS messages before previously identified S_ENDPGM
1968   // instructions.
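       // Sending DEALLOC_VGPRS lets the hardware release this wave's VGPRs
       // without waiting for the wave to finish completely.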
1969   for (MachineInstr *MI : ReleaseVGPRInsts) {
1970     if (ST->requiresNopBeforeDeallocVGPRs()) {
1971       BuildMI(*MI->getParent(), MI, DebugLoc(), TII->get(AMDGPU::S_NOP))
1972           .addImm(0);
1973     }
1974     BuildMI(*MI->getParent(), MI, DebugLoc(), TII->get(AMDGPU::S_SENDMSG))
1975         .addImm(AMDGPU::SendMsg::ID_DEALLOC_VGPRS_GFX11Plus);
1976     Modified = true;
1977   }
1978   ReleaseVGPRInsts.clear();
1979 
1980   return Modified;
1981 }
1982