1 //===-- SIWholeQuadMode.cpp - enter and suspend whole quad mode -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This pass adds instructions to enable whole quad mode (strict or non-strict)
11 /// for pixel shaders, and strict whole wavefront mode for all programs.
12 ///
13 /// The "strict" prefix indicates that inactive lanes do not take part in
14 /// control flow; specifically, an inactive lane enabled by a strict WQM/WWM
15 /// will always be enabled irrespective of control flow decisions. Conversely,
16 /// in non-strict WQM inactive lanes may be disabled by control flow decisions.
17 ///
18 /// Whole quad mode is required for derivative computations, but it interferes
19 /// with shader side effects (stores and atomics). This pass ensures that WQM
20 /// is enabled when necessary, but disabled around stores and atomics.
21 ///
22 /// When necessary, this pass creates a function prolog
23 ///
24 ///   S_MOV_B64 LiveMask, EXEC
25 ///   S_WQM_B64 EXEC, EXEC
26 ///
27 /// to enter WQM at the top of the function and surrounds blocks of Exact
28 /// instructions by
29 ///
30 ///   S_AND_SAVEEXEC_B64 Tmp, LiveMask
31 ///   ...
32 ///   S_MOV_B64 EXEC, Tmp
33 ///
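/// As an illustrative sketch (not actual pass output; operands elided), a
/// pixel shader that samples an image and then stores the result would end up
/// roughly as:
///
///   S_MOV_B64 LiveMask, EXEC
///   S_WQM_B64 EXEC, EXEC
///   IMAGE_SAMPLE ...              ; needs WQM for correct derivatives
///   S_AND_B64 EXEC, EXEC, LiveMask
///   BUFFER_STORE_DWORD ...        ; runs Exact so helper lanes do not store
///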
34 /// We also compute when a sequence of instructions requires strict whole
35 /// wavefront mode (StrictWWM) and insert instructions to save and restore it:
36 ///
37 ///   S_OR_SAVEEXEC_B64 Tmp, -1
38 ///   ...
39 ///   S_MOV_B64 EXEC, Tmp
40 ///
41 /// When a sequence of instructions requires strict whole quad mode (StrictWQM)
42 /// we use a similar save and restore mechanism and force whole quad mode for
43 /// those instructions:
44 ///
45 ///  S_MOV_B64 Tmp, EXEC
46 ///  S_WQM_B64 EXEC, EXEC
47 ///  ...
48 ///  S_MOV_B64 EXEC, Tmp
49 ///
50 /// In order to avoid excessive switching during sequences of Exact
51 /// instructions, the pass first analyzes which instructions must be run in WQM
52 /// (aka which instructions produce values that lead to derivative
53 /// computations).
54 ///
55 /// Basic blocks are always exited in WQM as long as some successor needs WQM.
56 ///
57 /// There is room for improvement given better control flow analysis:
58 ///
59 ///  (1) at the top level (outside of control flow statements, and as long as
60 ///      kill hasn't been used), one SGPR can be saved by recovering WQM from
61 ///      the LiveMask (this is implemented for the entry block).
62 ///
63 ///  (2) when entire regions (e.g. if-else blocks or entire loops) only
64 ///      consist of exact and don't-care instructions, the switch only has to
65 ///      be done at the entry and exit points rather than potentially in each
66 ///      block of the region.
67 ///
68 //===----------------------------------------------------------------------===//
69 
70 #include "AMDGPU.h"
71 #include "GCNSubtarget.h"
72 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
73 #include "llvm/ADT/MapVector.h"
74 #include "llvm/ADT/PostOrderIterator.h"
75 #include "llvm/CodeGen/LiveIntervals.h"
76 #include "llvm/CodeGen/MachineBasicBlock.h"
77 #include "llvm/CodeGen/MachineDominators.h"
78 #include "llvm/CodeGen/MachineFunctionPass.h"
79 #include "llvm/CodeGen/MachineInstr.h"
80 #include "llvm/CodeGen/MachinePostDominators.h"
81 #include "llvm/IR/CallingConv.h"
82 #include "llvm/InitializePasses.h"
83 #include "llvm/Support/raw_ostream.h"
84 
85 using namespace llvm;
86 
87 #define DEBUG_TYPE "si-wqm"
88 
89 namespace {
90 
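// Execution-mask states tracked by this pass. These values are combined as
// bitmasks in the Needs/Disabled/InNeeds/OutNeeds fields below; StateStrict
// groups the two strict modes.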
91 enum {
92   StateWQM = 0x1,
93   StateStrictWWM = 0x2,
94   StateStrictWQM = 0x4,
95   StateExact = 0x8,
96   StateStrict = StateStrictWWM | StateStrictWQM,
97 };
98 
99 struct PrintState {
100 public:
101   int State;
102 
103   explicit PrintState(int State) : State(State) {}
104 };
105 
106 #ifndef NDEBUG
107 static raw_ostream &operator<<(raw_ostream &OS, const PrintState &PS) {
108 
109   static const std::pair<char, const char *> Mapping[] = {
110       std::make_pair(StateWQM, "WQM"),
111       std::make_pair(StateStrictWWM, "StrictWWM"),
112       std::make_pair(StateStrictWQM, "StrictWQM"),
113       std::make_pair(StateExact, "Exact")};
114   char State = PS.State;
115   for (auto M : Mapping) {
116     if (State & M.first) {
117       OS << M.second;
118       State &= ~M.first;
119 
120       if (State)
121         OS << '|';
122     }
123   }
124   assert(State == 0);
125   return OS;
126 }
127 #endif
128 
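// Per-instruction analysis results: the states the instruction requires
// (Needs), the states it must never execute in (Disabled), and the states
// required after it by later instructions (OutNeeds).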
129 struct InstrInfo {
130   char Needs = 0;
131   char Disabled = 0;
132   char OutNeeds = 0;
133 };
134 
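// Per-block analysis results: the states required somewhere in the block
// (Needs), at its entry (InNeeds) and at its exit (OutNeeds), the state chosen
// for block entry by processBlock (InitialState), and whether the block
// contains kill/demote pseudos that still need lowering (NeedsLowering).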
135 struct BlockInfo {
136   char Needs = 0;
137   char InNeeds = 0;
138   char OutNeeds = 0;
139   char InitialState = 0;
140   bool NeedsLowering = false;
141 };
142 
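// A worklist entry: either a single instruction or an entire basic block whose
// analysis results need to be (re)propagated.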
143 struct WorkItem {
144   MachineBasicBlock *MBB = nullptr;
145   MachineInstr *MI = nullptr;
146 
147   WorkItem() = default;
148   WorkItem(MachineBasicBlock *MBB) : MBB(MBB) {}
149   WorkItem(MachineInstr *MI) : MI(MI) {}
150 };
151 
152 class SIWholeQuadMode : public MachineFunctionPass {
153 private:
154   const SIInstrInfo *TII;
155   const SIRegisterInfo *TRI;
156   const GCNSubtarget *ST;
157   MachineRegisterInfo *MRI;
158   LiveIntervals *LIS;
159   MachineDominatorTree *MDT;
160   MachinePostDominatorTree *PDT;
161 
162   unsigned AndOpc;
163   unsigned AndN2Opc;
164   unsigned XorOpc;
165   unsigned AndSaveExecOpc;
166   unsigned OrSaveExecOpc;
167   unsigned WQMOpc;
168   Register Exec;
169   Register LiveMaskReg;
170 
171   DenseMap<const MachineInstr *, InstrInfo> Instructions;
172   MapVector<MachineBasicBlock *, BlockInfo> Blocks;
173 
174   // Tracks state (WQM/StrictWWM/StrictWQM/Exact) after a given instruction
175   DenseMap<const MachineInstr *, char> StateTransition;
176 
177   SmallVector<MachineInstr *, 2> LiveMaskQueries;
178   SmallVector<MachineInstr *, 4> LowerToMovInstrs;
179   SmallVector<MachineInstr *, 4> LowerToCopyInstrs;
180   SmallVector<MachineInstr *, 4> KillInstrs;
181 
182   void printInfo();
183 
184   void markInstruction(MachineInstr &MI, char Flag,
185                        std::vector<WorkItem> &Worklist);
186   void markDefs(const MachineInstr &UseMI, LiveRange &LR, Register Reg,
187                 unsigned SubReg, char Flag, std::vector<WorkItem> &Worklist);
188   void markOperand(const MachineInstr &MI, const MachineOperand &Op, char Flag,
189                    std::vector<WorkItem> &Worklist);
190   void markInstructionUses(const MachineInstr &MI, char Flag,
191                            std::vector<WorkItem> &Worklist);
192   char scanInstructions(MachineFunction &MF, std::vector<WorkItem> &Worklist);
193   void propagateInstruction(MachineInstr &MI, std::vector<WorkItem> &Worklist);
194   void propagateBlock(MachineBasicBlock &MBB, std::vector<WorkItem> &Worklist);
195   char analyzeFunction(MachineFunction &MF);
196 
197   MachineBasicBlock::iterator saveSCC(MachineBasicBlock &MBB,
198                                       MachineBasicBlock::iterator Before);
199   MachineBasicBlock::iterator
200   prepareInsertion(MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
201                    MachineBasicBlock::iterator Last, bool PreferLast,
202                    bool SaveSCC);
203   void toExact(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
204                Register SaveWQM);
205   void toWQM(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
206              Register SavedWQM);
207   void toStrictMode(MachineBasicBlock &MBB, MachineBasicBlock::iterator Before,
208                     Register SaveOrig, char StrictStateNeeded);
209   void fromStrictMode(MachineBasicBlock &MBB,
210                       MachineBasicBlock::iterator Before, Register SavedOrig,
211                       char NonStrictState, char CurrentStrictState);
212 
213   MachineBasicBlock *splitBlock(MachineBasicBlock *BB, MachineInstr *TermMI);
214 
215   MachineInstr *lowerKillI1(MachineBasicBlock &MBB, MachineInstr &MI,
216                             bool IsWQM);
217   MachineInstr *lowerKillF32(MachineBasicBlock &MBB, MachineInstr &MI);
218 
219   void lowerBlock(MachineBasicBlock &MBB);
220   void processBlock(MachineBasicBlock &MBB, bool IsEntry);
221 
222   void lowerLiveMaskQueries();
223   void lowerCopyInstrs();
224   void lowerKillInstrs(bool IsWQM);
225 
226 public:
227   static char ID;
228 
229   SIWholeQuadMode() :
230     MachineFunctionPass(ID) { }
231 
232   bool runOnMachineFunction(MachineFunction &MF) override;
233 
234   StringRef getPassName() const override { return "SI Whole Quad Mode"; }
235 
236   void getAnalysisUsage(AnalysisUsage &AU) const override {
237     AU.addRequired<LiveIntervals>();
238     AU.addPreserved<SlotIndexes>();
239     AU.addPreserved<LiveIntervals>();
240     AU.addRequired<MachineDominatorTree>();
241     AU.addPreserved<MachineDominatorTree>();
242     AU.addRequired<MachinePostDominatorTree>();
243     AU.addPreserved<MachinePostDominatorTree>();
244     MachineFunctionPass::getAnalysisUsage(AU);
245   }
246 
247   MachineFunctionProperties getClearedProperties() const override {
248     return MachineFunctionProperties().set(
249         MachineFunctionProperties::Property::IsSSA);
250   }
251 };
252 
253 } // end anonymous namespace
254 
255 char SIWholeQuadMode::ID = 0;
256 
257 INITIALIZE_PASS_BEGIN(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
258                       false)
259 INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
260 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
261 INITIALIZE_PASS_DEPENDENCY(MachinePostDominatorTree)
262 INITIALIZE_PASS_END(SIWholeQuadMode, DEBUG_TYPE, "SI Whole Quad Mode", false,
263                     false)
264 
265 char &llvm::SIWholeQuadModeID = SIWholeQuadMode::ID;
266 
267 FunctionPass *llvm::createSIWholeQuadModePass() {
268   return new SIWholeQuadMode;
269 }
270 
271 #ifndef NDEBUG
272 LLVM_DUMP_METHOD void SIWholeQuadMode::printInfo() {
273   for (const auto &BII : Blocks) {
274     dbgs() << "\n"
275            << printMBBReference(*BII.first) << ":\n"
276            << "  InNeeds = " << PrintState(BII.second.InNeeds)
277            << ", Needs = " << PrintState(BII.second.Needs)
278            << ", OutNeeds = " << PrintState(BII.second.OutNeeds) << "\n\n";
279 
280     for (const MachineInstr &MI : *BII.first) {
281       auto III = Instructions.find(&MI);
282       if (III == Instructions.end())
283         continue;
284 
285       dbgs() << "  " << MI << "    Needs = " << PrintState(III->second.Needs)
286              << ", OutNeeds = " << PrintState(III->second.OutNeeds) << '\n';
287     }
288   }
289 }
290 #endif
291 
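/// Add \p Flag to the states required by \p MI, unless those states are
/// disabled for this instruction or already recorded, and queue the
/// instruction for further propagation.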
292 void SIWholeQuadMode::markInstruction(MachineInstr &MI, char Flag,
293                                       std::vector<WorkItem> &Worklist) {
294   InstrInfo &II = Instructions[&MI];
295 
296   assert(!(Flag & StateExact) && Flag != 0);
297 
298   // Remove any disabled states from the flag. The user that required it gets
299   // an undefined value in the helper lanes. For example, this can happen if
300   // the result of an atomic is used by an instruction that requires WQM, where
301   // ignoring the request for WQM is correct as per the relevant specs.
302   Flag &= ~II.Disabled;
303 
304   // Ignore if the flag is already encompassed by the existing needs, or we
305   // just disabled everything.
306   if ((II.Needs & Flag) == Flag)
307     return;
308 
309   LLVM_DEBUG(dbgs() << "markInstruction " << PrintState(Flag) << ": " << MI);
310   II.Needs |= Flag;
311   Worklist.push_back(&MI);
312 }
313 
314 /// Mark all relevant definitions of register \p Reg reaching the use \p UseMI.
315 void SIWholeQuadMode::markDefs(const MachineInstr &UseMI, LiveRange &LR,
316                                Register Reg, unsigned SubReg, char Flag,
317                                std::vector<WorkItem> &Worklist) {
318   LLVM_DEBUG(dbgs() << "markDefs " << PrintState(Flag) << ": " << UseMI);
319 
320   LiveQueryResult UseLRQ = LR.Query(LIS->getInstructionIndex(UseMI));
321   const VNInfo *Value = UseLRQ.valueIn();
322   if (!Value)
323     return;
324 
325   // Note: this code assumes that lane masks on AMDGPU completely
326   // cover registers.
327   const LaneBitmask UseLanes =
328       SubReg ? TRI->getSubRegIndexLaneMask(SubReg)
329              : (Reg.isVirtual() ? MRI->getMaxLaneMaskForVReg(Reg)
330                                 : LaneBitmask::getNone());
331 
332   // Perform a depth-first iteration of the LiveRange graph marking defs.
333   // Stop processing of a given branch when all use lanes have been defined.
334   // The first definition stops processing for a physical register.
335   struct PhiEntry {
336     const VNInfo *Phi;
337     unsigned PredIdx;
338     LaneBitmask DefinedLanes;
339 
340     PhiEntry(const VNInfo *Phi, unsigned PredIdx, LaneBitmask DefinedLanes)
341         : Phi(Phi), PredIdx(PredIdx), DefinedLanes(DefinedLanes) {}
342   };
343   using VisitKey = std::pair<const VNInfo *, LaneBitmask>;
344   SmallVector<PhiEntry, 2> PhiStack;
345   SmallSet<VisitKey, 4> Visited;
346   LaneBitmask DefinedLanes;
347   unsigned NextPredIdx = 0; // Only used for processing phi nodes
348   do {
349     const VNInfo *NextValue = nullptr;
350     const VisitKey Key(Value, DefinedLanes);
351 
352     if (!Visited.count(Key)) {
353       Visited.insert(Key);
354       // On the first visit to a phi, start processing at its first predecessor
355       NextPredIdx = 0;
356     }
357 
358     if (Value->isPHIDef()) {
359       // Each predecessor node in the phi must be processed as a subgraph
360       const MachineBasicBlock *MBB = LIS->getMBBFromIndex(Value->def);
361       assert(MBB && "Phi-def has no defining MBB");
362 
363       // Find next predecessor to process
364       unsigned Idx = NextPredIdx;
365       auto PI = MBB->pred_begin() + Idx;
366       auto PE = MBB->pred_end();
367       for (; PI != PE && !NextValue; ++PI, ++Idx) {
368         if (const VNInfo *VN = LR.getVNInfoBefore(LIS->getMBBEndIdx(*PI))) {
369           if (!Visited.count(VisitKey(VN, DefinedLanes)))
370             NextValue = VN;
371         }
372       }
373 
374       // If there are more predecessors to process, add the phi to the stack
375       if (PI != PE)
376         PhiStack.emplace_back(Value, Idx, DefinedLanes);
377     } else {
378       MachineInstr *MI = LIS->getInstructionFromIndex(Value->def);
379       assert(MI && "Def has no defining instruction");
380 
381       if (Reg.isVirtual()) {
382         // Iterate over all operands to find relevant definitions
383         bool HasDef = false;
384         for (const MachineOperand &Op : MI->operands()) {
385           if (!(Op.isReg() && Op.isDef() && Op.getReg() == Reg))
386             continue;
387 
388           // Compute lanes defined and overlap with use
389           LaneBitmask OpLanes =
390               Op.isUndef() ? LaneBitmask::getAll()
391                            : TRI->getSubRegIndexLaneMask(Op.getSubReg());
392           LaneBitmask Overlap = (UseLanes & OpLanes);
393 
394           // Record whether this instruction defined any lanes of the use
395           HasDef |= Overlap.any();
396 
397           // Mark any lanes defined
398           DefinedLanes |= OpLanes;
399         }
400 
401         // Check if all lanes of use have been defined
402         if ((DefinedLanes & UseLanes) != UseLanes) {
403           // Definition not complete; need to process input value
404           LiveQueryResult LRQ = LR.Query(LIS->getInstructionIndex(*MI));
405           if (const VNInfo *VN = LRQ.valueIn()) {
406             if (!Visited.count(VisitKey(VN, DefinedLanes)))
407               NextValue = VN;
408           }
409         }
410 
411         // Only mark the instruction if it defines some part of the use
412         if (HasDef)
413           markInstruction(*MI, Flag, Worklist);
414       } else {
415         // For physical registers simply mark the defining instruction
416         markInstruction(*MI, Flag, Worklist);
417       }
418     }
419 
420     if (!NextValue && !PhiStack.empty()) {
421       // Reached the end of a chain; revert to processing the last phi
422       PhiEntry &Entry = PhiStack.back();
423       NextValue = Entry.Phi;
424       NextPredIdx = Entry.PredIdx;
425       DefinedLanes = Entry.DefinedLanes;
426       PhiStack.pop_back();
427     }
428 
429     Value = NextValue;
430   } while (Value);
431 }
432 
433 void SIWholeQuadMode::markOperand(const MachineInstr &MI,
434                                   const MachineOperand &Op, char Flag,
435                                   std::vector<WorkItem> &Worklist) {
436   assert(Op.isReg());
437   Register Reg = Op.getReg();
438 
439   // Ignore some hardware registers
440   switch (Reg) {
441   case AMDGPU::EXEC:
442   case AMDGPU::EXEC_LO:
443     return;
444   default:
445     break;
446   }
447 
448   LLVM_DEBUG(dbgs() << "markOperand " << PrintState(Flag) << ": " << Op
449                     << " for " << MI);
450   if (Reg.isVirtual()) {
451     LiveRange &LR = LIS->getInterval(Reg);
452     markDefs(MI, LR, Reg, Op.getSubReg(), Flag, Worklist);
453   } else {
454     // Handle physical registers that we need to track; this is mostly relevant
455     // for VCC, which can appear as the (implicit) input of a uniform branch,
456     // e.g. when a loop counter is stored in a VGPR.
457     for (MCRegUnitIterator RegUnit(Reg.asMCReg(), TRI); RegUnit.isValid();
458          ++RegUnit) {
459       LiveRange &LR = LIS->getRegUnit(*RegUnit);
460       const VNInfo *Value = LR.Query(LIS->getInstructionIndex(MI)).valueIn();
461       if (!Value)
462         continue;
463 
464       markDefs(MI, LR, *RegUnit, AMDGPU::NoSubRegister, Flag, Worklist);
465     }
466   }
467 }
468 
469 /// Mark all instructions defining the uses in \p MI with \p Flag.
470 void SIWholeQuadMode::markInstructionUses(const MachineInstr &MI, char Flag,
471                                           std::vector<WorkItem> &Worklist) {
472   LLVM_DEBUG(dbgs() << "markInstructionUses " << PrintState(Flag) << ": "
473                     << MI);
474 
475   for (const MachineOperand &Use : MI.uses()) {
476     if (!Use.isReg() || !Use.isUse())
477       continue;
478     markOperand(MI, Use, Flag, Worklist);
479   }
480 }
481 
482 // Scan instructions to determine which ones require an Exact execmask and
483 // which ones seed WQM requirements.
484 char SIWholeQuadMode::scanInstructions(MachineFunction &MF,
485                                        std::vector<WorkItem> &Worklist) {
486   char GlobalFlags = 0;
487   bool WQMOutputs = MF.getFunction().hasFnAttribute("amdgpu-ps-wqm-outputs");
488   SmallVector<MachineInstr *, 4> SetInactiveInstrs;
489   SmallVector<MachineInstr *, 4> SoftWQMInstrs;
490   bool HasImplicitDerivatives =
491       MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS;
492 
493   // We need to visit the basic blocks in reverse post-order so that we visit
494   // defs before uses, in particular so that we don't accidentally mark an
495   // instruction as needing e.g. WQM before visiting it and realizing it needs
496   // WQM disabled.
497   ReversePostOrderTraversal<MachineFunction *> RPOT(&MF);
498   for (MachineBasicBlock *MBB : RPOT) {
499     BlockInfo &BBI = Blocks[MBB];
500 
501     for (MachineInstr &MI : *MBB) {
502       InstrInfo &III = Instructions[&MI];
503       unsigned Opcode = MI.getOpcode();
504       char Flags = 0;
505 
506       if (TII->isWQM(Opcode)) {
507         // If LOD is not supported, WQM is not needed.
508         if (!ST->hasExtendedImageInsts())
509           continue;
510         // Only generate implicit WQM if implicit derivatives are required.
511         // This avoids inserting unintended WQM if a shader type without
512         // implicit derivatives uses an image sampling instruction.
513         if (!HasImplicitDerivatives)
514           continue;
515         // Sampling instructions don't need to produce results for all pixels
516         // in a quad; they just require all inputs of a quad to have been
517         // computed for derivatives.
518         markInstructionUses(MI, StateWQM, Worklist);
519         GlobalFlags |= StateWQM;
520         continue;
521       } else if (Opcode == AMDGPU::WQM) {
522         // The WQM intrinsic requires its output to have all the helper lanes
523         // correct, so we need it to be in WQM.
524         Flags = StateWQM;
525         LowerToCopyInstrs.push_back(&MI);
526       } else if (Opcode == AMDGPU::SOFT_WQM) {
527         LowerToCopyInstrs.push_back(&MI);
528         SoftWQMInstrs.push_back(&MI);
529         continue;
530       } else if (Opcode == AMDGPU::STRICT_WWM) {
531         // The STRICT_WWM intrinsic doesn't make the same guarantee, and in
532         // addition it needs to be executed in WQM or Exact so that its copy
533         // doesn't clobber inactive lanes.
534         markInstructionUses(MI, StateStrictWWM, Worklist);
535         GlobalFlags |= StateStrictWWM;
536         LowerToMovInstrs.push_back(&MI);
537         continue;
538       } else if (Opcode == AMDGPU::STRICT_WQM) {
539         // STRICT_WQM is similar to STRICT_WWM, but instead of enabling all
540         // threads of the wave like STRICT_WWM, STRICT_WQM enables all threads in
541         // quads that have at least one active thread.
542         markInstructionUses(MI, StateStrictWQM, Worklist);
543         GlobalFlags |= StateStrictWQM;
544         LowerToMovInstrs.push_back(&MI);
545         continue;
546       } else if (Opcode == AMDGPU::V_SET_INACTIVE_B32 ||
547                  Opcode == AMDGPU::V_SET_INACTIVE_B64) {
548         III.Disabled = StateStrict;
549         MachineOperand &Inactive = MI.getOperand(2);
550         if (Inactive.isReg()) {
551           if (Inactive.isUndef()) {
552             LowerToCopyInstrs.push_back(&MI);
553           } else {
554             markOperand(MI, Inactive, StateStrictWWM, Worklist);
555           }
556         }
557         SetInactiveInstrs.push_back(&MI);
558         continue;
559       } else if (TII->isDisableWQM(MI)) {
560         BBI.Needs |= StateExact;
561         if (!(BBI.InNeeds & StateExact)) {
562           BBI.InNeeds |= StateExact;
563           Worklist.push_back(MBB);
564         }
565         GlobalFlags |= StateExact;
566         III.Disabled = StateWQM | StateStrict;
567         continue;
568       } else {
569         if (Opcode == AMDGPU::SI_PS_LIVE || Opcode == AMDGPU::SI_LIVE_MASK) {
570           LiveMaskQueries.push_back(&MI);
571         } else if (Opcode == AMDGPU::SI_KILL_I1_TERMINATOR ||
572                    Opcode == AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR ||
573                    Opcode == AMDGPU::SI_DEMOTE_I1) {
574           KillInstrs.push_back(&MI);
575           BBI.NeedsLowering = true;
576         } else if (WQMOutputs) {
577           // The function is in machine SSA form, which means that physical
578           // VGPRs correspond to shader inputs and outputs. Inputs are
579           // only used, outputs are only defined.
580           // FIXME: is this still valid?
581           for (const MachineOperand &MO : MI.defs()) {
582             if (!MO.isReg())
583               continue;
584 
585             Register Reg = MO.getReg();
586 
587             if (!Reg.isVirtual() &&
588                 TRI->hasVectorRegisters(TRI->getPhysRegClass(Reg))) {
589               Flags = StateWQM;
590               break;
591             }
592           }
593         }
594 
595         if (!Flags)
596           continue;
597       }
598 
599       markInstruction(MI, Flags, Worklist);
600       GlobalFlags |= Flags;
601     }
602   }
603 
604   // Make sure that any SET_INACTIVE instructions are computed in WQM if WQM is
605   // ever used anywhere in the function. This implements the corresponding
606   // semantics of @llvm.amdgcn.set.inactive.
607   // Similarly for SOFT_WQM instructions, implementing @llvm.amdgcn.softwqm.
608   if (GlobalFlags & StateWQM) {
609     for (MachineInstr *MI : SetInactiveInstrs)
610       markInstruction(*MI, StateWQM, Worklist);
611     for (MachineInstr *MI : SoftWQMInstrs)
612       markInstruction(*MI, StateWQM, Worklist);
613   }
614 
615   return GlobalFlags;
616 }
617 
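// Propagate the state requirements of \p MI backwards to the previous
// instruction, up to its parent block, and to the instructions defining its
// uses.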
618 void SIWholeQuadMode::propagateInstruction(MachineInstr &MI,
619                                            std::vector<WorkItem>& Worklist) {
620   MachineBasicBlock *MBB = MI.getParent();
621   InstrInfo II = Instructions[&MI]; // take a copy to prevent dangling references
622   BlockInfo &BI = Blocks[MBB];
623 
624   // Control flow-type instructions and stores to temporary memory that are
625   // followed by WQM computations must themselves be in WQM.
626   if ((II.OutNeeds & StateWQM) && !(II.Disabled & StateWQM) &&
627       (MI.isTerminator() || (TII->usesVM_CNT(MI) && MI.mayStore()))) {
628     Instructions[&MI].Needs = StateWQM;
629     II.Needs = StateWQM;
630   }
631 
632   // Propagate to block level
633   if (II.Needs & StateWQM) {
634     BI.Needs |= StateWQM;
635     if (!(BI.InNeeds & StateWQM)) {
636       BI.InNeeds |= StateWQM;
637       Worklist.push_back(MBB);
638     }
639   }
640 
641   // Propagate backwards within block
642   if (MachineInstr *PrevMI = MI.getPrevNode()) {
643     char InNeeds = (II.Needs & ~StateStrict) | II.OutNeeds;
644     if (!PrevMI->isPHI()) {
645       InstrInfo &PrevII = Instructions[PrevMI];
646       if ((PrevII.OutNeeds | InNeeds) != PrevII.OutNeeds) {
647         PrevII.OutNeeds |= InNeeds;
648         Worklist.push_back(PrevMI);
649       }
650     }
651   }
652 
653   // Propagate WQM flag to instruction inputs
654   assert(!(II.Needs & StateExact));
655 
656   if (II.Needs != 0)
657     markInstructionUses(MI, II.Needs, Worklist);
658 
659   // Ensure we process a block containing StrictWWM/StrictWQM, even if it does
660   // not require any WQM transitions.
661   if (II.Needs & StateStrictWWM)
662     BI.Needs |= StateStrictWWM;
663   if (II.Needs & StateStrictWQM)
664     BI.Needs |= StateStrictWQM;
665 }
666 
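// Propagate a block's needs to its last instruction, its predecessors (which
// must provide the block's InNeeds) and its successors (which must accept its
// OutNeeds).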
667 void SIWholeQuadMode::propagateBlock(MachineBasicBlock &MBB,
668                                      std::vector<WorkItem>& Worklist) {
669   BlockInfo BI = Blocks[&MBB]; // Make a copy to prevent dangling references.
670 
671   // Propagate through instructions
672   if (!MBB.empty()) {
673     MachineInstr *LastMI = &*MBB.rbegin();
674     InstrInfo &LastII = Instructions[LastMI];
675     if ((LastII.OutNeeds | BI.OutNeeds) != LastII.OutNeeds) {
676       LastII.OutNeeds |= BI.OutNeeds;
677       Worklist.push_back(LastMI);
678     }
679   }
680 
681   // Predecessor blocks must provide for our WQM/Exact needs.
682   for (MachineBasicBlock *Pred : MBB.predecessors()) {
683     BlockInfo &PredBI = Blocks[Pred];
684     if ((PredBI.OutNeeds | BI.InNeeds) == PredBI.OutNeeds)
685       continue;
686 
687     PredBI.OutNeeds |= BI.InNeeds;
688     PredBI.InNeeds |= BI.InNeeds;
689     Worklist.push_back(Pred);
690   }
691 
692   // All successors must be prepared to accept the same set of WQM/Exact data.
693   for (MachineBasicBlock *Succ : MBB.successors()) {
694     BlockInfo &SuccBI = Blocks[Succ];
695     if ((SuccBI.InNeeds | BI.OutNeeds) == SuccBI.InNeeds)
696       continue;
697 
698     SuccBI.InNeeds |= BI.OutNeeds;
699     Worklist.push_back(Succ);
700   }
701 }
702 
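// Scan the function and then iterate the worklist until the per-instruction
// and per-block requirements reach a fixed point. Returns the combined flags
// of the states seeded anywhere in the function.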
703 char SIWholeQuadMode::analyzeFunction(MachineFunction &MF) {
704   std::vector<WorkItem> Worklist;
705   char GlobalFlags = scanInstructions(MF, Worklist);
706 
707   while (!Worklist.empty()) {
708     WorkItem WI = Worklist.back();
709     Worklist.pop_back();
710 
711     if (WI.MI)
712       propagateInstruction(*WI.MI, Worklist);
713     else
714       propagateBlock(*WI.MBB, Worklist);
715   }
716 
717   return GlobalFlags;
718 }
719 
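// Save SCC to a temporary SGPR and immediately restore it, both before
// \p Before; returns an iterator at the restoring COPY so that SCC-clobbering
// instructions can be inserted between the save and the restore.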
720 MachineBasicBlock::iterator
721 SIWholeQuadMode::saveSCC(MachineBasicBlock &MBB,
722                          MachineBasicBlock::iterator Before) {
723   Register SaveReg = MRI->createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
724 
725   MachineInstr *Save =
726       BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), SaveReg)
727           .addReg(AMDGPU::SCC);
728   MachineInstr *Restore =
729       BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), AMDGPU::SCC)
730           .addReg(SaveReg);
731 
732   LIS->InsertMachineInstrInMaps(*Save);
733   LIS->InsertMachineInstrInMaps(*Restore);
734   LIS->createAndComputeVirtRegInterval(SaveReg);
735 
736   return Restore;
737 }
738 
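// Split \p BB after \p TermMI, rewrite \p TermMI to its *_term variant where
// one exists, and keep the (post)dominator trees and live intervals up to
// date.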
739 MachineBasicBlock *SIWholeQuadMode::splitBlock(MachineBasicBlock *BB,
740                                                MachineInstr *TermMI) {
741   LLVM_DEBUG(dbgs() << "Split block " << printMBBReference(*BB) << " @ "
742                     << *TermMI << "\n");
743 
744   MachineBasicBlock *SplitBB =
745       BB->splitAt(*TermMI, /*UpdateLiveIns*/ true, LIS);
746 
747   // Convert last instruction in block to a terminator.
748   // Note: this only covers the expected patterns
749   unsigned NewOpcode = 0;
750   switch (TermMI->getOpcode()) {
751   case AMDGPU::S_AND_B32:
752     NewOpcode = AMDGPU::S_AND_B32_term;
753     break;
754   case AMDGPU::S_AND_B64:
755     NewOpcode = AMDGPU::S_AND_B64_term;
756     break;
757   case AMDGPU::S_MOV_B32:
758     NewOpcode = AMDGPU::S_MOV_B32_term;
759     break;
760   case AMDGPU::S_MOV_B64:
761     NewOpcode = AMDGPU::S_MOV_B64_term;
762     break;
763   default:
764     break;
765   }
766   if (NewOpcode)
767     TermMI->setDesc(TII->get(NewOpcode));
768 
769   if (SplitBB != BB) {
770     // Update dominator trees
771     using DomTreeT = DomTreeBase<MachineBasicBlock>;
772     SmallVector<DomTreeT::UpdateType, 16> DTUpdates;
773     for (MachineBasicBlock *Succ : SplitBB->successors()) {
774       DTUpdates.push_back({DomTreeT::Insert, SplitBB, Succ});
775       DTUpdates.push_back({DomTreeT::Delete, BB, Succ});
776     }
777     DTUpdates.push_back({DomTreeT::Insert, BB, SplitBB});
778     if (MDT)
779       MDT->getBase().applyUpdates(DTUpdates);
780     if (PDT)
781       PDT->getBase().applyUpdates(DTUpdates);
782 
783     // Link blocks
784     MachineInstr *MI =
785         BuildMI(*BB, BB->end(), DebugLoc(), TII->get(AMDGPU::S_BRANCH))
786             .addMBB(SplitBB);
787     LIS->InsertMachineInstrInMaps(*MI);
788   }
789 
790   return SplitBB;
791 }
792 
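// Lower SI_KILL_F32_COND_IMM_TERMINATOR: compute the killed lanes into VCC
// using an inverted V_CMP, clear them from the live mask and from EXEC, emit
// an early-terminate check, and end the block with a branch to its successor.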
793 MachineInstr *SIWholeQuadMode::lowerKillF32(MachineBasicBlock &MBB,
794                                             MachineInstr &MI) {
795   const DebugLoc &DL = MI.getDebugLoc();
796   unsigned Opcode = 0;
797 
798   assert(MI.getOperand(0).isReg());
799 
800   // Comparison is for live lanes; however here we compute the inverse
801   // (killed lanes).  This is because VCMP will always generate 0 bits
802   // for inactive lanes so a mask of live lanes would not be correct
803   // inside control flow.
804   // Invert the comparison by swapping the operands and adjusting
805   // the comparison codes.
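  // For example (illustrative): a condition of SETOLT(Op0, Op1), i.e. lanes
  // live when Op0 < Op1, becomes V_CMP_NGT_F32(Op1, Op0), which is set exactly
  // for the lanes where Op0 < Op1 does not hold (the killed lanes).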
806 
807   switch (MI.getOperand(2).getImm()) {
808   case ISD::SETUEQ:
809     Opcode = AMDGPU::V_CMP_LG_F32_e64;
810     break;
811   case ISD::SETUGT:
812     Opcode = AMDGPU::V_CMP_GE_F32_e64;
813     break;
814   case ISD::SETUGE:
815     Opcode = AMDGPU::V_CMP_GT_F32_e64;
816     break;
817   case ISD::SETULT:
818     Opcode = AMDGPU::V_CMP_LE_F32_e64;
819     break;
820   case ISD::SETULE:
821     Opcode = AMDGPU::V_CMP_LT_F32_e64;
822     break;
823   case ISD::SETUNE:
824     Opcode = AMDGPU::V_CMP_EQ_F32_e64;
825     break;
826   case ISD::SETO:
827     Opcode = AMDGPU::V_CMP_O_F32_e64;
828     break;
829   case ISD::SETUO:
830     Opcode = AMDGPU::V_CMP_U_F32_e64;
831     break;
832   case ISD::SETOEQ:
833   case ISD::SETEQ:
834     Opcode = AMDGPU::V_CMP_NEQ_F32_e64;
835     break;
836   case ISD::SETOGT:
837   case ISD::SETGT:
838     Opcode = AMDGPU::V_CMP_NLT_F32_e64;
839     break;
840   case ISD::SETOGE:
841   case ISD::SETGE:
842     Opcode = AMDGPU::V_CMP_NLE_F32_e64;
843     break;
844   case ISD::SETOLT:
845   case ISD::SETLT:
846     Opcode = AMDGPU::V_CMP_NGT_F32_e64;
847     break;
848   case ISD::SETOLE:
849   case ISD::SETLE:
850     Opcode = AMDGPU::V_CMP_NGE_F32_e64;
851     break;
852   case ISD::SETONE:
853   case ISD::SETNE:
854     Opcode = AMDGPU::V_CMP_NLG_F32_e64;
855     break;
856   default:
857     llvm_unreachable("invalid ISD:SET cond code");
858   }
859 
860   // Pick opcode based on comparison type.
861   MachineInstr *VcmpMI;
862   const MachineOperand &Op0 = MI.getOperand(0);
863   const MachineOperand &Op1 = MI.getOperand(1);
864   if (TRI->isVGPR(*MRI, Op0.getReg())) {
865     Opcode = AMDGPU::getVOPe32(Opcode);
866     VcmpMI = BuildMI(MBB, &MI, DL, TII->get(Opcode)).add(Op1).add(Op0);
867   } else {
868     VcmpMI = BuildMI(MBB, &MI, DL, TII->get(Opcode))
869                  .addReg(AMDGPU::VCC, RegState::Define)
870                  .addImm(0) // src0 modifiers
871                  .add(Op1)
872                  .addImm(0) // src1 modifiers
873                  .add(Op0)
874                  .addImm(0); // omod
875   }
876 
877   // VCC represents lanes killed.
878   Register VCC = ST->isWave32() ? AMDGPU::VCC_LO : AMDGPU::VCC;
879 
880   MachineInstr *MaskUpdateMI =
881       BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
882           .addReg(LiveMaskReg)
883           .addReg(VCC);
884 
885   // The state of SCC represents whether any lanes are live in the mask;
886   // if SCC is 0 then no lanes will be alive anymore.
887   MachineInstr *EarlyTermMI =
888       BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_EARLY_TERMINATE_SCC0));
889 
890   MachineInstr *ExecMaskMI =
891       BuildMI(MBB, MI, DL, TII->get(AndN2Opc), Exec).addReg(Exec).addReg(VCC);
892 
893   assert(MBB.succ_size() == 1);
894   MachineInstr *NewTerm = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_BRANCH))
895                               .addMBB(*MBB.succ_begin());
896 
897   // Update live intervals
898   LIS->ReplaceMachineInstrInMaps(MI, *VcmpMI);
899   MBB.remove(&MI);
900 
901   LIS->InsertMachineInstrInMaps(*MaskUpdateMI);
902   LIS->InsertMachineInstrInMaps(*ExecMaskMI);
903   LIS->InsertMachineInstrInMaps(*EarlyTermMI);
904   LIS->InsertMachineInstrInMaps(*NewTerm);
905 
906   return NewTerm;
907 }
908 
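// Lower SI_KILL_I1_TERMINATOR and SI_DEMOTE_I1: update the live mask from the
// (possibly constant) kill condition, emit an early-terminate check, and
// update EXEC to deactivate lanes (for demotes, to the WQM of the live mask).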
909 MachineInstr *SIWholeQuadMode::lowerKillI1(MachineBasicBlock &MBB,
910                                            MachineInstr &MI, bool IsWQM) {
911   const DebugLoc &DL = MI.getDebugLoc();
912   MachineInstr *MaskUpdateMI = nullptr;
913 
914   const bool IsDemote = IsWQM && (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1);
915   const MachineOperand &Op = MI.getOperand(0);
916   int64_t KillVal = MI.getOperand(1).getImm();
917   MachineInstr *ComputeKilledMaskMI = nullptr;
918   Register CndReg = !Op.isImm() ? Op.getReg() : Register();
919   Register TmpReg;
920 
921   // Is this a static or dynamic kill?
922   if (Op.isImm()) {
923     if (Op.getImm() == KillVal) {
924       // Static: all active lanes are killed
925       MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
926                          .addReg(LiveMaskReg)
927                          .addReg(Exec);
928     } else {
929       // Static: kill does nothing
930       MachineInstr *NewTerm = nullptr;
931       if (MI.getOpcode() == AMDGPU::SI_DEMOTE_I1) {
932         LIS->RemoveMachineInstrFromMaps(MI);
933       } else {
934         assert(MBB.succ_size() == 1);
935         NewTerm = BuildMI(MBB, MI, DL, TII->get(AMDGPU::S_BRANCH))
936                       .addMBB(*MBB.succ_begin());
937         LIS->ReplaceMachineInstrInMaps(MI, *NewTerm);
938       }
939       MBB.remove(&MI);
940       return NewTerm;
941     }
942   } else {
943     if (!KillVal) {
944       // Op represents live lanes after kill,
945       // so exec mask needs to be factored in.
946       TmpReg = MRI->createVirtualRegister(TRI->getBoolRC());
947       ComputeKilledMaskMI =
948           BuildMI(MBB, MI, DL, TII->get(XorOpc), TmpReg).add(Op).addReg(Exec);
949       MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
950                          .addReg(LiveMaskReg)
951                          .addReg(TmpReg);
952     } else {
953       // Op represents lanes to kill
954       MaskUpdateMI = BuildMI(MBB, MI, DL, TII->get(AndN2Opc), LiveMaskReg)
955                          .addReg(LiveMaskReg)
956                          .add(Op);
957     }
958   }
959 
960   // The state of SCC represents whether any lanes are live in the mask;
961   // if SCC is 0 then no lanes will be alive anymore.
962   MachineInstr *EarlyTermMI =
963       BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_EARLY_TERMINATE_SCC0));
964 
965   // If we got this far, some lanes are still live;
966   // update EXEC to deactivate lanes as appropriate.
967   MachineInstr *NewTerm;
968   MachineInstr *WQMMaskMI = nullptr;
969   Register LiveMaskWQM;
970   if (IsDemote) {
971     // Demotes deactivate quads with only helper lanes
972     LiveMaskWQM = MRI->createVirtualRegister(TRI->getBoolRC());
973     WQMMaskMI =
974         BuildMI(MBB, MI, DL, TII->get(WQMOpc), LiveMaskWQM).addReg(LiveMaskReg);
975     NewTerm = BuildMI(MBB, MI, DL, TII->get(AndOpc), Exec)
976                   .addReg(Exec)
977                   .addReg(LiveMaskWQM);
978   } else {
979     // Kills deactivate lanes
980     if (Op.isImm()) {
981       unsigned MovOpc = ST->isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
982       NewTerm = BuildMI(MBB, &MI, DL, TII->get(MovOpc), Exec).addImm(0);
983     } else if (!IsWQM) {
984       NewTerm = BuildMI(MBB, &MI, DL, TII->get(AndOpc), Exec)
985                     .addReg(Exec)
986                     .addReg(LiveMaskReg);
987     } else {
988       unsigned Opcode = KillVal ? AndN2Opc : AndOpc;
989       NewTerm =
990           BuildMI(MBB, &MI, DL, TII->get(Opcode), Exec).addReg(Exec).add(Op);
991     }
992   }
993 
994   // Update live intervals
995   LIS->RemoveMachineInstrFromMaps(MI);
996   MBB.remove(&MI);
997   assert(EarlyTermMI);
998   assert(MaskUpdateMI);
999   assert(NewTerm);
1000   if (ComputeKilledMaskMI)
1001     LIS->InsertMachineInstrInMaps(*ComputeKilledMaskMI);
1002   LIS->InsertMachineInstrInMaps(*MaskUpdateMI);
1003   LIS->InsertMachineInstrInMaps(*EarlyTermMI);
1004   if (WQMMaskMI)
1005     LIS->InsertMachineInstrInMaps(*WQMMaskMI);
1006   LIS->InsertMachineInstrInMaps(*NewTerm);
1007 
1008   if (CndReg) {
1009     LIS->removeInterval(CndReg);
1010     LIS->createAndComputeVirtRegInterval(CndReg);
1011   }
1012   if (TmpReg)
1013     LIS->createAndComputeVirtRegInterval(TmpReg);
1014   if (LiveMaskWQM)
1015     LIS->createAndComputeVirtRegInterval(LiveMaskWQM);
1016 
1017   return NewTerm;
1018 }
1019 
1020 // Replace (or supplement) instructions accessing the live mask.
1021 // This can only happen once all the live mask registers have been created
1022 // and the execution state (WQM/StrictWWM/Exact) of instructions is known.
1023 void SIWholeQuadMode::lowerBlock(MachineBasicBlock &MBB) {
1024   auto BII = Blocks.find(&MBB);
1025   if (BII == Blocks.end())
1026     return;
1027 
1028   const BlockInfo &BI = BII->second;
1029   if (!BI.NeedsLowering)
1030     return;
1031 
1032   LLVM_DEBUG(dbgs() << "\nLowering block " << printMBBReference(MBB) << ":\n");
1033 
1034   SmallVector<MachineInstr *, 4> SplitPoints;
1035   char State = BI.InitialState;
1036 
1037   for (MachineInstr &MI : llvm::make_early_inc_range(
1038            llvm::make_range(MBB.getFirstNonPHI(), MBB.end()))) {
1039     if (StateTransition.count(&MI))
1040       State = StateTransition[&MI];
1041 
1042     MachineInstr *SplitPoint = nullptr;
1043     switch (MI.getOpcode()) {
1044     case AMDGPU::SI_DEMOTE_I1:
1045     case AMDGPU::SI_KILL_I1_TERMINATOR:
1046       SplitPoint = lowerKillI1(MBB, MI, State == StateWQM);
1047       break;
1048     case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
1049       SplitPoint = lowerKillF32(MBB, MI);
1050       break;
1051     default:
1052       break;
1053     }
1054     if (SplitPoint)
1055       SplitPoints.push_back(SplitPoint);
1056   }
1057 
1058   // Perform splitting after instruction scan to simplify iteration.
1059   if (!SplitPoints.empty()) {
1060     MachineBasicBlock *BB = &MBB;
1061     for (MachineInstr *MI : SplitPoints) {
1062       BB = splitBlock(BB, MI);
1063     }
1064   }
1065 }
1066 
1067 // Return an iterator in the (inclusive) range [First, Last] at which
1068 // instructions can be safely inserted, keeping in mind that some of the
1069 // instructions we want to add necessarily clobber SCC.
1070 MachineBasicBlock::iterator SIWholeQuadMode::prepareInsertion(
1071     MachineBasicBlock &MBB, MachineBasicBlock::iterator First,
1072     MachineBasicBlock::iterator Last, bool PreferLast, bool SaveSCC) {
1073   if (!SaveSCC)
1074     return PreferLast ? Last : First;
1075 
1076   LiveRange &LR =
1077       LIS->getRegUnit(*MCRegUnitIterator(MCRegister::from(AMDGPU::SCC), TRI));
1078   auto MBBE = MBB.end();
1079   SlotIndex FirstIdx = First != MBBE ? LIS->getInstructionIndex(*First)
1080                                      : LIS->getMBBEndIdx(&MBB);
1081   SlotIndex LastIdx =
1082       Last != MBBE ? LIS->getInstructionIndex(*Last) : LIS->getMBBEndIdx(&MBB);
1083   SlotIndex Idx = PreferLast ? LastIdx : FirstIdx;
1084   const LiveRange::Segment *S;
1085 
1086   for (;;) {
1087     S = LR.getSegmentContaining(Idx);
1088     if (!S)
1089       break;
1090 
1091     if (PreferLast) {
1092       SlotIndex Next = S->start.getBaseIndex();
1093       if (Next < FirstIdx)
1094         break;
1095       Idx = Next;
1096     } else {
1097       MachineInstr *EndMI = LIS->getInstructionFromIndex(S->end.getBaseIndex());
1098       assert(EndMI && "Segment does not end on valid instruction");
1099       auto NextI = std::next(EndMI->getIterator());
1100       if (NextI == MBB.end())
1101         break;
1102       SlotIndex Next = LIS->getInstructionIndex(*NextI);
1103       if (Next > LastIdx)
1104         break;
1105       Idx = Next;
1106     }
1107   }
1108 
1109   MachineBasicBlock::iterator MBBI;
1110 
1111   if (MachineInstr *MI = LIS->getInstructionFromIndex(Idx))
1112     MBBI = MI;
1113   else {
1114     assert(Idx == LIS->getMBBEndIdx(&MBB));
1115     MBBI = MBB.end();
1116   }
1117 
1118   // Move insertion point past any operations modifying EXEC.
1119   // This assumes that the value of SCC defined by any of these operations
1120   // does not need to be preserved.
1121   while (MBBI != Last) {
1122     bool IsExecDef = false;
1123     for (const MachineOperand &MO : MBBI->operands()) {
1124       if (MO.isReg() && MO.isDef()) {
1125         IsExecDef |=
1126             MO.getReg() == AMDGPU::EXEC_LO || MO.getReg() == AMDGPU::EXEC;
1127       }
1128     }
1129     if (!IsExecDef)
1130       break;
1131     MBBI++;
1132     S = nullptr;
1133   }
1134 
1135   if (S)
1136     MBBI = saveSCC(MBB, MBBI);
1137 
1138   return MBBI;
1139 }
1140 
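// Switch to Exact mode by ANDing EXEC with the live mask; if \p SaveWQM is
// set, the previous (WQM) EXEC is saved into it via S_AND_SAVEEXEC.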
1141 void SIWholeQuadMode::toExact(MachineBasicBlock &MBB,
1142                               MachineBasicBlock::iterator Before,
1143                               Register SaveWQM) {
1144   MachineInstr *MI;
1145 
1146   if (SaveWQM) {
1147     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AndSaveExecOpc), SaveWQM)
1148              .addReg(LiveMaskReg);
1149   } else {
1150     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AndOpc), Exec)
1151              .addReg(Exec)
1152              .addReg(LiveMaskReg);
1153   }
1154 
1155   LIS->InsertMachineInstrInMaps(*MI);
1156   StateTransition[MI] = StateExact;
1157 }
1158 
1159 void SIWholeQuadMode::toWQM(MachineBasicBlock &MBB,
1160                             MachineBasicBlock::iterator Before,
1161                             Register SavedWQM) {
1162   MachineInstr *MI;
1163 
1164   if (SavedWQM) {
1165     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::COPY), Exec)
1166              .addReg(SavedWQM);
1167   } else {
1168     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(WQMOpc), Exec).addReg(Exec);
1169   }
1170 
1171   LIS->InsertMachineInstrInMaps(*MI);
1172   StateTransition[MI] = StateWQM;
1173 }
1174 
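// Enter strict WWM or strict WQM through the ENTER_STRICT_* pseudos, saving
// the original EXEC into \p SaveOrig.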
1175 void SIWholeQuadMode::toStrictMode(MachineBasicBlock &MBB,
1176                                    MachineBasicBlock::iterator Before,
1177                                    Register SaveOrig, char StrictStateNeeded) {
1178   MachineInstr *MI;
1179   assert(SaveOrig);
1180   assert(StrictStateNeeded == StateStrictWWM ||
1181          StrictStateNeeded == StateStrictWQM);
1182 
1183   if (StrictStateNeeded == StateStrictWWM) {
1184     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_STRICT_WWM),
1185                  SaveOrig)
1186              .addImm(-1);
1187   } else {
1188     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::ENTER_STRICT_WQM),
1189                  SaveOrig)
1190              .addImm(-1);
1191   }
1192   LIS->InsertMachineInstrInMaps(*MI);
1193   StateTransition[MI] = StrictStateNeeded;
1194 }
1195 
1196 void SIWholeQuadMode::fromStrictMode(MachineBasicBlock &MBB,
1197                                      MachineBasicBlock::iterator Before,
1198                                      Register SavedOrig, char NonStrictState,
1199                                      char CurrentStrictState) {
1200   MachineInstr *MI;
1201 
1202   assert(SavedOrig);
1203   assert(CurrentStrictState == StateStrictWWM ||
1204          CurrentStrictState == StateStrictWQM);
1205 
1206   if (CurrentStrictState == StateStrictWWM) {
1207     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_STRICT_WWM),
1208                  Exec)
1209              .addReg(SavedOrig);
1210   } else {
1211     MI = BuildMI(MBB, Before, DebugLoc(), TII->get(AMDGPU::EXIT_STRICT_WQM),
1212                  Exec)
1213              .addReg(SavedOrig);
1214   }
1215   LIS->InsertMachineInstrInMaps(*MI);
1216   StateTransition[MI] = NonStrictState;
1217 }
1218 
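// Walk the instructions of \p MBB while tracking the current execution-mask
// state, and insert the EXEC manipulation instructions needed so that every
// instruction runs in one of the states it is allowed in.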
1219 void SIWholeQuadMode::processBlock(MachineBasicBlock &MBB, bool IsEntry) {
1220   auto BII = Blocks.find(&MBB);
1221   if (BII == Blocks.end())
1222     return;
1223 
1224   BlockInfo &BI = BII->second;
1225 
1226   // This is a non-entry block that is WQM throughout, so no need to do
1227   // anything.
1228   if (!IsEntry && BI.Needs == StateWQM && BI.OutNeeds != StateExact) {
1229     BI.InitialState = StateWQM;
1230     return;
1231   }
1232 
1233   LLVM_DEBUG(dbgs() << "\nProcessing block " << printMBBReference(MBB)
1234                     << ":\n");
1235 
1236   Register SavedWQMReg;
1237   Register SavedNonStrictReg;
1238   bool WQMFromExec = IsEntry;
1239   char State = (IsEntry || !(BI.InNeeds & StateWQM)) ? StateExact : StateWQM;
1240   char NonStrictState = 0;
1241   const TargetRegisterClass *BoolRC = TRI->getBoolRC();
1242 
1243   auto II = MBB.getFirstNonPHI(), IE = MBB.end();
1244   if (IsEntry) {
1245     // Skip the instruction that saves LiveMask
1246     if (II != IE && II->getOpcode() == AMDGPU::COPY)
1247       ++II;
1248   }
1249 
1250   // This stores the first instruction where it's safe to switch from WQM to
1251   // Exact or vice versa.
1252   MachineBasicBlock::iterator FirstWQM = IE;
1253 
1254   // This stores the first instruction where it's safe to switch from Strict
1255   // mode to Exact/WQM or to switch to Strict mode. It must always be the same
1256   // as, or after, FirstWQM since if it's safe to switch to/from Strict, it must
1257   // be safe to switch to/from WQM as well.
1258   MachineBasicBlock::iterator FirstStrict = IE;
1259 
1260   // Record the initial state in the block information.
1261   BI.InitialState = State;
1262 
1263   for (;;) {
1264     MachineBasicBlock::iterator Next = II;
1265     char Needs = StateExact | StateWQM; // Strict mode is disabled by default.
1266     char OutNeeds = 0;
1267 
1268     if (FirstWQM == IE)
1269       FirstWQM = II;
1270 
1271     if (FirstStrict == IE)
1272       FirstStrict = II;
1273 
1274     // First, figure out the allowed states (Needs) based on the propagated
1275     // flags.
1276     if (II != IE) {
1277       MachineInstr &MI = *II;
1278 
1279       if (MI.isTerminator() || TII->mayReadEXEC(*MRI, MI)) {
1280         auto III = Instructions.find(&MI);
1281         if (III != Instructions.end()) {
1282           if (III->second.Needs & StateStrictWWM)
1283             Needs = StateStrictWWM;
1284           else if (III->second.Needs & StateStrictWQM)
1285             Needs = StateStrictWQM;
1286           else if (III->second.Needs & StateWQM)
1287             Needs = StateWQM;
1288           else
1289             Needs &= ~III->second.Disabled;
1290           OutNeeds = III->second.OutNeeds;
1291         }
1292       } else {
1293         // If the instruction doesn't actually need a correct EXEC, then we can
1294         // safely leave Strict mode enabled.
1295         Needs = StateExact | StateWQM | StateStrict;
1296       }
1297 
1298       if (MI.isTerminator() && OutNeeds == StateExact)
1299         Needs = StateExact;
1300 
1301       ++Next;
1302     } else {
1303       // End of basic block
1304       if (BI.OutNeeds & StateWQM)
1305         Needs = StateWQM;
1306       else if (BI.OutNeeds == StateExact)
1307         Needs = StateExact;
1308       else
1309         Needs = StateWQM | StateExact;
1310     }
1311 
1312     // Now, transition if necessary.
1313     if (!(Needs & State)) {
1314       MachineBasicBlock::iterator First;
1315       if (State == StateStrictWWM || Needs == StateStrictWWM ||
1316           State == StateStrictWQM || Needs == StateStrictWQM) {
1317         // We must switch to or from Strict mode.
1318         First = FirstStrict;
1319       } else {
1320         // We only need to switch to/from WQM, so we can use FirstWQM.
1321         First = FirstWQM;
1322       }
1323 
1324       // Whether we need to save SCC depends on start and end states.
1325       bool SaveSCC = false;
1326       switch (State) {
1327       case StateExact:
1328       case StateStrictWWM:
1329       case StateStrictWQM:
1330         // Exact/Strict -> Strict: save SCC
1331         // Exact/Strict -> WQM: save SCC if WQM mask is generated from exec
1332         // Exact/Strict -> Exact: no save
1333         SaveSCC = (Needs & StateStrict) || ((Needs & StateWQM) && WQMFromExec);
1334         break;
1335       case StateWQM:
1336         // WQM -> Exact/Strict: save SCC
1337         SaveSCC = !(Needs & StateWQM);
1338         break;
1339       default:
1340         llvm_unreachable("Unknown state");
1341         break;
1342       }
1343       MachineBasicBlock::iterator Before =
1344           prepareInsertion(MBB, First, II, Needs == StateWQM, SaveSCC);
1345 
1346       if (State & StateStrict) {
1347         assert(State == StateStrictWWM || State == StateStrictWQM);
1348         assert(SavedNonStrictReg);
1349         fromStrictMode(MBB, Before, SavedNonStrictReg, NonStrictState, State);
1350 
1351         LIS->createAndComputeVirtRegInterval(SavedNonStrictReg);
1352         SavedNonStrictReg = 0;
1353         State = NonStrictState;
1354       }
1355 
1356       if (Needs & StateStrict) {
1357         NonStrictState = State;
1358         assert(Needs == StateStrictWWM || Needs == StateStrictWQM);
1359         assert(!SavedNonStrictReg);
1360         SavedNonStrictReg = MRI->createVirtualRegister(BoolRC);
1361 
1362         toStrictMode(MBB, Before, SavedNonStrictReg, Needs);
1363         State = Needs;
1364 
1365       } else {
1366         if (State == StateWQM && (Needs & StateExact) && !(Needs & StateWQM)) {
1367           if (!WQMFromExec && (OutNeeds & StateWQM)) {
1368             assert(!SavedWQMReg);
1369             SavedWQMReg = MRI->createVirtualRegister(BoolRC);
1370           }
1371 
1372           toExact(MBB, Before, SavedWQMReg);
1373           State = StateExact;
1374         } else if (State == StateExact && (Needs & StateWQM) &&
1375                    !(Needs & StateExact)) {
1376           assert(WQMFromExec == (SavedWQMReg == 0));
1377 
1378           toWQM(MBB, Before, SavedWQMReg);
1379 
1380           if (SavedWQMReg) {
1381             LIS->createAndComputeVirtRegInterval(SavedWQMReg);
1382             SavedWQMReg = 0;
1383           }
1384           State = StateWQM;
1385         } else {
1386           // We can get here if we transitioned from StrictWWM to a
1387           // non-StrictWWM state that already matches our needs, in which
1388           // case there is nothing further to do.
1389           assert(Needs & State);
1390         }
1391       }
1392     }
1393 
1394     if (Needs != (StateExact | StateWQM | StateStrict)) {
1395       if (Needs != (StateExact | StateWQM))
1396         FirstWQM = IE;
1397       FirstStrict = IE;
1398     }
1399 
1400     if (II == IE)
1401       break;
1402 
1403     II = Next;
1404   }
1405   assert(!SavedWQMReg);
1406   assert(!SavedNonStrictReg);
1407 }
1408 
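// Replace SI_PS_LIVE and SI_LIVE_MASK pseudos with copies of the live mask.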
1409 void SIWholeQuadMode::lowerLiveMaskQueries() {
1410   for (MachineInstr *MI : LiveMaskQueries) {
1411     const DebugLoc &DL = MI->getDebugLoc();
1412     Register Dest = MI->getOperand(0).getReg();
1413 
1414     MachineInstr *Copy =
1415         BuildMI(*MI->getParent(), MI, DL, TII->get(AMDGPU::COPY), Dest)
1416             .addReg(LiveMaskReg);
1417 
1418     LIS->ReplaceMachineInstrInMaps(*MI, *Copy);
1419     MI->eraseFromParent();
1420   }
1421 }
1422 
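// Rewrite WQM/SOFT_WQM/STRICT_WWM/STRICT_WQM pseudos (and V_SET_INACTIVE with
// an undef inactive operand) into plain moves or copies now that the required
// execution-mask states have been established around them.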
1423 void SIWholeQuadMode::lowerCopyInstrs() {
1424   for (MachineInstr *MI : LowerToMovInstrs) {
1425     assert(MI->getNumExplicitOperands() == 2);
1426 
1427     const Register Reg = MI->getOperand(0).getReg();
1428     const unsigned SubReg = MI->getOperand(0).getSubReg();
1429 
1430     if (TRI->isVGPR(*MRI, Reg)) {
1431       const TargetRegisterClass *regClass =
1432           Reg.isVirtual() ? MRI->getRegClass(Reg) : TRI->getPhysRegClass(Reg);
1433       if (SubReg)
1434         regClass = TRI->getSubRegClass(regClass, SubReg);
1435 
1436       const unsigned MovOp = TII->getMovOpcode(regClass);
1437       MI->setDesc(TII->get(MovOp));
1438 
1439       // Check that it already implicitly depends on exec (like all VALU movs
1440       // should do).
1441       assert(any_of(MI->implicit_operands(), [](const MachineOperand &MO) {
1442         return MO.isUse() && MO.getReg() == AMDGPU::EXEC;
1443       }));
1444     } else {
1445       // Remove early-clobber and exec dependency from simple SGPR copies.
1446       // This allows some to be eliminated during/post RA.
1447       LLVM_DEBUG(dbgs() << "simplify SGPR copy: " << *MI);
1448       if (MI->getOperand(0).isEarlyClobber()) {
1449         LIS->removeInterval(Reg);
1450         MI->getOperand(0).setIsEarlyClobber(false);
1451         LIS->createAndComputeVirtRegInterval(Reg);
1452       }
1453       int Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC);
1454       while (Index >= 0) {
1455         MI->RemoveOperand(Index);
1456         Index = MI->findRegisterUseOperandIdx(AMDGPU::EXEC);
1457       }
1458       MI->setDesc(TII->get(AMDGPU::COPY));
1459       LLVM_DEBUG(dbgs() << "  -> " << *MI);
1460     }
1461   }
1462   for (MachineInstr *MI : LowerToCopyInstrs) {
1463     if (MI->getOpcode() == AMDGPU::V_SET_INACTIVE_B32 ||
1464         MI->getOpcode() == AMDGPU::V_SET_INACTIVE_B64) {
1465       assert(MI->getNumExplicitOperands() == 3);
1466       // The only reason we should be here is that V_SET_INACTIVE has
1467       // an undef input, so it is being replaced by a simple copy.
1468       // There should be a second undef source that we should remove.
1469       assert(MI->getOperand(2).isUndef());
1470       MI->RemoveOperand(2);
1471       MI->untieRegOperand(1);
1472     } else {
1473       assert(MI->getNumExplicitOperands() == 2);
1474     }
1475 
1476     MI->setDesc(TII->get(AMDGPU::COPY));
1477   }
1478 }
1479 
1480 void SIWholeQuadMode::lowerKillInstrs(bool IsWQM) {
1481   for (MachineInstr *MI : KillInstrs) {
1482     MachineBasicBlock *MBB = MI->getParent();
1483     MachineInstr *SplitPoint = nullptr;
1484     switch (MI->getOpcode()) {
1485     case AMDGPU::SI_DEMOTE_I1:
1486     case AMDGPU::SI_KILL_I1_TERMINATOR:
1487       SplitPoint = lowerKillI1(*MBB, *MI, IsWQM);
1488       break;
1489     case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
1490       SplitPoint = lowerKillF32(*MBB, *MI);
1491       break;
1492     default:
1493       continue;
1494     }
1495     if (SplitPoint)
1496       splitBlock(MBB, SplitPoint);
1497   }
1498 }
1499 
1500 bool SIWholeQuadMode::runOnMachineFunction(MachineFunction &MF) {
1501   LLVM_DEBUG(dbgs() << "SI Whole Quad Mode on " << MF.getName()
1502                     << " ------------- \n");
1503   LLVM_DEBUG(MF.dump(););
1504 
1505   Instructions.clear();
1506   Blocks.clear();
1507   LiveMaskQueries.clear();
1508   LowerToCopyInstrs.clear();
1509   LowerToMovInstrs.clear();
1510   KillInstrs.clear();
1511   StateTransition.clear();
1512 
1513   ST = &MF.getSubtarget<GCNSubtarget>();
1514 
1515   TII = ST->getInstrInfo();
1516   TRI = &TII->getRegisterInfo();
1517   MRI = &MF.getRegInfo();
1518   LIS = &getAnalysis<LiveIntervals>();
1519   MDT = &getAnalysis<MachineDominatorTree>();
1520   PDT = &getAnalysis<MachinePostDominatorTree>();
1521 
1522   if (ST->isWave32()) {
1523     AndOpc = AMDGPU::S_AND_B32;
1524     AndN2Opc = AMDGPU::S_ANDN2_B32;
1525     XorOpc = AMDGPU::S_XOR_B32;
1526     AndSaveExecOpc = AMDGPU::S_AND_SAVEEXEC_B32;
1527     OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B32;
1528     WQMOpc = AMDGPU::S_WQM_B32;
1529     Exec = AMDGPU::EXEC_LO;
1530   } else {
1531     AndOpc = AMDGPU::S_AND_B64;
1532     AndN2Opc = AMDGPU::S_ANDN2_B64;
1533     XorOpc = AMDGPU::S_XOR_B64;
1534     AndSaveExecOpc = AMDGPU::S_AND_SAVEEXEC_B64;
1535     OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B64;
1536     WQMOpc = AMDGPU::S_WQM_B64;
1537     Exec = AMDGPU::EXEC;
1538   }
1539 
1540   const char GlobalFlags = analyzeFunction(MF);
1541   const bool NeedsLiveMask = !(KillInstrs.empty() && LiveMaskQueries.empty());
1542 
1543   LiveMaskReg = Exec;
1544 
1545   // Shader is simple and does not need any state changes or complex lowering
1546   if (!(GlobalFlags & (StateWQM | StateStrict)) && LowerToCopyInstrs.empty() &&
1547       LowerToMovInstrs.empty() && KillInstrs.empty()) {
1548     lowerLiveMaskQueries();
1549     return !LiveMaskQueries.empty();
1550   }
1551 
1552   MachineBasicBlock &Entry = MF.front();
1553   MachineBasicBlock::iterator EntryMI = Entry.getFirstNonPHI();
1554 
1555   // Store a copy of the original live mask when required
1556   if (NeedsLiveMask || (GlobalFlags & StateWQM)) {
1557     LiveMaskReg = MRI->createVirtualRegister(TRI->getBoolRC());
1558     MachineInstr *MI =
1559         BuildMI(Entry, EntryMI, DebugLoc(), TII->get(AMDGPU::COPY), LiveMaskReg)
1560             .addReg(Exec);
1561     LIS->InsertMachineInstrInMaps(*MI);
1562   }
1563 
1564   LLVM_DEBUG(printInfo());
1565 
1566   lowerLiveMaskQueries();
1567   lowerCopyInstrs();
1568 
1569   // Shader only needs WQM
1570   if (GlobalFlags == StateWQM) {
1571     auto MI = BuildMI(Entry, EntryMI, DebugLoc(), TII->get(WQMOpc), Exec)
1572                   .addReg(Exec);
1573     LIS->InsertMachineInstrInMaps(*MI);
1574     lowerKillInstrs(true);
1575   } else {
1576     for (auto BII : Blocks)
1577       processBlock(*BII.first, BII.first == &Entry);
1578     // Lowering blocks causes block splitting so perform as a second pass.
1579     for (auto BII : Blocks)
1580       lowerBlock(*BII.first);
1581   }
1582 
1583   // Compute live range for live mask
1584   if (LiveMaskReg != Exec)
1585     LIS->createAndComputeVirtRegInterval(LiveMaskReg);
1586 
1587   // Physical registers like SCC aren't tracked by default anyway, so just
1588   // removing the ranges we computed is the simplest option for maintaining
1589   // the analysis results.
1590   LIS->removeRegUnit(*MCRegUnitIterator(MCRegister::from(AMDGPU::SCC), TRI));
1591 
1592   // If we performed any kills then recompute EXEC
1593   if (!KillInstrs.empty())
1594     LIS->removeRegUnit(*MCRegUnitIterator(AMDGPU::EXEC, TRI));
1595 
1596   return true;
1597 }
1598