xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/SILowerControlFlow.cpp (revision 5ffd83dbcc34f10e07f6d3e968ae6365869615f4)
10b57cec5SDimitry Andric //===-- SILowerControlFlow.cpp - Use predicates for control flow ----------===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric /// \file
100b57cec5SDimitry Andric /// This pass lowers the pseudo control flow instructions to real
110b57cec5SDimitry Andric /// machine instructions.
120b57cec5SDimitry Andric ///
130b57cec5SDimitry Andric /// All control flow is handled using predicated instructions and
140b57cec5SDimitry Andric /// a predicate stack.  Each Scalar ALU controls the operations of 64 Vector
150b57cec5SDimitry Andric /// ALUs.  The Scalar ALU can update the predicate for any of the Vector ALUs
160b57cec5SDimitry Andric /// by writing to the 64-bit EXEC register (each bit corresponds to a
170b57cec5SDimitry Andric /// single vector ALU).  Typically, for predicates, a vector ALU will write
180b57cec5SDimitry Andric /// to its bit of the VCC register (like EXEC VCC is 64-bits, one for each
190b57cec5SDimitry Andric /// Vector ALU) and then the ScalarALU will AND the VCC register with the
200b57cec5SDimitry Andric /// EXEC to update the predicates.
210b57cec5SDimitry Andric ///
220b57cec5SDimitry Andric /// For example:
230b57cec5SDimitry Andric /// %vcc = V_CMP_GT_F32 %vgpr1, %vgpr2
240b57cec5SDimitry Andric /// %sgpr0 = SI_IF %vcc
250b57cec5SDimitry Andric ///   %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0
260b57cec5SDimitry Andric /// %sgpr0 = SI_ELSE %sgpr0
270b57cec5SDimitry Andric ///   %vgpr0 = V_SUB_F32 %vgpr0, %vgpr0
280b57cec5SDimitry Andric /// SI_END_CF %sgpr0
290b57cec5SDimitry Andric ///
300b57cec5SDimitry Andric /// becomes:
310b57cec5SDimitry Andric ///
320b57cec5SDimitry Andric /// %sgpr0 = S_AND_SAVEEXEC_B64 %vcc  // Save and update the exec mask
330b57cec5SDimitry Andric /// %sgpr0 = S_XOR_B64 %sgpr0, %exec  // Clear live bits from saved exec mask
340b57cec5SDimitry Andric /// S_CBRANCH_EXECZ label0            // This instruction is an optional
350b57cec5SDimitry Andric ///                                   // optimization which allows us to
360b57cec5SDimitry Andric ///                                   // branch if all the bits of
370b57cec5SDimitry Andric ///                                   // EXEC are zero.
380b57cec5SDimitry Andric /// %vgpr0 = V_ADD_F32 %vgpr0, %vgpr0 // Do the IF block of the branch
390b57cec5SDimitry Andric ///
400b57cec5SDimitry Andric /// label0:
41*5ffd83dbSDimitry Andric /// %sgpr0 = S_OR_SAVEEXEC_B64 %sgpr0  // Restore the exec mask for the Then block
42*5ffd83dbSDimitry Andric /// %exec = S_XOR_B64 %sgpr0, %exec    // Update the exec mask
430b57cec5SDimitry Andric /// S_CBRANCH_EXECZ label1             // Use our branch optimization
440b57cec5SDimitry Andric ///                                    // instruction again.
450b57cec5SDimitry Andric /// %vgpr0 = V_SUB_F32 %vgpr0, %vgpr   // Do the THEN block
460b57cec5SDimitry Andric /// label1:
470b57cec5SDimitry Andric /// %exec = S_OR_B64 %exec, %sgpr0     // Re-enable saved exec mask bits
480b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
490b57cec5SDimitry Andric 
500b57cec5SDimitry Andric #include "AMDGPU.h"
510b57cec5SDimitry Andric #include "AMDGPUSubtarget.h"
520b57cec5SDimitry Andric #include "SIInstrInfo.h"
530b57cec5SDimitry Andric #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
54*5ffd83dbSDimitry Andric #include "llvm/ADT/SetVector.h"
55*5ffd83dbSDimitry Andric #include "llvm/ADT/SmallSet.h"
560b57cec5SDimitry Andric #include "llvm/ADT/SmallVector.h"
570b57cec5SDimitry Andric #include "llvm/ADT/StringRef.h"
580b57cec5SDimitry Andric #include "llvm/CodeGen/LiveIntervals.h"
590b57cec5SDimitry Andric #include "llvm/CodeGen/MachineBasicBlock.h"
600b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFunction.h"
610b57cec5SDimitry Andric #include "llvm/CodeGen/MachineFunctionPass.h"
620b57cec5SDimitry Andric #include "llvm/CodeGen/MachineInstr.h"
630b57cec5SDimitry Andric #include "llvm/CodeGen/MachineInstrBuilder.h"
640b57cec5SDimitry Andric #include "llvm/CodeGen/MachineOperand.h"
650b57cec5SDimitry Andric #include "llvm/CodeGen/MachineRegisterInfo.h"
660b57cec5SDimitry Andric #include "llvm/CodeGen/Passes.h"
670b57cec5SDimitry Andric #include "llvm/CodeGen/SlotIndexes.h"
680b57cec5SDimitry Andric #include "llvm/CodeGen/TargetRegisterInfo.h"
690b57cec5SDimitry Andric #include "llvm/MC/MCRegisterInfo.h"
700b57cec5SDimitry Andric #include "llvm/Pass.h"
710b57cec5SDimitry Andric #include <cassert>
720b57cec5SDimitry Andric #include <iterator>
730b57cec5SDimitry Andric 
740b57cec5SDimitry Andric using namespace llvm;
750b57cec5SDimitry Andric 
760b57cec5SDimitry Andric #define DEBUG_TYPE "si-lower-control-flow"
770b57cec5SDimitry Andric 
// Escape hatch to disable the removal of redundant SI_END_CF instructions
// (presumably consulted by optimizeEndCf() — its implementation is not
// visible in this chunk). Enabled by default.
78*5ffd83dbSDimitry Andric static cl::opt<bool>
79*5ffd83dbSDimitry Andric RemoveRedundantEndcf("amdgpu-remove-redundant-endcf",
80*5ffd83dbSDimitry Andric     cl::init(true), cl::ReallyHidden);
81*5ffd83dbSDimitry Andric 
820b57cec5SDimitry Andric namespace {
830b57cec5SDimitry Andric 
840b57cec5SDimitry Andric class SILowerControlFlow : public MachineFunctionPass {
850b57cec5SDimitry Andric private:
  // Cached per-function state; set up before lowering begins
  // (initialization happens in runOnMachineFunction, not visible here).
860b57cec5SDimitry Andric   const SIRegisterInfo *TRI = nullptr;
870b57cec5SDimitry Andric   const SIInstrInfo *TII = nullptr;
880b57cec5SDimitry Andric   LiveIntervals *LIS = nullptr;
890b57cec5SDimitry Andric   MachineRegisterInfo *MRI = nullptr;
  // Exec-restoring ORs produced by emitEndCf; candidates for removal in
  // optimizeEndCf().
90*5ffd83dbSDimitry Andric   SetVector<MachineInstr*> LoweredEndCf;
  // Registers that received a copy of exec while lowering SI_IF (see emitIf).
91*5ffd83dbSDimitry Andric   DenseSet<Register> LoweredIf;
  // SI_END_CF pseudos whose region contains kill terminators (flagged in
  // emitIf); emitEndCf follows them with an SI_KILL_CLEANUP.
92*5ffd83dbSDimitry Andric   SmallSet<MachineInstr *, 16> NeedsKillCleanup;
930b57cec5SDimitry Andric 
940b57cec5SDimitry Andric   const TargetRegisterClass *BoolRC = nullptr;
95*5ffd83dbSDimitry Andric   bool InsertKillCleanups;
  // Opcodes and the exec register used by the lowering routines below;
  // presumably selected as wave32/wave64 (B32/B64) variants where the pass is
  // initialized — not visible in this chunk.
960b57cec5SDimitry Andric   unsigned AndOpc;
970b57cec5SDimitry Andric   unsigned OrOpc;
980b57cec5SDimitry Andric   unsigned XorOpc;
990b57cec5SDimitry Andric   unsigned MovTermOpc;
1000b57cec5SDimitry Andric   unsigned Andn2TermOpc;
1010b57cec5SDimitry Andric   unsigned XorTermrOpc;
1020b57cec5SDimitry Andric   unsigned OrSaveExecOpc;
1030b57cec5SDimitry Andric   unsigned Exec;
1040b57cec5SDimitry Andric 
  // One lowering routine per pseudo control-flow instruction.
1050b57cec5SDimitry Andric   void emitIf(MachineInstr &MI);
1060b57cec5SDimitry Andric   void emitElse(MachineInstr &MI);
1070b57cec5SDimitry Andric   void emitIfBreak(MachineInstr &MI);
1080b57cec5SDimitry Andric   void emitLoop(MachineInstr &MI);
1090b57cec5SDimitry Andric   void emitEndCf(MachineInstr &MI);
1100b57cec5SDimitry Andric 
  // Helpers for the mask-combining peephole (see combineMasks).
1110b57cec5SDimitry Andric   void findMaskOperands(MachineInstr &MI, unsigned OpNo,
1120b57cec5SDimitry Andric                         SmallVectorImpl<MachineOperand> &Src) const;
1130b57cec5SDimitry Andric 
1140b57cec5SDimitry Andric   void combineMasks(MachineInstr &MI);
1150b57cec5SDimitry Andric 
116*5ffd83dbSDimitry Andric   void process(MachineInstr &MI);
117*5ffd83dbSDimitry Andric 
118*5ffd83dbSDimitry Andric   // Skip to the next instruction, ignoring debug instructions, and trivial
119*5ffd83dbSDimitry Andric   // block boundaries (blocks that have one (typically fallthrough) successor,
120*5ffd83dbSDimitry Andric   // and the successor has one predecessor.
121*5ffd83dbSDimitry Andric   MachineBasicBlock::iterator
122*5ffd83dbSDimitry Andric   skipIgnoreExecInstsTrivialSucc(MachineBasicBlock &MBB,
123*5ffd83dbSDimitry Andric                                  MachineBasicBlock::iterator It) const;
124*5ffd83dbSDimitry Andric 
125*5ffd83dbSDimitry Andric   // Remove redundant SI_END_CF instructions.
126*5ffd83dbSDimitry Andric   void optimizeEndCf();
127*5ffd83dbSDimitry Andric 
1280b57cec5SDimitry Andric public:
1290b57cec5SDimitry Andric   static char ID;
1300b57cec5SDimitry Andric 
1310b57cec5SDimitry Andric   SILowerControlFlow() : MachineFunctionPass(ID) {}
1320b57cec5SDimitry Andric 
1330b57cec5SDimitry Andric   bool runOnMachineFunction(MachineFunction &MF) override;
1340b57cec5SDimitry Andric 
1350b57cec5SDimitry Andric   StringRef getPassName() const override {
1360b57cec5SDimitry Andric     return "SI Lower control flow pseudo instructions";
1370b57cec5SDimitry Andric   }
1380b57cec5SDimitry Andric 
1390b57cec5SDimitry Andric   void getAnalysisUsage(AnalysisUsage &AU) const override {
1400b57cec5SDimitry Andric     // Should preserve the same set that TwoAddressInstructions does.
1410b57cec5SDimitry Andric     AU.addPreserved<SlotIndexes>();
1420b57cec5SDimitry Andric     AU.addPreserved<LiveIntervals>();
1430b57cec5SDimitry Andric     AU.addPreservedID(LiveVariablesID);
1440b57cec5SDimitry Andric     AU.addPreservedID(MachineLoopInfoID);
1450b57cec5SDimitry Andric     AU.addPreservedID(MachineDominatorsID);
1460b57cec5SDimitry Andric     AU.setPreservesCFG();
1470b57cec5SDimitry Andric     MachineFunctionPass::getAnalysisUsage(AU);
1480b57cec5SDimitry Andric   }
1490b57cec5SDimitry Andric };
1500b57cec5SDimitry Andric 
1510b57cec5SDimitry Andric } // end anonymous namespace
1520b57cec5SDimitry Andric 
// Pass identification: the address of ID is the unique token the pass
// manager uses; INITIALIZE_PASS registers the pass under DEBUG_TYPE.
1530b57cec5SDimitry Andric char SILowerControlFlow::ID = 0;
1540b57cec5SDimitry Andric 
1550b57cec5SDimitry Andric INITIALIZE_PASS(SILowerControlFlow, DEBUG_TYPE,
1560b57cec5SDimitry Andric                "SI lower control flow", false, false)
1570b57cec5SDimitry Andric 
1580b57cec5SDimitry Andric static void setImpSCCDefDead(MachineInstr &MI, bool IsDead) {
1590b57cec5SDimitry Andric   MachineOperand &ImpDefSCC = MI.getOperand(3);
1600b57cec5SDimitry Andric   assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
1610b57cec5SDimitry Andric 
1620b57cec5SDimitry Andric   ImpDefSCC.setIsDead(IsDead);
1630b57cec5SDimitry Andric }
1640b57cec5SDimitry Andric 
// Externally visible handle other code uses to reference this pass's ID.
1650b57cec5SDimitry Andric char &llvm::SILowerControlFlowID = SILowerControlFlow::ID;
1660b57cec5SDimitry Andric 
167*5ffd83dbSDimitry Andric static bool hasKill(const MachineBasicBlock *Begin,
168*5ffd83dbSDimitry Andric                     const MachineBasicBlock *End, const SIInstrInfo *TII) {
169*5ffd83dbSDimitry Andric   DenseSet<const MachineBasicBlock*> Visited;
170*5ffd83dbSDimitry Andric   SmallVector<MachineBasicBlock *, 4> Worklist(Begin->succ_begin(),
171*5ffd83dbSDimitry Andric                                                Begin->succ_end());
172*5ffd83dbSDimitry Andric 
173*5ffd83dbSDimitry Andric   while (!Worklist.empty()) {
174*5ffd83dbSDimitry Andric     MachineBasicBlock *MBB = Worklist.pop_back_val();
175*5ffd83dbSDimitry Andric 
176*5ffd83dbSDimitry Andric     if (MBB == End || !Visited.insert(MBB).second)
177*5ffd83dbSDimitry Andric       continue;
178*5ffd83dbSDimitry Andric     for (auto &Term : MBB->terminators())
179*5ffd83dbSDimitry Andric       if (TII->isKillTerminator(Term.getOpcode()))
180*5ffd83dbSDimitry Andric         return true;
181*5ffd83dbSDimitry Andric 
182*5ffd83dbSDimitry Andric     Worklist.append(MBB->succ_begin(), MBB->succ_end());
183*5ffd83dbSDimitry Andric   }
184*5ffd83dbSDimitry Andric 
185*5ffd83dbSDimitry Andric   return false;
186*5ffd83dbSDimitry Andric }
187*5ffd83dbSDimitry Andric 
188*5ffd83dbSDimitry Andric static bool isSimpleIf(const MachineInstr &MI, const MachineRegisterInfo *MRI) {
1898bcb0991SDimitry Andric   Register SaveExecReg = MI.getOperand(0).getReg();
1900b57cec5SDimitry Andric   auto U = MRI->use_instr_nodbg_begin(SaveExecReg);
1910b57cec5SDimitry Andric 
1920b57cec5SDimitry Andric   if (U == MRI->use_instr_nodbg_end() ||
1930b57cec5SDimitry Andric       std::next(U) != MRI->use_instr_nodbg_end() ||
1940b57cec5SDimitry Andric       U->getOpcode() != AMDGPU::SI_END_CF)
1950b57cec5SDimitry Andric     return false;
1960b57cec5SDimitry Andric 
1970b57cec5SDimitry Andric   return true;
1980b57cec5SDimitry Andric }
1990b57cec5SDimitry Andric 
// Lower SI_IF. Operands used here: (0) def of the saved exec mask,
// (1) condition, (2) branch target MBB, (4) implicit SCC def. Produces a
// copy of exec, ANDs it with the condition into the new exec, and emits
// S_CBRANCH_EXECZ to skip the THEN region when no lanes remain active.
2000b57cec5SDimitry Andric void SILowerControlFlow::emitIf(MachineInstr &MI) {
2010b57cec5SDimitry Andric   MachineBasicBlock &MBB = *MI.getParent();
2020b57cec5SDimitry Andric   const DebugLoc &DL = MI.getDebugLoc();
2030b57cec5SDimitry Andric   MachineBasicBlock::iterator I(&MI);
204*5ffd83dbSDimitry Andric   Register SaveExecReg = MI.getOperand(0).getReg();
2050b57cec5SDimitry Andric   MachineOperand& Cond = MI.getOperand(1);
2068bcb0991SDimitry Andric   assert(Cond.getSubReg() == AMDGPU::NoSubRegister);
2070b57cec5SDimitry Andric 
2080b57cec5SDimitry Andric   MachineOperand &ImpDefSCC = MI.getOperand(4);
2090b57cec5SDimitry Andric   assert(ImpDefSCC.getReg() == AMDGPU::SCC && ImpDefSCC.isDef());
2100b57cec5SDimitry Andric 
2110b57cec5SDimitry Andric   // If there is only one use of save exec register and that use is SI_END_CF,
2120b57cec5SDimitry Andric   // we can optimize SI_IF by returning the full saved exec mask instead of
2130b57cec5SDimitry Andric   // just cleared bits.
214*5ffd83dbSDimitry Andric   bool SimpleIf = isSimpleIf(MI, MRI);
215*5ffd83dbSDimitry Andric 
216*5ffd83dbSDimitry Andric   if (InsertKillCleanups) {
217*5ffd83dbSDimitry Andric     // Check for SI_KILL_*_TERMINATOR on full path of control flow and
218*5ffd83dbSDimitry Andric     // flag the associated SI_END_CF for insertion of a kill cleanup.
    // Follow the chain of saved-exec uses through any SI_ELSE to the final
    // SI_END_CF (or stop early at a dead else def).
219*5ffd83dbSDimitry Andric     auto UseMI = MRI->use_instr_nodbg_begin(SaveExecReg);
220*5ffd83dbSDimitry Andric     while (UseMI->getOpcode() != AMDGPU::SI_END_CF) {
221*5ffd83dbSDimitry Andric       assert(std::next(UseMI) == MRI->use_instr_nodbg_end());
222*5ffd83dbSDimitry Andric       assert(UseMI->getOpcode() == AMDGPU::SI_ELSE);
223*5ffd83dbSDimitry Andric       MachineOperand &NextExec = UseMI->getOperand(0);
224*5ffd83dbSDimitry Andric       Register NextExecReg = NextExec.getReg();
225*5ffd83dbSDimitry Andric       if (NextExec.isDead()) {
226*5ffd83dbSDimitry Andric         assert(!SimpleIf);
227*5ffd83dbSDimitry Andric         break;
228*5ffd83dbSDimitry Andric       }
229*5ffd83dbSDimitry Andric       UseMI = MRI->use_instr_nodbg_begin(NextExecReg);
230*5ffd83dbSDimitry Andric     }
231*5ffd83dbSDimitry Andric     if (UseMI->getOpcode() == AMDGPU::SI_END_CF) {
232*5ffd83dbSDimitry Andric       if (hasKill(MI.getParent(), UseMI->getParent(), TII)) {
233*5ffd83dbSDimitry Andric         NeedsKillCleanup.insert(&*UseMI);
234*5ffd83dbSDimitry Andric         SimpleIf = false;
235*5ffd83dbSDimitry Andric       }
236*5ffd83dbSDimitry Andric     }
237*5ffd83dbSDimitry Andric   } else if (SimpleIf) {
238*5ffd83dbSDimitry Andric     // Check for SI_KILL_*_TERMINATOR on path from if to endif.
239*5ffd83dbSDimitry Andric     // if there is any such terminator simplifications are not safe.
240*5ffd83dbSDimitry Andric     auto UseMI = MRI->use_instr_nodbg_begin(SaveExecReg);
241*5ffd83dbSDimitry Andric     SimpleIf = !hasKill(MI.getParent(), UseMI->getParent(), TII);
242*5ffd83dbSDimitry Andric   }
2430b57cec5SDimitry Andric 
2440b57cec5SDimitry Andric   // Add an implicit def of exec to discourage scheduling VALU after this which
2450b57cec5SDimitry Andric   // will interfere with trying to form s_and_saveexec_b64 later.
2460b57cec5SDimitry Andric   Register CopyReg = SimpleIf ? SaveExecReg
2470b57cec5SDimitry Andric                        : MRI->createVirtualRegister(BoolRC);
2480b57cec5SDimitry Andric   MachineInstr *CopyExec =
2490b57cec5SDimitry Andric     BuildMI(MBB, I, DL, TII->get(AMDGPU::COPY), CopyReg)
2500b57cec5SDimitry Andric     .addReg(Exec)
2510b57cec5SDimitry Andric     .addReg(Exec, RegState::ImplicitDefine);
252*5ffd83dbSDimitry Andric   LoweredIf.insert(CopyReg);
2530b57cec5SDimitry Andric 
  // Tmp receives exec & cond — the exec mask for the THEN region.
2548bcb0991SDimitry Andric   Register Tmp = MRI->createVirtualRegister(BoolRC);
2550b57cec5SDimitry Andric 
2560b57cec5SDimitry Andric   MachineInstr *And =
2570b57cec5SDimitry Andric     BuildMI(MBB, I, DL, TII->get(AndOpc), Tmp)
2580b57cec5SDimitry Andric     .addReg(CopyReg)
2590b57cec5SDimitry Andric     .add(Cond);
2600b57cec5SDimitry Andric 
2610b57cec5SDimitry Andric   setImpSCCDefDead(*And, true);
2620b57cec5SDimitry Andric 
  // In the non-simple case the result register gets the cleared bits
  // (saved exec ^ new exec) rather than the full saved mask.
2630b57cec5SDimitry Andric   MachineInstr *Xor = nullptr;
2640b57cec5SDimitry Andric   if (!SimpleIf) {
2650b57cec5SDimitry Andric     Xor =
2660b57cec5SDimitry Andric       BuildMI(MBB, I, DL, TII->get(XorOpc), SaveExecReg)
2670b57cec5SDimitry Andric       .addReg(Tmp)
2680b57cec5SDimitry Andric       .addReg(CopyReg);
2690b57cec5SDimitry Andric     setImpSCCDefDead(*Xor, ImpDefSCC.isDead());
2700b57cec5SDimitry Andric   }
2710b57cec5SDimitry Andric 
2720b57cec5SDimitry Andric   // Use a copy that is a terminator to get correct spill code placement it with
2730b57cec5SDimitry Andric   // fast regalloc.
2740b57cec5SDimitry Andric   MachineInstr *SetExec =
2750b57cec5SDimitry Andric     BuildMI(MBB, I, DL, TII->get(MovTermOpc), Exec)
2760b57cec5SDimitry Andric     .addReg(Tmp, RegState::Kill);
2770b57cec5SDimitry Andric 
278*5ffd83dbSDimitry Andric   // Insert the S_CBRANCH_EXECZ instruction which will be optimized later
279*5ffd83dbSDimitry Andric   // during SIRemoveShortExecBranches.
280*5ffd83dbSDimitry Andric   MachineInstr *NewBr = BuildMI(MBB, I, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
2810b57cec5SDimitry Andric                             .add(MI.getOperand(2));
2820b57cec5SDimitry Andric 
  // Without LiveIntervals there is nothing further to maintain.
2830b57cec5SDimitry Andric   if (!LIS) {
2840b57cec5SDimitry Andric     MI.eraseFromParent();
2850b57cec5SDimitry Andric     return;
2860b57cec5SDimitry Andric   }
2870b57cec5SDimitry Andric 
2880b57cec5SDimitry Andric   LIS->InsertMachineInstrInMaps(*CopyExec);
2890b57cec5SDimitry Andric 
2900b57cec5SDimitry Andric   // Replace with and so we don't need to fix the live interval for condition
2910b57cec5SDimitry Andric   // register.
2920b57cec5SDimitry Andric   LIS->ReplaceMachineInstrInMaps(MI, *And);
2930b57cec5SDimitry Andric 
2940b57cec5SDimitry Andric   if (!SimpleIf)
2950b57cec5SDimitry Andric     LIS->InsertMachineInstrInMaps(*Xor);
2960b57cec5SDimitry Andric   LIS->InsertMachineInstrInMaps(*SetExec);
2970b57cec5SDimitry Andric   LIS->InsertMachineInstrInMaps(*NewBr);
2980b57cec5SDimitry Andric 
2990b57cec5SDimitry Andric   LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
3000b57cec5SDimitry Andric   MI.eraseFromParent();
3010b57cec5SDimitry Andric 
3020b57cec5SDimitry Andric   // FIXME: Is there a better way of adjusting the liveness? It shouldn't be
3030b57cec5SDimitry Andric   // hard to add another def here but I'm not sure how to correctly update the
3040b57cec5SDimitry Andric   // valno.
3050b57cec5SDimitry Andric   LIS->removeInterval(SaveExecReg);
3060b57cec5SDimitry Andric   LIS->createAndComputeVirtRegInterval(SaveExecReg);
3070b57cec5SDimitry Andric   LIS->createAndComputeVirtRegInterval(Tmp);
3080b57cec5SDimitry Andric   if (!SimpleIf)
3090b57cec5SDimitry Andric     LIS->createAndComputeVirtRegInterval(CopyReg);
3100b57cec5SDimitry Andric }
3110b57cec5SDimitry Andric 
// Lower SI_ELSE. Operands used here: (0) def of the exec mask for the ELSE
// region, (1) saved exec from the matching SI_IF, (2) branch target MBB,
// (3) immediate flag — nonzero when exec was modified between the IF and
// this ELSE (which requires re-masking the saved exec with the current one).
3120b57cec5SDimitry Andric void SILowerControlFlow::emitElse(MachineInstr &MI) {
3130b57cec5SDimitry Andric   MachineBasicBlock &MBB = *MI.getParent();
3140b57cec5SDimitry Andric   const DebugLoc &DL = MI.getDebugLoc();
3150b57cec5SDimitry Andric 
316*5ffd83dbSDimitry Andric   Register DstReg = MI.getOperand(0).getReg();
3170b57cec5SDimitry Andric 
3180b57cec5SDimitry Andric   bool ExecModified = MI.getOperand(3).getImm() != 0;
3190b57cec5SDimitry Andric   MachineBasicBlock::iterator Start = MBB.begin();
3200b57cec5SDimitry Andric 
3210b57cec5SDimitry Andric   // We are running before TwoAddressInstructions, and si_else's operands are
3220b57cec5SDimitry Andric   // tied. In order to correctly tie the registers, split this into a copy of
3230b57cec5SDimitry Andric   // the src like it does.
3240b57cec5SDimitry Andric   Register CopyReg = MRI->createVirtualRegister(BoolRC);
3250b57cec5SDimitry Andric   MachineInstr *CopyExec =
3260b57cec5SDimitry Andric     BuildMI(MBB, Start, DL, TII->get(AMDGPU::COPY), CopyReg)
3270b57cec5SDimitry Andric       .add(MI.getOperand(1)); // Saved EXEC
3280b57cec5SDimitry Andric 
3290b57cec5SDimitry Andric   // This must be inserted before phis and any spill code inserted before the
3300b57cec5SDimitry Andric   // else.
3310b57cec5SDimitry Andric   Register SaveReg = ExecModified ?
3320b57cec5SDimitry Andric     MRI->createVirtualRegister(BoolRC) : DstReg;
3330b57cec5SDimitry Andric   MachineInstr *OrSaveExec =
3340b57cec5SDimitry Andric     BuildMI(MBB, Start, DL, TII->get(OrSaveExecOpc), SaveReg)
3350b57cec5SDimitry Andric     .addReg(CopyReg);
3360b57cec5SDimitry Andric 
3370b57cec5SDimitry Andric   MachineBasicBlock *DestBB = MI.getOperand(2).getMBB();
3380b57cec5SDimitry Andric 
3390b57cec5SDimitry Andric   MachineBasicBlock::iterator ElsePt(MI);
3400b57cec5SDimitry Andric 
  // If exec changed since the IF, the saved mask may include lanes that have
  // since been disabled; AND with the current exec before using it.
3410b57cec5SDimitry Andric   if (ExecModified) {
3420b57cec5SDimitry Andric     MachineInstr *And =
3430b57cec5SDimitry Andric       BuildMI(MBB, ElsePt, DL, TII->get(AndOpc), DstReg)
3440b57cec5SDimitry Andric       .addReg(Exec)
3450b57cec5SDimitry Andric       .addReg(SaveReg);
3460b57cec5SDimitry Andric 
3470b57cec5SDimitry Andric     if (LIS)
3480b57cec5SDimitry Andric       LIS->InsertMachineInstrInMaps(*And);
3490b57cec5SDimitry Andric   }
3500b57cec5SDimitry Andric 
  // Flip exec to the lanes that still need to run the ELSE region.
3510b57cec5SDimitry Andric   MachineInstr *Xor =
3520b57cec5SDimitry Andric     BuildMI(MBB, ElsePt, DL, TII->get(XorTermrOpc), Exec)
3530b57cec5SDimitry Andric     .addReg(Exec)
3540b57cec5SDimitry Andric     .addReg(DstReg);
3550b57cec5SDimitry Andric 
3560b57cec5SDimitry Andric   MachineInstr *Branch =
357*5ffd83dbSDimitry Andric       BuildMI(MBB, ElsePt, DL, TII->get(AMDGPU::S_CBRANCH_EXECZ))
3580b57cec5SDimitry Andric           .addMBB(DestBB);
3590b57cec5SDimitry Andric 
  // Without LiveIntervals there is nothing further to maintain.
3600b57cec5SDimitry Andric   if (!LIS) {
3610b57cec5SDimitry Andric     MI.eraseFromParent();
3620b57cec5SDimitry Andric     return;
3630b57cec5SDimitry Andric   }
3640b57cec5SDimitry Andric 
3650b57cec5SDimitry Andric   LIS->RemoveMachineInstrFromMaps(MI);
3660b57cec5SDimitry Andric   MI.eraseFromParent();
3670b57cec5SDimitry Andric 
3680b57cec5SDimitry Andric   LIS->InsertMachineInstrInMaps(*CopyExec);
3690b57cec5SDimitry Andric   LIS->InsertMachineInstrInMaps(*OrSaveExec);
3700b57cec5SDimitry Andric 
3710b57cec5SDimitry Andric   LIS->InsertMachineInstrInMaps(*Xor);
3720b57cec5SDimitry Andric   LIS->InsertMachineInstrInMaps(*Branch);
3730b57cec5SDimitry Andric 
3740b57cec5SDimitry Andric   // src reg is tied to dst reg.
3750b57cec5SDimitry Andric   LIS->removeInterval(DstReg);
3760b57cec5SDimitry Andric   LIS->createAndComputeVirtRegInterval(DstReg);
3770b57cec5SDimitry Andric   LIS->createAndComputeVirtRegInterval(CopyReg);
3780b57cec5SDimitry Andric   if (ExecModified)
3790b57cec5SDimitry Andric     LIS->createAndComputeVirtRegInterval(SaveReg);
3800b57cec5SDimitry Andric 
3810b57cec5SDimitry Andric   // Let this be recomputed.
3820b57cec5SDimitry Andric   LIS->removeAllRegUnitsForPhysReg(AMDGPU::EXEC);
3830b57cec5SDimitry Andric }
3840b57cec5SDimitry Andric 
3850b57cec5SDimitry Andric void SILowerControlFlow::emitIfBreak(MachineInstr &MI) {
3860b57cec5SDimitry Andric   MachineBasicBlock &MBB = *MI.getParent();
3870b57cec5SDimitry Andric   const DebugLoc &DL = MI.getDebugLoc();
388*5ffd83dbSDimitry Andric   auto Dst = MI.getOperand(0).getReg();
3890b57cec5SDimitry Andric 
3900b57cec5SDimitry Andric   // Skip ANDing with exec if the break condition is already masked by exec
3910b57cec5SDimitry Andric   // because it is a V_CMP in the same basic block. (We know the break
3920b57cec5SDimitry Andric   // condition operand was an i1 in IR, so if it is a VALU instruction it must
3930b57cec5SDimitry Andric   // be one with a carry-out.)
3940b57cec5SDimitry Andric   bool SkipAnding = false;
3950b57cec5SDimitry Andric   if (MI.getOperand(1).isReg()) {
3960b57cec5SDimitry Andric     if (MachineInstr *Def = MRI->getUniqueVRegDef(MI.getOperand(1).getReg())) {
3970b57cec5SDimitry Andric       SkipAnding = Def->getParent() == MI.getParent()
3980b57cec5SDimitry Andric           && SIInstrInfo::isVALU(*Def);
3990b57cec5SDimitry Andric     }
4000b57cec5SDimitry Andric   }
4010b57cec5SDimitry Andric 
4020b57cec5SDimitry Andric   // AND the break condition operand with exec, then OR that into the "loop
4030b57cec5SDimitry Andric   // exit" mask.
4040b57cec5SDimitry Andric   MachineInstr *And = nullptr, *Or = nullptr;
4050b57cec5SDimitry Andric   if (!SkipAnding) {
406480093f4SDimitry Andric     Register AndReg = MRI->createVirtualRegister(BoolRC);
407480093f4SDimitry Andric     And = BuildMI(MBB, &MI, DL, TII->get(AndOpc), AndReg)
4080b57cec5SDimitry Andric              .addReg(Exec)
4090b57cec5SDimitry Andric              .add(MI.getOperand(1));
4100b57cec5SDimitry Andric     Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
411480093f4SDimitry Andric              .addReg(AndReg)
4120b57cec5SDimitry Andric              .add(MI.getOperand(2));
413480093f4SDimitry Andric     if (LIS)
414480093f4SDimitry Andric       LIS->createAndComputeVirtRegInterval(AndReg);
4150b57cec5SDimitry Andric   } else
4160b57cec5SDimitry Andric     Or = BuildMI(MBB, &MI, DL, TII->get(OrOpc), Dst)
4170b57cec5SDimitry Andric              .add(MI.getOperand(1))
4180b57cec5SDimitry Andric              .add(MI.getOperand(2));
4190b57cec5SDimitry Andric 
4200b57cec5SDimitry Andric   if (LIS) {
4210b57cec5SDimitry Andric     if (And)
4220b57cec5SDimitry Andric       LIS->InsertMachineInstrInMaps(*And);
4230b57cec5SDimitry Andric     LIS->ReplaceMachineInstrInMaps(MI, *Or);
4240b57cec5SDimitry Andric   }
4250b57cec5SDimitry Andric 
4260b57cec5SDimitry Andric   MI.eraseFromParent();
4270b57cec5SDimitry Andric }
4280b57cec5SDimitry Andric 
4290b57cec5SDimitry Andric void SILowerControlFlow::emitLoop(MachineInstr &MI) {
4300b57cec5SDimitry Andric   MachineBasicBlock &MBB = *MI.getParent();
4310b57cec5SDimitry Andric   const DebugLoc &DL = MI.getDebugLoc();
4320b57cec5SDimitry Andric 
4330b57cec5SDimitry Andric   MachineInstr *AndN2 =
4340b57cec5SDimitry Andric       BuildMI(MBB, &MI, DL, TII->get(Andn2TermOpc), Exec)
4350b57cec5SDimitry Andric           .addReg(Exec)
4360b57cec5SDimitry Andric           .add(MI.getOperand(0));
4370b57cec5SDimitry Andric 
4380b57cec5SDimitry Andric   MachineInstr *Branch =
4390b57cec5SDimitry Andric       BuildMI(MBB, &MI, DL, TII->get(AMDGPU::S_CBRANCH_EXECNZ))
4400b57cec5SDimitry Andric           .add(MI.getOperand(1));
4410b57cec5SDimitry Andric 
4420b57cec5SDimitry Andric   if (LIS) {
4430b57cec5SDimitry Andric     LIS->ReplaceMachineInstrInMaps(MI, *AndN2);
4440b57cec5SDimitry Andric     LIS->InsertMachineInstrInMaps(*Branch);
4450b57cec5SDimitry Andric   }
4460b57cec5SDimitry Andric 
4470b57cec5SDimitry Andric   MI.eraseFromParent();
4480b57cec5SDimitry Andric }
4490b57cec5SDimitry Andric 
450*5ffd83dbSDimitry Andric MachineBasicBlock::iterator
451*5ffd83dbSDimitry Andric SILowerControlFlow::skipIgnoreExecInstsTrivialSucc(
452*5ffd83dbSDimitry Andric   MachineBasicBlock &MBB, MachineBasicBlock::iterator It) const {
453*5ffd83dbSDimitry Andric 
454*5ffd83dbSDimitry Andric   SmallSet<const MachineBasicBlock *, 4> Visited;
455*5ffd83dbSDimitry Andric   MachineBasicBlock *B = &MBB;
456*5ffd83dbSDimitry Andric   do {
457*5ffd83dbSDimitry Andric     if (!Visited.insert(B).second)
458*5ffd83dbSDimitry Andric       return MBB.end();
459*5ffd83dbSDimitry Andric 
460*5ffd83dbSDimitry Andric     auto E = B->end();
461*5ffd83dbSDimitry Andric     for ( ; It != E; ++It) {
462*5ffd83dbSDimitry Andric       if (It->getOpcode() == AMDGPU::SI_KILL_CLEANUP)
463*5ffd83dbSDimitry Andric         continue;
464*5ffd83dbSDimitry Andric       if (TII->mayReadEXEC(*MRI, *It))
465*5ffd83dbSDimitry Andric         break;
466*5ffd83dbSDimitry Andric     }
467*5ffd83dbSDimitry Andric 
468*5ffd83dbSDimitry Andric     if (It != E)
469*5ffd83dbSDimitry Andric       return It;
470*5ffd83dbSDimitry Andric 
471*5ffd83dbSDimitry Andric     if (B->succ_size() != 1)
472*5ffd83dbSDimitry Andric       return MBB.end();
473*5ffd83dbSDimitry Andric 
474*5ffd83dbSDimitry Andric     // If there is one trivial successor, advance to the next block.
475*5ffd83dbSDimitry Andric     MachineBasicBlock *Succ = *B->succ_begin();
476*5ffd83dbSDimitry Andric 
477*5ffd83dbSDimitry Andric     It = Succ->begin();
478*5ffd83dbSDimitry Andric     B = Succ;
479*5ffd83dbSDimitry Andric   } while (true);
480*5ffd83dbSDimitry Andric }
481*5ffd83dbSDimitry Andric 
4820b57cec5SDimitry Andric void SILowerControlFlow::emitEndCf(MachineInstr &MI) {
4830b57cec5SDimitry Andric   MachineBasicBlock &MBB = *MI.getParent();
4848bcb0991SDimitry Andric   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
4858bcb0991SDimitry Andric   unsigned CFMask = MI.getOperand(0).getReg();
4868bcb0991SDimitry Andric   MachineInstr *Def = MRI.getUniqueVRegDef(CFMask);
4870b57cec5SDimitry Andric   const DebugLoc &DL = MI.getDebugLoc();
4880b57cec5SDimitry Andric 
4898bcb0991SDimitry Andric   MachineBasicBlock::iterator InsPt =
4908bcb0991SDimitry Andric       Def && Def->getParent() == &MBB ? std::next(MachineBasicBlock::iterator(Def))
4918bcb0991SDimitry Andric                                : MBB.begin();
4928bcb0991SDimitry Andric   MachineInstr *NewMI = BuildMI(MBB, InsPt, DL, TII->get(OrOpc), Exec)
4930b57cec5SDimitry Andric                             .addReg(Exec)
4940b57cec5SDimitry Andric                             .add(MI.getOperand(0));
4950b57cec5SDimitry Andric 
496*5ffd83dbSDimitry Andric   LoweredEndCf.insert(NewMI);
497*5ffd83dbSDimitry Andric 
498*5ffd83dbSDimitry Andric   // If this ends control flow which contains kills (as flagged in emitIf)
499*5ffd83dbSDimitry Andric   // then insert an SI_KILL_CLEANUP immediately following the exec mask
500*5ffd83dbSDimitry Andric   // manipulation.  This can be lowered to early termination if appropriate.
501*5ffd83dbSDimitry Andric   MachineInstr *CleanUpMI = nullptr;
502*5ffd83dbSDimitry Andric   if (NeedsKillCleanup.count(&MI))
503*5ffd83dbSDimitry Andric     CleanUpMI = BuildMI(MBB, InsPt, DL, TII->get(AMDGPU::SI_KILL_CLEANUP));
504*5ffd83dbSDimitry Andric 
505*5ffd83dbSDimitry Andric   if (LIS) {
5060b57cec5SDimitry Andric     LIS->ReplaceMachineInstrInMaps(MI, *NewMI);
507*5ffd83dbSDimitry Andric     if (CleanUpMI)
508*5ffd83dbSDimitry Andric       LIS->InsertMachineInstrInMaps(*CleanUpMI);
509*5ffd83dbSDimitry Andric   }
5100b57cec5SDimitry Andric 
5110b57cec5SDimitry Andric   MI.eraseFromParent();
5120b57cec5SDimitry Andric 
5130b57cec5SDimitry Andric   if (LIS)
5140b57cec5SDimitry Andric     LIS->handleMove(*NewMI);
5150b57cec5SDimitry Andric }
5160b57cec5SDimitry Andric 
5170b57cec5SDimitry Andric // Returns replace operands for a logical operation, either single result
5180b57cec5SDimitry Andric // for exec or two operands if source was another equivalent operation.
5190b57cec5SDimitry Andric void SILowerControlFlow::findMaskOperands(MachineInstr &MI, unsigned OpNo,
5200b57cec5SDimitry Andric        SmallVectorImpl<MachineOperand> &Src) const {
5210b57cec5SDimitry Andric   MachineOperand &Op = MI.getOperand(OpNo);
5228bcb0991SDimitry Andric   if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg())) {
5230b57cec5SDimitry Andric     Src.push_back(Op);
5240b57cec5SDimitry Andric     return;
5250b57cec5SDimitry Andric   }
5260b57cec5SDimitry Andric 
5270b57cec5SDimitry Andric   MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
5280b57cec5SDimitry Andric   if (!Def || Def->getParent() != MI.getParent() ||
5290b57cec5SDimitry Andric       !(Def->isFullCopy() || (Def->getOpcode() == MI.getOpcode())))
5300b57cec5SDimitry Andric     return;
5310b57cec5SDimitry Andric 
5320b57cec5SDimitry Andric   // Make sure we do not modify exec between def and use.
5330b57cec5SDimitry Andric   // A copy with implcitly defined exec inserted earlier is an exclusion, it
5340b57cec5SDimitry Andric   // does not really modify exec.
5350b57cec5SDimitry Andric   for (auto I = Def->getIterator(); I != MI.getIterator(); ++I)
5360b57cec5SDimitry Andric     if (I->modifiesRegister(AMDGPU::EXEC, TRI) &&
5370b57cec5SDimitry Andric         !(I->isCopy() && I->getOperand(0).getReg() != Exec))
5380b57cec5SDimitry Andric       return;
5390b57cec5SDimitry Andric 
5400b57cec5SDimitry Andric   for (const auto &SrcOp : Def->explicit_operands())
5410b57cec5SDimitry Andric     if (SrcOp.isReg() && SrcOp.isUse() &&
5428bcb0991SDimitry Andric         (Register::isVirtualRegister(SrcOp.getReg()) || SrcOp.getReg() == Exec))
5430b57cec5SDimitry Andric       Src.push_back(SrcOp);
5440b57cec5SDimitry Andric }
5450b57cec5SDimitry Andric 
5460b57cec5SDimitry Andric // Search and combine pairs of equivalent instructions, like
5470b57cec5SDimitry Andric // S_AND_B64 x, (S_AND_B64 x, y) => S_AND_B64 x, y
5480b57cec5SDimitry Andric // S_OR_B64  x, (S_OR_B64  x, y) => S_OR_B64  x, y
5490b57cec5SDimitry Andric // One of the operands is exec mask.
5500b57cec5SDimitry Andric void SILowerControlFlow::combineMasks(MachineInstr &MI) {
5510b57cec5SDimitry Andric   assert(MI.getNumExplicitOperands() == 3);
5520b57cec5SDimitry Andric   SmallVector<MachineOperand, 4> Ops;
5530b57cec5SDimitry Andric   unsigned OpToReplace = 1;
5540b57cec5SDimitry Andric   findMaskOperands(MI, 1, Ops);
5550b57cec5SDimitry Andric   if (Ops.size() == 1) OpToReplace = 2; // First operand can be exec or its copy
5560b57cec5SDimitry Andric   findMaskOperands(MI, 2, Ops);
5570b57cec5SDimitry Andric   if (Ops.size() != 3) return;
5580b57cec5SDimitry Andric 
5590b57cec5SDimitry Andric   unsigned UniqueOpndIdx;
5600b57cec5SDimitry Andric   if (Ops[0].isIdenticalTo(Ops[1])) UniqueOpndIdx = 2;
5610b57cec5SDimitry Andric   else if (Ops[0].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
5620b57cec5SDimitry Andric   else if (Ops[1].isIdenticalTo(Ops[2])) UniqueOpndIdx = 1;
5630b57cec5SDimitry Andric   else return;
5640b57cec5SDimitry Andric 
5658bcb0991SDimitry Andric   Register Reg = MI.getOperand(OpToReplace).getReg();
5660b57cec5SDimitry Andric   MI.RemoveOperand(OpToReplace);
5670b57cec5SDimitry Andric   MI.addOperand(Ops[UniqueOpndIdx]);
5680b57cec5SDimitry Andric   if (MRI->use_empty(Reg))
5690b57cec5SDimitry Andric     MRI->getUniqueVRegDef(Reg)->eraseFromParent();
5700b57cec5SDimitry Andric }
5710b57cec5SDimitry Andric 
// Remove lowered SI_END_CF exec-mask restores that are provably redundant
// because the next relevant instruction is another lowered END_CF whose saved
// mask came from an SI_IF.
void SILowerControlFlow::optimizeEndCf() {
  // If the only instruction immediately following this END_CF is an another
  // END_CF in the only successor we can avoid emitting exec mask restore here.
  if (!RemoveRedundantEndcf)
    return;

  for (MachineInstr *MI : LoweredEndCf) {
    MachineBasicBlock &MBB = *MI->getParent();
    // Find the next instruction after MI, skipping exec-ignoring instructions
    // and trivial successor boundaries (per the helper's name) -- TODO confirm
    // the helper's exact skipping rules against its definition.
    auto Next =
      skipIgnoreExecInstsTrivialSucc(MBB, std::next(MI->getIterator()));
    if (Next == MBB.end() || !LoweredEndCf.count(&*Next))
      continue;
    // Only skip inner END_CF if outer ENDCF belongs to SI_IF.
    // If that belongs to SI_ELSE then saved mask has an inverted value.
    Register SavedExec
      = TII->getNamedOperand(*Next, AMDGPU::OpName::src1)->getReg();
    assert(SavedExec.isVirtual() && "Expected saved exec to be src1!");

    // Membership in LoweredIf identifies the saved-exec register as coming
    // from an SI_IF lowering (presumably recorded by emitIf; verify there).
    const MachineInstr *Def = MRI->getUniqueVRegDef(SavedExec);
    if (Def && LoweredIf.count(SavedExec)) {
      LLVM_DEBUG(dbgs() << "Skip redundant "; MI->dump());
      // Keep LiveIntervals consistent before the instruction disappears.
      if (LIS)
        LIS->RemoveMachineInstrFromMaps(*MI);
      // Erase from the block only; the LoweredEndCf set is cleared by the
      // caller after this function returns, so iteration stays valid.
      MI->eraseFromParent();
    }
  }
}
5990b57cec5SDimitry Andric 
// Lower one pseudo control-flow instruction to real exec-mask manipulation,
// then fold redundant exec-mask bit operations emitted around it.
void SILowerControlFlow::process(MachineInstr &MI) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineBasicBlock::iterator I(MI);
  // Remember the instruction preceding MI: lowering erases MI, and the
  // cleanup scan below starts just before the newly emitted sequence.
  MachineInstr *Prev = (I != MBB.begin()) ? &*(std::prev(I)) : nullptr;

  switch (MI.getOpcode()) {
  case AMDGPU::SI_IF:
    emitIf(MI);
    break;

  case AMDGPU::SI_ELSE:
    emitElse(MI);
    break;

  case AMDGPU::SI_IF_BREAK:
    emitIfBreak(MI);
    break;

  case AMDGPU::SI_LOOP:
    emitLoop(MI);
    break;

  case AMDGPU::SI_END_CF:
    emitEndCf(MI);
    break;

  default:
    assert(false && "Attempt to process unsupported instruction");
    break;
  }

  // Scan forward from just before the lowered sequence, combining pairs of
  // equivalent mask operations (see combineMasks).
  MachineBasicBlock::iterator Next;
  for (I = Prev ? Prev->getIterator() : MBB.begin(); I != MBB.end(); I = Next) {
    Next = std::next(I);
    MachineInstr &MaskMI = *I;
    switch (MaskMI.getOpcode()) {
    case AMDGPU::S_AND_B64:
    case AMDGPU::S_OR_B64:
    case AMDGPU::S_AND_B32:
    case AMDGPU::S_OR_B32:
      // Cleanup bit manipulations on exec mask
      combineMasks(MaskMI);
      break;
    default:
      // NOTE(review): this assignment appears intended to stop the scan at
      // the first non-mask instruction, but the loop increment (I = Next)
      // immediately overwrites it, so the scan actually continues to the end
      // of the block. Harmless (combineMasks only fires on the mask opcodes
      // above) but the intent should be confirmed before "fixing" it.
      I = MBB.end();
      break;
    }
  }
}
649*5ffd83dbSDimitry Andric 
650*5ffd83dbSDimitry Andric bool SILowerControlFlow::runOnMachineFunction(MachineFunction &MF) {
651*5ffd83dbSDimitry Andric   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
652*5ffd83dbSDimitry Andric   TII = ST.getInstrInfo();
653*5ffd83dbSDimitry Andric   TRI = &TII->getRegisterInfo();
654*5ffd83dbSDimitry Andric 
655*5ffd83dbSDimitry Andric   // This doesn't actually need LiveIntervals, but we can preserve them.
656*5ffd83dbSDimitry Andric   LIS = getAnalysisIfAvailable<LiveIntervals>();
657*5ffd83dbSDimitry Andric   MRI = &MF.getRegInfo();
658*5ffd83dbSDimitry Andric   BoolRC = TRI->getBoolRC();
659*5ffd83dbSDimitry Andric   InsertKillCleanups =
660*5ffd83dbSDimitry Andric       MF.getFunction().getCallingConv() == CallingConv::AMDGPU_PS;
661*5ffd83dbSDimitry Andric 
662*5ffd83dbSDimitry Andric   if (ST.isWave32()) {
663*5ffd83dbSDimitry Andric     AndOpc = AMDGPU::S_AND_B32;
664*5ffd83dbSDimitry Andric     OrOpc = AMDGPU::S_OR_B32;
665*5ffd83dbSDimitry Andric     XorOpc = AMDGPU::S_XOR_B32;
666*5ffd83dbSDimitry Andric     MovTermOpc = AMDGPU::S_MOV_B32_term;
667*5ffd83dbSDimitry Andric     Andn2TermOpc = AMDGPU::S_ANDN2_B32_term;
668*5ffd83dbSDimitry Andric     XorTermrOpc = AMDGPU::S_XOR_B32_term;
669*5ffd83dbSDimitry Andric     OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B32;
670*5ffd83dbSDimitry Andric     Exec = AMDGPU::EXEC_LO;
671*5ffd83dbSDimitry Andric   } else {
672*5ffd83dbSDimitry Andric     AndOpc = AMDGPU::S_AND_B64;
673*5ffd83dbSDimitry Andric     OrOpc = AMDGPU::S_OR_B64;
674*5ffd83dbSDimitry Andric     XorOpc = AMDGPU::S_XOR_B64;
675*5ffd83dbSDimitry Andric     MovTermOpc = AMDGPU::S_MOV_B64_term;
676*5ffd83dbSDimitry Andric     Andn2TermOpc = AMDGPU::S_ANDN2_B64_term;
677*5ffd83dbSDimitry Andric     XorTermrOpc = AMDGPU::S_XOR_B64_term;
678*5ffd83dbSDimitry Andric     OrSaveExecOpc = AMDGPU::S_OR_SAVEEXEC_B64;
679*5ffd83dbSDimitry Andric     Exec = AMDGPU::EXEC;
680*5ffd83dbSDimitry Andric   }
681*5ffd83dbSDimitry Andric 
682*5ffd83dbSDimitry Andric   SmallVector<MachineInstr *, 32> Worklist;
683*5ffd83dbSDimitry Andric 
684*5ffd83dbSDimitry Andric   MachineFunction::iterator NextBB;
685*5ffd83dbSDimitry Andric   for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
686*5ffd83dbSDimitry Andric        BI != BE; BI = NextBB) {
687*5ffd83dbSDimitry Andric     NextBB = std::next(BI);
688*5ffd83dbSDimitry Andric     MachineBasicBlock &MBB = *BI;
689*5ffd83dbSDimitry Andric 
690*5ffd83dbSDimitry Andric     MachineBasicBlock::iterator I, Next;
691*5ffd83dbSDimitry Andric     for (I = MBB.begin(); I != MBB.end(); I = Next) {
692*5ffd83dbSDimitry Andric       Next = std::next(I);
693*5ffd83dbSDimitry Andric       MachineInstr &MI = *I;
694*5ffd83dbSDimitry Andric 
695*5ffd83dbSDimitry Andric       switch (MI.getOpcode()) {
696*5ffd83dbSDimitry Andric       case AMDGPU::SI_IF:
697*5ffd83dbSDimitry Andric         process(MI);
698*5ffd83dbSDimitry Andric         break;
699*5ffd83dbSDimitry Andric 
700*5ffd83dbSDimitry Andric       case AMDGPU::SI_ELSE:
701*5ffd83dbSDimitry Andric       case AMDGPU::SI_IF_BREAK:
702*5ffd83dbSDimitry Andric       case AMDGPU::SI_LOOP:
703*5ffd83dbSDimitry Andric       case AMDGPU::SI_END_CF:
704*5ffd83dbSDimitry Andric         // Only build worklist if SI_IF instructions must be processed first.
705*5ffd83dbSDimitry Andric         if (InsertKillCleanups)
706*5ffd83dbSDimitry Andric           Worklist.push_back(&MI);
707*5ffd83dbSDimitry Andric         else
708*5ffd83dbSDimitry Andric           process(MI);
709*5ffd83dbSDimitry Andric         break;
7100b57cec5SDimitry Andric 
7110b57cec5SDimitry Andric       default:
712*5ffd83dbSDimitry Andric         break;
713*5ffd83dbSDimitry Andric       }
714*5ffd83dbSDimitry Andric     }
7150b57cec5SDimitry Andric   }
7160b57cec5SDimitry Andric 
717*5ffd83dbSDimitry Andric   for (MachineInstr *MI : Worklist)
718*5ffd83dbSDimitry Andric     process(*MI);
719*5ffd83dbSDimitry Andric 
720*5ffd83dbSDimitry Andric   optimizeEndCf();
721*5ffd83dbSDimitry Andric 
722*5ffd83dbSDimitry Andric   LoweredEndCf.clear();
723*5ffd83dbSDimitry Andric   LoweredIf.clear();
724*5ffd83dbSDimitry Andric   NeedsKillCleanup.clear();
7250b57cec5SDimitry Andric 
7260b57cec5SDimitry Andric   return true;
7270b57cec5SDimitry Andric }
728