//===-- SIPreEmitPeephole.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass performs peephole optimizations before code emission.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

using namespace llvm;

#define DEBUG_TYPE "si-pre-emit-peephole"

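// Backing storage for the -amdgpu-skip-threshold option; cl::location below
// lets the command-line flag write directly to this variable.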
static unsigned SkipThreshold;

static cl::opt<unsigned, true> SkipThresholdFlag(
    "amdgpu-skip-threshold", cl::Hidden,
    cl::desc(
        "Number of instructions before jumping over divergent control flow"),
    cl::location(SkipThreshold), cl::init(12));

namespace {

class SIPreEmitPeephole : public MachineFunctionPass {
private:
  const SIInstrInfo *TII = nullptr;
  const SIRegisterInfo *TRI = nullptr;

  bool optimizeVccBranch(MachineInstr &MI) const;
  bool optimizeSetGPR(MachineInstr &First, MachineInstr &MI) const;
  bool getBlockDestinations(MachineBasicBlock &SrcMBB,
                            MachineBasicBlock *&TrueMBB,
                            MachineBasicBlock *&FalseMBB,
                            SmallVectorImpl<MachineOperand> &Cond);
  bool mustRetainExeczBranch(const MachineBasicBlock &From,
                             const MachineBasicBlock &To) const;
  bool removeExeczBranch(MachineInstr &MI, MachineBasicBlock &SrcMBB);

public:
  static char ID;

  SIPreEmitPeephole() : MachineFunctionPass(ID) {
    initializeSIPreEmitPeepholePass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;
};

} // End anonymous namespace.

INITIALIZE_PASS(SIPreEmitPeephole, DEBUG_TYPE,
                "SI peephole optimizations", false, false)

char SIPreEmitPeephole::ID = 0;

char &llvm::SIPreEmitPeepholeID = SIPreEmitPeephole::ID;

bool SIPreEmitPeephole::optimizeVccBranch(MachineInstr &MI) const {
  // Match:
  // sreg = -1 or 0
  // vcc = S_AND_B64 exec, sreg or S_ANDN2_B64 exec, sreg
  // S_CBRANCH_VCC[N]Z
  // =>
  // S_CBRANCH_EXEC[N]Z
  // We end up with this pattern sometimes after basic block placement.
  // It happens while combining a block which assigns -1 or 0 to a saved mask
  // and another block which consumes that saved mask and then a branch.
  bool Changed = false;
  MachineBasicBlock &MBB = *MI.getParent();
  const GCNSubtarget &ST = MBB.getParent()->getSubtarget<GCNSubtarget>();
  const bool IsWave32 = ST.isWave32();
  const unsigned CondReg = TRI->getVCC();
  const unsigned ExecReg = IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  const unsigned And = IsWave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const unsigned AndN2 = IsWave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
  const unsigned Mov = IsWave32 ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;

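  // Walk backwards from the branch looking for the S_AND/S_ANDN2 that
  // defines vcc. Give up if exec is clobbered first, or after a small
  // instruction threshold.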
  MachineBasicBlock::reverse_iterator A = MI.getReverseIterator(),
                                      E = MBB.rend();
  bool ReadsCond = false;
  unsigned Threshold = 5;
  for (++A; A != E; ++A) {
    if (!--Threshold)
      return false;
    if (A->modifiesRegister(ExecReg, TRI))
      return false;
    if (A->modifiesRegister(CondReg, TRI)) {
      if (!A->definesRegister(CondReg, TRI) ||
          (A->getOpcode() != And && A->getOpcode() != AndN2))
        return false;
      break;
    }
    ReadsCond |= A->readsRegister(CondReg, TRI);
  }
  if (A == E)
    return false;

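  // Canonicalize the S_AND/S_ANDN2 so that exec is the first operand, then
  // bail out unless the second operand is a register or a 0/-1 immediate.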
  MachineOperand &Op1 = A->getOperand(1);
  MachineOperand &Op2 = A->getOperand(2);
  if (Op1.getReg() != ExecReg && Op2.isReg() && Op2.getReg() == ExecReg) {
    TII->commuteInstruction(*A);
    Changed = true;
  }
  if (Op1.getReg() != ExecReg)
    return Changed;
  if (Op2.isImm() && !(Op2.getImm() == -1 || Op2.getImm() == 0))
    return Changed;

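  // Determine the constant mask value: either directly from an immediate
  // second operand, or by scanning backwards for the move-immediate that
  // defines the register operand.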
  int64_t MaskValue = 0;
  Register SReg;
  if (Op2.isReg()) {
    SReg = Op2.getReg();
    auto M = std::next(A);
    bool ReadsSreg = false;
    for (; M != E; ++M) {
      if (M->definesRegister(SReg, TRI))
        break;
      if (M->modifiesRegister(SReg, TRI))
        return Changed;
      ReadsSreg |= M->readsRegister(SReg, TRI);
    }
    if (M == E || !M->isMoveImmediate() || !M->getOperand(1).isImm() ||
        (M->getOperand(1).getImm() != -1 && M->getOperand(1).getImm() != 0))
      return Changed;
    MaskValue = M->getOperand(1).getImm();
    // First, if sreg is only used in the AND instruction, fold the immediate
    // into the AND.
    if (!ReadsSreg && Op2.isKill()) {
      A->getOperand(2).ChangeToImmediate(MaskValue);
      M->eraseFromParent();
    }
  } else if (Op2.isImm()) {
    MaskValue = Op2.getImm();
  } else {
    llvm_unreachable("Op2 must be register or immediate");
  }

  // Invert mask for s_andn2
  assert(MaskValue == 0 || MaskValue == -1);
  if (A->getOpcode() == AndN2)
    MaskValue = ~MaskValue;

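  // If the AND's SCC result is dead and vcc is not read between the AND and
  // the branch, the AND only feeds the branch condition: replace it with a
  // MOV of the known mask value, or drop it entirely when the branch kills
  // vcc.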
  if (!ReadsCond && A->registerDefIsDead(AMDGPU::SCC)) {
    if (!MI.killsRegister(CondReg, TRI)) {
      // Replace AND with MOV
      if (MaskValue == 0) {
        BuildMI(*A->getParent(), *A, A->getDebugLoc(), TII->get(Mov), CondReg)
            .addImm(0);
      } else {
        BuildMI(*A->getParent(), *A, A->getDebugLoc(), TII->get(Mov), CondReg)
            .addReg(ExecReg);
      }
    }
    // Remove AND instruction
    A->eraseFromParent();
  }

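  // Rewrite the branch itself now that the condition value is known: fold it
  // to an unconditional branch, delete it, or turn it into an exec-based
  // branch.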
  bool IsVCCZ = MI.getOpcode() == AMDGPU::S_CBRANCH_VCCZ;
  if (SReg == ExecReg) {
    // EXEC is updated directly
    if (IsVCCZ) {
      MI.eraseFromParent();
      return true;
    }
    MI.setDesc(TII->get(AMDGPU::S_BRANCH));
  } else if (IsVCCZ && MaskValue == 0) {
    // Will always branch
    // Remove all successors shadowed by new unconditional branch
    MachineBasicBlock *Parent = MI.getParent();
    SmallVector<MachineInstr *, 4> ToRemove;
    bool Found = false;
    for (MachineInstr &Term : Parent->terminators()) {
      if (Found) {
        if (Term.isBranch())
          ToRemove.push_back(&Term);
      } else {
        Found = Term.isIdenticalTo(MI);
      }
    }
    assert(Found && "conditional branch is not terminator");
    for (auto BranchMI : ToRemove) {
      MachineOperand &Dst = BranchMI->getOperand(0);
      assert(Dst.isMBB() && "destination is not basic block");
      Parent->removeSuccessor(Dst.getMBB());
      BranchMI->eraseFromParent();
    }

    if (MachineBasicBlock *Succ = Parent->getFallThrough()) {
      Parent->removeSuccessor(Succ);
    }

    // Rewrite to unconditional branch
    MI.setDesc(TII->get(AMDGPU::S_BRANCH));
  } else if (!IsVCCZ && MaskValue == 0) {
    // Will never branch
    MachineOperand &Dst = MI.getOperand(0);
    assert(Dst.isMBB() && "destination is not basic block");
    MI.getParent()->removeSuccessor(Dst.getMBB());
    MI.eraseFromParent();
    return true;
  } else if (MaskValue == -1) {
    // Depends only on EXEC
    MI.setDesc(
        TII->get(IsVCCZ ? AMDGPU::S_CBRANCH_EXECZ : AMDGPU::S_CBRANCH_EXECNZ));
  }

  MI.RemoveOperand(MI.findRegisterUseOperandIdx(CondReg, false /*Kill*/, TRI));
  MI.addImplicitDefUseOperands(*MBB.getParent());

  return true;
}

bool SIPreEmitPeephole::optimizeSetGPR(MachineInstr &First,
                                       MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  const MachineFunction &MF = *MBB.getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineOperand *Idx = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
  Register IdxReg = Idx->isReg() ? Idx->getReg() : Register();
  SmallVector<MachineInstr *, 4> ToRemove;
  bool IdxOn = true;

  if (!MI.isIdenticalTo(First))
    return false;

  // Scan the instructions between the two identical S_SET_GPR_IDX_ON
  // instructions; if nothing in between disturbs the index mode, the second
  // one is redundant.
  for (MachineBasicBlock::instr_iterator I = std::next(First.getIterator()),
                                         E = MI.getIterator();
       I != E; ++I) {
    if (I->isBundle())
      continue;
    switch (I->getOpcode()) {
    case AMDGPU::S_SET_GPR_IDX_MODE:
      return false;
    case AMDGPU::S_SET_GPR_IDX_OFF:
      IdxOn = false;
      ToRemove.push_back(&*I);
      break;
    default:
      if (I->modifiesRegister(AMDGPU::M0, TRI))
        return false;
      if (IdxReg && I->modifiesRegister(IdxReg, TRI))
        return false;
      if (llvm::any_of(I->operands(),
                       [&MRI, this](const MachineOperand &MO) {
                         return MO.isReg() &&
                                TRI->isVectorRegister(MRI, MO.getReg());
                       })) {
        // The only exception allowed here is another indirect vector move
        // with the same mode.
        if (!IdxOn || !(I->getOpcode() == AMDGPU::V_MOV_B32_indirect_write ||
                        I->getOpcode() == AMDGPU::V_MOV_B32_indirect_read))
          return false;
      }
    }
  }

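  // The second S_SET_GPR_IDX_ON is redundant; remove it along with any
  // intervening S_SET_GPR_IDX_OFF instructions.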
  MI.eraseFromBundle();
  for (MachineInstr *RI : ToRemove)
    RI->eraseFromBundle();
  return true;
}

bool SIPreEmitPeephole::getBlockDestinations(
    MachineBasicBlock &SrcMBB, MachineBasicBlock *&TrueMBB,
    MachineBasicBlock *&FalseMBB, SmallVectorImpl<MachineOperand> &Cond) {
  if (TII->analyzeBranch(SrcMBB, TrueMBB, FalseMBB, Cond))
    return false;

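  // A conditional branch with no explicit false destination falls through to
  // the next block in layout order.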
  if (!FalseMBB)
    FalseMBB = SrcMBB.getNextNode();

  return true;
}

bool SIPreEmitPeephole::mustRetainExeczBranch(
    const MachineBasicBlock &From, const MachineBasicBlock &To) const {
  unsigned NumInstr = 0;
  const MachineFunction *MF = From.getParent();

  for (MachineFunction::const_iterator MBBI(&From), ToI(&To), End = MF->end();
       MBBI != End && MBBI != ToI; ++MBBI) {
    const MachineBasicBlock &MBB = *MBBI;

    for (const MachineInstr &MI : MBB) {
      // When a uniform loop is inside non-uniform control flow, the branch
      // leaving the loop might never be taken when EXEC = 0.
      // Hence we should retain cbranch out of the loop lest it become infinite.
      if (MI.isConditionalBranch())
        return true;

      if (TII->hasUnwantedEffectsWhenEXECEmpty(MI))
        return true;

      // These instructions are potentially expensive even if EXEC = 0.
      if (TII->isSMRD(MI) || TII->isVMEM(MI) || TII->isFLAT(MI) ||
          TII->isDS(MI) || MI.getOpcode() == AMDGPU::S_WAITCNT)
        return true;

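      // Beyond SkipThreshold instructions, taking the branch is assumed
      // cheaper than executing the skipped region with EXEC = 0.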
      ++NumInstr;
      if (NumInstr >= SkipThreshold)
        return true;
    }
  }

  return false;
}

// Returns true if the skip branch instruction is removed.
bool SIPreEmitPeephole::removeExeczBranch(MachineInstr &MI,
                                          MachineBasicBlock &SrcMBB) {
  MachineBasicBlock *TrueMBB = nullptr;
  MachineBasicBlock *FalseMBB = nullptr;
  SmallVector<MachineOperand, 1> Cond;

  if (!getBlockDestinations(SrcMBB, TrueMBB, FalseMBB, Cond))
    return false;

  // Consider only the forward branches.
  if ((SrcMBB.getNumber() >= TrueMBB->getNumber()) ||
      mustRetainExeczBranch(*FalseMBB, *TrueMBB))
    return false;

  LLVM_DEBUG(dbgs() << "Removing the execz branch: " << MI);
  MI.eraseFromParent();
  SrcMBB.removeSuccessor(TrueMBB);

  return true;
}

bool SIPreEmitPeephole::runOnMachineFunction(MachineFunction &MF) {
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TII = ST.getInstrInfo();
  TRI = &TII->getRegisterInfo();
  bool Changed = false;

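  // Renumber blocks so that block numbers follow layout order; this lets
  // removeExeczBranch recognize forward branches by comparing block numbers.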
  MF.RenumberBlocks();

  for (MachineBasicBlock &MBB : MF) {
    MachineBasicBlock::iterator TermI = MBB.getFirstTerminator();
    // Check first terminator for branches to optimize
    if (TermI != MBB.end()) {
      MachineInstr &MI = *TermI;
      switch (MI.getOpcode()) {
      case AMDGPU::S_CBRANCH_VCCZ:
      case AMDGPU::S_CBRANCH_VCCNZ:
        Changed |= optimizeVccBranch(MI);
        break;
      case AMDGPU::S_CBRANCH_EXECZ:
        Changed |= removeExeczBranch(MI, MBB);
        break;
      }
    }

    if (!ST.hasVGPRIndexMode())
      continue;

    MachineInstr *SetGPRMI = nullptr;
    const unsigned Threshold = 20;
    unsigned Count = 0;
    // Scan the block for pairs of S_SET_GPR_IDX_ON instructions to see if
    // the second one is redundant. Do the expensive checks in
    // optimizeSetGPR() and limit the search distance to 20 instructions for
    // compile-time reasons.
    // Note: this needs to work on bundles, as S_SET_GPR_IDX* instructions
    // may be bundled with the instructions they modify.
    for (auto &MI :
         make_early_inc_range(make_range(MBB.instr_begin(), MBB.instr_end()))) {
      if (Count == Threshold)
        SetGPRMI = nullptr;
      else
        ++Count;

      if (MI.getOpcode() != AMDGPU::S_SET_GPR_IDX_ON)
        continue;

      Count = 0;
      if (!SetGPRMI) {
        SetGPRMI = &MI;
        continue;
      }

      if (optimizeSetGPR(*SetGPRMI, MI))
        Changed = true;
      else
        SetGPRMI = &MI;
    }
  }

  return Changed;
}