//===-- SIOptimizeExecMaskingPreRA.cpp ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass performs exec mask handling peephole optimizations which need
/// to be done before register allocation to reduce register pressure.
///
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/InitializePasses.h"

using namespace llvm;

#define DEBUG_TYPE "si-optimize-exec-masking-pre-ra"

namespace {

class SIOptimizeExecMaskingPreRA : public MachineFunctionPass {
private:
  const SIRegisterInfo *TRI;
  const SIInstrInfo *TII;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  unsigned AndOpc;
  unsigned Andn2Opc;
  unsigned OrSaveExecOpc;
  unsigned XorTermrOpc;
  MCRegister CondReg;
  MCRegister ExecReg;

  bool optimizeVcndVcmpPair(MachineBasicBlock &MBB);
  bool optimizeElseBranch(MachineBasicBlock &MBB);

public:
  static char ID;

  SIOptimizeExecMaskingPreRA() : MachineFunctionPass(ID) {
    initializeSIOptimizeExecMaskingPreRAPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override {
    return "SI optimize exec mask operations pre-RA";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                      "SI optimize exec mask operations pre-RA", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(SIOptimizeExecMaskingPreRA, DEBUG_TYPE,
                    "SI optimize exec mask operations pre-RA", false, false)

char SIOptimizeExecMaskingPreRA::ID = 0;

char &llvm::SIOptimizeExecMaskingPreRAID = SIOptimizeExecMaskingPreRA::ID;

FunctionPass *llvm::createSIOptimizeExecMaskingPreRAPass() {
  return new SIOptimizeExecMaskingPreRA();
}

// See if there is a def between \p AndIdx and \p SelIdx that needs to live
// beyond \p AndIdx.
static bool isDefBetween(const LiveRange &LR, SlotIndex AndIdx,
                         SlotIndex SelIdx) {
  LiveQueryResult AndLRQ = LR.Query(AndIdx);
  return (!AndLRQ.isKill() && AndLRQ.valueIn() != LR.Query(SelIdx).valueOut());
}

// FIXME: Why do we bother trying to handle physical registers here?
static bool isDefBetween(const SIRegisterInfo &TRI,
                         LiveIntervals *LIS, Register Reg,
                         const MachineInstr &Sel, const MachineInstr &And) {
  SlotIndex AndIdx = LIS->getInstructionIndex(And).getRegSlot();
  SlotIndex SelIdx = LIS->getInstructionIndex(Sel).getRegSlot();

  if (Reg.isVirtual())
    return isDefBetween(LIS->getInterval(Reg), AndIdx, SelIdx);

  for (MCRegUnitIterator UI(Reg.asMCReg(), &TRI); UI.isValid(); ++UI) {
    if (isDefBetween(LIS->getRegUnit(*UI), AndIdx, SelIdx))
      return true;
  }

  return false;
}

// Optimize sequence
//    %sel = V_CNDMASK_B32_e64 0, 1, %cc
//    %cmp = V_CMP_NE_U32 1, %sel
//    $vcc = S_AND_B64 $exec, %cmp
//    S_CBRANCH_VCC[N]Z
// =>
//    $vcc = S_ANDN2_B64 $exec, %cc
//    S_CBRANCH_VCC[N]Z
//
// This is the negation pattern inserted by DAGCombiner::visitBRCOND() in
// rebuildSetCC(). We start from the S_CBRANCH to avoid an exhaustive search,
// but only the first three instructions are really needed. S_AND_B64 with exec
// is a required part of the pattern since V_CNDMASK_B32 writes zeroes for
// inactive lanes.
//
// Returns true on success.
bool SIOptimizeExecMaskingPreRA::optimizeVcndVcmpPair(MachineBasicBlock &MBB) {
  auto I = llvm::find_if(MBB.terminators(), [](const MachineInstr &MI) {
                           unsigned Opc = MI.getOpcode();
                           return Opc == AMDGPU::S_CBRANCH_VCCZ ||
                                  Opc == AMDGPU::S_CBRANCH_VCCNZ; });
  if (I == MBB.terminators().end())
    return false;

  auto *And =
      TRI->findReachingDef(CondReg, AMDGPU::NoSubRegister, *I, *MRI, LIS);
  if (!And || And->getOpcode() != AndOpc ||
      !And->getOperand(1).isReg() || !And->getOperand(2).isReg())
    return false;

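  // One of the S_AND sources must be exec; treat the other source as the
  // compare result, normalizing AndCC/CmpReg/CmpSubReg to refer to it.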
  MachineOperand *AndCC = &And->getOperand(1);
  Register CmpReg = AndCC->getReg();
  unsigned CmpSubReg = AndCC->getSubReg();
  if (CmpReg == Register(ExecReg)) {
    AndCC = &And->getOperand(2);
    CmpReg = AndCC->getReg();
    CmpSubReg = AndCC->getSubReg();
  } else if (And->getOperand(2).getReg() != Register(ExecReg)) {
    return false;
  }

  auto *Cmp = TRI->findReachingDef(CmpReg, CmpSubReg, *And, *MRI, LIS);
  if (!Cmp || !(Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e32 ||
                Cmp->getOpcode() == AMDGPU::V_CMP_NE_U32_e64) ||
      Cmp->getParent() != And->getParent())
    return false;

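  // The V_CMP must compare against the immediate 1; canonicalize so that Op1
  // is the register operand and Op2 is the immediate.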
  MachineOperand *Op1 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src0);
  MachineOperand *Op2 = TII->getNamedOperand(*Cmp, AMDGPU::OpName::src1);
  if (Op1->isImm() && Op2->isReg())
    std::swap(Op1, Op2);
  if (!Op1->isReg() || !Op2->isImm() || Op2->getImm() != 1)
    return false;

  Register SelReg = Op1->getReg();
  if (SelReg.isPhysical())
    return false;

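  // Find the V_CNDMASK_B32 that defines the register being compared.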
  auto *Sel = TRI->findReachingDef(SelReg, Op1->getSubReg(), *Cmp, *MRI, LIS);
  if (!Sel || Sel->getOpcode() != AMDGPU::V_CNDMASK_B32_e64)
    return false;

  if (TII->hasModifiersSet(*Sel, AMDGPU::OpName::src0_modifiers) ||
      TII->hasModifiersSet(*Sel, AMDGPU::OpName::src1_modifiers))
    return false;

  Op1 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src0);
  Op2 = TII->getNamedOperand(*Sel, AMDGPU::OpName::src1);
  MachineOperand *CC = TII->getNamedOperand(*Sel, AMDGPU::OpName::src2);
  if (!Op1->isImm() || !Op2->isImm() || !CC->isReg() ||
      Op1->getImm() != 0 || Op2->getImm() != 1)
    return false;

  Register CCReg = CC->getReg();

  // If there was a def between the select and the and, we would need to move it
  // to fold this.
  if (isDefBetween(*TRI, LIS, CCReg, *Sel, *And))
    return false;

  // Cannot safely mirror live intervals with PHI nodes, so check for these
  // before optimization.
  SlotIndex SelIdx = LIS->getInstructionIndex(*Sel);
  LiveInterval *SelLI = &LIS->getInterval(SelReg);
  if (llvm::any_of(SelLI->vnis(),
                    [](const VNInfo *VNI) {
                      return VNI->isPHIDef();
                    }))
    return false;

  // TODO: Guard against implicit def operands?
  LLVM_DEBUG(dbgs() << "Folding sequence:\n\t" << *Sel << '\t' << *Cmp << '\t'
                    << *And);

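  // Replace the S_AND with an S_ANDN2 of exec and the select's original
  // condition, reusing the S_AND's destination register.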
  MachineInstr *Andn2 =
      BuildMI(MBB, *And, And->getDebugLoc(), TII->get(Andn2Opc),
              And->getOperand(0).getReg())
          .addReg(ExecReg)
          .addReg(CCReg, getUndefRegState(CC->isUndef()), CC->getSubReg());
  MachineOperand &AndSCC = And->getOperand(3);
  assert(AndSCC.getReg() == AMDGPU::SCC);
  MachineOperand &Andn2SCC = Andn2->getOperand(3);
  assert(Andn2SCC.getReg() == AMDGPU::SCC);
  Andn2SCC.setIsDead(AndSCC.isDead());

  SlotIndex AndIdx = LIS->ReplaceMachineInstrInMaps(*And, *Andn2);
  And->eraseFromParent();

  LLVM_DEBUG(dbgs() << "=>\n\t" << *Andn2 << '\n');

  // Update live intervals for CCReg before potentially removing CmpReg/SelReg,
  // and their associated liveness information.
  SlotIndex CmpIdx = LIS->getInstructionIndex(*Cmp);
  if (CCReg.isVirtual()) {
    // Apply live ranges from SelLI to CCReg potentially matching splits
    // and extending to loop boundaries.

    auto applyLiveRanges = [&](LiveRange &Dst, VNInfo *VNI) {
      // Copy live ranges from SelLI, adjusting start and end as required
      auto DefSegment = SelLI->FindSegmentContaining(SelIdx.getRegSlot());
      assert(DefSegment != SelLI->end() &&
             "No live interval segment covering definition?");
      for (auto I = DefSegment; I != SelLI->end(); ++I) {
        SlotIndex Start = I->start < SelIdx.getRegSlot() ?
                          SelIdx.getRegSlot() : I->start;
        SlotIndex End = I->end < AndIdx.getRegSlot() || I->end.isBlock() ?
                        I->end : AndIdx.getRegSlot();
        Dst.addSegment(LiveRange::Segment(Start, End, VNI));
      }
      // If SelLI does not cover AndIdx (because Cmp killed Sel) then extend.
      if (!SelLI->getSegmentContaining(AndIdx.getRegSlot()))
        Dst.addSegment(LiveRange::Segment(CmpIdx.getRegSlot(), AndIdx.getRegSlot(), VNI));
    };

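    // The condition register is now read by the S_ANDN2, so its live range
    // (and any subranges for its subregister lanes) must reach that point;
    // extend it using SelReg's segments as the template.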
    LiveInterval &CCLI = LIS->getInterval(CCReg);
    auto CCQ = CCLI.Query(SelIdx.getRegSlot());
    if (CCQ.valueIn())
      applyLiveRanges(CCLI, CCQ.valueIn());

    if (CC->getSubReg()) {
      LaneBitmask Mask = TRI->getSubRegIndexLaneMask(CC->getSubReg());
      BumpPtrAllocator &Allocator = LIS->getVNInfoAllocator();
      CCLI.refineSubRanges(
          Allocator, Mask,
          [=](LiveInterval::SubRange &SR) {
            auto CCQS = SR.Query(SelIdx.getRegSlot());
            if (CCQS.valueIn())
              applyLiveRanges(SR, CCQS.valueIn());
          },
          *LIS->getSlotIndexes(), *TRI);
      CCLI.removeEmptySubRanges();

      SmallVector<LiveInterval *> SplitLIs;
      LIS->splitSeparateComponents(CCLI, SplitLIs);
    }
  } else
    LIS->removeAllRegUnitsForPhysReg(CCReg);

  // Try to remove the compare. The compare value must not be used between the
  // cmp and the s_and_b64 if it is VCC, and must be entirely unused if it is
  // any other register.
  LiveInterval *CmpLI = CmpReg.isVirtual() ? &LIS->getInterval(CmpReg) : nullptr;
  if ((CmpLI && CmpLI->Query(AndIdx.getRegSlot()).isKill()) ||
      (CmpReg == Register(CondReg) &&
       std::none_of(std::next(Cmp->getIterator()), Andn2->getIterator(),
                    [&](const MachineInstr &MI) {
                      return MI.readsRegister(CondReg, TRI);
                    }))) {
    LLVM_DEBUG(dbgs() << "Erasing: " << *Cmp << '\n');
    if (CmpLI)
      LIS->removeVRegDefAt(*CmpLI, CmpIdx.getRegSlot());
    LIS->RemoveMachineInstrFromMaps(*Cmp);
    Cmp->eraseFromParent();

    // Try to remove v_cndmask_b32.
    // Kill status must be checked before shrinking the live range.
    bool IsKill = SelLI->Query(CmpIdx.getRegSlot()).isKill();
    LIS->shrinkToUses(SelLI);
    bool IsDead = SelLI->Query(SelIdx.getRegSlot()).isDeadDef();
    if (MRI->use_nodbg_empty(SelReg) && (IsKill || IsDead)) {
      LLVM_DEBUG(dbgs() << "Erasing: " << *Sel << '\n');

      LIS->removeVRegDefAt(*SelLI, SelIdx.getRegSlot());
      LIS->RemoveMachineInstrFromMaps(*Sel);
      Sel->eraseFromParent();
    }
  }

  return true;
}

// Optimize sequence
//    %dst = S_OR_SAVEEXEC %src
//    ... instructions not modifying exec ...
//    %tmp = S_AND $exec, %dst
//    $exec = S_XOR_term $exec, %tmp
// =>
//    %dst = S_OR_SAVEEXEC %src
//    ... instructions not modifying exec ...
//    $exec = S_XOR_term $exec, %dst
//
// Clean up potentially unnecessary code added for safety during
// control flow lowering.
//
// Return whether any changes were made to MBB.
bool SIOptimizeExecMaskingPreRA::optimizeElseBranch(MachineBasicBlock &MBB) {
  if (MBB.empty())
    return false;

  // Check this is an else block.
  auto First = MBB.begin();
  MachineInstr &SaveExecMI = *First;
  if (SaveExecMI.getOpcode() != OrSaveExecOpc)
    return false;

  auto I = llvm::find_if(MBB.terminators(), [this](const MachineInstr &MI) {
    return MI.getOpcode() == XorTermrOpc;
  });
  if (I == MBB.terminators().end())
    return false;

  MachineInstr &XorTermMI = *I;
  if (XorTermMI.getOperand(1).getReg() != Register(ExecReg))
    return false;

  Register SavedExecReg = SaveExecMI.getOperand(0).getReg();
  Register DstReg = XorTermMI.getOperand(2).getReg();

  // Find potentially unnecessary S_AND
  MachineInstr *AndExecMI = nullptr;
  I--;
  while (I != First && !AndExecMI) {
    if (I->getOpcode() == AndOpc && I->getOperand(0).getReg() == DstReg &&
        I->getOperand(1).getReg() == Register(ExecReg))
      AndExecMI = &*I;
    I--;
  }
  if (!AndExecMI)
    return false;

  // Check for exec modifying instructions.
  // Note: exec defs do not create live ranges beyond the
  // instruction so isDefBetween cannot be used.
  // Instead just check that the def segments are adjacent.
  SlotIndex StartIdx = LIS->getInstructionIndex(SaveExecMI);
  SlotIndex EndIdx = LIS->getInstructionIndex(*AndExecMI);
  for (MCRegUnitIterator UI(ExecReg, TRI); UI.isValid(); ++UI) {
    LiveRange &RegUnit = LIS->getRegUnit(*UI);
    if (RegUnit.find(StartIdx) != std::prev(RegUnit.find(EndIdx)))
      return false;
  }

  // Remove unnecessary S_AND
  LIS->removeInterval(SavedExecReg);
  LIS->removeInterval(DstReg);

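  // Rewrite the S_OR_SAVEEXEC to define the S_AND's destination directly so
  // that the S_XOR_term operand stays valid once the S_AND is deleted.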
  SaveExecMI.getOperand(0).setReg(DstReg);

  LIS->RemoveMachineInstrFromMaps(*AndExecMI);
  AndExecMI->eraseFromParent();

  LIS->createAndComputeVirtRegInterval(DstReg);

  return true;
}

bool SIOptimizeExecMaskingPreRA::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  TRI = ST.getRegisterInfo();
  TII = ST.getInstrInfo();
  MRI = &MF.getRegInfo();
  LIS = &getAnalysis<LiveIntervals>();

  const bool Wave32 = ST.isWave32();
  AndOpc = Wave32 ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  Andn2Opc = Wave32 ? AMDGPU::S_ANDN2_B32 : AMDGPU::S_ANDN2_B64;
  OrSaveExecOpc =
      Wave32 ? AMDGPU::S_OR_SAVEEXEC_B32 : AMDGPU::S_OR_SAVEEXEC_B64;
  XorTermrOpc = Wave32 ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
  CondReg = MCRegister::from(Wave32 ? AMDGPU::VCC_LO : AMDGPU::VCC);
  ExecReg = MCRegister::from(Wave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC);

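  // Registers whose liveness must be recomputed after any of the rewrites
  // below have been applied.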
  DenseSet<Register> RecalcRegs({AMDGPU::EXEC_LO, AMDGPU::EXEC_HI});
  bool Changed = false;

  for (MachineBasicBlock &MBB : MF) {

    if (optimizeElseBranch(MBB)) {
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    if (optimizeVcndVcmpPair(MBB)) {
      RecalcRegs.insert(AMDGPU::VCC_LO);
      RecalcRegs.insert(AMDGPU::VCC_HI);
      RecalcRegs.insert(AMDGPU::SCC);
      Changed = true;
    }

    // Try to remove unneeded instructions before s_endpgm.
    if (MBB.succ_empty()) {
      if (MBB.empty())
        continue;

      // Skip this if the endpgm has any implicit uses, otherwise we would need
      // to be careful to update / remove them.
      // S_ENDPGM always has a single imm operand that is not used other than
      // to end up in the encoding.
      MachineInstr &Term = MBB.back();
      if (Term.getOpcode() != AMDGPU::S_ENDPGM || Term.getNumOperands() != 1)
        continue;

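      // Walk backwards from the block that ends in S_ENDPGM, deleting
      // side-effect-free instructions; when a whole block is cleared, ascend
      // into predecessors that have this block as their only successor.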
      SmallVector<MachineBasicBlock*, 4> Blocks({&MBB});

      while (!Blocks.empty()) {
        auto CurBB = Blocks.pop_back_val();
        auto I = CurBB->rbegin(), E = CurBB->rend();
        if (I != E) {
          if (I->isUnconditionalBranch() || I->getOpcode() == AMDGPU::S_ENDPGM)
            ++I;
          else if (I->isBranch())
            continue;
        }

        while (I != E) {
          if (I->isDebugInstr()) {
            I = std::next(I);
            continue;
          }

          if (I->mayStore() || I->isBarrier() || I->isCall() ||
              I->hasUnmodeledSideEffects() || I->hasOrderedMemoryRef())
            break;

          LLVM_DEBUG(dbgs()
                     << "Removing no effect instruction: " << *I << '\n');

          for (auto &Op : I->operands()) {
            if (Op.isReg())
              RecalcRegs.insert(Op.getReg());
          }

          auto Next = std::next(I);
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          I = Next;

          Changed = true;
        }

        if (I != E)
          continue;

        // Try to ascend predecessors.
        for (auto *Pred : CurBB->predecessors()) {
          if (Pred->succ_size() == 1)
            Blocks.push_back(Pred);
        }
      }
      continue;
    }

    // If the only user of a logical operation is a move to exec, fold it now
    // to prevent forming a saveexec. I.e.:
    //
    //    %0:sreg_64 = COPY $exec
    //    %1:sreg_64 = S_AND_B64 %0:sreg_64, %2:sreg_64
    // =>
    //    %1 = S_AND_B64 $exec, %2:sreg_64
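    // Limit how far back from the end of the block we scan for the exec copy.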
    unsigned ScanThreshold = 10;
    for (auto I = MBB.rbegin(), E = MBB.rend(); I != E
         && ScanThreshold--; ++I) {
      // Continue scanning if this is not a full exec copy
      if (!(I->isFullCopy() && I->getOperand(1).getReg() == Register(ExecReg)))
        continue;

      Register SavedExec = I->getOperand(0).getReg();
      if (SavedExec.isVirtual() && MRI->hasOneNonDBGUse(SavedExec)) {
        MachineInstr *SingleExecUser = &*MRI->use_instr_nodbg_begin(SavedExec);
        int Idx = SingleExecUser->findRegisterUseOperandIdx(SavedExec);
        assert(Idx != -1);
        if (SingleExecUser->getParent() == I->getParent() &&
            !SingleExecUser->getOperand(Idx).isImplicit() &&
            TII->isOperandLegal(*SingleExecUser, Idx, &I->getOperand(1))) {
          LLVM_DEBUG(dbgs() << "Redundant EXEC COPY: " << *I << '\n');
          LIS->RemoveMachineInstrFromMaps(*I);
          I->eraseFromParent();
          MRI->replaceRegWith(SavedExec, ExecReg);
          LIS->removeInterval(SavedExec);
          Changed = true;
        }
      }
      break;
    }
  }

  if (Changed) {
    for (auto Reg : RecalcRegs) {
      if (Reg.isVirtual()) {
        LIS->removeInterval(Reg);
        if (!MRI->reg_empty(Reg))
          LIS->createAndComputeVirtRegInterval(Reg);
      } else {
        LIS->removeAllRegUnitsForPhysReg(Reg);
      }
    }
  }

  return Changed;
}