xref: /freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp (revision 1db9f3b21e39176dd5b67cf8ac378633b172463e)
1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
23 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
24 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
25 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
26 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/IR/DiagnosticInfo.h"
29 #include "llvm/IR/IntrinsicsAMDGPU.h"
30 #include <optional>
31 
32 #define DEBUG_TYPE "amdgpu-isel"
33 
34 using namespace llvm;
35 using namespace MIPatternMatch;
36 
37 static cl::opt<bool> AllowRiskySelect(
38   "amdgpu-global-isel-risky-select",
39   cl::desc("Allow GlobalISel to select cases that are likely not to work yet"),
40   cl::init(false),
41   cl::ReallyHidden);
42 
43 #define GET_GLOBALISEL_IMPL
44 #define AMDGPUSubtarget GCNSubtarget
45 #include "AMDGPUGenGlobalISel.inc"
46 #undef GET_GLOBALISEL_IMPL
47 #undef AMDGPUSubtarget
48 
49 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
50     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
51     const AMDGPUTargetMachine &TM)
52     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
53       STI(STI),
54       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
55 #define GET_GLOBALISEL_PREDICATES_INIT
56 #include "AMDGPUGenGlobalISel.inc"
57 #undef GET_GLOBALISEL_PREDICATES_INIT
58 #define GET_GLOBALISEL_TEMPORARIES_INIT
59 #include "AMDGPUGenGlobalISel.inc"
60 #undef GET_GLOBALISEL_TEMPORARIES_INIT
61 {
62 }
63 
64 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
65 
66 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
67                                         CodeGenCoverage *CoverageInfo,
68                                         ProfileSummaryInfo *PSI,
69                                         BlockFrequencyInfo *BFI) {
70   MRI = &MF.getRegInfo();
71   Subtarget = &MF.getSubtarget<GCNSubtarget>();
72   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
73 }
74 
75 // Return the wave level SGPR base address if this is a wave address.
76 static Register getWaveAddress(const MachineInstr *Def) {
77   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
78              ? Def->getOperand(1).getReg()
79              : Register();
80 }
81 
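// Return true if Reg is a wave-sized boolean (the VCC register bank or the
// boolean register class), as opposed to an SGPR/VGPR holding an s1 value.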
82 bool AMDGPUInstructionSelector::isVCC(Register Reg,
83                                       const MachineRegisterInfo &MRI) const {
84   // The verifier is oblivious to s1 being a valid value for wavesize registers.
85   if (Reg.isPhysical())
86     return false;
87 
88   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
89   const TargetRegisterClass *RC =
90       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
91   if (RC) {
92     const LLT Ty = MRI.getType(Reg);
93     if (!Ty.isValid() || Ty.getSizeInBits() != 1)
94       return false;
95     // G_TRUNC s1 result is never vcc.
96     return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
97            RC->hasSuperClassEq(TRI.getBoolRC());
98   }
99 
100   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
101   return RB->getID() == AMDGPU::VCCRegBankID;
102 }
103 
104 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
105                                                         unsigned NewOpc) const {
106   MI.setDesc(TII.get(NewOpc));
107   MI.removeOperand(1); // Remove intrinsic ID.
108   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
109 
110   MachineOperand &Dst = MI.getOperand(0);
111   MachineOperand &Src = MI.getOperand(1);
112 
113   // TODO: This should be legalized to s32 if needed
114   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
115     return false;
116 
117   const TargetRegisterClass *DstRC
118     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
119   const TargetRegisterClass *SrcRC
120     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
121   if (!DstRC || DstRC != SrcRC)
122     return false;
123 
124   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
125          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
126 }
127 
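// Select a generic COPY. Copies producing a VCC boolean need extra handling:
// a constant source becomes an S_MOV of 0 or -1, and any other non-VCC source
// is masked to bit 0 and compared against zero with V_CMP_NE_U32.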
128 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
129   const DebugLoc &DL = I.getDebugLoc();
130   MachineBasicBlock *BB = I.getParent();
131   I.setDesc(TII.get(TargetOpcode::COPY));
132 
133   const MachineOperand &Src = I.getOperand(1);
134   MachineOperand &Dst = I.getOperand(0);
135   Register DstReg = Dst.getReg();
136   Register SrcReg = Src.getReg();
137 
138   if (isVCC(DstReg, *MRI)) {
139     if (SrcReg == AMDGPU::SCC) {
140       const TargetRegisterClass *RC
141         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
142       if (!RC)
143         return true;
144       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
145     }
146 
147     if (!isVCC(SrcReg, *MRI)) {
148       // TODO: Should probably leave the copy and let copyPhysReg expand it.
149       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
150         return false;
151 
152       const TargetRegisterClass *SrcRC
153         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
154 
155       std::optional<ValueAndVReg> ConstVal =
156           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
157       if (ConstVal) {
158         unsigned MovOpc =
159             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
160         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
161             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
162       } else {
163         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
164 
165         // We can't trust the high bits at this point, so clear them.
166 
167         // TODO: Skip masking high bits if def is known boolean.
168 
169         bool IsSGPR = TRI.isSGPRClass(SrcRC);
170         unsigned AndOpc =
171             IsSGPR ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
172         auto And = BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
173             .addImm(1)
174             .addReg(SrcReg);
175         if (IsSGPR)
176           And.setOperandDead(3); // Dead scc
177 
178         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
179             .addImm(0)
180             .addReg(MaskedReg);
181       }
182 
183       if (!MRI->getRegClassOrNull(SrcReg))
184         MRI->setRegClass(SrcReg, SrcRC);
185       I.eraseFromParent();
186       return true;
187     }
188 
189     const TargetRegisterClass *RC =
190       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
191     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
192       return false;
193 
194     return true;
195   }
196 
197   for (const MachineOperand &MO : I.operands()) {
198     if (MO.getReg().isPhysical())
199       continue;
200 
201     const TargetRegisterClass *RC =
202             TRI.getConstrainedRegClassForOperand(MO, *MRI);
203     if (!RC)
204       continue;
205     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
206   }
207   return true;
208 }
209 
210 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
211   const Register DefReg = I.getOperand(0).getReg();
212   const LLT DefTy = MRI->getType(DefReg);
213   if (DefTy == LLT::scalar(1)) {
214     if (!AllowRiskySelect) {
215       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
216       return false;
217     }
218 
219     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
220   }
221 
222   // TODO: Verify this doesn't have insane operands (e.g. a VGPR to SGPR copy)
223 
224   const RegClassOrRegBank &RegClassOrBank =
225     MRI->getRegClassOrRegBank(DefReg);
226 
227   const TargetRegisterClass *DefRC
228     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
229   if (!DefRC) {
230     if (!DefTy.isValid()) {
231       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
232       return false;
233     }
234 
235     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
236     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
237     if (!DefRC) {
238       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
239       return false;
240     }
241   }
242 
243   // TODO: Verify that all registers have the same bank
244   I.setDesc(TII.get(TargetOpcode::PHI));
245   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
246 }
247 
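// Return an operand for the low (sub0) or high (sub1) 32-bit half of a 64-bit
// operand: a COPY of the subregister for register operands, or the
// corresponding half of a splittable immediate.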
248 MachineOperand
249 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
250                                            const TargetRegisterClass &SubRC,
251                                            unsigned SubIdx) const {
252 
253   MachineInstr *MI = MO.getParent();
254   MachineBasicBlock *BB = MO.getParent()->getParent();
255   Register DstReg = MRI->createVirtualRegister(&SubRC);
256 
257   if (MO.isReg()) {
258     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
259     Register Reg = MO.getReg();
260     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
261             .addReg(Reg, 0, ComposedSubIdx);
262 
263     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
264                                      MO.isKill(), MO.isDead(), MO.isUndef(),
265                                      MO.isEarlyClobber(), 0, MO.isDebug(),
266                                      MO.isInternalRead());
267   }
268 
269   assert(MO.isImm());
270 
271   APInt Imm(64, MO.getImm());
272 
273   switch (SubIdx) {
274   default:
275     llvm_unreachable("do not know how to split immediate with this sub index.");
276   case AMDGPU::sub0:
277     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
278   case AMDGPU::sub1:
279     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
280   }
281 }
282 
283 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
284   switch (Opc) {
285   case AMDGPU::G_AND:
286     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
287   case AMDGPU::G_OR:
288     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
289   case AMDGPU::G_XOR:
290     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
291   default:
292     llvm_unreachable("not a bit op");
293   }
294 }
295 
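// Select G_AND/G_OR/G_XOR on the SGPR or VCC banks into S_AND/S_OR/S_XOR
// (32- or 64-bit), adding the implicit dead SCC def.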
296 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
297   Register DstReg = I.getOperand(0).getReg();
298   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
299 
300   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
301   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
302       DstRB->getID() != AMDGPU::VCCRegBankID)
303     return false;
304 
305   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
306                             STI.isWave64());
307   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
308 
309   // Dead implicit-def of scc
310   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
311                                          true, // isImp
312                                          false, // isKill
313                                          true)); // isDead
314   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
315 }
316 
317 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
318   MachineBasicBlock *BB = I.getParent();
319   MachineFunction *MF = BB->getParent();
320   Register DstReg = I.getOperand(0).getReg();
321   const DebugLoc &DL = I.getDebugLoc();
322   LLT Ty = MRI->getType(DstReg);
323   if (Ty.isVector())
324     return false;
325 
326   unsigned Size = Ty.getSizeInBits();
327   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
328   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
329   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
330 
331   if (Size == 32) {
332     if (IsSALU) {
333       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
334       MachineInstr *Add =
335         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
336         .add(I.getOperand(1))
337         .add(I.getOperand(2))
338         .setOperandDead(3); // Dead scc
339       I.eraseFromParent();
340       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
341     }
342 
343     if (STI.hasAddNoCarry()) {
344       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
345       I.setDesc(TII.get(Opc));
346       I.addOperand(*MF, MachineOperand::CreateImm(0));
347       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
348       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
349     }
350 
351     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
352 
353     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
354     MachineInstr *Add
355       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
356       .addDef(UnusedCarry, RegState::Dead)
357       .add(I.getOperand(1))
358       .add(I.getOperand(2))
359       .addImm(0);
360     I.eraseFromParent();
361     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
362   }
363 
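  // 64-bit case: split both operands into 32-bit halves, add the low halves
  // with carry-out and the high halves with carry-in, then recombine the
  // results with a REG_SEQUENCE.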
364   assert(!Sub && "illegal sub should not reach here");
365 
366   const TargetRegisterClass &RC
367     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
368   const TargetRegisterClass &HalfRC
369     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
370 
371   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
372   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
373   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
374   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
375 
376   Register DstLo = MRI->createVirtualRegister(&HalfRC);
377   Register DstHi = MRI->createVirtualRegister(&HalfRC);
378 
379   if (IsSALU) {
380     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
381       .add(Lo1)
382       .add(Lo2);
383     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
384       .add(Hi1)
385       .add(Hi2)
386       .setOperandDead(3); // Dead scc
387   } else {
388     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
389     Register CarryReg = MRI->createVirtualRegister(CarryRC);
390     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
391       .addDef(CarryReg)
392       .add(Lo1)
393       .add(Lo2)
394       .addImm(0);
395     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
396       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
397       .add(Hi1)
398       .add(Hi2)
399       .addReg(CarryReg, RegState::Kill)
400       .addImm(0);
401 
402     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
403       return false;
404   }
405 
406   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
407     .addReg(DstLo)
408     .addImm(AMDGPU::sub0)
409     .addReg(DstHi)
410     .addImm(AMDGPU::sub1);
411 
412 
413   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
414     return false;
415 
416   I.eraseFromParent();
417   return true;
418 }
419 
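// Select G_UADDO/G_USUBO/G_UADDE/G_USUBE. When the carry-out is a VCC boolean,
// use the VALU V_ADD_CO/V_SUB_CO/V_ADDC/V_SUBB forms; otherwise use the scalar
// S_ADD/S_SUB/S_ADDC/S_SUBB forms and copy the carry to/from SCC.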
420 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
421   MachineInstr &I) const {
422   MachineBasicBlock *BB = I.getParent();
423   MachineFunction *MF = BB->getParent();
424   const DebugLoc &DL = I.getDebugLoc();
425   Register Dst0Reg = I.getOperand(0).getReg();
426   Register Dst1Reg = I.getOperand(1).getReg();
427   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
428                      I.getOpcode() == AMDGPU::G_UADDE;
429   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
430                           I.getOpcode() == AMDGPU::G_USUBE;
431 
432   if (isVCC(Dst1Reg, *MRI)) {
433     unsigned NoCarryOpc =
434         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
435     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
436     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
437     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
438     I.addOperand(*MF, MachineOperand::CreateImm(0));
439     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
440   }
441 
442   Register Src0Reg = I.getOperand(2).getReg();
443   Register Src1Reg = I.getOperand(3).getReg();
444 
445   if (HasCarryIn) {
446     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
447       .addReg(I.getOperand(4).getReg());
448   }
449 
450   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
451   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
452 
453   auto CarryInst = BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
454     .add(I.getOperand(2))
455     .add(I.getOperand(3));
456 
457   if (MRI->use_nodbg_empty(Dst1Reg)) {
458     CarryInst.setOperandDead(3); // Dead scc
459   } else {
460     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
461       .addReg(AMDGPU::SCC);
462     if (!MRI->getRegClassOrNull(Dst1Reg))
463       MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
464   }
465 
466   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
467       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
468       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
469     return false;
470 
471   if (HasCarryIn &&
472       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
473                                     AMDGPU::SReg_32RegClass, *MRI))
474     return false;
475 
476   I.eraseFromParent();
477   return true;
478 }
479 
480 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
481     MachineInstr &I) const {
482   MachineBasicBlock *BB = I.getParent();
483   MachineFunction *MF = BB->getParent();
484   const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
485 
486   unsigned Opc;
487   if (Subtarget->hasMADIntraFwdBug())
488     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
489                      : AMDGPU::V_MAD_I64_I32_gfx11_e64;
490   else
491     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
492   I.setDesc(TII.get(Opc));
493   I.addOperand(*MF, MachineOperand::CreateImm(0));
494   I.addImplicitDefUseOperands(*MF);
495   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
496 }
497 
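// Select G_EXTRACT of a 32-bit-aligned chunk as a plain subregister copy.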
498 // TODO: We should probably legalize these to use only 32-bit results.
499 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
500   MachineBasicBlock *BB = I.getParent();
501   Register DstReg = I.getOperand(0).getReg();
502   Register SrcReg = I.getOperand(1).getReg();
503   LLT DstTy = MRI->getType(DstReg);
504   LLT SrcTy = MRI->getType(SrcReg);
505   const unsigned SrcSize = SrcTy.getSizeInBits();
506   unsigned DstSize = DstTy.getSizeInBits();
507 
508   // TODO: Should handle any multiple of 32 offset.
509   unsigned Offset = I.getOperand(2).getImm();
510   if (Offset % 32 != 0 || DstSize > 128)
511     return false;
512 
513   // 16-bit operations really use 32-bit registers.
514   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
515   if (DstSize == 16)
516     DstSize = 32;
517 
518   const TargetRegisterClass *DstRC =
519     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
520   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
521     return false;
522 
523   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
524   const TargetRegisterClass *SrcRC =
525       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
526   if (!SrcRC)
527     return false;
528   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
529                                                          DstSize / 32);
530   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
531   if (!SrcRC)
532     return false;
533 
534   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
535                                     *SrcRC, I.getOperand(1));
536   const DebugLoc &DL = I.getDebugLoc();
537   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
538     .addReg(SrcReg, 0, SubReg);
539 
540   I.eraseFromParent();
541   return true;
542 }
543 
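// Select G_MERGE_VALUES of 32-bit or wider pieces into a single REG_SEQUENCE;
// narrower sources are left to the imported TableGen patterns.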
544 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
545   MachineBasicBlock *BB = MI.getParent();
546   Register DstReg = MI.getOperand(0).getReg();
547   LLT DstTy = MRI->getType(DstReg);
548   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
549 
550   const unsigned SrcSize = SrcTy.getSizeInBits();
551   if (SrcSize < 32)
552     return selectImpl(MI, *CoverageInfo);
553 
554   const DebugLoc &DL = MI.getDebugLoc();
555   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
556   const unsigned DstSize = DstTy.getSizeInBits();
557   const TargetRegisterClass *DstRC =
558       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
559   if (!DstRC)
560     return false;
561 
562   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
563   MachineInstrBuilder MIB =
564     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
565   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
566     MachineOperand &Src = MI.getOperand(I + 1);
567     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
568     MIB.addImm(SubRegs[I]);
569 
570     const TargetRegisterClass *SrcRC
571       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
572     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
573       return false;
574   }
575 
576   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
577     return false;
578 
579   MI.eraseFromParent();
580   return true;
581 }
582 
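// Select G_UNMERGE_VALUES by copying each destination out of the corresponding
// subregister of the source.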
583 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
584   MachineBasicBlock *BB = MI.getParent();
585   const int NumDst = MI.getNumOperands() - 1;
586 
587   MachineOperand &Src = MI.getOperand(NumDst);
588 
589   Register SrcReg = Src.getReg();
590   Register DstReg0 = MI.getOperand(0).getReg();
591   LLT DstTy = MRI->getType(DstReg0);
592   LLT SrcTy = MRI->getType(SrcReg);
593 
594   const unsigned DstSize = DstTy.getSizeInBits();
595   const unsigned SrcSize = SrcTy.getSizeInBits();
596   const DebugLoc &DL = MI.getDebugLoc();
597   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
598 
599   const TargetRegisterClass *SrcRC =
600       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
601   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
602     return false;
603 
604   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
605   // source, and this relies on the fact that the same subregister indices are
606   // used for both.
607   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
608   for (int I = 0, E = NumDst; I != E; ++I) {
609     MachineOperand &Dst = MI.getOperand(I);
610     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
611       .addReg(SrcReg, 0, SubRegs[I]);
612 
613     // Make sure the subregister index is valid for the source register.
614     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
615     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
616       return false;
617 
618     const TargetRegisterClass *DstRC =
619       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
620     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
621       return false;
622   }
623 
624   MI.eraseFromParent();
625   return true;
626 }
627 
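// Select G_BUILD_VECTOR / G_BUILD_VECTOR_TRUNC. Wide G_BUILD_VECTOR is handled
// like G_MERGE_VALUES; v2s16 results are built by folding constant operands,
// via the imported patterns, with V_AND + V_LSHL_OR on the VALU, or with
// S_PACK_* on the SALU.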
628 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR(MachineInstr &MI) const {
629   assert(MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC ||
630          MI.getOpcode() == AMDGPU::G_BUILD_VECTOR);
631 
632   Register Src0 = MI.getOperand(1).getReg();
633   Register Src1 = MI.getOperand(2).getReg();
634   LLT SrcTy = MRI->getType(Src0);
635   const unsigned SrcSize = SrcTy.getSizeInBits();
636 
637   // BUILD_VECTOR with >= 32-bit sources is handled like G_MERGE_VALUES.
638   if (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR && SrcSize >= 32) {
639     return selectG_MERGE_VALUES(MI);
640   }
641 
642   // Selection logic below is for V2S16 only.
643   // For G_BUILD_VECTOR_TRUNC, additionally check that the operands are s32.
644   Register Dst = MI.getOperand(0).getReg();
645   if (MRI->getType(Dst) != LLT::fixed_vector(2, 16) ||
646       (MI.getOpcode() == AMDGPU::G_BUILD_VECTOR_TRUNC &&
647        SrcTy != LLT::scalar(32)))
648     return selectImpl(MI, *CoverageInfo);
649 
650   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
651   if (DstBank->getID() == AMDGPU::AGPRRegBankID)
652     return false;
653 
654   assert(DstBank->getID() == AMDGPU::SGPRRegBankID ||
655          DstBank->getID() == AMDGPU::VGPRRegBankID);
656   const bool IsVector = DstBank->getID() == AMDGPU::VGPRRegBankID;
657 
658   const DebugLoc &DL = MI.getDebugLoc();
659   MachineBasicBlock *BB = MI.getParent();
660 
661   // First, before trying TableGen patterns, check if both sources are
662   // constants. In those cases, we can trivially compute the final constant
663   // and emit a simple move.
664   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
665   if (ConstSrc1) {
666     auto ConstSrc0 =
667         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
668     if (ConstSrc0) {
669       const int64_t K0 = ConstSrc0->Value.getSExtValue();
670       const int64_t K1 = ConstSrc1->Value.getSExtValue();
671       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
672       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
673       uint32_t Imm = Lo16 | (Hi16 << 16);
674 
675       // VALU
676       if (IsVector) {
677         BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), Dst).addImm(Imm);
678         MI.eraseFromParent();
679         return RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI);
680       }
681 
682       // SALU
683       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst).addImm(Imm);
684       MI.eraseFromParent();
685       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
686     }
687   }
688 
689   // Now try TableGen patterns.
690   if (selectImpl(MI, *CoverageInfo))
691     return true;
692 
693   // TODO: This should probably be a combine somewhere
694   // (build_vector $src0, undef)  -> copy $src0
695   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
696   if (Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
697     MI.setDesc(TII.get(AMDGPU::COPY));
698     MI.removeOperand(2);
699     const auto &RC =
700         IsVector ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
701     return RBI.constrainGenericRegister(Dst, RC, *MRI) &&
702            RBI.constrainGenericRegister(Src0, RC, *MRI);
703   }
704 
705   // TODO: Can be improved?
706   if (IsVector) {
707     Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
708     auto MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
709                    .addImm(0xFFFF)
710                    .addReg(Src0);
711     if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
712       return false;
713 
714     MIB = BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), Dst)
715               .addReg(Src1)
716               .addImm(16)
717               .addReg(TmpReg);
718     if (!constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI))
719       return false;
720 
721     MI.eraseFromParent();
722     return true;
723   }
724 
725   Register ShiftSrc0;
726   Register ShiftSrc1;
727 
728   // With multiple uses of the shift, this will duplicate the shift and
729   // increase register pressure.
730   //
731   // (build_vector (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
732   //  => (S_PACK_HH_B32_B16 $src0, $src1)
733   // (build_vector (lshr_oneuse SReg_32:$src0, 16), $src1)
734   //  => (S_PACK_HL_B32_B16 $src0, $src1)
735   // (build_vector $src0, (lshr_oneuse SReg_32:$src1, 16))
736   //  => (S_PACK_LH_B32_B16 $src0, $src1)
737   // (build_vector $src0, $src1)
738   //  => (S_PACK_LL_B32_B16 $src0, $src1)
739 
740   bool Shift0 = mi_match(
741       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
742 
743   bool Shift1 = mi_match(
744       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
745 
746   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
747   if (Shift0 && Shift1) {
748     Opc = AMDGPU::S_PACK_HH_B32_B16;
749     MI.getOperand(1).setReg(ShiftSrc0);
750     MI.getOperand(2).setReg(ShiftSrc1);
751   } else if (Shift1) {
752     Opc = AMDGPU::S_PACK_LH_B32_B16;
753     MI.getOperand(2).setReg(ShiftSrc1);
754   } else if (Shift0) {
755     auto ConstSrc1 =
756         getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
757     if (ConstSrc1 && ConstSrc1->Value == 0) {
758       // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
759       auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
760                      .addReg(ShiftSrc0)
761                      .addImm(16)
762                      .setOperandDead(3); // Dead scc
763 
764       MI.eraseFromParent();
765       return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
766     }
767     if (STI.hasSPackHL()) {
768       Opc = AMDGPU::S_PACK_HL_B32_B16;
769       MI.getOperand(1).setReg(ShiftSrc0);
770     }
771   }
772 
773   MI.setDesc(TII.get(Opc));
774   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
775 }
776 
777 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
778   return selectG_ADD_SUB(I);
779 }
780 
781 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
782   const MachineOperand &MO = I.getOperand(0);
783 
784   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
785   // regbank check here is to know why getConstrainedRegClassForOperand failed.
786   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
787   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
788       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
789     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
790     return true;
791   }
792 
793   return false;
794 }
795 
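// Select G_INSERT at a 32-bit-aligned offset as an INSERT_SUBREG.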
796 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
797   MachineBasicBlock *BB = I.getParent();
798 
799   Register DstReg = I.getOperand(0).getReg();
800   Register Src0Reg = I.getOperand(1).getReg();
801   Register Src1Reg = I.getOperand(2).getReg();
802   LLT Src1Ty = MRI->getType(Src1Reg);
803 
804   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
805   unsigned InsSize = Src1Ty.getSizeInBits();
806 
807   int64_t Offset = I.getOperand(3).getImm();
808 
809   // FIXME: These cases should already be illegal, making this check unnecessary.
810   if (Offset % 32 != 0 || InsSize % 32 != 0)
811     return false;
812 
813   // Currently not handled by getSubRegFromChannel.
814   if (InsSize > 128)
815     return false;
816 
817   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
818   if (SubReg == AMDGPU::NoSubRegister)
819     return false;
820 
821   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
822   const TargetRegisterClass *DstRC =
823       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
824   if (!DstRC)
825     return false;
826 
827   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
828   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
829   const TargetRegisterClass *Src0RC =
830       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
831   const TargetRegisterClass *Src1RC =
832       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
833 
834   // Deal with weird cases where the class only partially supports the subreg
835   // index.
836   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
837   if (!Src0RC || !Src1RC)
838     return false;
839 
840   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
841       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
842       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
843     return false;
844 
845   const DebugLoc &DL = I.getDebugLoc();
846   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
847     .addReg(Src0Reg)
848     .addReg(Src1Reg)
849     .addImm(SubReg);
850 
851   I.eraseFromParent();
852   return true;
853 }
854 
855 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
856   Register DstReg = MI.getOperand(0).getReg();
857   Register SrcReg = MI.getOperand(1).getReg();
858   Register OffsetReg = MI.getOperand(2).getReg();
859   Register WidthReg = MI.getOperand(3).getReg();
860 
861   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
862          "scalar BFX instructions are expanded in regbankselect");
863   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
864          "64-bit vector BFX instructions are expanded in regbankselect");
865 
866   const DebugLoc &DL = MI.getDebugLoc();
867   MachineBasicBlock *MBB = MI.getParent();
868 
869   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
870   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
871   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
872                  .addReg(SrcReg)
873                  .addReg(OffsetReg)
874                  .addReg(WidthReg);
875   MI.eraseFromParent();
876   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
877 }
878 
879 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
880   if (STI.getLDSBankCount() != 16)
881     return selectImpl(MI, *CoverageInfo);
882 
883   Register Dst = MI.getOperand(0).getReg();
884   Register Src0 = MI.getOperand(2).getReg();
885   Register M0Val = MI.getOperand(6).getReg();
886   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
887       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
888       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
889     return false;
890 
891   // This requires 2 instructions. It is possible to write a pattern to support
892   // this, but the generated isel emitter doesn't correctly deal with multiple
893   // output instructions using the same physical register input. The copy to m0
894   // is incorrectly placed before the second instruction.
895   //
896   // TODO: Match source modifiers.
897 
898   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
899   const DebugLoc &DL = MI.getDebugLoc();
900   MachineBasicBlock *MBB = MI.getParent();
901 
902   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
903     .addReg(M0Val);
904   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
905     .addImm(2)
906     .addImm(MI.getOperand(4).getImm())  // $attr
907     .addImm(MI.getOperand(3).getImm()); // $attrchan
908 
909   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
910     .addImm(0)                          // $src0_modifiers
911     .addReg(Src0)                       // $src0
912     .addImm(MI.getOperand(4).getImm())  // $attr
913     .addImm(MI.getOperand(3).getImm())  // $attrchan
914     .addImm(0)                          // $src2_modifiers
915     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
916     .addImm(MI.getOperand(5).getImm())  // $high
917     .addImm(0)                          // $clamp
918     .addImm(0);                         // $omod
919 
920   MI.eraseFromParent();
921   return true;
922 }
923 
924 // Writelane is special in that it can use SGPR and M0 (which would normally
925 // count as using the constant bus twice - but in this case it is allowed since
926 // the lane selector doesn't count as a use of the constant bus). However, it is
927 // still required to abide by the 1 SGPR rule. Fix this up if we might have
928 // multiple SGPRs.
929 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
930   // With a constant bus limit of at least 2, there's no issue.
931   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
932     return selectImpl(MI, *CoverageInfo);
933 
934   MachineBasicBlock *MBB = MI.getParent();
935   const DebugLoc &DL = MI.getDebugLoc();
936   Register VDst = MI.getOperand(0).getReg();
937   Register Val = MI.getOperand(2).getReg();
938   Register LaneSelect = MI.getOperand(3).getReg();
939   Register VDstIn = MI.getOperand(4).getReg();
940 
941   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
942 
943   std::optional<ValueAndVReg> ConstSelect =
944       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
945   if (ConstSelect) {
946     // The selector has to be an inline immediate, so we can use whatever for
947     // the other operands.
948     MIB.addReg(Val);
949     MIB.addImm(ConstSelect->Value.getSExtValue() &
950                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
951   } else {
952     std::optional<ValueAndVReg> ConstVal =
953         getIConstantVRegValWithLookThrough(Val, *MRI);
954 
955     // If the value written is an inline immediate, we can get away without a
956     // copy to m0.
957     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
958                                                  STI.hasInv2PiInlineImm())) {
959       MIB.addImm(ConstVal->Value.getSExtValue());
960       MIB.addReg(LaneSelect);
961     } else {
962       MIB.addReg(Val);
963 
964       // If the lane selector was originally in a VGPR and copied with
965       // readfirstlane, there's a hazard to read the same SGPR from the
966       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
967       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
968 
969       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
970         .addReg(LaneSelect);
971       MIB.addReg(AMDGPU::M0);
972     }
973   }
974 
975   MIB.addReg(VDstIn);
976 
977   MI.eraseFromParent();
978   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
979 }
980 
981 // We need to handle this here because tablegen doesn't support matching
982 // instructions with multiple outputs.
983 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
984   Register Dst0 = MI.getOperand(0).getReg();
985   Register Dst1 = MI.getOperand(1).getReg();
986 
987   LLT Ty = MRI->getType(Dst0);
988   unsigned Opc;
989   if (Ty == LLT::scalar(32))
990     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
991   else if (Ty == LLT::scalar(64))
992     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
993   else
994     return false;
995 
996   // TODO: Match source modifiers.
997 
998   const DebugLoc &DL = MI.getDebugLoc();
999   MachineBasicBlock *MBB = MI.getParent();
1000 
1001   Register Numer = MI.getOperand(3).getReg();
1002   Register Denom = MI.getOperand(4).getReg();
1003   unsigned ChooseDenom = MI.getOperand(5).getImm();
1004 
1005   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
1006 
1007   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
1008     .addDef(Dst1)
1009     .addImm(0)     // $src0_modifiers
1010     .addUse(Src0)  // $src0
1011     .addImm(0)     // $src1_modifiers
1012     .addUse(Denom) // $src1
1013     .addImm(0)     // $src2_modifiers
1014     .addUse(Numer) // $src2
1015     .addImm(0)     // $clamp
1016     .addImm(0);    // $omod
1017 
1018   MI.eraseFromParent();
1019   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1020 }
1021 
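// Side-effect-free intrinsics that need manual handling; anything not listed
// falls through to the imported TableGen patterns (selectImpl).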
1022 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
1023   unsigned IntrinsicID = cast<GIntrinsic>(I).getIntrinsicID();
1024   switch (IntrinsicID) {
1025   case Intrinsic::amdgcn_if_break: {
1026     MachineBasicBlock *BB = I.getParent();
1027 
1028     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1029     // SelectionDAG uses for wave32 vs wave64.
1030     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
1031       .add(I.getOperand(0))
1032       .add(I.getOperand(2))
1033       .add(I.getOperand(3));
1034 
1035     Register DstReg = I.getOperand(0).getReg();
1036     Register Src0Reg = I.getOperand(2).getReg();
1037     Register Src1Reg = I.getOperand(3).getReg();
1038 
1039     I.eraseFromParent();
1040 
1041     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
1042       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1043 
1044     return true;
1045   }
1046   case Intrinsic::amdgcn_interp_p1_f16:
1047     return selectInterpP1F16(I);
1048   case Intrinsic::amdgcn_wqm:
1049     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
1050   case Intrinsic::amdgcn_softwqm:
1051     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
1052   case Intrinsic::amdgcn_strict_wwm:
1053   case Intrinsic::amdgcn_wwm:
1054     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
1055   case Intrinsic::amdgcn_strict_wqm:
1056     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
1057   case Intrinsic::amdgcn_writelane:
1058     return selectWritelane(I);
1059   case Intrinsic::amdgcn_div_scale:
1060     return selectDivScale(I);
1061   case Intrinsic::amdgcn_icmp:
1062   case Intrinsic::amdgcn_fcmp:
1063     if (selectImpl(I, *CoverageInfo))
1064       return true;
1065     return selectIntrinsicCmp(I);
1066   case Intrinsic::amdgcn_ballot:
1067     return selectBallot(I);
1068   case Intrinsic::amdgcn_inverse_ballot:
1069     return selectInverseBallot(I);
1070   case Intrinsic::amdgcn_reloc_constant:
1071     return selectRelocConstant(I);
1072   case Intrinsic::amdgcn_groupstaticsize:
1073     return selectGroupStaticSize(I);
1074   case Intrinsic::returnaddress:
1075     return selectReturnAddress(I);
1076   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
1077   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
1078   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
1079   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
1080   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
1081   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
1082   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
1083   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
1084   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
1085   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
1086   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
1087   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
1088   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
1089   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
1090     return selectSMFMACIntrin(I);
1091   default:
1092     return selectImpl(I, *CoverageInfo);
1093   }
1094 }
1095 
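// Map a comparison predicate and operand size (16/32/64) to the corresponding
// VALU V_CMP opcode, preferring the true16 encodings when available. Returns
// -1 if the size is unsupported.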
1096 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size,
1097                           const GCNSubtarget &ST) {
1098   if (Size != 16 && Size != 32 && Size != 64)
1099     return -1;
1100 
1101   if (Size == 16 && !ST.has16BitInsts())
1102     return -1;
1103 
1104   const auto Select = [&](unsigned S16Opc, unsigned TrueS16Opc, unsigned S32Opc,
1105                           unsigned S64Opc) {
1106     if (Size == 16)
1107       return ST.hasTrue16BitInsts() ? TrueS16Opc : S16Opc;
1108     if (Size == 32)
1109       return S32Opc;
1110     return S64Opc;
1111   };
1112 
1113   switch (P) {
1114   default:
1115     llvm_unreachable("Unknown condition code!");
1116   case CmpInst::ICMP_NE:
1117     return Select(AMDGPU::V_CMP_NE_U16_e64, AMDGPU::V_CMP_NE_U16_t16_e64,
1118                   AMDGPU::V_CMP_NE_U32_e64, AMDGPU::V_CMP_NE_U64_e64);
1119   case CmpInst::ICMP_EQ:
1120     return Select(AMDGPU::V_CMP_EQ_U16_e64, AMDGPU::V_CMP_EQ_U16_t16_e64,
1121                   AMDGPU::V_CMP_EQ_U32_e64, AMDGPU::V_CMP_EQ_U64_e64);
1122   case CmpInst::ICMP_SGT:
1123     return Select(AMDGPU::V_CMP_GT_I16_e64, AMDGPU::V_CMP_GT_I16_t16_e64,
1124                   AMDGPU::V_CMP_GT_I32_e64, AMDGPU::V_CMP_GT_I64_e64);
1125   case CmpInst::ICMP_SGE:
1126     return Select(AMDGPU::V_CMP_GE_I16_e64, AMDGPU::V_CMP_GE_I16_t16_e64,
1127                   AMDGPU::V_CMP_GE_I32_e64, AMDGPU::V_CMP_GE_I64_e64);
1128   case CmpInst::ICMP_SLT:
1129     return Select(AMDGPU::V_CMP_LT_I16_e64, AMDGPU::V_CMP_LT_I16_t16_e64,
1130                   AMDGPU::V_CMP_LT_I32_e64, AMDGPU::V_CMP_LT_I64_e64);
1131   case CmpInst::ICMP_SLE:
1132     return Select(AMDGPU::V_CMP_LE_I16_e64, AMDGPU::V_CMP_LE_I16_t16_e64,
1133                   AMDGPU::V_CMP_LE_I32_e64, AMDGPU::V_CMP_LE_I64_e64);
1134   case CmpInst::ICMP_UGT:
1135     return Select(AMDGPU::V_CMP_GT_U16_e64, AMDGPU::V_CMP_GT_U16_t16_e64,
1136                   AMDGPU::V_CMP_GT_U32_e64, AMDGPU::V_CMP_GT_U64_e64);
1137   case CmpInst::ICMP_UGE:
1138     return Select(AMDGPU::V_CMP_GE_U16_e64, AMDGPU::V_CMP_GE_U16_t16_e64,
1139                   AMDGPU::V_CMP_GE_U32_e64, AMDGPU::V_CMP_GE_U64_e64);
1140   case CmpInst::ICMP_ULT:
1141     return Select(AMDGPU::V_CMP_LT_U16_e64, AMDGPU::V_CMP_LT_U16_t16_e64,
1142                   AMDGPU::V_CMP_LT_U32_e64, AMDGPU::V_CMP_LT_U64_e64);
1143   case CmpInst::ICMP_ULE:
1144     return Select(AMDGPU::V_CMP_LE_U16_e64, AMDGPU::V_CMP_LE_U16_t16_e64,
1145                   AMDGPU::V_CMP_LE_U32_e64, AMDGPU::V_CMP_LE_U64_e64);
1146 
1147   case CmpInst::FCMP_OEQ:
1148     return Select(AMDGPU::V_CMP_EQ_F16_e64, AMDGPU::V_CMP_EQ_F16_t16_e64,
1149                   AMDGPU::V_CMP_EQ_F32_e64, AMDGPU::V_CMP_EQ_F64_e64);
1150   case CmpInst::FCMP_OGT:
1151     return Select(AMDGPU::V_CMP_GT_F16_e64, AMDGPU::V_CMP_GT_F16_t16_e64,
1152                   AMDGPU::V_CMP_GT_F32_e64, AMDGPU::V_CMP_GT_F64_e64);
1153   case CmpInst::FCMP_OGE:
1154     return Select(AMDGPU::V_CMP_GE_F16_e64, AMDGPU::V_CMP_GE_F16_t16_e64,
1155                   AMDGPU::V_CMP_GE_F32_e64, AMDGPU::V_CMP_GE_F64_e64);
1156   case CmpInst::FCMP_OLT:
1157     return Select(AMDGPU::V_CMP_LT_F16_e64, AMDGPU::V_CMP_LT_F16_t16_e64,
1158                   AMDGPU::V_CMP_LT_F32_e64, AMDGPU::V_CMP_LT_F64_e64);
1159   case CmpInst::FCMP_OLE:
1160     return Select(AMDGPU::V_CMP_LE_F16_e64, AMDGPU::V_CMP_LE_F16_t16_e64,
1161                   AMDGPU::V_CMP_LE_F32_e64, AMDGPU::V_CMP_LE_F64_e64);
1162   case CmpInst::FCMP_ONE:
1163     return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64,
1164                   AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64);
1165   case CmpInst::FCMP_ORD:
1166     return Select(AMDGPU::V_CMP_O_F16_e64, AMDGPU::V_CMP_O_F16_t16_e64,
1167                   AMDGPU::V_CMP_O_F32_e64, AMDGPU::V_CMP_O_F64_e64);
1168   case CmpInst::FCMP_UNO:
1169     return Select(AMDGPU::V_CMP_U_F16_e64, AMDGPU::V_CMP_U_F16_t16_e64,
1170                   AMDGPU::V_CMP_U_F32_e64, AMDGPU::V_CMP_U_F64_e64);
1171   case CmpInst::FCMP_UEQ:
1172     return Select(AMDGPU::V_CMP_NLG_F16_e64, AMDGPU::V_CMP_NLG_F16_t16_e64,
1173                   AMDGPU::V_CMP_NLG_F32_e64, AMDGPU::V_CMP_NLG_F64_e64);
1174   case CmpInst::FCMP_UGT:
1175     return Select(AMDGPU::V_CMP_NLE_F16_e64, AMDGPU::V_CMP_NLE_F16_t16_e64,
1176                   AMDGPU::V_CMP_NLE_F32_e64, AMDGPU::V_CMP_NLE_F64_e64);
1177   case CmpInst::FCMP_UGE:
1178     return Select(AMDGPU::V_CMP_NLT_F16_e64, AMDGPU::V_CMP_NLT_F16_t16_e64,
1179                   AMDGPU::V_CMP_NLT_F32_e64, AMDGPU::V_CMP_NLT_F64_e64);
1180   case CmpInst::FCMP_ULT:
1181     return Select(AMDGPU::V_CMP_NGE_F16_e64, AMDGPU::V_CMP_NGE_F16_t16_e64,
1182                   AMDGPU::V_CMP_NGE_F32_e64, AMDGPU::V_CMP_NGE_F64_e64);
1183   case CmpInst::FCMP_ULE:
1184     return Select(AMDGPU::V_CMP_NGT_F16_e64, AMDGPU::V_CMP_NGT_F16_t16_e64,
1185                   AMDGPU::V_CMP_NGT_F32_e64, AMDGPU::V_CMP_NGT_F64_e64);
1186   case CmpInst::FCMP_UNE:
1187     return Select(AMDGPU::V_CMP_NEQ_F16_e64, AMDGPU::V_CMP_NEQ_F16_t16_e64,
1188                   AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F64_e64);
1189   case CmpInst::FCMP_TRUE:
1190     return Select(AMDGPU::V_CMP_TRU_F16_e64, AMDGPU::V_CMP_TRU_F16_t16_e64,
1191                   AMDGPU::V_CMP_TRU_F32_e64, AMDGPU::V_CMP_TRU_F64_e64);
1192   case CmpInst::FCMP_FALSE:
1193     return Select(AMDGPU::V_CMP_F_F16_e64, AMDGPU::V_CMP_F_F16_t16_e64,
1194                   AMDGPU::V_CMP_F_F32_e64, AMDGPU::V_CMP_F_F64_e64);
1195   }
1196 }
1197 
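// Map a comparison predicate and operand size to a scalar S_CMP opcode, or -1
// when no SALU form exists (e.g. 64-bit compares other than EQ/NE, or F16
// compares without SALU float instructions).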
1198 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1199                                               unsigned Size) const {
1200   if (Size == 64) {
1201     if (!STI.hasScalarCompareEq64())
1202       return -1;
1203 
1204     switch (P) {
1205     case CmpInst::ICMP_NE:
1206       return AMDGPU::S_CMP_LG_U64;
1207     case CmpInst::ICMP_EQ:
1208       return AMDGPU::S_CMP_EQ_U64;
1209     default:
1210       return -1;
1211     }
1212   }
1213 
1214   if (Size == 32) {
1215     switch (P) {
1216     case CmpInst::ICMP_NE:
1217       return AMDGPU::S_CMP_LG_U32;
1218     case CmpInst::ICMP_EQ:
1219       return AMDGPU::S_CMP_EQ_U32;
1220     case CmpInst::ICMP_SGT:
1221       return AMDGPU::S_CMP_GT_I32;
1222     case CmpInst::ICMP_SGE:
1223       return AMDGPU::S_CMP_GE_I32;
1224     case CmpInst::ICMP_SLT:
1225       return AMDGPU::S_CMP_LT_I32;
1226     case CmpInst::ICMP_SLE:
1227       return AMDGPU::S_CMP_LE_I32;
1228     case CmpInst::ICMP_UGT:
1229       return AMDGPU::S_CMP_GT_U32;
1230     case CmpInst::ICMP_UGE:
1231       return AMDGPU::S_CMP_GE_U32;
1232     case CmpInst::ICMP_ULT:
1233       return AMDGPU::S_CMP_LT_U32;
1234     case CmpInst::ICMP_ULE:
1235       return AMDGPU::S_CMP_LE_U32;
1236     case CmpInst::FCMP_OEQ:
1237       return AMDGPU::S_CMP_EQ_F32;
1238     case CmpInst::FCMP_OGT:
1239       return AMDGPU::S_CMP_GT_F32;
1240     case CmpInst::FCMP_OGE:
1241       return AMDGPU::S_CMP_GE_F32;
1242     case CmpInst::FCMP_OLT:
1243       return AMDGPU::S_CMP_LT_F32;
1244     case CmpInst::FCMP_OLE:
1245       return AMDGPU::S_CMP_LE_F32;
1246     case CmpInst::FCMP_ONE:
1247       return AMDGPU::S_CMP_LG_F32;
1248     case CmpInst::FCMP_ORD:
1249       return AMDGPU::S_CMP_O_F32;
1250     case CmpInst::FCMP_UNO:
1251       return AMDGPU::S_CMP_U_F32;
1252     case CmpInst::FCMP_UEQ:
1253       return AMDGPU::S_CMP_NLG_F32;
1254     case CmpInst::FCMP_UGT:
1255       return AMDGPU::S_CMP_NLE_F32;
1256     case CmpInst::FCMP_UGE:
1257       return AMDGPU::S_CMP_NLT_F32;
1258     case CmpInst::FCMP_ULT:
1259       return AMDGPU::S_CMP_NGE_F32;
1260     case CmpInst::FCMP_ULE:
1261       return AMDGPU::S_CMP_NGT_F32;
1262     case CmpInst::FCMP_UNE:
1263       return AMDGPU::S_CMP_NEQ_F32;
1264     default:
1265       llvm_unreachable("Unknown condition code!");
1266     }
1267   }
1268 
1269   if (Size == 16) {
1270     if (!STI.hasSALUFloatInsts())
1271       return -1;
1272 
1273     switch (P) {
1274     case CmpInst::FCMP_OEQ:
1275       return AMDGPU::S_CMP_EQ_F16;
1276     case CmpInst::FCMP_OGT:
1277       return AMDGPU::S_CMP_GT_F16;
1278     case CmpInst::FCMP_OGE:
1279       return AMDGPU::S_CMP_GE_F16;
1280     case CmpInst::FCMP_OLT:
1281       return AMDGPU::S_CMP_LT_F16;
1282     case CmpInst::FCMP_OLE:
1283       return AMDGPU::S_CMP_LE_F16;
1284     case CmpInst::FCMP_ONE:
1285       return AMDGPU::S_CMP_LG_F16;
1286     case CmpInst::FCMP_ORD:
1287       return AMDGPU::S_CMP_O_F16;
1288     case CmpInst::FCMP_UNO:
1289       return AMDGPU::S_CMP_U_F16;
1290     case CmpInst::FCMP_UEQ:
1291       return AMDGPU::S_CMP_NLG_F16;
1292     case CmpInst::FCMP_UGT:
1293       return AMDGPU::S_CMP_NLE_F16;
1294     case CmpInst::FCMP_UGE:
1295       return AMDGPU::S_CMP_NLT_F16;
1296     case CmpInst::FCMP_ULT:
1297       return AMDGPU::S_CMP_NGE_F16;
1298     case CmpInst::FCMP_ULE:
1299       return AMDGPU::S_CMP_NGT_F16;
1300     case CmpInst::FCMP_UNE:
1301       return AMDGPU::S_CMP_NEQ_F16;
1302     default:
1303       llvm_unreachable("Unknown condition code!");
1304     }
1305   }
1306 
1307   return -1;
1308 }
1309 
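// Select G_ICMP/G_FCMP. A scalar (non-VCC) result uses S_CMP followed by a
// copy from SCC; a VCC result uses the VALU V_CMP writing the wave mask.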
1310 bool AMDGPUInstructionSelector::selectG_ICMP_or_FCMP(MachineInstr &I) const {
1311 
1312   MachineBasicBlock *BB = I.getParent();
1313   const DebugLoc &DL = I.getDebugLoc();
1314 
1315   Register SrcReg = I.getOperand(2).getReg();
1316   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1317 
1318   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1319 
1320   Register CCReg = I.getOperand(0).getReg();
1321   if (!isVCC(CCReg, *MRI)) {
1322     int Opcode = getS_CMPOpcode(Pred, Size);
1323     if (Opcode == -1)
1324       return false;
1325     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1326             .add(I.getOperand(2))
1327             .add(I.getOperand(3));
1328     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1329       .addReg(AMDGPU::SCC);
1330     bool Ret =
1331         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1332         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1333     I.eraseFromParent();
1334     return Ret;
1335   }
1336 
1337   if (I.getOpcode() == AMDGPU::G_FCMP)
1338     return false;
1339 
1340   int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget);
1341   if (Opcode == -1)
1342     return false;
1343 
1344   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1345             I.getOperand(0).getReg())
1346             .add(I.getOperand(2))
1347             .add(I.getOperand(3));
1348   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1349                                *TRI.getBoolRC(), *MRI);
1350   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1351   I.eraseFromParent();
1352   return Ret;
1353 }
1354 
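// Select amdgcn.icmp/amdgcn.fcmp. Build the VALU compare directly so source
// modifiers can be folded, copying the inputs to VGPRs where needed.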
1355 bool AMDGPUInstructionSelector::selectIntrinsicCmp(MachineInstr &I) const {
1356   Register Dst = I.getOperand(0).getReg();
1357   if (isVCC(Dst, *MRI))
1358     return false;
1359 
1360   LLT DstTy = MRI->getType(Dst);
1361   if (DstTy.getSizeInBits() != STI.getWavefrontSize())
1362     return false;
1363 
1364   MachineBasicBlock *BB = I.getParent();
1365   const DebugLoc &DL = I.getDebugLoc();
1366   Register SrcReg = I.getOperand(2).getReg();
1367   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1368 
1369   // i1 inputs are not supported in GlobalISel.
1370   if (Size == 1)
1371     return false;
1372 
1373   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1374   if (!CmpInst::isIntPredicate(Pred) && !CmpInst::isFPPredicate(Pred)) {
1375     BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1376     I.eraseFromParent();
1377     return RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI);
1378   }
1379 
1380   const int Opcode = getV_CMPOpcode(Pred, Size, *Subtarget);
1381   if (Opcode == -1)
1382     return false;
1383 
1384   MachineInstrBuilder SelectedMI;
1385   MachineOperand &LHS = I.getOperand(2);
1386   MachineOperand &RHS = I.getOperand(3);
1387   auto [Src0, Src0Mods] = selectVOP3ModsImpl(LHS);
1388   auto [Src1, Src1Mods] = selectVOP3ModsImpl(RHS);
1389   Register Src0Reg =
1390       copyToVGPRIfSrcFolded(Src0, Src0Mods, LHS, &I, /*ForceVGPR*/ true);
1391   Register Src1Reg =
1392       copyToVGPRIfSrcFolded(Src1, Src1Mods, RHS, &I, /*ForceVGPR*/ true);
1393   SelectedMI = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst);
1394   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src0_modifiers))
1395     SelectedMI.addImm(Src0Mods);
1396   SelectedMI.addReg(Src0Reg);
1397   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::src1_modifiers))
1398     SelectedMI.addImm(Src1Mods);
1399   SelectedMI.addReg(Src1Reg);
1400   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::clamp))
1401     SelectedMI.addImm(0); // clamp
1402   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::op_sel))
1403     SelectedMI.addImm(0); // op_sel
1404 
1405   RBI.constrainGenericRegister(Dst, *TRI.getBoolRC(), *MRI);
1406   if (!constrainSelectedInstRegOperands(*SelectedMI, TII, TRI, RBI))
1407     return false;
1408 
1409   I.eraseFromParent();
1410   return true;
1411 }
1412 
1413 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1414   MachineBasicBlock *BB = I.getParent();
1415   const DebugLoc &DL = I.getDebugLoc();
1416   Register DstReg = I.getOperand(0).getReg();
1417   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1418   const bool Is64 = Size == 64;
1419   const bool IsWave32 = (STI.getWavefrontSize() == 32);
1420 
1421   // In the common case, the return type matches the wave size.
1422   // However, we also support emitting i64 ballots in wave32 mode.
1423   if (Size != STI.getWavefrontSize() && (!Is64 || !IsWave32))
1424     return false;
1425 
1426   std::optional<ValueAndVReg> Arg =
1427       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1428 
1429   const auto BuildCopy = [&](Register SrcReg) {
1430     if (Size == STI.getWavefrontSize()) {
1431       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1432           .addReg(SrcReg);
1433       return;
1434     }
1435 
1436     // If emitting an i64 ballot in wave32, fill the upper bits with zeroes.
1437     Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1438     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg).addImm(0);
1439     BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1440         .addReg(SrcReg)
1441         .addImm(AMDGPU::sub0)
1442         .addReg(HiReg)
1443         .addImm(AMDGPU::sub1);
1444   };
1445 
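       // Fold ballots of constant conditions: a zero input produces 0 and an
       // all-ones input copies the exec mask.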
1446   if (Arg) {
1447     const int64_t Value = Arg->Value.getSExtValue();
1448     if (Value == 0) {
1449       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1450       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1451     } else if (Value == -1) // all ones
1452       BuildCopy(IsWave32 ? AMDGPU::EXEC_LO : AMDGPU::EXEC);
1453     else
1454       return false;
1455   } else
1456     BuildCopy(I.getOperand(2).getReg());
1457 
1458   I.eraseFromParent();
1459   return true;
1460 }
1461 
1462 bool AMDGPUInstructionSelector::selectInverseBallot(MachineInstr &I) const {
1463   MachineBasicBlock *BB = I.getParent();
1464   const DebugLoc &DL = I.getDebugLoc();
1465   const Register DstReg = I.getOperand(0).getReg();
1466   const Register MaskReg = I.getOperand(2).getReg();
1467 
1468   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(MaskReg);
1469   I.eraseFromParent();
1470   return true;
1471 }
1472 
1473 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1474   Register DstReg = I.getOperand(0).getReg();
1475   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1476   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1477   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1478     return false;
1479 
1480   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1481 
1482   Module *M = MF->getFunction().getParent();
1483   const MDNode *Metadata = I.getOperand(2).getMetadata();
1484   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1485   auto RelocSymbol = cast<GlobalVariable>(
1486     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1487 
1488   MachineBasicBlock *BB = I.getParent();
1489   BuildMI(*BB, &I, I.getDebugLoc(),
1490           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1491     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1492 
1493   I.eraseFromParent();
1494   return true;
1495 }
1496 
1497 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1498   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1499 
1500   Register DstReg = I.getOperand(0).getReg();
1501   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1502   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1503     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1504 
1505   MachineBasicBlock *MBB = I.getParent();
1506   const DebugLoc &DL = I.getDebugLoc();
1507 
1508   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1509 
1510   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1511     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1512     MIB.addImm(MFI->getLDSSize());
1513   } else {
1514     Module *M = MF->getFunction().getParent();
1515     const GlobalValue *GV
1516       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1517     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1518   }
1519 
1520   I.eraseFromParent();
1521   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1522 }
1523 
1524 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1525   MachineBasicBlock *MBB = I.getParent();
1526   MachineFunction &MF = *MBB->getParent();
1527   const DebugLoc &DL = I.getDebugLoc();
1528 
1529   MachineOperand &Dst = I.getOperand(0);
1530   Register DstReg = Dst.getReg();
1531   unsigned Depth = I.getOperand(2).getImm();
1532 
1533   const TargetRegisterClass *RC
1534     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1535   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1536       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1537     return false;
1538 
1539   // Entry functions (kernels and shaders) and nonzero depths return a null address.
1540   if (Depth != 0 ||
1541       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1542     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1543       .addImm(0);
1544     I.eraseFromParent();
1545     return true;
1546   }
1547 
1548   MachineFrameInfo &MFI = MF.getFrameInfo();
1549   // There is a call to @llvm.returnaddress in this function
1550   MFI.setReturnAddressIsTaken(true);
1551 
1552   // Get the return address reg and mark it as an implicit live-in
1553   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1554   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1555                                              AMDGPU::SReg_64RegClass, DL);
1556   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1557     .addReg(LiveIn);
1558   I.eraseFromParent();
1559   return true;
1560 }
1561 
1562 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1563   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1564   // SelectionDAG uses for wave32 vs wave64.
1565   MachineBasicBlock *BB = MI.getParent();
1566   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1567       .add(MI.getOperand(1));
1568 
1569   Register Reg = MI.getOperand(1).getReg();
1570   MI.eraseFromParent();
1571 
1572   if (!MRI->getRegClassOrNull(Reg))
1573     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1574   return true;
1575 }
1576 
1577 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1578   MachineInstr &MI, Intrinsic::ID IntrID) const {
1579   MachineBasicBlock *MBB = MI.getParent();
1580   MachineFunction *MF = MBB->getParent();
1581   const DebugLoc &DL = MI.getDebugLoc();
1582 
1583   unsigned IndexOperand = MI.getOperand(7).getImm();
1584   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1585   bool WaveDone = MI.getOperand(9).getImm() != 0;
1586 
1587   if (WaveDone && !WaveRelease)
1588     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1589 
1590   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1591   IndexOperand &= ~0x3f;
1592   unsigned CountDw = 0;
1593 
1594   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1595     CountDw = (IndexOperand >> 24) & 0xf;
1596     IndexOperand &= ~(0xf << 24);
1597 
1598     if (CountDw < 1 || CountDw > 4) {
1599       report_fatal_error(
1600         "ds_ordered_count: dword count must be between 1 and 4");
1601     }
1602   }
1603 
1604   if (IndexOperand)
1605     report_fatal_error("ds_ordered_count: bad index operand");
1606 
1607   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1608   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1609 
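       // Pack the DS_ORDERED_COUNT immediate: offset0 (bits [7:0]) is the ordered
       // count index scaled to bytes; offset1 (bits [15:8]) packs wave_release,
       // wave_done, the add/swap selector, the shader type (before GFX11), and the
       // dword count (GFX10+).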
1610   unsigned Offset0 = OrderedCountIndex << 2;
1611   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);
1612 
1613   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1614     Offset1 |= (CountDw - 1) << 6;
1615 
1616   if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
1617     Offset1 |= ShaderType << 2;
1618 
1619   unsigned Offset = Offset0 | (Offset1 << 8);
1620 
1621   Register M0Val = MI.getOperand(2).getReg();
1622   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1623     .addReg(M0Val);
1624 
1625   Register DstReg = MI.getOperand(0).getReg();
1626   Register ValReg = MI.getOperand(3).getReg();
1627   MachineInstrBuilder DS =
1628     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1629       .addReg(ValReg)
1630       .addImm(Offset)
1631       .cloneMemRefs(MI);
1632 
1633   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1634     return false;
1635 
1636   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1637   MI.eraseFromParent();
1638   return Ret;
1639 }
1640 
1641 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1642   switch (IntrID) {
1643   case Intrinsic::amdgcn_ds_gws_init:
1644     return AMDGPU::DS_GWS_INIT;
1645   case Intrinsic::amdgcn_ds_gws_barrier:
1646     return AMDGPU::DS_GWS_BARRIER;
1647   case Intrinsic::amdgcn_ds_gws_sema_v:
1648     return AMDGPU::DS_GWS_SEMA_V;
1649   case Intrinsic::amdgcn_ds_gws_sema_br:
1650     return AMDGPU::DS_GWS_SEMA_BR;
1651   case Intrinsic::amdgcn_ds_gws_sema_p:
1652     return AMDGPU::DS_GWS_SEMA_P;
1653   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1654     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1655   default:
1656     llvm_unreachable("not a gws intrinsic");
1657   }
1658 }
1659 
1660 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1661                                                      Intrinsic::ID IID) const {
1662   if (!STI.hasGWS() || (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1663                         !STI.hasGWSSemaReleaseAll()))
1664     return false;
1665 
1666   // intrinsic ID, vsrc, offset
1667   const bool HasVSrc = MI.getNumOperands() == 3;
1668   assert(HasVSrc || MI.getNumOperands() == 2);
1669 
1670   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1671   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1672   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1673     return false;
1674 
1675   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1676   unsigned ImmOffset;
1677 
1678   MachineBasicBlock *MBB = MI.getParent();
1679   const DebugLoc &DL = MI.getDebugLoc();
1680 
1681   MachineInstr *Readfirstlane = nullptr;
1682 
1683   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1684   // incoming offset, in case there's an add of a constant. We'll have to put it
1685   // back later.
1686   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1687     Readfirstlane = OffsetDef;
1688     BaseOffset = OffsetDef->getOperand(1).getReg();
1689     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1690   }
1691 
1692   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1693     // If we have a constant offset, try to use the 0 in m0 as the base.
1694     // TODO: Look into changing the default m0 initialization value. If the
1695   // default -1 only set the low 16 bits, we could leave it as-is and add 1 to
1696     // the immediate offset.
1697 
1698     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1699     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1700       .addImm(0);
1701   } else {
1702     std::tie(BaseOffset, ImmOffset) =
1703         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset, KB);
1704 
1705     if (Readfirstlane) {
1706       // We have the constant offset now, so put the readfirstlane back on the
1707       // variable component.
1708       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1709         return false;
1710 
1711       Readfirstlane->getOperand(1).setReg(BaseOffset);
1712       BaseOffset = Readfirstlane->getOperand(0).getReg();
1713     } else {
1714       if (!RBI.constrainGenericRegister(BaseOffset,
1715                                         AMDGPU::SReg_32RegClass, *MRI))
1716         return false;
1717     }
1718 
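         // The GWS base is read from m0 bits [21:16] (see the note below), so shift
         // the variable offset into position before copying it into m0.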
1719     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1720     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1721       .addReg(BaseOffset)
1722       .addImm(16)
1723       .setOperandDead(3); // Dead scc
1724 
1725     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1726       .addReg(M0Base);
1727   }
1728 
1729   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1730   // offset field) % 64. Some versions of the programming guide omit the m0
1731   // part, or claim it's from offset 0.
1732   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1733 
1734   if (HasVSrc) {
1735     Register VSrc = MI.getOperand(1).getReg();
1736     MIB.addReg(VSrc);
1737 
1738     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1739       return false;
1740   }
1741 
1742   MIB.addImm(ImmOffset)
1743      .cloneMemRefs(MI);
1744 
1745   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1746 
1747   MI.eraseFromParent();
1748   return true;
1749 }
1750 
1751 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1752                                                       bool IsAppend) const {
1753   Register PtrBase = MI.getOperand(2).getReg();
1754   LLT PtrTy = MRI->getType(PtrBase);
1755   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1756 
1757   unsigned Offset;
1758   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1759 
1760   // TODO: Should this try to look through readfirstlane like GWS?
1761   if (!isDSOffsetLegal(PtrBase, Offset)) {
1762     PtrBase = MI.getOperand(2).getReg();
1763     Offset = 0;
1764   }
1765 
1766   MachineBasicBlock *MBB = MI.getParent();
1767   const DebugLoc &DL = MI.getDebugLoc();
1768   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1769 
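       // DS_APPEND/DS_CONSUME take their base address in m0; the remaining
       // displacement goes in the immediate offset operand.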
1770   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1771     .addReg(PtrBase);
1772   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1773     return false;
1774 
1775   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1776     .addImm(Offset)
1777     .addImm(IsGDS ? -1 : 0)
1778     .cloneMemRefs(MI);
1779   MI.eraseFromParent();
1780   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1781 }
1782 
1783 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1784   if (TM.getOptLevel() > CodeGenOptLevel::None) {
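         // A workgroup that fits in a single wave already executes in lockstep, so
         // the barrier only needs to act as a scheduling barrier.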
1785     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1786     if (WGSize <= STI.getWavefrontSize()) {
1787       MachineBasicBlock *MBB = MI.getParent();
1788       const DebugLoc &DL = MI.getDebugLoc();
1789       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1790       MI.eraseFromParent();
1791       return true;
1792     }
1793   }
1794 
1795   // On GFX12, lower s_barrier into s_barrier_signal_imm and s_barrier_wait.
1796   if (STI.hasSplitBarriers()) {
1797     MachineBasicBlock *MBB = MI.getParent();
1798     const DebugLoc &DL = MI.getDebugLoc();
1799     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_BARRIER_SIGNAL_IMM))
1800         .addImm(AMDGPU::Barrier::WORKGROUP);
1801     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_BARRIER_WAIT))
1802         .addImm(AMDGPU::Barrier::WORKGROUP);
1803     MI.eraseFromParent();
1804     return true;
1805   }
1806 
1807   return selectImpl(MI, *CoverageInfo);
1808 }
1809 
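     // Decode the texfailctrl immediate: bit 0 enables TFE (texture fail enable) and
     // bit 1 enables LWE (LOD warning enable). Returns false if any other bits are
     // set.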
1810 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1811                          bool &IsTexFail) {
1812   if (TexFailCtrl)
1813     IsTexFail = true;
1814 
1815   TFE = (TexFailCtrl & 0x1) != 0;
1816   TexFailCtrl &= ~(uint64_t)0x1;
1817   LWE = (TexFailCtrl & 0x2) != 0;
1818   TexFailCtrl &= ~(uint64_t)0x2;
1819 
1820   return TexFailCtrl == 0;
1821 }
1822 
1823 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1824   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1825   MachineBasicBlock *MBB = MI.getParent();
1826   const DebugLoc &DL = MI.getDebugLoc();
1827 
1828   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1829     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1830 
1831   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1832   unsigned IntrOpcode = Intr->BaseOpcode;
1833   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1834   const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);
1835   const bool IsGFX12Plus = AMDGPU::isGFX12Plus(STI);
1836 
1837   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1838 
1839   Register VDataIn, VDataOut;
1840   LLT VDataTy;
1841   int NumVDataDwords = -1;
1842   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1843                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1844 
1845   bool Unorm;
1846   if (!BaseOpcode->Sampler)
1847     Unorm = true;
1848   else
1849     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1850 
1851   bool TFE;
1852   bool LWE;
1853   bool IsTexFail = false;
1854   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1855                     TFE, LWE, IsTexFail))
1856     return false;
1857 
1858   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1859   const bool IsA16 = (Flags & 1) != 0;
1860   const bool IsG16 = (Flags & 2) != 0;
1861 
1862   // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1863   if (IsA16 && !STI.hasG16() && !IsG16)
1864     return false;
1865 
1866   unsigned DMask = 0;
1867   unsigned DMaskLanes = 0;
1868 
1869   if (BaseOpcode->Atomic) {
1870     VDataOut = MI.getOperand(0).getReg();
1871     VDataIn = MI.getOperand(2).getReg();
1872     LLT Ty = MRI->getType(VDataIn);
1873 
1874     // Be careful to allow atomic swap on 16-bit element vectors.
1875     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1876       Ty.getSizeInBits() == 128 :
1877       Ty.getSizeInBits() == 64;
1878 
1879     if (BaseOpcode->AtomicX2) {
1880       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1881 
1882       DMask = Is64Bit ? 0xf : 0x3;
1883       NumVDataDwords = Is64Bit ? 4 : 2;
1884     } else {
1885       DMask = Is64Bit ? 0x3 : 0x1;
1886       NumVDataDwords = Is64Bit ? 2 : 1;
1887     }
1888   } else {
1889     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1890     DMaskLanes = BaseOpcode->Gather4 ? 4 : llvm::popcount(DMask);
1891 
1892     if (BaseOpcode->Store) {
1893       VDataIn = MI.getOperand(1).getReg();
1894       VDataTy = MRI->getType(VDataIn);
1895       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1896     } else {
1897       VDataOut = MI.getOperand(0).getReg();
1898       VDataTy = MRI->getType(VDataOut);
1899       NumVDataDwords = DMaskLanes;
1900 
1901       if (IsD16 && !STI.hasUnpackedD16VMem())
1902         NumVDataDwords = (DMaskLanes + 1) / 2;
1903     }
1904   }
1905 
1906   // Set G16 opcode
1907   if (Subtarget->hasG16() && IsG16) {
1908     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1909         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1910     assert(G16MappingInfo);
1911     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1912   }
1913 
1914   // TODO: Check this in verifier.
1915   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1916 
1917   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1918   if (BaseOpcode->Atomic)
1919     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1920   if (CPol & ~(IsGFX12Plus ? AMDGPU::CPol::ALL : AMDGPU::CPol::ALL_pregfx12))
1921     return false;
1922 
1923   int NumVAddrRegs = 0;
1924   int NumVAddrDwords = 0;
1925   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1926     // Skip the $noregs and 0s inserted during legalization.
1927     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1928     if (!AddrOp.isReg())
1929       continue; // XXX - Break?
1930 
1931     Register Addr = AddrOp.getReg();
1932     if (!Addr)
1933       break;
1934 
1935     ++NumVAddrRegs;
1936     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1937   }
1938 
1939   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1940   // NSA, these should have been packed into a single value in the first
1941   // address register.
1942   const bool UseNSA =
1943       NumVAddrRegs != 1 &&
1944       (STI.hasPartialNSAEncoding() ? NumVAddrDwords >= NumVAddrRegs
1945                                    : NumVAddrDwords == NumVAddrRegs);
1946   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1947     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1948     return false;
1949   }
1950 
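       // A TFE/LWE instruction returns an extra dword for the fail/status value.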
1951   if (IsTexFail)
1952     ++NumVDataDwords;
1953 
1954   int Opcode = -1;
1955   if (IsGFX12Plus) {
1956     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx12,
1957                                    NumVDataDwords, NumVAddrDwords);
1958   } else if (IsGFX11Plus) {
1959     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1960                                    UseNSA ? AMDGPU::MIMGEncGfx11NSA
1961                                           : AMDGPU::MIMGEncGfx11Default,
1962                                    NumVDataDwords, NumVAddrDwords);
1963   } else if (IsGFX10Plus) {
1964     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1965                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1966                                           : AMDGPU::MIMGEncGfx10Default,
1967                                    NumVDataDwords, NumVAddrDwords);
1968   } else {
1969     if (Subtarget->hasGFX90AInsts()) {
1970       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1971                                      NumVDataDwords, NumVAddrDwords);
1972       if (Opcode == -1) {
1973         LLVM_DEBUG(
1974             dbgs()
1975             << "requested image instruction is not supported on this GPU\n");
1976         return false;
1977       }
1978     }
1979     if (Opcode == -1 &&
1980         STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1981       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1982                                      NumVDataDwords, NumVAddrDwords);
1983     if (Opcode == -1)
1984       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1985                                      NumVDataDwords, NumVAddrDwords);
1986   }
1987   if (Opcode == -1)
1988     return false;
1989 
1990   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1991     .cloneMemRefs(MI);
1992 
1993   if (VDataOut) {
1994     if (BaseOpcode->AtomicX2) {
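           // An X2 atomic def is twice as wide as the IR result; the value the IR
           // expects is in the low half, so copy out the low sub-register.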
1995       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1996 
1997       Register TmpReg = MRI->createVirtualRegister(
1998         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1999       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2000 
2001       MIB.addDef(TmpReg);
2002       if (!MRI->use_empty(VDataOut)) {
2003         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
2004             .addReg(TmpReg, RegState::Kill, SubReg);
2005       }
2006 
2007     } else {
2008       MIB.addDef(VDataOut); // vdata output
2009     }
2010   }
2011 
2012   if (VDataIn)
2013     MIB.addReg(VDataIn); // vdata input
2014 
2015   for (int I = 0; I != NumVAddrRegs; ++I) {
2016     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
2017     if (SrcOp.isReg()) {
2018       assert(SrcOp.getReg() != 0);
2019       MIB.addReg(SrcOp.getReg());
2020     }
2021   }
2022 
2023   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
2024   if (BaseOpcode->Sampler)
2025     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
2026 
2027   MIB.addImm(DMask); // dmask
2028 
2029   if (IsGFX10Plus)
2030     MIB.addImm(DimInfo->Encoding);
2031   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::unorm))
2032     MIB.addImm(Unorm);
2033 
2034   MIB.addImm(CPol);
2035   MIB.addImm(IsA16 &&  // a16 or r128
2036              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
2037   if (IsGFX10Plus)
2038     MIB.addImm(IsA16 ? -1 : 0);
2039 
2040   if (!Subtarget->hasGFX90AInsts()) {
2041     MIB.addImm(TFE); // tfe
2042   } else if (TFE) {
2043     LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
2044     return false;
2045   }
2046 
2047   if (AMDGPU::hasNamedOperand(Opcode, AMDGPU::OpName::lwe))
2048     MIB.addImm(LWE); // lwe
2049   if (!IsGFX10Plus)
2050     MIB.addImm(DimInfo->DA ? -1 : 0);
2051   if (BaseOpcode->HasD16)
2052     MIB.addImm(IsD16 ? -1 : 0);
2053 
2054   if (IsTexFail) {
2055     // An image load instruction with TFE/LWE only conditionally writes to its
2056     // result registers. Initialize them to zero so that we always get well
2057     // defined result values.
2058     assert(VDataOut && !VDataIn);
2059     Register Tied = MRI->cloneVirtualRegister(VDataOut);
2060     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2061     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
2062       .addImm(0);
2063     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
2064     if (STI.usePRTStrictNull()) {
2065       // With enable-prt-strict-null enabled, initialize all result registers to
2066       // zero.
2067       auto RegSeq =
2068           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
2069       for (auto Sub : Parts)
2070         RegSeq.addReg(Zero).addImm(Sub);
2071     } else {
2072       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
2073       // result register.
2074       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2075       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
2076       auto RegSeq =
2077           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
2078       for (auto Sub : Parts.drop_back(1))
2079         RegSeq.addReg(Undef).addImm(Sub);
2080       RegSeq.addReg(Zero).addImm(Parts.back());
2081     }
2082     MIB.addReg(Tied, RegState::Implicit);
2083     MIB->tieOperands(0, MIB->getNumOperands() - 1);
2084   }
2085 
2086   MI.eraseFromParent();
2087   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2088   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
2089   return true;
2090 }
2091 
2092 // We need to handle this here because tablegen doesn't support matching
2093 // instructions with multiple outputs.
2094 bool AMDGPUInstructionSelector::selectDSBvhStackIntrinsic(
2095     MachineInstr &MI) const {
2096   Register Dst0 = MI.getOperand(0).getReg();
2097   Register Dst1 = MI.getOperand(1).getReg();
2098 
2099   const DebugLoc &DL = MI.getDebugLoc();
2100   MachineBasicBlock *MBB = MI.getParent();
2101 
2102   Register Addr = MI.getOperand(3).getReg();
2103   Register Data0 = MI.getOperand(4).getReg();
2104   Register Data1 = MI.getOperand(5).getReg();
2105   unsigned Offset = MI.getOperand(6).getImm();
2106 
2107   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_BVH_STACK_RTN_B32), Dst0)
2108                  .addDef(Dst1)
2109                  .addUse(Addr)
2110                  .addUse(Data0)
2111                  .addUse(Data1)
2112                  .addImm(Offset)
2113                  .cloneMemRefs(MI);
2114 
2115   MI.eraseFromParent();
2116   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2117 }
2118 
2119 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
2120     MachineInstr &I) const {
2121   unsigned IntrinsicID = cast<GIntrinsic>(I).getIntrinsicID();
2122   switch (IntrinsicID) {
2123   case Intrinsic::amdgcn_end_cf:
2124     return selectEndCfIntrinsic(I);
2125   case Intrinsic::amdgcn_ds_ordered_add:
2126   case Intrinsic::amdgcn_ds_ordered_swap:
2127     return selectDSOrderedIntrinsic(I, IntrinsicID);
2128   case Intrinsic::amdgcn_ds_gws_init:
2129   case Intrinsic::amdgcn_ds_gws_barrier:
2130   case Intrinsic::amdgcn_ds_gws_sema_v:
2131   case Intrinsic::amdgcn_ds_gws_sema_br:
2132   case Intrinsic::amdgcn_ds_gws_sema_p:
2133   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2134     return selectDSGWSIntrinsic(I, IntrinsicID);
2135   case Intrinsic::amdgcn_ds_append:
2136     return selectDSAppendConsume(I, true);
2137   case Intrinsic::amdgcn_ds_consume:
2138     return selectDSAppendConsume(I, false);
2139   case Intrinsic::amdgcn_s_barrier:
2140     return selectSBarrier(I);
2141   case Intrinsic::amdgcn_raw_buffer_load_lds:
2142   case Intrinsic::amdgcn_raw_ptr_buffer_load_lds:
2143   case Intrinsic::amdgcn_struct_buffer_load_lds:
2144   case Intrinsic::amdgcn_struct_ptr_buffer_load_lds:
2145     return selectBufferLoadLds(I);
2146   case Intrinsic::amdgcn_global_load_lds:
2147     return selectGlobalLoadLds(I);
2148   case Intrinsic::amdgcn_exp_compr:
2149     if (!STI.hasCompressedExport()) {
2150       Function &F = I.getMF()->getFunction();
2151       DiagnosticInfoUnsupported NoFpRet(
2152           F, "intrinsic not supported on subtarget", I.getDebugLoc(), DS_Error);
2153       F.getContext().diagnose(NoFpRet);
2154       return false;
2155     }
2156     break;
2157   case Intrinsic::amdgcn_ds_bvh_stack_rtn:
2158     return selectDSBvhStackIntrinsic(I);
2159   case Intrinsic::amdgcn_s_barrier_init:
2160   case Intrinsic::amdgcn_s_barrier_join:
2161   case Intrinsic::amdgcn_s_wakeup_barrier:
2162   case Intrinsic::amdgcn_s_get_barrier_state:
2163     return selectNamedBarrierInst(I, IntrinsicID);
2164   case Intrinsic::amdgcn_s_barrier_signal_isfirst:
2165   case Intrinsic::amdgcn_s_barrier_signal_isfirst_var:
2166     return selectSBarrierSignalIsfirst(I, IntrinsicID);
2167   case Intrinsic::amdgcn_s_barrier_leave:
2168     return selectSBarrierLeave(I);
2169   }
2170   return selectImpl(I, *CoverageInfo);
2171 }
2172 
2173 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
2174   if (selectImpl(I, *CoverageInfo))
2175     return true;
2176 
2177   MachineBasicBlock *BB = I.getParent();
2178   const DebugLoc &DL = I.getDebugLoc();
2179 
2180   Register DstReg = I.getOperand(0).getReg();
2181   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
2182   assert(Size <= 32 || Size == 64);
2183   const MachineOperand &CCOp = I.getOperand(1);
2184   Register CCReg = CCOp.getReg();
2185   if (!isVCC(CCReg, *MRI)) {
2186     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
2187                                          AMDGPU::S_CSELECT_B32;
2188     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
2189             .addReg(CCReg);
2190 
2191     // The generic constrainSelectedInstRegOperands doesn't work for the scc
2192     // register bank, because it does not cover the register class we use to
2193     // represent it, so manually set the register class here.
2194     if (!MRI->getRegClassOrNull(CCReg))
2195         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
2196     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
2197             .add(I.getOperand(2))
2198             .add(I.getOperand(3));
2199 
2200     bool Ret = false;
2201     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
2202     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
2203     I.eraseFromParent();
2204     return Ret;
2205   }
2206 
2207   // Wide VGPR select should have been split in RegBankSelect.
2208   if (Size > 32)
2209     return false;
2210 
2211   MachineInstr *Select =
2212       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
2213               .addImm(0)
2214               .add(I.getOperand(3))
2215               .addImm(0)
2216               .add(I.getOperand(2))
2217               .add(I.getOperand(1));
2218 
2219   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
2220   I.eraseFromParent();
2221   return Ret;
2222 }
2223 
2224 static int sizeToSubRegIndex(unsigned Size) {
2225   switch (Size) {
2226   case 32:
2227     return AMDGPU::sub0;
2228   case 64:
2229     return AMDGPU::sub0_sub1;
2230   case 96:
2231     return AMDGPU::sub0_sub1_sub2;
2232   case 128:
2233     return AMDGPU::sub0_sub1_sub2_sub3;
2234   case 256:
2235     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
2236   default:
2237     if (Size < 32)
2238       return AMDGPU::sub0;
2239     if (Size > 256)
2240       return -1;
2241     return sizeToSubRegIndex(llvm::bit_ceil(Size));
2242   }
2243 }
2244 
2245 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
2246   Register DstReg = I.getOperand(0).getReg();
2247   Register SrcReg = I.getOperand(1).getReg();
2248   const LLT DstTy = MRI->getType(DstReg);
2249   const LLT SrcTy = MRI->getType(SrcReg);
2250   const LLT S1 = LLT::scalar(1);
2251 
2252   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2253   const RegisterBank *DstRB;
2254   if (DstTy == S1) {
2255     // This is a special case: s1 values used as legalization artifacts are not
2256     // treated as vcc booleans.
2257     DstRB = SrcRB;
2258   } else {
2259     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2260     if (SrcRB != DstRB)
2261       return false;
2262   }
2263 
2264   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2265 
2266   unsigned DstSize = DstTy.getSizeInBits();
2267   unsigned SrcSize = SrcTy.getSizeInBits();
2268 
2269   const TargetRegisterClass *SrcRC =
2270       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
2271   const TargetRegisterClass *DstRC =
2272       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
2273   if (!SrcRC || !DstRC)
2274     return false;
2275 
2276   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2277       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
2278     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
2279     return false;
2280   }
2281 
2282   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
2283     MachineBasicBlock *MBB = I.getParent();
2284     const DebugLoc &DL = I.getDebugLoc();
2285 
2286     Register LoReg = MRI->createVirtualRegister(DstRC);
2287     Register HiReg = MRI->createVirtualRegister(DstRC);
2288     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
2289       .addReg(SrcReg, 0, AMDGPU::sub0);
2290     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
2291       .addReg(SrcReg, 0, AMDGPU::sub1);
2292 
2293     if (IsVALU && STI.hasSDWA()) {
2294       // Write the low 16 bits of the high element into the high 16 bits of the
2295       // low element.
2296       MachineInstr *MovSDWA =
2297         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2298         .addImm(0)                             // $src0_modifiers
2299         .addReg(HiReg)                         // $src0
2300         .addImm(0)                             // $clamp
2301         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2302         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2303         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2304         .addReg(LoReg, RegState::Implicit);
2305       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2306     } else {
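           // No SDWA: pack the two halves manually by shifting the high element into
           // the upper 16 bits and ORing it with the masked low element.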
2307       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
2308       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
2309       Register ImmReg = MRI->createVirtualRegister(DstRC);
2310       if (IsVALU) {
2311         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
2312           .addImm(16)
2313           .addReg(HiReg);
2314       } else {
2315         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
2316           .addReg(HiReg)
2317           .addImm(16)
2318           .setOperandDead(3); // Dead scc
2319       }
2320 
2321       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2322       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2323       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
2324 
2325       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
2326         .addImm(0xffff);
2327       auto And = BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
2328         .addReg(LoReg)
2329         .addReg(ImmReg);
2330       auto Or = BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
2331         .addReg(TmpReg0)
2332         .addReg(TmpReg1);
2333 
2334       if (!IsVALU) {
2335         And.setOperandDead(3); // Dead scc
2336         Or.setOperandDead(3); // Dead scc
2337       }
2338     }
2339 
2340     I.eraseFromParent();
2341     return true;
2342   }
2343 
2344   if (!DstTy.isScalar())
2345     return false;
2346 
2347   if (SrcSize > 32) {
2348     int SubRegIdx = sizeToSubRegIndex(DstSize);
2349     if (SubRegIdx == -1)
2350       return false;
2351 
2352     // Deal with weird cases where the class only partially supports the subreg
2353     // index.
2354     const TargetRegisterClass *SrcWithSubRC
2355       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2356     if (!SrcWithSubRC)
2357       return false;
2358 
2359     if (SrcWithSubRC != SrcRC) {
2360       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2361         return false;
2362     }
2363 
2364     I.getOperand(1).setSubReg(SubRegIdx);
2365   }
2366 
2367   I.setDesc(TII.get(TargetOpcode::COPY));
2368   return true;
2369 }
2370 
2371 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
2372 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2373   Mask = maskTrailingOnes<unsigned>(Size);
2374   int SignedMask = static_cast<int>(Mask);
2375   return SignedMask >= -16 && SignedMask <= 64;
2376 }
2377 
2378 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2379 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2380   Register Reg, const MachineRegisterInfo &MRI,
2381   const TargetRegisterInfo &TRI) const {
2382   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2383   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2384     return RB;
2385 
2386   // Ignore the type, since we don't use vcc in artifacts.
2387   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2388     return &RBI.getRegBankFromRegClass(*RC, LLT());
2389   return nullptr;
2390 }
2391 
2392 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2393   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2394   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2395   const DebugLoc &DL = I.getDebugLoc();
2396   MachineBasicBlock &MBB = *I.getParent();
2397   const Register DstReg = I.getOperand(0).getReg();
2398   const Register SrcReg = I.getOperand(1).getReg();
2399 
2400   const LLT DstTy = MRI->getType(DstReg);
2401   const LLT SrcTy = MRI->getType(SrcReg);
2402   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2403     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2404   const unsigned DstSize = DstTy.getSizeInBits();
2405   if (!DstTy.isScalar())
2406     return false;
2407 
2408   // Artifact casts should never use vcc.
2409   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2410 
2411   // FIXME: This should probably be illegal and split earlier.
2412   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2413     if (DstSize <= 32)
2414       return selectCOPY(I);
2415 
2416     const TargetRegisterClass *SrcRC =
2417         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2418     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2419     const TargetRegisterClass *DstRC =
2420         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2421 
2422     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2423     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2424     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2425       .addReg(SrcReg)
2426       .addImm(AMDGPU::sub0)
2427       .addReg(UndefReg)
2428       .addImm(AMDGPU::sub1);
2429     I.eraseFromParent();
2430 
2431     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2432            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2433   }
2434 
2435   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2436     // 64-bit should have been split up in RegBankSelect
2437 
2438     // Try to use an and with a mask if it will save code size.
2439     unsigned Mask;
2440     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2441       MachineInstr *ExtI =
2442       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2443         .addImm(Mask)
2444         .addReg(SrcReg);
2445       I.eraseFromParent();
2446       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2447     }
2448 
2449     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2450     MachineInstr *ExtI =
2451       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2452       .addReg(SrcReg)
2453       .addImm(0) // Offset
2454       .addImm(SrcSize); // Width
2455     I.eraseFromParent();
2456     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2457   }
2458 
2459   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2460     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2461       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2462     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2463       return false;
2464 
2465     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2466       const unsigned SextOpc = SrcSize == 8 ?
2467         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2468       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2469         .addReg(SrcReg);
2470       I.eraseFromParent();
2471       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2472     }
2473 
2474     // Using a single 32-bit SALU to calculate the high half is smaller than
2475     // S_BFE with a literal constant operand.
2476     if (DstSize > 32 && SrcSize == 32) {
2477       Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2478       unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister;
2479       if (Signed) {
2480         BuildMI(MBB, I, DL, TII.get(AMDGPU::S_ASHR_I32), HiReg)
2481           .addReg(SrcReg, 0, SubReg)
2482           .addImm(31)
2483           .setOperandDead(3); // Dead scc
2484       } else {
2485         BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B32), HiReg)
2486           .addImm(0);
2487       }
2488       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2489         .addReg(SrcReg, 0, SubReg)
2490         .addImm(AMDGPU::sub0)
2491         .addReg(HiReg)
2492         .addImm(AMDGPU::sub1);
2493       I.eraseFromParent();
2494       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass,
2495                                           *MRI);
2496     }
2497 
2498     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2499     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2500 
2501     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
2502     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2503       // We need a 64-bit register source, but the high bits don't matter.
2504       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2505       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2506       unsigned SubReg = InReg ? AMDGPU::sub0 : AMDGPU::NoSubRegister;
2507 
2508       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2509       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2510         .addReg(SrcReg, 0, SubReg)
2511         .addImm(AMDGPU::sub0)
2512         .addReg(UndefReg)
2513         .addImm(AMDGPU::sub1);
2514 
2515       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2516         .addReg(ExtReg)
2517         .addImm(SrcSize << 16);
2518 
2519       I.eraseFromParent();
2520       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2521     }
2522 
2523     unsigned Mask;
2524     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2525       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2526         .addReg(SrcReg)
2527         .addImm(Mask)
2528         .setOperandDead(3); // Dead scc
2529     } else {
2530       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2531         .addReg(SrcReg)
2532         .addImm(SrcSize << 16);
2533     }
2534 
2535     I.eraseFromParent();
2536     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2537   }
2538 
2539   return false;
2540 }
2541 
2542 static bool isExtractHiElt(MachineRegisterInfo &MRI, Register In,
2543                            Register &Out) {
2544   Register LShlSrc;
2545   if (mi_match(In, MRI,
2546                m_GTrunc(m_GLShr(m_Reg(LShlSrc), m_SpecificICst(16))))) {
2547     Out = LShlSrc;
2548     return true;
2549   }
2550   return false;
2551 }
2552 
2553 bool AMDGPUInstructionSelector::selectG_FPEXT(MachineInstr &I) const {
2554   if (!Subtarget->hasSALUFloatInsts())
2555     return false;
2556 
2557   Register Dst = I.getOperand(0).getReg();
2558   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2559   if (DstRB->getID() != AMDGPU::SGPRRegBankID)
2560     return false;
2561 
2562   Register Src = I.getOperand(1).getReg();
2563 
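       // Fold an fpext of the high 16-bit half of a 32-bit value into
       // S_CVT_HI_F32_F16, which converts the upper half directly.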
2564   if (MRI->getType(Dst) == LLT::scalar(32) &&
2565       MRI->getType(Src) == LLT::scalar(16)) {
2566     if (isExtractHiElt(*MRI, Src, Src)) {
2567       MachineBasicBlock *BB = I.getParent();
2568       BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_CVT_HI_F32_F16), Dst)
2569           .addUse(Src);
2570       I.eraseFromParent();
2571       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
2572     }
2573   }
2574 
2575   return false;
2576 }
2577 
2578 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2579   MachineBasicBlock *BB = I.getParent();
2580   MachineOperand &ImmOp = I.getOperand(1);
2581   Register DstReg = I.getOperand(0).getReg();
2582   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2583   bool IsFP = false;
2584 
2585   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2586   if (ImmOp.isFPImm()) {
2587     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2588     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2589     IsFP = true;
2590   } else if (ImmOp.isCImm()) {
2591     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2592   } else {
2593     llvm_unreachable("Not supported by g_constants");
2594   }
2595 
2596   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2597   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2598 
2599   unsigned Opcode;
2600   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2601     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2602   } else if (Size == 64 &&
2603              AMDGPU::isValid32BitLiteral(I.getOperand(1).getImm(), IsFP)) {
2604     Opcode = IsSgpr ? AMDGPU::S_MOV_B64_IMM_PSEUDO : AMDGPU::V_MOV_B64_PSEUDO;
2605     I.setDesc(TII.get(Opcode));
2606     I.addImplicitDefUseOperands(*MF);
2607     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2608   } else {
2609     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2610 
2611     // We should never produce s1 values on banks other than VCC. If the user of
2612     // this already constrained the register, we may incorrectly think it's VCC
2613     // if it wasn't originally.
2614     if (Size == 1)
2615       return false;
2616   }
2617 
2618   if (Size != 64) {
2619     I.setDesc(TII.get(Opcode));
2620     I.addImplicitDefUseOperands(*MF);
2621     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2622   }
2623 
2624   const DebugLoc &DL = I.getDebugLoc();
2625 
2626   APInt Imm(Size, I.getOperand(1).getImm());
2627 
2628   MachineInstr *ResInst;
2629   if (IsSgpr && TII.isInlineConstant(Imm)) {
2630     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2631       .addImm(I.getOperand(1).getImm());
2632   } else {
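         // Split the 64-bit immediate into two 32-bit moves and recombine the halves
         // with a REG_SEQUENCE.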
2633     const TargetRegisterClass *RC = IsSgpr ?
2634       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2635     Register LoReg = MRI->createVirtualRegister(RC);
2636     Register HiReg = MRI->createVirtualRegister(RC);
2637 
2638     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2639       .addImm(Imm.trunc(32).getZExtValue());
2640 
2641     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2642       .addImm(Imm.ashr(32).getZExtValue());
2643 
2644     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2645       .addReg(LoReg)
2646       .addImm(AMDGPU::sub0)
2647       .addReg(HiReg)
2648       .addImm(AMDGPU::sub1);
2649   }
2650 
2651   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2652   // work for target-independent opcodes.
2653   I.eraseFromParent();
2654   const TargetRegisterClass *DstRC =
2655     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2656   if (!DstRC)
2657     return true;
2658   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2659 }
2660 
2661 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2662   // Only manually handle the f64 SGPR case.
2663   //
2664   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2665   // the bit ops theoretically have a second result due to the implicit def of
2666   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2667   // that is easy by disabling the check. The result works, but uses a
2668   // nonsensical sreg32orlds_and_sreg_1 regclass.
2669   //
2670   // The DAG emitter is more problematic: it incorrectly adds both results of
2671   // the S_XOR_B32 to the variadic REG_SEQUENCE operands.
2672 
2673   Register Dst = MI.getOperand(0).getReg();
2674   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2675   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2676       MRI->getType(Dst) != LLT::scalar(64))
2677     return false;
2678 
2679   Register Src = MI.getOperand(1).getReg();
2680   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2681   if (Fabs)
2682     Src = Fabs->getOperand(1).getReg();
2683 
2684   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2685       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2686     return false;
2687 
2688   MachineBasicBlock *BB = MI.getParent();
2689   const DebugLoc &DL = MI.getDebugLoc();
2690   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2691   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2692   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2693   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2694 
2695   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2696     .addReg(Src, 0, AMDGPU::sub0);
2697   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2698     .addReg(Src, 0, AMDGPU::sub1);
2699   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2700     .addImm(0x80000000);
2701 
2702   // Set or toggle sign bit.
2703   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2704   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2705     .addReg(HiReg)
2706     .addReg(ConstReg)
2707     .setOperandDead(3); // Dead scc
2708   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2709     .addReg(LoReg)
2710     .addImm(AMDGPU::sub0)
2711     .addReg(OpReg)
2712     .addImm(AMDGPU::sub1);
2713   MI.eraseFromParent();
2714   return true;
2715 }
2716 
2717 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2718 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2719   Register Dst = MI.getOperand(0).getReg();
2720   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2721   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2722       MRI->getType(Dst) != LLT::scalar(64))
2723     return false;
2724 
2725   Register Src = MI.getOperand(1).getReg();
2726   MachineBasicBlock *BB = MI.getParent();
2727   const DebugLoc &DL = MI.getDebugLoc();
2728   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2729   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2730   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2731   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2732 
2733   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2734       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2735     return false;
2736 
2737   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2738     .addReg(Src, 0, AMDGPU::sub0);
2739   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2740     .addReg(Src, 0, AMDGPU::sub1);
2741   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2742     .addImm(0x7fffffff);
2743 
2744   // Clear sign bit.
2745   // TODO: Should this use S_BITSET0_*?
2746   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2747     .addReg(HiReg)
2748     .addReg(ConstReg)
2749     .setOperandDead(3); // Dead scc
2750   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2751     .addReg(LoReg)
2752     .addImm(AMDGPU::sub0)
2753     .addReg(OpReg)
2754     .addImm(AMDGPU::sub1);
2755 
2756   MI.eraseFromParent();
2757   return true;
2758 }
2759 
2760 static bool isConstant(const MachineInstr &MI) {
2761   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2762 }
2763 
2764 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2765     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2766 
2767   unsigned OpNo = Load.getOpcode() == AMDGPU::G_PREFETCH ? 0 : 1;
2768   const MachineInstr *PtrMI =
2769       MRI.getUniqueVRegDef(Load.getOperand(OpNo).getReg());
2770 
2771   assert(PtrMI);
2772 
2773   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2774     return;
2775 
2776   GEPInfo GEPInfo;
2777 
2778   for (unsigned i = 1; i != 3; ++i) {
2779     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2780     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2781     assert(OpDef);
2782     if (i == 2 && isConstant(*OpDef)) {
2783       // TODO: Could handle constant base + variable offset, but a combine
2784       // probably should have commuted it.
2785       assert(GEPInfo.Imm == 0);
2786       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2787       continue;
2788     }
2789     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2790     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2791       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2792     else
2793       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2794   }
2795 
2796   AddrInfo.push_back(GEPInfo);
2797   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2798 }
2799 
2800 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2801   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2802 }
2803 
2804 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2805   if (!MI.hasOneMemOperand())
2806     return false;
2807 
2808   const MachineMemOperand *MMO = *MI.memoperands_begin();
2809   const Value *Ptr = MMO->getValue();
2810 
2811   // UndefValue means this is a load of a kernel input.  These are uniform.
2812   // Sometimes LDS instructions have constant pointers.
2813   // If Ptr is null, then that means this mem operand contains a
2814   // PseudoSourceValue like GOT.
2815   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2816       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2817     return true;
2818 
2819   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2820     return true;
2821 
2822   if (MI.getOpcode() == AMDGPU::G_PREFETCH)
2823     return RBI.getRegBank(MI.getOperand(0).getReg(), *MRI, TRI)->getID() ==
2824            AMDGPU::SGPRRegBankID;
2825 
2826   const Instruction *I = dyn_cast<Instruction>(Ptr);
2827   return I && I->getMetadata("amdgpu.uniform");
2828 }
2829 
2830 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2831   for (const GEPInfo &GEPInfo : AddrInfo) {
2832     if (!GEPInfo.VgprParts.empty())
2833       return true;
2834   }
2835   return false;
2836 }
2837 
2838 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2839   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2840   unsigned AS = PtrTy.getAddressSpace();
2841   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2842       STI.ldsRequiresM0Init()) {
2843     MachineBasicBlock *BB = I.getParent();
2844 
2845     // If DS instructions require M0 initialization, insert it before selecting.
2846     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2847       .addImm(-1);
2848   }
2849 }
2850 
2851 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2852   MachineInstr &I) const {
2853   initM0(I);
2854   return selectImpl(I, *CoverageInfo);
2855 }
2856 
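// Return true if Reg is defined, looking through copies and bitwise combines,
// by G_ICMP/G_FCMP or the amdgcn.class intrinsic. selectG_BRCOND uses this to
// decide whether the condition still needs to be ANDed with exec.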
2857 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2858   if (Reg.isPhysical())
2859     return false;
2860 
2861   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2862   const unsigned Opcode = MI.getOpcode();
2863 
2864   if (Opcode == AMDGPU::COPY)
2865     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2866 
2867   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2868       Opcode == AMDGPU::G_XOR)
2869     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2870            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2871 
2872   if (auto *GI = dyn_cast<GIntrinsic>(&MI))
2873     return GI->is(Intrinsic::amdgcn_class);
2874 
2875   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2876 }
2877 
2878 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2879   MachineBasicBlock *BB = I.getParent();
2880   MachineOperand &CondOp = I.getOperand(0);
2881   Register CondReg = CondOp.getReg();
2882   const DebugLoc &DL = I.getDebugLoc();
2883 
2884   unsigned BrOpcode;
2885   Register CondPhysReg;
2886   const TargetRegisterClass *ConstrainRC;
2887 
2888   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2889   // whether the branch is uniform when selecting the instruction. In
2890   // GlobalISel, we should push that decision into RegBankSelect. For now,
2891   // assume that RegBankSelect knows what it's doing if the branch condition is
2892   // scc, even though it currently does not.
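  // Illustrative sketch of the two lowerings built below:
  //   uniform:   $scc = COPY %cond;          S_CBRANCH_SCC1 %bb
  //   divergent: [%cond = S_AND_B32/_B64 %cond, $exec]  (unless %cond is
  //              already a compare result);  $vcc = COPY %cond;
  //              S_CBRANCH_VCCNZ %bb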
2893   if (!isVCC(CondReg, *MRI)) {
2894     if (MRI->getType(CondReg) != LLT::scalar(32))
2895       return false;
2896 
2897     CondPhysReg = AMDGPU::SCC;
2898     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2899     ConstrainRC = &AMDGPU::SReg_32RegClass;
2900   } else {
2901     // FIXME: Should scc->vcc copies be ANDed with exec?
2902
2903     // Unless the value of CondReg is the result of a V_CMP* instruction, we
2904     // need to insert an AND with exec.
2905     if (!isVCmpResult(CondReg, *MRI)) {
2906       const bool Is64 = STI.isWave64();
2907       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2908       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2909 
2910       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2911       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2912           .addReg(CondReg)
2913           .addReg(Exec)
2914           .setOperandDead(3); // Dead scc
2915       CondReg = TmpReg;
2916     }
2917 
2918     CondPhysReg = TRI.getVCC();
2919     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2920     ConstrainRC = TRI.getBoolRC();
2921   }
2922 
2923   if (!MRI->getRegClassOrNull(CondReg))
2924     MRI->setRegClass(CondReg, ConstrainRC);
2925 
2926   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2927     .addReg(CondReg);
2928   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2929     .addMBB(I.getOperand(1).getMBB());
2930 
2931   I.eraseFromParent();
2932   return true;
2933 }
2934 
2935 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2936   MachineInstr &I) const {
2937   Register DstReg = I.getOperand(0).getReg();
2938   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2939   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2940   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2941   if (IsVGPR)
2942     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2943 
2944   return RBI.constrainGenericRegister(
2945     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2946 }
2947 
2948 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2949   Register DstReg = I.getOperand(0).getReg();
2950   Register SrcReg = I.getOperand(1).getReg();
2951   Register MaskReg = I.getOperand(2).getReg();
2952   LLT Ty = MRI->getType(DstReg);
2953   LLT MaskTy = MRI->getType(MaskReg);
2954   MachineBasicBlock *BB = I.getParent();
2955   const DebugLoc &DL = I.getDebugLoc();
2956 
2957   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2958   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2959   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2960   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2961   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2962     return false;
2963 
2964   // Try to avoid emitting a bit operation when we only need to touch half of
2965   // the 64-bit pointer.
2966   APInt MaskOnes = KB->getKnownOnes(MaskReg).zext(64);
2967   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2968   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2969 
2970   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2971   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2972 
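  // If the destination is an SGPR 64-bit pointer and neither half of the mask
  // is known to be all ones, a single S_AND_B64 handles the whole pointer.
  // Otherwise split into 32-bit halves below, copying any half whose mask is
  // all ones and ANDing the rest.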
2973   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2974       !CanCopyLow32 && !CanCopyHi32) {
2975     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2976       .addReg(SrcReg)
2977       .addReg(MaskReg)
2978       .setOperandDead(3); // Dead scc
2979     I.eraseFromParent();
2980     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2981   }
2982 
2983   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2984   const TargetRegisterClass &RegRC
2985     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2986 
2987   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2988   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2989   const TargetRegisterClass *MaskRC =
2990       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2991 
2992   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2993       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2994       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2995     return false;
2996 
2997   if (Ty.getSizeInBits() == 32) {
2998     assert(MaskTy.getSizeInBits() == 32 &&
2999            "ptrmask should have been narrowed during legalize");
3000 
3001     auto NewOp = BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
3002       .addReg(SrcReg)
3003       .addReg(MaskReg);
3004 
3005     if (!IsVGPR)
3006       NewOp.setOperandDead(3); // Dead scc
3007     I.eraseFromParent();
3008     return true;
3009   }
3010 
3011   Register HiReg = MRI->createVirtualRegister(&RegRC);
3012   Register LoReg = MRI->createVirtualRegister(&RegRC);
3013 
3014   // Extract the subregisters from the source pointer.
3015   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
3016     .addReg(SrcReg, 0, AMDGPU::sub0);
3017   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
3018     .addReg(SrcReg, 0, AMDGPU::sub1);
3019 
3020   Register MaskedLo, MaskedHi;
3021 
3022   if (CanCopyLow32) {
3023     // If all the bits in the low half are 1, we only need a copy for it.
3024     MaskedLo = LoReg;
3025   } else {
3026     // Extract the mask subregister and apply the and.
3027     Register MaskLo = MRI->createVirtualRegister(&RegRC);
3028     MaskedLo = MRI->createVirtualRegister(&RegRC);
3029 
3030     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
3031       .addReg(MaskReg, 0, AMDGPU::sub0);
3032     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
3033       .addReg(LoReg)
3034       .addReg(MaskLo);
3035   }
3036 
3037   if (CanCopyHi32) {
3038     // If all the bits in the high half are 1, we only need a copy for it.
3039     MaskedHi = HiReg;
3040   } else {
3041     Register MaskHi = MRI->createVirtualRegister(&RegRC);
3042     MaskedHi = MRI->createVirtualRegister(&RegRC);
3043 
3044     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
3045       .addReg(MaskReg, 0, AMDGPU::sub1);
3046     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
3047       .addReg(HiReg)
3048       .addReg(MaskHi);
3049   }
3050 
3051   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
3052     .addReg(MaskedLo)
3053     .addImm(AMDGPU::sub0)
3054     .addReg(MaskedHi)
3055     .addImm(AMDGPU::sub1);
3056   I.eraseFromParent();
3057   return true;
3058 }
3059 
3060 /// Return the register to use for the index value, and the subregister to use
3061 /// for the indirectly accessed register.
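/// Illustrative example: for a 32-bit element type (EltSize == 4) and an index
/// defined as (%base + 2), this is expected to return %base paired with the
/// subregister of the third 32-bit piece; an out-of-range constant offset
/// falls back to the original index register and the first subregister.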
3062 static std::pair<Register, unsigned>
3063 computeIndirectRegIndex(MachineRegisterInfo &MRI, const SIRegisterInfo &TRI,
3064                         const TargetRegisterClass *SuperRC, Register IdxReg,
3065                         unsigned EltSize, GISelKnownBits &KnownBits) {
3066   Register IdxBaseReg;
3067   int Offset;
3068 
3069   std::tie(IdxBaseReg, Offset) =
3070       AMDGPU::getBaseWithConstantOffset(MRI, IdxReg, &KnownBits);
3071   if (IdxBaseReg == AMDGPU::NoRegister) {
3072     // This will happen if the index is a known constant. This should ordinarily
3073     // be legalized out, but handle it as a register just in case.
3074     assert(Offset == 0);
3075     IdxBaseReg = IdxReg;
3076   }
3077 
3078   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
3079 
3080   // Skip out of bounds offsets, or else we would end up using an undefined
3081   // register.
3082   if (static_cast<unsigned>(Offset) >= SubRegs.size())
3083     return std::pair(IdxReg, SubRegs[0]);
3084   return std::pair(IdxBaseReg, SubRegs[Offset]);
3085 }
3086 
3087 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
3088   MachineInstr &MI) const {
3089   Register DstReg = MI.getOperand(0).getReg();
3090   Register SrcReg = MI.getOperand(1).getReg();
3091   Register IdxReg = MI.getOperand(2).getReg();
3092 
3093   LLT DstTy = MRI->getType(DstReg);
3094   LLT SrcTy = MRI->getType(SrcReg);
3095 
3096   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3097   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
3098   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
3099 
3100   // The index must be scalar. If it wasn't, RegBankSelect should have moved
3101   // this into a waterfall loop.
3102   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
3103     return false;
3104 
3105   const TargetRegisterClass *SrcRC =
3106       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
3107   const TargetRegisterClass *DstRC =
3108       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
3109   if (!SrcRC || !DstRC)
3110     return false;
3111   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
3112       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
3113       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
3114     return false;
3115 
3116   MachineBasicBlock *BB = MI.getParent();
3117   const DebugLoc &DL = MI.getDebugLoc();
3118   const bool Is64 = DstTy.getSizeInBits() == 64;
3119 
3120   unsigned SubReg;
3121   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(
3122       *MRI, TRI, SrcRC, IdxReg, DstTy.getSizeInBits() / 8, *KB);
3123 
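  // Three lowerings follow: an SGPR source uses S_MOVRELS_B32/B64 indexed
  // through M0; a VGPR source without VGPR index mode uses V_MOVRELS_B32
  // through M0; otherwise the GPR-index-mode pseudo from
  // getIndirectGPRIDXPseudo is used.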
3124   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
3125     if (DstTy.getSizeInBits() != 32 && !Is64)
3126       return false;
3127 
3128     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3129       .addReg(IdxReg);
3130 
3131     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
3132     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
3133       .addReg(SrcReg, 0, SubReg)
3134       .addReg(SrcReg, RegState::Implicit);
3135     MI.eraseFromParent();
3136     return true;
3137   }
3138 
3139   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
3140     return false;
3141 
3142   if (!STI.useVGPRIndexMode()) {
3143     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3144       .addReg(IdxReg);
3145     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
3146       .addReg(SrcReg, 0, SubReg)
3147       .addReg(SrcReg, RegState::Implicit);
3148     MI.eraseFromParent();
3149     return true;
3150   }
3151 
3152   const MCInstrDesc &GPRIDXDesc =
3153       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
3154   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
3155       .addReg(SrcReg)
3156       .addReg(IdxReg)
3157       .addImm(SubReg);
3158 
3159   MI.eraseFromParent();
3160   return true;
3161 }
3162 
3163 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
3164 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
3165   MachineInstr &MI) const {
3166   Register DstReg = MI.getOperand(0).getReg();
3167   Register VecReg = MI.getOperand(1).getReg();
3168   Register ValReg = MI.getOperand(2).getReg();
3169   Register IdxReg = MI.getOperand(3).getReg();
3170 
3171   LLT VecTy = MRI->getType(DstReg);
3172   LLT ValTy = MRI->getType(ValReg);
3173   unsigned VecSize = VecTy.getSizeInBits();
3174   unsigned ValSize = ValTy.getSizeInBits();
3175 
3176   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
3177   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
3178   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
3179 
3180   assert(VecTy.getElementType() == ValTy);
3181 
3182   // The index must be scalar. If it wasn't, RegBankSelect should have moved
3183   // this into a waterfall loop.
3184   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
3185     return false;
3186 
3187   const TargetRegisterClass *VecRC =
3188       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
3189   const TargetRegisterClass *ValRC =
3190       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
3191 
3192   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
3193       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
3194       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
3195       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
3196     return false;
3197 
3198   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
3199     return false;
3200 
3201   unsigned SubReg;
3202   std::tie(IdxReg, SubReg) =
3203       computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, ValSize / 8, *KB);
3204 
3205   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
3206                          STI.useVGPRIndexMode();
3207 
3208   MachineBasicBlock *BB = MI.getParent();
3209   const DebugLoc &DL = MI.getDebugLoc();
3210 
3211   if (!IndexMode) {
3212     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3213       .addReg(IdxReg);
3214 
3215     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
3216         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
3217     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
3218         .addReg(VecReg)
3219         .addReg(ValReg)
3220         .addImm(SubReg);
3221     MI.eraseFromParent();
3222     return true;
3223   }
3224 
3225   const MCInstrDesc &GPRIDXDesc =
3226       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
3227   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
3228       .addReg(VecReg)
3229       .addReg(ValReg)
3230       .addReg(IdxReg)
3231       .addImm(SubReg);
3232 
3233   MI.eraseFromParent();
3234   return true;
3235 }
3236 
3237 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3238   assert(!AMDGPU::isGFX12Plus(STI));
3239   unsigned Opc;
3240   unsigned Size = MI.getOperand(3).getImm();
3241 
3242   // The struct intrinsic variants add one additional operand over raw.
3243   const bool HasVIndex = MI.getNumOperands() == 9;
3244   Register VIndex;
3245   int OpOffset = 0;
3246   if (HasVIndex) {
3247     VIndex = MI.getOperand(4).getReg();
3248     OpOffset = 1;
3249   }
3250 
3251   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3252   std::optional<ValueAndVReg> MaybeVOffset =
3253       getIConstantVRegValWithLookThrough(VOffset, *MRI);
3254   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3255 
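  // Pick the MUBUF-to-LDS variant by addressing mode: _BOTHEN when both
  // vindex and voffset are present, _IDXEN with only vindex, _OFFEN with only
  // voffset, and _OFFSET when neither is needed.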
3256   switch (Size) {
3257   default:
3258     return false;
3259   case 1:
3260     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3261                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3262                     : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3263                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3264     break;
3265   case 2:
3266     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3267                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3268                     : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3269                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3270     break;
3271   case 4:
3272     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3273                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3274                     : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3275                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3276     break;
3277   }
3278 
3279   MachineBasicBlock *MBB = MI.getParent();
3280   const DebugLoc &DL = MI.getDebugLoc();
3281   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3282     .add(MI.getOperand(2));
3283 
3284   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3285 
3286   if (HasVIndex && HasVOffset) {
3287     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3288     BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3289       .addReg(VIndex)
3290       .addImm(AMDGPU::sub0)
3291       .addReg(VOffset)
3292       .addImm(AMDGPU::sub1);
3293 
3294     MIB.addReg(IdxReg);
3295   } else if (HasVIndex) {
3296     MIB.addReg(VIndex);
3297   } else if (HasVOffset) {
3298     MIB.addReg(VOffset);
3299   }
3300 
3301   MIB.add(MI.getOperand(1));            // rsrc
3302   MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3303   MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3304   unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3305   MIB.addImm(Aux & AMDGPU::CPol::ALL);                  // cpol
3306   MIB.addImm(Aux & AMDGPU::CPol::SWZ_pregfx12 ? 1 : 0); // swz
3307 
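  // The operation reads through the buffer and writes into LDS, so describe it
  // with two memory operands: a load MMO for the buffer access and a store MMO
  // in the LDS address space.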
3308   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3309   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3310   LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3311   MachinePointerInfo StorePtrI = LoadPtrI;
3312   StorePtrI.V = nullptr;
3313   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3314 
3315   auto F = LoadMMO->getFlags() &
3316            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3317   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3318                                      Size, LoadMMO->getBaseAlign());
3319 
3320   MachineMemOperand *StoreMMO =
3321       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3322                                sizeof(int32_t), LoadMMO->getBaseAlign());
3323 
3324   MIB.setMemRefs({LoadMMO, StoreMMO});
3325 
3326   MI.eraseFromParent();
3327   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3328 }
3329 
3330 /// Match a zero extend from a 32-bit value to 64-bits.
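/// Also recognizes the legalized form G_MERGE_VALUES(%x:s32, 0). Returns the
/// 32-bit source register on success and an invalid Register otherwise.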
3331 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3332   Register ZExtSrc;
3333   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3334     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3335 
3336   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3337   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3338   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3339     return Register();
3340 
3341   assert(Def->getNumOperands() == 3 &&
3342          MRI.getType(Def->getOperand(0).getReg()) == LLT::scalar(64));
3343   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3344     return Def->getOperand(1).getReg();
3345   }
3346 
3347   return Register();
3348 }
3349 
3350 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {
3351   unsigned Opc;
3352   unsigned Size = MI.getOperand(3).getImm();
3353 
3354   switch (Size) {
3355   default:
3356     return false;
3357   case 1:
3358     Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3359     break;
3360   case 2:
3361     Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3362     break;
3363   case 4:
3364     Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3365     break;
3366   }
3367 
3368   MachineBasicBlock *MBB = MI.getParent();
3369   const DebugLoc &DL = MI.getDebugLoc();
3370   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3371     .add(MI.getOperand(2));
3372 
3373   Register Addr = MI.getOperand(1).getReg();
3374   Register VOffset;
3375   // Try to split SAddr and VOffset. Global and LDS pointers share the same
3376   // immediate offset, so we cannot use a regular SelectGlobalSAddr().
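  // Looking for a pattern of roughly this shape (illustrative MIR):
  //   %off:_(s64) = G_ZEXT %voffset:_(s32)
  //   %addr:_(p1) = G_PTR_ADD %saddr:_(p1), %off
  // so that the SGPR base and the 32-bit offset can be selected as separate
  // saddr and voffset operands.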
3377   if (!isSGPR(Addr)) {
3378     auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3379     if (isSGPR(AddrDef->Reg)) {
3380       Addr = AddrDef->Reg;
3381     } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3382       Register SAddr =
3383           getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3384       if (isSGPR(SAddr)) {
3385         Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3386         if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3387           Addr = SAddr;
3388           VOffset = Off;
3389         }
3390       }
3391     }
3392   }
3393 
3394   if (isSGPR(Addr)) {
3395     Opc = AMDGPU::getGlobalSaddrOp(Opc);
3396     if (!VOffset) {
3397       VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3398       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3399         .addImm(0);
3400     }
3401   }
3402 
3403   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3404     .addReg(Addr);
3405 
3406   if (isSGPR(Addr))
3407     MIB.addReg(VOffset);
3408 
3409   MIB.add(MI.getOperand(4))  // offset
3410      .add(MI.getOperand(5)); // cpol
3411 
3412   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3413   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3414   LoadPtrI.Offset = MI.getOperand(4).getImm();
3415   MachinePointerInfo StorePtrI = LoadPtrI;
3416   LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3417   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3418   auto F = LoadMMO->getFlags() &
3419            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3420   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3421                                      Size, LoadMMO->getBaseAlign());
3422   MachineMemOperand *StoreMMO =
3423       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3424                                sizeof(int32_t), Align(4));
3425 
3426   MIB.setMemRefs({LoadMMO, StoreMMO});
3427 
3428   MI.eraseFromParent();
3429   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3430 }
3431 
3432 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3433   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3434   MI.removeOperand(1);
3435   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3436   return true;
3437 }
3438 
3439 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3440   unsigned Opc;
3441   switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
3442   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3443     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3444     break;
3445   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3446     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3447     break;
3448   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3449     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3450     break;
3451   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3452     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3453     break;
3454   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3455     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3456     break;
3457   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3458     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3459     break;
3460   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
3461     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_BF8_e64;
3462     break;
3463   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
3464     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_FP8_e64;
3465     break;
3466   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
3467     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_BF8_e64;
3468     break;
3469   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
3470     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_FP8_e64;
3471     break;
3472   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
3473     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_BF8_e64;
3474     break;
3475   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
3476     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_FP8_e64;
3477     break;
3478   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
3479     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_BF8_e64;
3480     break;
3481   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
3482     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_FP8_e64;
3483     break;
3484   default:
3485     llvm_unreachable("unhandled smfmac intrinsic");
3486   }
3487 
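  // Rewrite the intrinsic into the target pseudo: drop the intrinsic ID
  // operand and move the VDst_In input to the end of the operand list, where
  // the pseudo expects it.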
3488   auto VDst_In = MI.getOperand(4);
3489 
3490   MI.setDesc(TII.get(Opc));
3491   MI.removeOperand(4); // VDst_In
3492   MI.removeOperand(1); // Intrinsic ID
3493   MI.addOperand(VDst_In); // Readd VDst_In to the end
3494   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3495   return true;
3496 }
3497 
3498 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3499   Register DstReg = MI.getOperand(0).getReg();
3500   Register SrcReg = MI.getOperand(1).getReg();
3501   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3502   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3503   MachineBasicBlock *MBB = MI.getParent();
3504   const DebugLoc &DL = MI.getDebugLoc();
3505 
3506   if (IsVALU) {
3507     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3508       .addImm(Subtarget->getWavefrontSizeLog2())
3509       .addReg(SrcReg);
3510   } else {
3511     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3512       .addReg(SrcReg)
3513       .addImm(Subtarget->getWavefrontSizeLog2())
3514       .setOperandDead(3); // Dead scc
3515   }
3516 
3517   const TargetRegisterClass &RC =
3518       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3519   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3520     return false;
3521 
3522   MI.eraseFromParent();
3523   return true;
3524 }
3525 
3526 bool AMDGPUInstructionSelector::selectStackRestore(MachineInstr &MI) const {
3527   Register SrcReg = MI.getOperand(0).getReg();
3528   if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, *MRI))
3529     return false;
3530 
3531   MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
3532   Register SP =
3533       Subtarget->getTargetLowering()->getStackPointerRegisterToSaveRestore();
3534   Register WaveAddr = getWaveAddress(DefMI);
3535   MachineBasicBlock *MBB = MI.getParent();
3536   const DebugLoc &DL = MI.getDebugLoc();
3537 
3538   if (!WaveAddr) {
3539     WaveAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3540     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), WaveAddr)
3541       .addReg(SrcReg)
3542       .addImm(Subtarget->getWavefrontSizeLog2())
3543       .setOperandDead(3); // Dead scc
3544   }
3545 
3546   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), SP)
3547     .addReg(WaveAddr);
3548 
3549   MI.eraseFromParent();
3550   return true;
3551 }
3552 
3553 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3554   if (I.isPHI())
3555     return selectPHI(I);
3556 
3557   if (!I.isPreISelOpcode()) {
3558     if (I.isCopy())
3559       return selectCOPY(I);
3560     return true;
3561   }
3562 
3563   switch (I.getOpcode()) {
3564   case TargetOpcode::G_AND:
3565   case TargetOpcode::G_OR:
3566   case TargetOpcode::G_XOR:
3567     if (selectImpl(I, *CoverageInfo))
3568       return true;
3569     return selectG_AND_OR_XOR(I);
3570   case TargetOpcode::G_ADD:
3571   case TargetOpcode::G_SUB:
3572     if (selectImpl(I, *CoverageInfo))
3573       return true;
3574     return selectG_ADD_SUB(I);
3575   case TargetOpcode::G_UADDO:
3576   case TargetOpcode::G_USUBO:
3577   case TargetOpcode::G_UADDE:
3578   case TargetOpcode::G_USUBE:
3579     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3580   case AMDGPU::G_AMDGPU_MAD_U64_U32:
3581   case AMDGPU::G_AMDGPU_MAD_I64_I32:
3582     return selectG_AMDGPU_MAD_64_32(I);
3583   case TargetOpcode::G_INTTOPTR:
3584   case TargetOpcode::G_BITCAST:
3585   case TargetOpcode::G_PTRTOINT:
3586     return selectCOPY(I);
3587   case TargetOpcode::G_CONSTANT:
3588   case TargetOpcode::G_FCONSTANT:
3589     return selectG_CONSTANT(I);
3590   case TargetOpcode::G_FNEG:
3591     if (selectImpl(I, *CoverageInfo))
3592       return true;
3593     return selectG_FNEG(I);
3594   case TargetOpcode::G_FABS:
3595     if (selectImpl(I, *CoverageInfo))
3596       return true;
3597     return selectG_FABS(I);
3598   case TargetOpcode::G_EXTRACT:
3599     return selectG_EXTRACT(I);
3600   case TargetOpcode::G_MERGE_VALUES:
3601   case TargetOpcode::G_CONCAT_VECTORS:
3602     return selectG_MERGE_VALUES(I);
3603   case TargetOpcode::G_UNMERGE_VALUES:
3604     return selectG_UNMERGE_VALUES(I);
3605   case TargetOpcode::G_BUILD_VECTOR:
3606   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3607     return selectG_BUILD_VECTOR(I);
3608   case TargetOpcode::G_PTR_ADD:
3609     if (selectImpl(I, *CoverageInfo))
3610       return true;
3611     return selectG_PTR_ADD(I);
3612   case TargetOpcode::G_IMPLICIT_DEF:
3613     return selectG_IMPLICIT_DEF(I);
3614   case TargetOpcode::G_FREEZE:
3615     return selectCOPY(I);
3616   case TargetOpcode::G_INSERT:
3617     return selectG_INSERT(I);
3618   case TargetOpcode::G_INTRINSIC:
3619   case TargetOpcode::G_INTRINSIC_CONVERGENT:
3620     return selectG_INTRINSIC(I);
3621   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3622   case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
3623     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3624   case TargetOpcode::G_ICMP:
3625   case TargetOpcode::G_FCMP:
3626     if (selectG_ICMP_or_FCMP(I))
3627       return true;
3628     return selectImpl(I, *CoverageInfo);
3629   case TargetOpcode::G_LOAD:
3630   case TargetOpcode::G_STORE:
3631   case TargetOpcode::G_ATOMIC_CMPXCHG:
3632   case TargetOpcode::G_ATOMICRMW_XCHG:
3633   case TargetOpcode::G_ATOMICRMW_ADD:
3634   case TargetOpcode::G_ATOMICRMW_SUB:
3635   case TargetOpcode::G_ATOMICRMW_AND:
3636   case TargetOpcode::G_ATOMICRMW_OR:
3637   case TargetOpcode::G_ATOMICRMW_XOR:
3638   case TargetOpcode::G_ATOMICRMW_MIN:
3639   case TargetOpcode::G_ATOMICRMW_MAX:
3640   case TargetOpcode::G_ATOMICRMW_UMIN:
3641   case TargetOpcode::G_ATOMICRMW_UMAX:
3642   case TargetOpcode::G_ATOMICRMW_UINC_WRAP:
3643   case TargetOpcode::G_ATOMICRMW_UDEC_WRAP:
3644   case TargetOpcode::G_ATOMICRMW_FADD:
3645   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3646   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3647     return selectG_LOAD_STORE_ATOMICRMW(I);
3648   case TargetOpcode::G_SELECT:
3649     return selectG_SELECT(I);
3650   case TargetOpcode::G_TRUNC:
3651     return selectG_TRUNC(I);
3652   case TargetOpcode::G_SEXT:
3653   case TargetOpcode::G_ZEXT:
3654   case TargetOpcode::G_ANYEXT:
3655   case TargetOpcode::G_SEXT_INREG:
3656     // This is a workaround. For extension from type i1, `selectImpl()` uses
3657     // patterns from the TD file and generates an illegal VGPR to SGPR COPY,
3658     // since type i1 can only be held in an SGPR class.
3659     if (MRI->getType(I.getOperand(1).getReg()) != LLT::scalar(1) &&
3660         selectImpl(I, *CoverageInfo))
3661       return true;
3662     return selectG_SZA_EXT(I);
3663   case TargetOpcode::G_FPEXT:
3664     if (selectG_FPEXT(I))
3665       return true;
3666     return selectImpl(I, *CoverageInfo);
3667   case TargetOpcode::G_BRCOND:
3668     return selectG_BRCOND(I);
3669   case TargetOpcode::G_GLOBAL_VALUE:
3670     return selectG_GLOBAL_VALUE(I);
3671   case TargetOpcode::G_PTRMASK:
3672     return selectG_PTRMASK(I);
3673   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3674     return selectG_EXTRACT_VECTOR_ELT(I);
3675   case TargetOpcode::G_INSERT_VECTOR_ELT:
3676     return selectG_INSERT_VECTOR_ELT(I);
3677   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3678   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3679   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3680   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3681     const AMDGPU::ImageDimIntrinsicInfo *Intr =
3682         AMDGPU::getImageDimIntrinsicInfo(AMDGPU::getIntrinsicID(I));
3683     assert(Intr && "not an image intrinsic with image pseudo");
3684     return selectImageIntrinsic(I, Intr);
3685   }
3686   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3687     return selectBVHIntrinsic(I);
3688   case AMDGPU::G_SBFX:
3689   case AMDGPU::G_UBFX:
3690     return selectG_SBFX_UBFX(I);
3691   case AMDGPU::G_SI_CALL:
3692     I.setDesc(TII.get(AMDGPU::SI_CALL));
3693     return true;
3694   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3695     return selectWaveAddress(I);
3696   case AMDGPU::G_STACKRESTORE:
3697     return selectStackRestore(I);
3698   default:
3699     return selectImpl(I, *CoverageInfo);
3700   }
3701   return false;
3702 }
3703 
3704 InstructionSelector::ComplexRendererFns
3705 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3706   return {{
3707       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3708   }};
3709 
3710 }
3711 
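// Strip source modifiers off a VOP3 operand: G_FNEG (or a canonicalizing
// G_FSUB from +/-0.0) contributes SISrcMods::NEG, G_FABS contributes
// SISrcMods::ABS when AllowAbs is set, and OpSel adds SISrcMods::OP_SEL_0.
// Returns the underlying source register and the accumulated modifier bits.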
3712 std::pair<Register, unsigned>
3713 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3714                                               bool IsCanonicalizing,
3715                                               bool AllowAbs, bool OpSel) const {
3716   Register Src = Root.getReg();
3717   unsigned Mods = 0;
3718   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3719 
3720   if (MI->getOpcode() == AMDGPU::G_FNEG) {
3721     Src = MI->getOperand(1).getReg();
3722     Mods |= SISrcMods::NEG;
3723     MI = getDefIgnoringCopies(Src, *MRI);
3724   } else if (MI->getOpcode() == AMDGPU::G_FSUB && IsCanonicalizing) {
3725     // Fold fsub [+-]0 into fneg. This may not have folded depending on the
3726     // denormal mode, but we're implicitly canonicalizing in a source operand.
3727     const ConstantFP *LHS =
3728         getConstantFPVRegVal(MI->getOperand(1).getReg(), *MRI);
3729     if (LHS && LHS->isZero()) {
3730       Mods |= SISrcMods::NEG;
3731       Src = MI->getOperand(2).getReg();
3732     }
3733   }
3734 
3735   if (AllowAbs && MI->getOpcode() == AMDGPU::G_FABS) {
3736     Src = MI->getOperand(1).getReg();
3737     Mods |= SISrcMods::ABS;
3738   }
3739 
3740   if (OpSel)
3741     Mods |= SISrcMods::OP_SEL_0;
3742 
3743   return std::pair(Src, Mods);
3744 }
3745 
3746 Register AMDGPUInstructionSelector::copyToVGPRIfSrcFolded(
3747     Register Src, unsigned Mods, MachineOperand Root, MachineInstr *InsertPt,
3748     bool ForceVGPR) const {
3749   if ((Mods != 0 || ForceVGPR) &&
3750       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3751 
3752     // If we looked through copies to find source modifiers on an SGPR operand,
3753     // we now have an SGPR register source. To avoid potentially violating the
3754     // constant bus restriction, we need to insert a copy to a VGPR.
3755     Register VGPRSrc = MRI->cloneVirtualRegister(Root.getReg());
3756     BuildMI(*InsertPt->getParent(), InsertPt, InsertPt->getDebugLoc(),
3757             TII.get(AMDGPU::COPY), VGPRSrc)
3758         .addReg(Src);
3759     Src = VGPRSrc;
3760   }
3761 
3762   return Src;
3763 }
3764 
3765 ///
3766 /// This will select either an SGPR or VGPR operand and will save us from
3767 /// having to write an extra tablegen pattern.
3768 InstructionSelector::ComplexRendererFns
3769 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3770   return {{
3771       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3772   }};
3773 }
3774 
3775 InstructionSelector::ComplexRendererFns
3776 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3777   Register Src;
3778   unsigned Mods;
3779   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3780 
3781   return {{
3782       [=](MachineInstrBuilder &MIB) {
3783         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3784       },
3785       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3786       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3787       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3788   }};
3789 }
3790 
3791 InstructionSelector::ComplexRendererFns
3792 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3793   Register Src;
3794   unsigned Mods;
3795   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3796                                            /*IsCanonicalizing=*/true,
3797                                            /*AllowAbs=*/false);
3798 
3799   return {{
3800       [=](MachineInstrBuilder &MIB) {
3801         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3802       },
3803       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3804       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3805       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3806   }};
3807 }
3808 
3809 InstructionSelector::ComplexRendererFns
3810 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3811   return {{
3812       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3813       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3814       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3815   }};
3816 }
3817 
3818 InstructionSelector::ComplexRendererFns
3819 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3820   Register Src;
3821   unsigned Mods;
3822   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3823 
3824   return {{
3825       [=](MachineInstrBuilder &MIB) {
3826         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3827       },
3828       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3829   }};
3830 }
3831 
3832 InstructionSelector::ComplexRendererFns
3833 AMDGPUInstructionSelector::selectVOP3ModsNonCanonicalizing(
3834     MachineOperand &Root) const {
3835   Register Src;
3836   unsigned Mods;
3837   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/false);
3838 
3839   return {{
3840       [=](MachineInstrBuilder &MIB) {
3841         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3842       },
3843       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3844   }};
3845 }
3846 
3847 InstructionSelector::ComplexRendererFns
3848 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3849   Register Src;
3850   unsigned Mods;
3851   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /*IsCanonicalizing=*/true,
3852                                            /*AllowAbs=*/false);
3853 
3854   return {{
3855       [=](MachineInstrBuilder &MIB) {
3856         MIB.addReg(copyToVGPRIfSrcFolded(Src, Mods, Root, MIB));
3857       },
3858       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3859   }};
3860 }
3861 
3862 InstructionSelector::ComplexRendererFns
3863 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3864   Register Reg = Root.getReg();
3865   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3866   if (Def->getOpcode() == AMDGPU::G_FNEG || Def->getOpcode() == AMDGPU::G_FABS)
3867     return {};
3868   return {{
3869       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3870   }};
3871 }
3872 
3873 std::pair<Register, unsigned>
3874 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3875   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3876   unsigned Mods = 0;
3877   MachineInstr *MI = MRI.getVRegDef(Src);
3878 
3879   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3880       // It's possible to see an f32 fneg here, but unlikely.
3881       // TODO: Treat f32 fneg as only high bit.
3882       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3883     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3884     Src = MI->getOperand(1).getReg();
3885     MI = MRI.getVRegDef(Src);
3886   }
3887 
3888   // TODO: Handle G_FSUB 0 as fneg
3889 
3890   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3891   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3892 
3893   // Packed instructions do not have abs modifiers.
3894   Mods |= SISrcMods::OP_SEL_1;
3895 
3896   return std::pair(Src, Mods);
3897 }
3898 
3899 InstructionSelector::ComplexRendererFns
3900 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3901   MachineRegisterInfo &MRI
3902     = Root.getParent()->getParent()->getParent()->getRegInfo();
3903 
3904   Register Src;
3905   unsigned Mods;
3906   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3907 
3908   return {{
3909       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3910       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3911   }};
3912 }
3913 
3914 InstructionSelector::ComplexRendererFns
3915 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3916   MachineRegisterInfo &MRI
3917     = Root.getParent()->getParent()->getParent()->getRegInfo();
3918 
3919   Register Src;
3920   unsigned Mods;
3921   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3922 
3923   return {{
3924       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3925       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3926   }};
3927 }
3928 
3929 InstructionSelector::ComplexRendererFns
3930 AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
3931   // A literal i1 value set in the intrinsic represents SrcMods for the next
3932   // operand. It is held in the Imm operand as an i1 sign-extended to int64_t:
3933   // 1 (i.e. -1) promotes packed values to signed, 0 treats them as unsigned.
3934   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3935          "expected i1 value");
3936   unsigned Mods = SISrcMods::OP_SEL_1;
3937   if (Root.getImm() == -1)
3938     Mods ^= SISrcMods::NEG;
3939   return {{
3940       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3941   }};
3942 }
3943 
3944 InstructionSelector::ComplexRendererFns
3945 AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods(
3946     MachineOperand &Root) const {
3947   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3948          "expected i1 value");
3949   unsigned Mods = SISrcMods::OP_SEL_1;
3950   if (Root.getImm() != 0)
3951     Mods |= SISrcMods::OP_SEL_0;
3952 
3953   return {{
3954       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3955   }};
3956 }
3957 
3958 InstructionSelector::ComplexRendererFns
3959 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3960   Register Src;
3961   unsigned Mods;
3962   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3963 
3964   // FIXME: Handle op_sel
3965   return {{
3966       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3967       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3968   }};
3969 }
3970 
3971 InstructionSelector::ComplexRendererFns
3972 AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
3973   Register Src;
3974   unsigned Mods;
3975   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3976                                            /*IsCanonicalizing=*/true,
3977                                            /*AllowAbs=*/false,
3978                                            /*OpSel=*/false);
3979 
3980   return {{
3981       [=](MachineInstrBuilder &MIB) {
3982         MIB.addReg(
3983             copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true));
3984       },
3985       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3986   }};
3987 }
3988 
3989 InstructionSelector::ComplexRendererFns
3990 AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
3991   Register Src;
3992   unsigned Mods;
3993   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3994                                            /*IsCanonicalizing=*/true,
3995                                            /*AllowAbs=*/false,
3996                                            /*OpSel=*/true);
3997 
3998   return {{
3999       [=](MachineInstrBuilder &MIB) {
4000         MIB.addReg(
4001             copyToVGPRIfSrcFolded(Src, Mods, Root, MIB, /* ForceVGPR */ true));
4002       },
4003       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
4004   }};
4005 }
4006 
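// Shared helper for the SMRD/SMEM addressing modes below. Depending on which
// of the SOffset and Offset out-parameters are supplied, it tries to match the
// immediate (_IMM), register (_SGPR), or register-plus-immediate (_SGPR_IMM)
// offset forms consumed by the selectSmrd* renderers.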
4007 bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
4008                                                  Register &Base,
4009                                                  Register *SOffset,
4010                                                  int64_t *Offset) const {
4011   MachineInstr *MI = Root.getParent();
4012   MachineBasicBlock *MBB = MI->getParent();
4013 
4014   // FIXME: We should shrink the GEP if the offset is known to be <= 32 bits;
4015   // then we can select all ptr + 32-bit offsets.
4016   SmallVector<GEPInfo, 4> AddrInfo;
4017   getAddrModeInfo(*MI, *MRI, AddrInfo);
4018 
4019   if (AddrInfo.empty())
4020     return false;
4021 
4022   const GEPInfo &GEPI = AddrInfo[0];
4023   std::optional<int64_t> EncodedImm =
4024       AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);
4025 
4026   if (SOffset && Offset) {
4027     if (GEPI.SgprParts.size() == 1 && GEPI.Imm != 0 && EncodedImm &&
4028         AddrInfo.size() > 1) {
4029       const GEPInfo &GEPI2 = AddrInfo[1];
4030       if (GEPI2.SgprParts.size() == 2 && GEPI2.Imm == 0) {
4031         if (Register OffsetReg =
4032                 matchZeroExtendFromS32(*MRI, GEPI2.SgprParts[1])) {
4033           Base = GEPI2.SgprParts[0];
4034           *SOffset = OffsetReg;
4035           *Offset = *EncodedImm;
4036           return true;
4037         }
4038       }
4039     }
4040     return false;
4041   }
4042 
4043   if (Offset && GEPI.SgprParts.size() == 1 && EncodedImm) {
4044     Base = GEPI.SgprParts[0];
4045     *Offset = *EncodedImm;
4046     return true;
4047   }
4048 
4049   // SGPR offset is unsigned.
4050   if (SOffset && GEPI.SgprParts.size() == 1 && isUInt<32>(GEPI.Imm) &&
4051       GEPI.Imm != 0) {
4052     // If we make it this far we have a load with a 32-bit immediate offset.
4053     // It is OK to select this using an SGPR offset, because we have already
4054     // failed trying to select this load into one of the _IMM variants since
4055     // the _IMM patterns are considered before the _SGPR patterns.
4056     Base = GEPI.SgprParts[0];
4057     *SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4058     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), *SOffset)
4059         .addImm(GEPI.Imm);
4060     return true;
4061   }
4062 
4063   if (SOffset && GEPI.SgprParts.size() && GEPI.Imm == 0) {
4064     if (Register OffsetReg = matchZeroExtendFromS32(*MRI, GEPI.SgprParts[1])) {
4065       Base = GEPI.SgprParts[0];
4066       *SOffset = OffsetReg;
4067       return true;
4068     }
4069   }
4070 
4071   return false;
4072 }
4073 
4074 InstructionSelector::ComplexRendererFns
4075 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
4076   Register Base;
4077   int64_t Offset;
4078   if (!selectSmrdOffset(Root, Base, /* SOffset= */ nullptr, &Offset))
4079     return std::nullopt;
4080 
4081   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
4082            [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
4083 }
4084 
4085 InstructionSelector::ComplexRendererFns
4086 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
4087   SmallVector<GEPInfo, 4> AddrInfo;
4088   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
4089 
4090   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
4091     return std::nullopt;
4092 
4093   const GEPInfo &GEPInfo = AddrInfo[0];
4094   Register PtrReg = GEPInfo.SgprParts[0];
4095   std::optional<int64_t> EncodedImm =
4096       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
4097   if (!EncodedImm)
4098     return std::nullopt;
4099 
4100   return {{
4101     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
4102     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
4103   }};
4104 }
4105 
4106 InstructionSelector::ComplexRendererFns
4107 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
4108   Register Base, SOffset;
4109   if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr))
4110     return std::nullopt;
4111 
4112   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
4113            [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}};
4114 }
4115 
4116 InstructionSelector::ComplexRendererFns
4117 AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const {
4118   Register Base, SOffset;
4119   int64_t Offset;
4120   if (!selectSmrdOffset(Root, Base, &SOffset, &Offset))
4121     return std::nullopt;
4122 
4123   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
4124            [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
4125            [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
4126 }
4127 
4128 std::pair<Register, int>
4129 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
4130                                                 uint64_t FlatVariant) const {
4131   MachineInstr *MI = Root.getParent();
4132 
4133   auto Default = std::pair(Root.getReg(), 0);
4134 
4135   if (!STI.hasFlatInstOffsets())
4136     return Default;
4137 
4138   Register PtrBase;
4139   int64_t ConstOffset;
4140   std::tie(PtrBase, ConstOffset) =
4141       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4142 
4143   if (ConstOffset == 0 || (FlatVariant == SIInstrFlags::FlatScratch &&
4144                            !isFlatScratchBaseLegal(Root.getReg())))
4145     return Default;
4146 
4147   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
4148   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
4149     return Default;
4150 
4151   return std::pair(PtrBase, ConstOffset);
4152 }
4153 
4154 InstructionSelector::ComplexRendererFns
4155 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
4156   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
4157 
4158   return {{
4159       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
4160       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
4161     }};
4162 }
4163 
4164 InstructionSelector::ComplexRendererFns
4165 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
4166   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
4167 
4168   return {{
4169       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
4170       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
4171   }};
4172 }
4173 
4174 InstructionSelector::ComplexRendererFns
4175 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
4176   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
4177 
4178   return {{
4179       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
4180       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
4181     }};
4182 }
4183 
4184 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
4185 InstructionSelector::ComplexRendererFns
4186 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
4187   Register Addr = Root.getReg();
4188   Register PtrBase;
4189   int64_t ConstOffset;
4190   int64_t ImmOffset = 0;
4191 
4192   // Match the immediate offset first, which canonically is moved as low as
4193   // possible.
4194   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4195 
4196   if (ConstOffset != 0) {
4197     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
4198                               SIInstrFlags::FlatGlobal)) {
4199       Addr = PtrBase;
4200       ImmOffset = ConstOffset;
4201     } else {
4202       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
4203       if (isSGPR(PtrBaseDef->Reg)) {
4204         if (ConstOffset > 0) {
4205           // Offset is too large.
4206           //
4207           // saddr + large_offset -> saddr +
4208           //                         (voffset = large_offset & ~MaxOffset) +
4209           //                         (large_offset & MaxOffset);
4210           int64_t SplitImmOffset, RemainderOffset;
4211           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
4212               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
4213 
4214           if (isUInt<32>(RemainderOffset)) {
4215             MachineInstr *MI = Root.getParent();
4216             MachineBasicBlock *MBB = MI->getParent();
4217             Register HighBits =
4218                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4219 
4220             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4221                     HighBits)
4222                 .addImm(RemainderOffset);
4223 
4224             return {{
4225                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
4226                 [=](MachineInstrBuilder &MIB) {
4227                   MIB.addReg(HighBits);
4228                 }, // voffset
4229                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
4230             }};
4231           }
4232         }
4233 
4234         // We are adding a 64-bit SGPR and a constant. If the constant bus
4235         // limit is 1, we would need to perform 1 or 2 extra moves for each half
4236         // of the constant, so it is better to do a scalar add and then issue a
4237         // single VALU instruction to materialize zero. Otherwise it takes fewer
4238         // instructions to perform VALU adds with immediates or inline literals.
4239         unsigned NumLiterals =
4240             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
4241             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
4242         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
4243           return std::nullopt;
4244       }
4245     }
4246   }
4247 
4248   // Match the variable offset.
4249   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4250   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4251     // Look through the SGPR->VGPR copy.
4252     Register SAddr =
4253         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
4254 
4255     if (isSGPR(SAddr)) {
4256       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
4257 
4258       // It's possible voffset is an SGPR here, but the copy to VGPR will be
4259       // inserted later.
4260       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
4261         return {{[=](MachineInstrBuilder &MIB) { // saddr
4262                    MIB.addReg(SAddr);
4263                  },
4264                  [=](MachineInstrBuilder &MIB) { // voffset
4265                    MIB.addReg(VOffset);
4266                  },
4267                  [=](MachineInstrBuilder &MIB) { // offset
4268                    MIB.addImm(ImmOffset);
4269                  }}};
4270       }
4271     }
4272   }
4273 
4274   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
4275   // drop this.
4276   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
4277       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
4278     return std::nullopt;
4279 
4280   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
4281   // moves required to copy a 64-bit SGPR to VGPR.
4282   MachineInstr *MI = Root.getParent();
4283   MachineBasicBlock *MBB = MI->getParent();
4284   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4285 
4286   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
4287       .addImm(0);
4288 
4289   return {{
4290       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
4291       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
4292       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
4293   }};
4294 }
4295 
4296 InstructionSelector::ComplexRendererFns
4297 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
4298   Register Addr = Root.getReg();
4299   Register PtrBase;
4300   int64_t ConstOffset;
4301   int64_t ImmOffset = 0;
4302 
4303   // Match the immediate offset first, which canonically is moved as low as
4304   // possible.
4305   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4306 
4307   if (ConstOffset != 0 && isFlatScratchBaseLegal(Addr) &&
4308       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
4309                             SIInstrFlags::FlatScratch)) {
4310     Addr = PtrBase;
4311     ImmOffset = ConstOffset;
4312   }
4313 
4314   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4315   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4316     int FI = AddrDef->MI->getOperand(1).getIndex();
4317     return {{
4318         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4319         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4320     }};
4321   }
4322 
4323   Register SAddr = AddrDef->Reg;
4324 
4325   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4326     Register LHS = AddrDef->MI->getOperand(1).getReg();
4327     Register RHS = AddrDef->MI->getOperand(2).getReg();
4328     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4329     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
4330 
4331     if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
4332         isSGPR(RHSDef->Reg)) {
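      // Illustrative only (hypothetical MIR): for
      //   %addr:sgpr(p5) = G_PTR_ADD %stack.0, %n:sgpr(s32)
      // the frame index and the SGPR are combined with an S_ADD_I32 below and
      // the result is used as SADDR.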
4333       int FI = LHSDef->MI->getOperand(1).getIndex();
4334       MachineInstr &I = *Root.getParent();
4335       MachineBasicBlock *BB = I.getParent();
4336       const DebugLoc &DL = I.getDebugLoc();
4337       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4338 
4339       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
4340           .addFrameIndex(FI)
4341           .addReg(RHSDef->Reg)
4342           .setOperandDead(3); // Dead scc
4343     }
4344   }
4345 
4346   if (!isSGPR(SAddr))
4347     return std::nullopt;
4348 
4349   return {{
4350       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
4351       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4352   }};
4353 }
4354 
4355 // Check whether the flat scratch SVS swizzle bug affects this access.
4356 bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
4357     Register VAddr, Register SAddr, uint64_t ImmOffset) const {
4358   if (!Subtarget->hasFlatScratchSVSSwizzleBug())
4359     return false;
4360 
4361   // The bug affects the swizzling of SVS accesses if there is any carry out
4362   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
4363   // voffset to (soffset + inst_offset).
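  // Illustrative only: if the known maximum of VAddr ends in binary 11 and the
  // known maximum of SAddr + ImmOffset ends in binary 01, then 3 + 1 >= 4, so
  // a carry out of bit 1 is possible and the SVS form is rejected.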
4364   auto VKnown = KB->getKnownBits(VAddr);
4365   auto SKnown = KnownBits::computeForAddSub(
4366       true, false, KB->getKnownBits(SAddr),
4367       KnownBits::makeConstant(APInt(32, ImmOffset)));
4368   uint64_t VMax = VKnown.getMaxValue().getZExtValue();
4369   uint64_t SMax = SKnown.getMaxValue().getZExtValue();
4370   return (VMax & 3) + (SMax & 3) >= 4;
4371 }
4372 
4373 InstructionSelector::ComplexRendererFns
4374 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
4375   Register Addr = Root.getReg();
4376   Register PtrBase;
4377   int64_t ConstOffset;
4378   int64_t ImmOffset = 0;
4379 
4380   // Match the immediate offset first, which canonically is moved as low as
4381   // possible.
4382   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4383 
4384   Register OrigAddr = Addr;
4385   if (ConstOffset != 0 &&
4386       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
4387     Addr = PtrBase;
4388     ImmOffset = ConstOffset;
4389   }
4390 
4391   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4392   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4393     return std::nullopt;
4394 
4395   Register RHS = AddrDef->MI->getOperand(2).getReg();
4396   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4397     return std::nullopt;
4398 
4399   Register LHS = AddrDef->MI->getOperand(1).getReg();
4400   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4401 
4402   if (OrigAddr != Addr) {
4403     if (!isFlatScratchBaseLegalSVImm(OrigAddr))
4404       return std::nullopt;
4405   } else {
4406     if (!isFlatScratchBaseLegalSV(OrigAddr))
4407       return std::nullopt;
4408   }
4409 
4410   if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
4411     return std::nullopt;
4412 
4413   if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4414     int FI = LHSDef->MI->getOperand(1).getIndex();
4415     return {{
4416         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4417         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4418         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4419     }};
4420   }
4421 
4422   if (!isSGPR(LHS))
4423     return std::nullopt;
4424 
4425   return {{
4426       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4427       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4428       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4429   }};
4430 }
4431 
4432 InstructionSelector::ComplexRendererFns
4433 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4434   MachineInstr *MI = Root.getParent();
4435   MachineBasicBlock *MBB = MI->getParent();
4436   MachineFunction *MF = MBB->getParent();
4437   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4438 
4439   int64_t Offset = 0;
4440   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4441       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4442     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4443 
4444     // TODO: Should this be inside the render function? The iterator seems to
4445     // move.
4446     const uint32_t MaxOffset = SIInstrInfo::getMaxMUBUFImmOffset(*Subtarget);
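    // Illustrative only (hypothetical values): with MaxOffset == 4095, a
    // constant address of 0x2040 becomes vaddr = 0x2000 and an immediate
    // offset of 0x40.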
4447     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4448             HighBits)
4449         .addImm(Offset & ~MaxOffset);
4450 
4451     return {{[=](MachineInstrBuilder &MIB) { // rsrc
4452                MIB.addReg(Info->getScratchRSrcReg());
4453              },
4454              [=](MachineInstrBuilder &MIB) { // vaddr
4455                MIB.addReg(HighBits);
4456              },
4457              [=](MachineInstrBuilder &MIB) { // soffset
4458                // Use constant zero for soffset and rely on eliminateFrameIndex
4459                // to choose the appropriate frame register if need be.
4460                MIB.addImm(0);
4461              },
4462              [=](MachineInstrBuilder &MIB) { // offset
4463                MIB.addImm(Offset & MaxOffset);
4464              }}};
4465   }
4466 
4467   assert(Offset == 0 || Offset == -1);
4468 
4469   // Try to fold a frame index directly into the MUBUF vaddr field, and any
4470   // offsets.
4471   std::optional<int> FI;
4472   Register VAddr = Root.getReg();
4473   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4474     Register PtrBase;
4475     int64_t ConstOffset;
4476     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4477     if (ConstOffset != 0) {
4478       if (TII.isLegalMUBUFImmOffset(ConstOffset) &&
4479           (!STI.privateMemoryResourceIsRangeChecked() ||
4480            KB->signBitIsZero(PtrBase))) {
4481         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4482         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4483           FI = PtrBaseDef->getOperand(1).getIndex();
4484         else
4485           VAddr = PtrBase;
4486         Offset = ConstOffset;
4487       }
4488     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4489       FI = RootDef->getOperand(1).getIndex();
4490     }
4491   }
4492 
4493   return {{[=](MachineInstrBuilder &MIB) { // rsrc
4494              MIB.addReg(Info->getScratchRSrcReg());
4495            },
4496            [=](MachineInstrBuilder &MIB) { // vaddr
4497              if (FI)
4498                MIB.addFrameIndex(*FI);
4499              else
4500                MIB.addReg(VAddr);
4501            },
4502            [=](MachineInstrBuilder &MIB) { // soffset
4503              // Use constant zero for soffset and rely on eliminateFrameIndex
4504              // to choose the appropriate frame register if need be.
4505              MIB.addImm(0);
4506            },
4507            [=](MachineInstrBuilder &MIB) { // offset
4508              MIB.addImm(Offset);
4509            }}};
4510 }
4511 
4512 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4513                                                 int64_t Offset) const {
4514   if (!isUInt<16>(Offset))
4515     return false;
4516 
4517   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4518     return true;
4519 
4520   // On Southern Islands, instructions with a negative base value and an
4521   // offset don't seem to work.
4522   return KB->signBitIsZero(Base);
4523 }
4524 
4525 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4526                                                  int64_t Offset1,
4527                                                  unsigned Size) const {
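  // Illustrative only: each offset must be a multiple of Size and fit in 8
  // bits once divided by Size, e.g. with Size == 4, byte offsets 0..1020 in
  // steps of 4 are representable, while 1024 is not.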
4528   if (Offset0 % Size != 0 || Offset1 % Size != 0)
4529     return false;
4530   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4531     return false;
4532 
4533   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4534     return true;
4535 
4536   // On Southern Islands, instructions with a negative base value and an
4537   // offset don't seem to work.
4538   return KB->signBitIsZero(Base);
4539 }
4540 
4541 // Return whether the operation has the NoUnsignedWrap property.
4542 static bool isNoUnsignedWrap(MachineInstr *Addr) {
4543   return Addr->getOpcode() == TargetOpcode::G_OR ||
4544          (Addr->getOpcode() == TargetOpcode::G_PTR_ADD &&
4545           Addr->getFlag(MachineInstr::NoUWrap));
4546 }
4547 
4548 // Check that the base address of a flat scratch load/store, in the form
4549 // `base + offset`, is legal to be put in an SGPR/VGPR (i.e. unsigned per the
4550 // hardware requirement). We always treat the first operand as the base here.
4551 bool AMDGPUInstructionSelector::isFlatScratchBaseLegal(Register Addr) const {
4552   MachineInstr *AddrMI = getDefIgnoringCopies(Addr, *MRI);
4553 
4554   if (isNoUnsignedWrap(AddrMI))
4555     return true;
4556 
4557   // Starting with GFX12, VADDR and SADDR fields in VSCRATCH can use negative
4558   // values.
4559   if (AMDGPU::isGFX12Plus(STI))
4560     return true;
4561 
4562   Register LHS = AddrMI->getOperand(1).getReg();
4563   Register RHS = AddrMI->getOperand(2).getReg();
4564 
4565   if (AddrMI->getOpcode() == TargetOpcode::G_PTR_ADD) {
4566     std::optional<ValueAndVReg> RhsValReg =
4567         getIConstantVRegValWithLookThrough(RHS, *MRI);
4568     // If the immediate offset is negative and within a certain range, the
4569     // base address cannot also be negative: if it were, the sum would be
4570     // either negative or much larger than the valid range of scratch memory
4571     // a thread can access.
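    // Illustrative only: with an RHS constant of -16 (negative but greater
    // than -0x40000000), a non-negative base is implied, so the add is
    // accepted without checking the sign bit of LHS.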
4572     if (RhsValReg && RhsValReg->Value.getSExtValue() < 0 &&
4573         RhsValReg->Value.getSExtValue() > -0x40000000)
4574       return true;
4575   }
4576 
4577   return KB->signBitIsZero(LHS);
4578 }
4579 
4580 // Check that the address values in SGPR/VGPR are legal for flat scratch in
4581 // the form: SGPR + VGPR.
4582 bool AMDGPUInstructionSelector::isFlatScratchBaseLegalSV(Register Addr) const {
4583   MachineInstr *AddrMI = getDefIgnoringCopies(Addr, *MRI);
4584 
4585   if (isNoUnsignedWrap(AddrMI))
4586     return true;
4587 
4588   Register LHS = AddrMI->getOperand(1).getReg();
4589   Register RHS = AddrMI->getOperand(2).getReg();
4590   return KB->signBitIsZero(RHS) && KB->signBitIsZero(LHS);
4591 }
4592 
4593 // Check that the address values in SGPR/VGPR are legal for flat scratch in
4594 // the form: SGPR + VGPR + Imm.
4595 bool AMDGPUInstructionSelector::isFlatScratchBaseLegalSVImm(
4596     Register Addr) const {
4597   MachineInstr *AddrMI = getDefIgnoringCopies(Addr, *MRI);
4598   Register Base = AddrMI->getOperand(1).getReg();
4599   std::optional<DefinitionAndSourceRegister> BaseDef =
4600       getDefSrcRegIgnoringCopies(Base, *MRI);
4601   std::optional<ValueAndVReg> RHSOffset =
4602       getIConstantVRegValWithLookThrough(AddrMI->getOperand(2).getReg(), *MRI);
4603   assert(RHSOffset);
4604 
4605   // If the immediate offset is negative and within a certain range, the base
4606   // address cannot also be negative: if it were, the sum would be either
4607   // negative or much larger than the valid range of scratch memory a thread
4608   // can access.
4609   if (isNoUnsignedWrap(BaseDef->MI) &&
4610       (isNoUnsignedWrap(AddrMI) ||
4611        (RHSOffset->Value.getSExtValue() < 0 &&
4612         RHSOffset->Value.getSExtValue() > -0x40000000)))
4613     return true;
4614 
4615   Register LHS = BaseDef->MI->getOperand(1).getReg();
4616   Register RHS = BaseDef->MI->getOperand(2).getReg();
4617   return KB->signBitIsZero(RHS) && KB->signBitIsZero(LHS);
4618 }
4619 
4620 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4621                                                     unsigned ShAmtBits) const {
4622   assert(MI.getOpcode() == TargetOpcode::G_AND);
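  // Illustrative only: a 32-bit shift uses 5 bits of shift amount, so an AND
  // of the shift amount with a mask whose low 5 bits are ones (e.g. 31 or
  // 0xff) is redundant and can be dropped.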
4623 
4624   std::optional<APInt> RHS =
4625       getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4626   if (!RHS)
4627     return false;
4628 
4629   if (RHS->countr_one() >= ShAmtBits)
4630     return true;
4631 
4632   const APInt &LHSKnownZeros = KB->getKnownZeroes(MI.getOperand(1).getReg());
4633   return (LHSKnownZeros | *RHS).countr_one() >= ShAmtBits;
4634 }
4635 
4636 InstructionSelector::ComplexRendererFns
4637 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4638     MachineOperand &Root) const {
4639   Register Reg = Root.getReg();
4640   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4641 
4642   std::optional<DefinitionAndSourceRegister> Def =
4643     getDefSrcRegIgnoringCopies(Reg, *MRI);
4644   assert(Def && "this shouldn't be an optional result");
4645   Reg = Def->Reg;
4646 
4647   if (Register WaveBase = getWaveAddress(Def->MI)) {
4648     return {{
4649         [=](MachineInstrBuilder &MIB) { // rsrc
4650           MIB.addReg(Info->getScratchRSrcReg());
4651         },
4652         [=](MachineInstrBuilder &MIB) { // soffset
4653           MIB.addReg(WaveBase);
4654         },
4655         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4656     }};
4657   }
4658 
4659   int64_t Offset = 0;
4660 
4661   // FIXME: Copy check is a hack
4662   Register BasePtr;
4663   if (mi_match(Reg, *MRI,
4664                m_GPtrAdd(m_Reg(BasePtr),
4665                          m_any_of(m_ICst(Offset), m_Copy(m_ICst(Offset)))))) {
4666     if (!TII.isLegalMUBUFImmOffset(Offset))
4667       return {};
4668     MachineInstr *BasePtrDef = getDefIgnoringCopies(BasePtr, *MRI);
4669     Register WaveBase = getWaveAddress(BasePtrDef);
4670     if (!WaveBase)
4671       return {};
4672 
4673     return {{
4674         [=](MachineInstrBuilder &MIB) { // rsrc
4675           MIB.addReg(Info->getScratchRSrcReg());
4676         },
4677         [=](MachineInstrBuilder &MIB) { // soffset
4678           MIB.addReg(WaveBase);
4679         },
4680         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4681     }};
4682   }
4683 
4684   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4685       !TII.isLegalMUBUFImmOffset(Offset))
4686     return {};
4687 
4688   return {{
4689       [=](MachineInstrBuilder &MIB) { // rsrc
4690         MIB.addReg(Info->getScratchRSrcReg());
4691       },
4692       [=](MachineInstrBuilder &MIB) { // soffset
4693         MIB.addImm(0);
4694       },
4695       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4696   }};
4697 }
4698 
4699 std::pair<Register, unsigned>
4700 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4701   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4702   if (!RootDef)
4703     return std::pair(Root.getReg(), 0);
4704 
4705   int64_t ConstAddr = 0;
4706 
4707   Register PtrBase;
4708   int64_t Offset;
4709   std::tie(PtrBase, Offset) =
4710     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4711 
4712   if (Offset) {
4713     if (isDSOffsetLegal(PtrBase, Offset)) {
4714       // (add n0, c0)
4715       return std::pair(PtrBase, Offset);
4716     }
4717   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4718     // TODO
4719 
4720 
4721   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4722     // TODO
4723 
4724   }
4725 
4726   return std::pair(Root.getReg(), 0);
4727 }
4728 
4729 InstructionSelector::ComplexRendererFns
4730 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4731   Register Reg;
4732   unsigned Offset;
4733   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4734   return {{
4735       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4736       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4737     }};
4738 }
4739 
4740 InstructionSelector::ComplexRendererFns
4741 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4742   return selectDSReadWrite2(Root, 4);
4743 }
4744 
4745 InstructionSelector::ComplexRendererFns
4746 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4747   return selectDSReadWrite2(Root, 8);
4748 }
4749 
4750 InstructionSelector::ComplexRendererFns
4751 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4752                                               unsigned Size) const {
4753   Register Reg;
4754   unsigned Offset;
4755   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4756   return {{
4757       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4758       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4759       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4760     }};
4761 }
4762 
4763 std::pair<Register, unsigned>
4764 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4765                                                   unsigned Size) const {
4766   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4767   if (!RootDef)
4768     return std::pair(Root.getReg(), 0);
4769 
4770   int64_t ConstAddr = 0;
4771 
4772   Register PtrBase;
4773   int64_t Offset;
4774   std::tie(PtrBase, Offset) =
4775     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4776 
4777   if (Offset) {
4778     int64_t OffsetValue0 = Offset;
4779     int64_t OffsetValue1 = Offset + Size;
4780     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4781       // (add n0, c0)
4782       return std::pair(PtrBase, OffsetValue0 / Size);
4783     }
4784   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4785     // TODO
4786 
4787   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4788     // TODO
4789 
4790   }
4791 
4792   return std::pair(Root.getReg(), 0);
4793 }
4794 
4795 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4796 /// the base value with the constant offset. There may be intervening copies
4797 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4798 /// not match the pattern.
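/// For illustration (hypothetical MIR): for
///   %c:_(s64) = G_CONSTANT i64 16
///   %p:_(p1)  = G_PTR_ADD %base, %c
/// this returns {%base, 16}.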
4799 std::pair<Register, int64_t>
4800 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4801   Register Root, const MachineRegisterInfo &MRI) const {
4802   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4803   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4804     return {Root, 0};
4805 
4806   MachineOperand &RHS = RootI->getOperand(2);
4807   std::optional<ValueAndVReg> MaybeOffset =
4808       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4809   if (!MaybeOffset)
4810     return {Root, 0};
4811   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4812 }
4813 
4814 static void addZeroImm(MachineInstrBuilder &MIB) {
4815   MIB.addImm(0);
4816 }
4817 
4818 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4819 /// BasePtr is not valid, a null base pointer will be used.
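/// The resulting 128-bit descriptor is laid out as sub0_sub1 = BasePtr (or a
/// zero base) and sub2 = FormatLo, sub3 = FormatHi, matching the REG_SEQUENCE
/// instructions built below.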
4820 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4821                           uint32_t FormatLo, uint32_t FormatHi,
4822                           Register BasePtr) {
4823   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4824   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4825   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4826   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4827 
4828   B.buildInstr(AMDGPU::S_MOV_B32)
4829     .addDef(RSrc2)
4830     .addImm(FormatLo);
4831   B.buildInstr(AMDGPU::S_MOV_B32)
4832     .addDef(RSrc3)
4833     .addImm(FormatHi);
4834 
4835   // Build the half of the subregister with the constants before building the
4836   // full 128-bit register. If we are building multiple resource descriptors,
4837   // this will allow CSEing of the 2-component register.
4838   B.buildInstr(AMDGPU::REG_SEQUENCE)
4839     .addDef(RSrcHi)
4840     .addReg(RSrc2)
4841     .addImm(AMDGPU::sub0)
4842     .addReg(RSrc3)
4843     .addImm(AMDGPU::sub1);
4844 
4845   Register RSrcLo = BasePtr;
4846   if (!BasePtr) {
4847     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4848     B.buildInstr(AMDGPU::S_MOV_B64)
4849       .addDef(RSrcLo)
4850       .addImm(0);
4851   }
4852 
4853   B.buildInstr(AMDGPU::REG_SEQUENCE)
4854     .addDef(RSrc)
4855     .addReg(RSrcLo)
4856     .addImm(AMDGPU::sub0_sub1)
4857     .addReg(RSrcHi)
4858     .addImm(AMDGPU::sub2_sub3);
4859 
4860   return RSrc;
4861 }
4862 
4863 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4864                                 const SIInstrInfo &TII, Register BasePtr) {
4865   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4866 
4867   // FIXME: Why are half the "default" bits ignored based on the addressing
4868   // mode?
4869   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4870 }
4871 
4872 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4873                                const SIInstrInfo &TII, Register BasePtr) {
4874   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4875 
4876   // FIXME: Why are half the "default" bits ignored based on the addressing
4877   // mode?
4878   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4879 }
4880 
4881 AMDGPUInstructionSelector::MUBUFAddressData
4882 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4883   MUBUFAddressData Data;
4884   Data.N0 = Src;
4885 
4886   Register PtrBase;
4887   int64_t Offset;
4888 
4889   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4890   if (isUInt<32>(Offset)) {
4891     Data.N0 = PtrBase;
4892     Data.Offset = Offset;
4893   }
4894 
4895   if (MachineInstr *InputAdd
4896       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4897     Data.N2 = InputAdd->getOperand(1).getReg();
4898     Data.N3 = InputAdd->getOperand(2).getReg();
4899 
4900     // FIXME: Need to fix extra SGPR->VGPR copies inserted.
4901     // FIXME: We don't know that this was defined by operand 0.
4902     //
4903     // TODO: Remove this when we have copy folding optimizations after
4904     // RegBankSelect.
4905     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4906     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4907   }
4908 
4909   return Data;
4910 }
4911 
4912 /// Return whether the addr64 MUBUF mode should be used for the given address.
4913 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4914   // (ptr_add N2, N3) -> addr64, or
4915   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4916   if (Addr.N2)
4917     return true;
4918 
4919   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4920   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4921 }
4922 
4923 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4924 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4925 /// component.
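/// For illustration (hypothetical values): if the field only holds offsets up
/// to 4095, an incoming offset of 8192 is moved wholesale into a new SGPR via
/// S_MOV_B32 and the immediate becomes 0.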
4926 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4927   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4928   if (TII.isLegalMUBUFImmOffset(ImmOffset))
4929     return;
4930 
4931   // Illegal offset, store it in soffset.
4932   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4933   B.buildInstr(AMDGPU::S_MOV_B32)
4934     .addDef(SOffset)
4935     .addImm(ImmOffset);
4936   ImmOffset = 0;
4937 }
4938 
4939 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4940   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4941   Register &SOffset, int64_t &Offset) const {
4942   // FIXME: Predicates should stop this from reaching here.
4943   // addr64 bit was removed for volcanic islands.
4944   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4945     return false;
4946 
4947   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4948   if (!shouldUseAddr64(AddrData))
4949     return false;
4950 
4951   Register N0 = AddrData.N0;
4952   Register N2 = AddrData.N2;
4953   Register N3 = AddrData.N3;
4954   Offset = AddrData.Offset;
4955 
4956   // Base pointer for the SRD.
4957   Register SRDPtr;
4958 
4959   if (N2) {
4960     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4961       assert(N3);
4962       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4963         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4964         // addr64, and construct the default resource from a 0 address.
4965         VAddr = N0;
4966       } else {
4967         SRDPtr = N3;
4968         VAddr = N2;
4969       }
4970     } else {
4971       // N2 is not divergent.
4972       SRDPtr = N2;
4973       VAddr = N3;
4974     }
4975   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4976     // Use the default null pointer in the resource
4977     VAddr = N0;
4978   } else {
4979     // N0 -> offset, or
4980     // (N0 + C1) -> offset
4981     SRDPtr = N0;
4982   }
4983 
4984   MachineIRBuilder B(*Root.getParent());
4985   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4986   splitIllegalMUBUFOffset(B, SOffset, Offset);
4987   return true;
4988 }
4989 
4990 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4991   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4992   int64_t &Offset) const {
4993 
4994   // FIXME: Pattern should not reach here.
4995   if (STI.useFlatForGlobal())
4996     return false;
4997 
4998   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4999   if (shouldUseAddr64(AddrData))
5000     return false;
5001 
5002   // N0 -> offset, or
5003   // (N0 + C1) -> offset
5004   Register SRDPtr = AddrData.N0;
5005   Offset = AddrData.Offset;
5006 
5007   // TODO: Look through extensions for 32-bit soffset.
5008   MachineIRBuilder B(*Root.getParent());
5009 
5010   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
5011   splitIllegalMUBUFOffset(B, SOffset, Offset);
5012   return true;
5013 }
5014 
5015 InstructionSelector::ComplexRendererFns
5016 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
5017   Register VAddr;
5018   Register RSrcReg;
5019   Register SOffset;
5020   int64_t Offset = 0;
5021 
5022   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
5023     return {};
5024 
5025   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
5026   // pattern.
5027   return {{
5028       [=](MachineInstrBuilder &MIB) {  // rsrc
5029         MIB.addReg(RSrcReg);
5030       },
5031       [=](MachineInstrBuilder &MIB) { // vaddr
5032         MIB.addReg(VAddr);
5033       },
5034       [=](MachineInstrBuilder &MIB) { // soffset
5035         if (SOffset)
5036           MIB.addReg(SOffset);
5037         else if (STI.hasRestrictedSOffset())
5038           MIB.addReg(AMDGPU::SGPR_NULL);
5039         else
5040           MIB.addImm(0);
5041       },
5042       [=](MachineInstrBuilder &MIB) { // offset
5043         MIB.addImm(Offset);
5044       },
5045       addZeroImm, //  cpol
5046       addZeroImm, //  tfe
5047       addZeroImm  //  swz
5048     }};
5049 }
5050 
5051 InstructionSelector::ComplexRendererFns
5052 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
5053   Register RSrcReg;
5054   Register SOffset;
5055   int64_t Offset = 0;
5056 
5057   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
5058     return {};
5059 
5060   return {{
5061       [=](MachineInstrBuilder &MIB) {  // rsrc
5062         MIB.addReg(RSrcReg);
5063       },
5064       [=](MachineInstrBuilder &MIB) { // soffset
5065         if (SOffset)
5066           MIB.addReg(SOffset);
5067         else if (STI.hasRestrictedSOffset())
5068           MIB.addReg(AMDGPU::SGPR_NULL);
5069         else
5070           MIB.addImm(0);
5071       },
5072       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
5073       addZeroImm, //  cpol
5074       addZeroImm, //  tfe
5075       addZeroImm, //  swz
5076     }};
5077 }
5078 
5079 InstructionSelector::ComplexRendererFns
5080 AMDGPUInstructionSelector::selectBUFSOffset(MachineOperand &Root) const {
5081 
5082   Register SOffset = Root.getReg();
5083 
5084   if (STI.hasRestrictedSOffset() && mi_match(SOffset, *MRI, m_ZeroInt()))
5085     SOffset = AMDGPU::SGPR_NULL;
5086 
5087   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}};
5088 }
5089 
5090 /// Get an immediate that must fit in 32 bits, treated as zero-extended.
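/// For example, a G_CONSTANT of -1 yields 0xffffffff.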
5091 static std::optional<uint64_t>
5092 getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) {
5093   // getIConstantVRegVal sign-extends any value, so see if that matters.
5094   std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
5095   if (!OffsetVal || !isInt<32>(*OffsetVal))
5096     return std::nullopt;
5097   return Lo_32(*OffsetVal);
5098 }
5099 
5100 InstructionSelector::ComplexRendererFns
5101 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
5102   std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
5103   if (!OffsetVal)
5104     return {};
5105 
5106   std::optional<int64_t> EncodedImm =
5107       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
5108   if (!EncodedImm)
5109     return {};
5110 
5111   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
5112 }
5113 
5114 InstructionSelector::ComplexRendererFns
5115 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
5116   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
5117 
5118   std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
5119   if (!OffsetVal)
5120     return {};
5121 
5122   std::optional<int64_t> EncodedImm =
5123       AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
5124   if (!EncodedImm)
5125     return {};
5126 
5127   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
5128 }
5129 
5130 InstructionSelector::ComplexRendererFns
5131 AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
5132   // Match the (soffset + offset) pair as a 32-bit register base and
5133   // an immediate offset.
5134   Register SOffset;
5135   unsigned Offset;
5136   std::tie(SOffset, Offset) = AMDGPU::getBaseWithConstantOffset(
5137       *MRI, Root.getReg(), KB, /*CheckNUW*/ true);
5138   if (!SOffset)
5139     return std::nullopt;
5140 
5141   std::optional<int64_t> EncodedOffset =
5142       AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true);
5143   if (!EncodedOffset)
5144     return std::nullopt;
5145 
5146   assert(MRI->getType(SOffset) == LLT::scalar(32));
5147   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
5148            [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedOffset); }}};
5149 }
5150 
5151 // Variant of stripBitCast that returns the instruction instead of a
5152 // MachineOperand.
5153 static MachineInstr *stripBitCast(MachineInstr *MI, MachineRegisterInfo &MRI) {
5154   if (MI->getOpcode() == AMDGPU::G_BITCAST)
5155     return getDefIgnoringCopies(MI->getOperand(1).getReg(), MRI);
5156   return MI;
5157 }
5158 
5159 // Figure out if this is really an extract of the high 16 bits of a dword;
5160 // returns nullptr if it isn't.
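// For illustration (hypothetical MIR), %hi:_(s16) = G_TRUNC (G_LSHR %x:_(s32), 16)
// is recognized as an extract of the high half of %x.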
5161 static MachineInstr *isExtractHiElt(MachineInstr *Inst,
5162                                     MachineRegisterInfo &MRI) {
5163   Inst = stripBitCast(Inst, MRI);
5164 
5165   if (Inst->getOpcode() != AMDGPU::G_TRUNC)
5166     return nullptr;
5167 
5168   MachineInstr *TruncOp =
5169       getDefIgnoringCopies(Inst->getOperand(1).getReg(), MRI);
5170   TruncOp = stripBitCast(TruncOp, MRI);
5171 
5172   // G_LSHR x, (G_CONSTANT i32 16)
5173   if (TruncOp->getOpcode() == AMDGPU::G_LSHR) {
5174     auto SrlAmount = getIConstantVRegValWithLookThrough(
5175         TruncOp->getOperand(2).getReg(), MRI);
5176     if (SrlAmount && SrlAmount->Value.getZExtValue() == 16) {
5177       MachineInstr *SrlOp =
5178           getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
5179       return stripBitCast(SrlOp, MRI);
5180     }
5181   }
5182 
5183   // G_SHUFFLE_VECTOR x, y, shufflemask(1, 1|0)
5184   //    1, 0 swaps the low/high 16 bits.
5185   //    1, 1 sets the high 16 bits to be the same as the low 16.
5186   // In either case, it selects the high elements.
5187   if (TruncOp->getOpcode() == AMDGPU::G_SHUFFLE_VECTOR) {
5188     assert(MRI.getType(TruncOp->getOperand(0).getReg()) ==
5189            LLT::fixed_vector(2, 16));
5190 
5191     ArrayRef<int> Mask = TruncOp->getOperand(3).getShuffleMask();
5192     assert(Mask.size() == 2);
5193 
5194     if (Mask[0] == 1 && Mask[1] <= 1) {
5195       MachineInstr *LHS =
5196           getDefIgnoringCopies(TruncOp->getOperand(1).getReg(), MRI);
5197       return stripBitCast(LHS, MRI);
5198     }
5199   }
5200 
5201   return nullptr;
5202 }
5203 
5204 std::pair<Register, unsigned>
5205 AMDGPUInstructionSelector::selectVOP3PMadMixModsImpl(MachineOperand &Root,
5206                                                      bool &Matched) const {
5207   Matched = false;
5208 
5209   Register Src;
5210   unsigned Mods;
5211   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
5212 
5213   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
5214   if (MI->getOpcode() == AMDGPU::G_FPEXT) {
5215     MachineOperand *MO = &MI->getOperand(1);
5216     Src = MO->getReg();
5217     MI = getDefIgnoringCopies(Src, *MRI);
5218 
5219     assert(MRI->getType(Src) == LLT::scalar(16));
5220 
5221     // See through bitcasts.
5222     // FIXME: Would be nice to use stripBitCast here.
5223     if (MI->getOpcode() == AMDGPU::G_BITCAST) {
5224       MO = &MI->getOperand(1);
5225       Src = MO->getReg();
5226       MI = getDefIgnoringCopies(Src, *MRI);
5227     }
5228 
5229     const auto CheckAbsNeg = [&]() {
5230       // Be careful about folding modifiers if we already have an abs. fneg is
5231       // applied last, so we don't want to apply an earlier fneg.
5232       if ((Mods & SISrcMods::ABS) == 0) {
5233         unsigned ModsTmp;
5234         std::tie(Src, ModsTmp) = selectVOP3ModsImpl(*MO);
5235         MI = getDefIgnoringCopies(Src, *MRI);
5236 
5237         if ((ModsTmp & SISrcMods::NEG) != 0)
5238           Mods ^= SISrcMods::NEG;
5239 
5240         if ((ModsTmp & SISrcMods::ABS) != 0)
5241           Mods |= SISrcMods::ABS;
5242       }
5243     };
5244 
5245     CheckAbsNeg();
5246 
5247     // op_sel/op_sel_hi decide the source type and source.
5248     // If the source's op_sel_hi is set, it indicates a conversion from fp16.
5249     // If the source's op_sel is set, it picks the high half of the source
5250     // register.
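    // Illustrative only: a G_FPEXT whose f16 source is the high half of a
    // dword (e.g. a G_TRUNC of a G_LSHR by 16) gets both OP_SEL_1 and
    // OP_SEL_0 set below.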
5251 
5252     Mods |= SISrcMods::OP_SEL_1;
5253 
5254     if (MachineInstr *ExtractHiEltMI = isExtractHiElt(MI, *MRI)) {
5255       Mods |= SISrcMods::OP_SEL_0;
5256       MI = ExtractHiEltMI;
5257       MO = &MI->getOperand(0);
5258       Src = MO->getReg();
5259 
5260       CheckAbsNeg();
5261     }
5262 
5263     Matched = true;
5264   }
5265 
5266   return {Src, Mods};
5267 }
5268 
5269 InstructionSelector::ComplexRendererFns
5270 AMDGPUInstructionSelector::selectVOP3PMadMixModsExt(
5271     MachineOperand &Root) const {
5272   Register Src;
5273   unsigned Mods;
5274   bool Matched;
5275   std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched);
5276   if (!Matched)
5277     return {};
5278 
5279   return {{
5280       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
5281       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
5282   }};
5283 }
5284 
5285 InstructionSelector::ComplexRendererFns
5286 AMDGPUInstructionSelector::selectVOP3PMadMixMods(MachineOperand &Root) const {
5287   Register Src;
5288   unsigned Mods;
5289   bool Matched;
5290   std::tie(Src, Mods) = selectVOP3PMadMixModsImpl(Root, Matched);
5291 
5292   return {{
5293       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
5294       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
5295   }};
5296 }
5297 
5298 bool AMDGPUInstructionSelector::selectSBarrierSignalIsfirst(
5299     MachineInstr &I, Intrinsic::ID IntrID) const {
5300   MachineBasicBlock *MBB = I.getParent();
5301   const DebugLoc &DL = I.getDebugLoc();
5302   Register CCReg = I.getOperand(0).getReg();
5303 
5304   bool HasM0 = IntrID == Intrinsic::amdgcn_s_barrier_signal_isfirst_var;
5305 
5306   if (HasM0) {
5307     auto CopyMIB = BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
5308                        .addReg(I.getOperand(2).getReg());
5309     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_BARRIER_SIGNAL_ISFIRST_M0));
5310     if (!constrainSelectedInstRegOperands(*CopyMIB, TII, TRI, RBI))
5311       return false;
5312   } else {
5313     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM))
5314         .addImm(I.getOperand(2).getImm());
5315   }
5316 
5317   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), CCReg).addReg(AMDGPU::SCC);
5318 
5319   I.eraseFromParent();
5320   return RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32_XM0_XEXECRegClass,
5321                                       *MRI);
5322 }
5323 
5324 unsigned getNamedBarrierOp(bool HasInlineConst, Intrinsic::ID IntrID) {
5325   if (HasInlineConst) {
5326     switch (IntrID) {
5327     default:
5328       llvm_unreachable("not a named barrier op");
5329     case Intrinsic::amdgcn_s_barrier_init:
5330       return AMDGPU::S_BARRIER_INIT_IMM;
5331     case Intrinsic::amdgcn_s_barrier_join:
5332       return AMDGPU::S_BARRIER_JOIN_IMM;
5333     case Intrinsic::amdgcn_s_wakeup_barrier:
5334       return AMDGPU::S_WAKEUP_BARRIER_IMM;
5335     case Intrinsic::amdgcn_s_get_barrier_state:
5336       return AMDGPU::S_GET_BARRIER_STATE_IMM;
5337     };
5338   } else {
5339     switch (IntrID) {
5340     default:
5341       llvm_unreachable("not a named barrier op");
5342     case Intrinsic::amdgcn_s_barrier_init:
5343       return AMDGPU::S_BARRIER_INIT_M0;
5344     case Intrinsic::amdgcn_s_barrier_join:
5345       return AMDGPU::S_BARRIER_JOIN_M0;
5346     case Intrinsic::amdgcn_s_wakeup_barrier:
5347       return AMDGPU::S_WAKEUP_BARRIER_M0;
5348     case Intrinsic::amdgcn_s_get_barrier_state:
5349       return AMDGPU::S_GET_BARRIER_STATE_M0;
5350     };
5351   }
5352 }
5353 
5354 bool AMDGPUInstructionSelector::selectNamedBarrierInst(
5355     MachineInstr &I, Intrinsic::ID IntrID) const {
5356   MachineBasicBlock *MBB = I.getParent();
5357   const DebugLoc &DL = I.getDebugLoc();
5358   MachineOperand BarOp = IntrID == Intrinsic::amdgcn_s_get_barrier_state
5359                              ? I.getOperand(2)
5360                              : I.getOperand(1);
5361   std::optional<int64_t> BarValImm =
5362       getIConstantVRegSExtVal(BarOp.getReg(), *MRI);
5363   Register M0Val;
5364   Register TmpReg0;
5365 
5366   // For S_BARRIER_INIT, the member count is always read from M0[16:22].
5367   if (IntrID == Intrinsic::amdgcn_s_barrier_init) {
5368     Register MemberCount = I.getOperand(2).getReg();
5369     TmpReg0 = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
5370     // TODO: This should be expanded during legalization so that the S_LSHL
5371     // and S_OR can be constant-folded.
5372     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
5373         .addImm(16)
5374         .addReg(MemberCount);
5375     M0Val = TmpReg0;
5376   }
5377 
5378   // If not inlinable, get reference to barrier depending on the instruction
5379   if (!BarValImm) {
5380     if (IntrID == Intrinsic::amdgcn_s_barrier_init) {
5381       // If the reference to the barrier id is not an inlinable constant, it
5382       // must be passed in M0[4:0]. Perform an OR with the member count to
5383       // include it in M0 for S_BARRIER_INIT.
5384       Register TmpReg1 = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
5385       BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_OR_B32), TmpReg1)
5386           .addReg(BarOp.getReg())
5387           .addReg(TmpReg0);
5388       M0Val = TmpReg1;
5389     } else {
5390       M0Val = BarOp.getReg();
5391     }
5392   }
5393 
5394   // Build copy to M0 if needed. For S_BARRIER_INIT, M0 is always required.
5395   if (M0Val) {
5396     auto CopyMIB =
5397         BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::M0).addReg(M0Val);
5398     constrainSelectedInstRegOperands(*CopyMIB, TII, TRI, RBI);
5399   }
5400 
5401   MachineInstrBuilder MIB;
5402   unsigned Opc = getNamedBarrierOp(BarValImm.has_value(), IntrID);
5403   MIB = BuildMI(*MBB, &I, DL, TII.get(Opc));
5404 
5405   if (IntrID == Intrinsic::amdgcn_s_get_barrier_state)
5406     MIB.addDef(I.getOperand(0).getReg());
5407 
5408   if (BarValImm)
5409     MIB.addImm(*BarValImm);
5410 
5411   I.eraseFromParent();
5412   return true;
5413 }
5414 bool AMDGPUInstructionSelector::selectSBarrierLeave(MachineInstr &I) const {
5415   MachineBasicBlock *BB = I.getParent();
5416   const DebugLoc &DL = I.getDebugLoc();
5417   Register CCReg = I.getOperand(0).getReg();
5418 
5419   BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_BARRIER_LEAVE));
5420   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg).addReg(AMDGPU::SCC);
5421 
5422   I.eraseFromParent();
5423   return RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32_XM0_XEXECRegClass,
5424                                       *MRI);
5425 }
5426 
5427 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
5428                                                  const MachineInstr &MI,
5429                                                  int OpIdx) const {
5430   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
5431          "Expected G_CONSTANT");
5432   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
5433 }
5434 
5435 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
5436                                                 const MachineInstr &MI,
5437                                                 int OpIdx) const {
5438   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
5439          "Expected G_CONSTANT");
5440   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
5441 }
5442 
5443 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
5444                                                  const MachineInstr &MI,
5445                                                  int OpIdx) const {
5446   assert(OpIdx == -1);
5447 
5448   const MachineOperand &Op = MI.getOperand(1);
5449   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
5450     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
5451   else {
5452     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
5453     MIB.addImm(Op.getCImm()->getSExtValue());
5454   }
5455 }
5456 
5457 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
5458                                                 const MachineInstr &MI,
5459                                                 int OpIdx) const {
5460   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
5461          "Expected G_CONSTANT");
5462   MIB.addImm(MI.getOperand(1).getCImm()->getValue().popcount());
5463 }
5464 
5465 /// This only really exists to satisfy DAG type checking machinery, so it is
5466 /// a no-op here.
5467 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
5468                                                 const MachineInstr &MI,
5469                                                 int OpIdx) const {
5470   MIB.addImm(MI.getOperand(OpIdx).getImm());
5471 }
5472 
5473 void AMDGPUInstructionSelector::renderOpSelTImm(MachineInstrBuilder &MIB,
5474                                                 const MachineInstr &MI,
5475                                                 int OpIdx) const {
5476   assert(OpIdx >= 0 && "expected to match an immediate operand");
5477   MIB.addImm(MI.getOperand(OpIdx).getImm() ? (int64_t)SISrcMods::OP_SEL_0 : 0);
5478 }
5479 
5480 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
5481                                                   const MachineInstr &MI,
5482                                                   int OpIdx) const {
5483   assert(OpIdx >= 0 && "expected to match an immediate operand");
5484   MIB.addImm(MI.getOperand(OpIdx).getImm() &
5485              (AMDGPU::isGFX12Plus(STI) ? AMDGPU::CPol::ALL
5486                                        : AMDGPU::CPol::ALL_pregfx12));
5487 }
5488 
5489 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
5490                                                  const MachineInstr &MI,
5491                                                  int OpIdx) const {
5492   assert(OpIdx >= 0 && "expected to match an immediate operand");
5493   const bool Swizzle = MI.getOperand(OpIdx).getImm() &
5494                        (AMDGPU::isGFX12Plus(STI) ? AMDGPU::CPol::SWZ
5495                                                  : AMDGPU::CPol::SWZ_pregfx12);
5496   MIB.addImm(Swizzle);
5497 }
5498 
5499 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
5500                                              const MachineInstr &MI,
5501                                              int OpIdx) const {
5502   assert(OpIdx >= 0 && "expected to match an immediate operand");
5503   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
5504 }
5505 
5506 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
5507                                                  const MachineInstr &MI,
5508                                                  int OpIdx) const {
5509   MIB.addFrameIndex(MI.getOperand(1).getIndex());
5510 }
5511 
5512 void AMDGPUInstructionSelector::renderFPPow2ToExponent(MachineInstrBuilder &MIB,
5513                                                        const MachineInstr &MI,
5514                                                        int OpIdx) const {
5515   const APFloat &APF = MI.getOperand(1).getFPImm()->getValueAPF();
5516   int ExpVal = APF.getExactLog2Abs();
5517   assert(ExpVal != INT_MIN);
5518   MIB.addImm(ExpVal);
5519 }
5520 
5521 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
5522   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
5523 }
5524 
5525 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
5526   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
5527 }
5528 
5529 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
5530   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
5531 }
5532 
5533 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
5534   return TII.isInlineConstant(Imm);
5535 }
5536