//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

static cl::opt<bool> AllowRiskySelect(
  "amdgpu-global-isel-risky-select",
  cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
  cl::init(false),
  cl::ReallyHidden);

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                        CodeGenCoverage &CoverageInfo,
                                        ProfileSummaryInfo *PSI,
                                        BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

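// True if Reg holds a wave-sized boolean: it is either assigned to the VCC
// register bank, or it is an s1 value constrained to the subtarget's boolean
// register class. Physical registers are never treated as VCC values here.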
bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  // The verifier is oblivious to s1 being a valid value for wavesize registers.
  if (Reg.isPhysical())
    return false;

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    return RC->hasSuperClassEq(TRI.getBoolRC()) &&
           Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

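// Rewrite a copy-like intrinsic (wqm, softwqm, strict wwm/wqm) in place: swap
// in the target pseudo NewOpc, drop the intrinsic ID operand, add an implicit
// EXEC use, and constrain source and destination to a common register class.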
bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
  MI.setDesc(TII.get(NewOpc));
  MI.RemoveOperand(1); // Remove intrinsic ID.
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, *MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
    }

    if (!isVCC(SrcReg, *MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
        return false;

      const TargetRegisterClass *SrcRC
        = TRI.getConstrainedRegClassForOperand(Src, *MRI);

      Optional<ValueAndVReg> ConstVal =
          getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
      if (ConstVal) {
        unsigned MovOpc =
            STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
        BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
            .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
      } else {
        Register MaskedReg = MRI->createVirtualRegister(SrcRC);

        // We can't trust the high bits at this point, so clear them.

        // TODO: Skip masking high bits if def is known boolean.

        unsigned AndOpc =
            TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
        BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
            .addImm(1)
            .addReg(SrcReg);
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
            .addImm(0)
            .addReg(MaskedReg);
      }

      if (!MRI->getRegClassOrNull(SrcReg))
        MRI->setRegClass(SrcReg, SrcRC);
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
      return false;

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (MO.getReg().isPhysical())
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, *MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
  }
  return true;
}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);
  if (DefTy == LLT::scalar(1)) {
    if (!AllowRiskySelect) {
      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
  }

  // TODO: Verify this doesn't have insane operands (e.g. a VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI->getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  // TODO: Verify that all registers have the same bank
  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}

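// Extract the requested 32-bit half of a 64-bit operand. Register operands
// become a subregister copy into a fresh vreg of SubRC; immediate operands are
// split into their low or high 32 bits.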
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  Register DstReg = MRI->createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    Register Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("don't know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
      DstRB->getID() != AMDGPU::VCCRegBankID)
    return false;

  bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
                            STI.isWave64());
  I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));

  // Dead implicit-def of scc
  I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                         true, // isImp
                                         false, // isKill
                                         true)); // isDead
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  LLT Ty = MRI->getType(DstReg);
  if (Ty.isVector())
    return false;

  unsigned Size = Ty.getSizeInBits();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;

    Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

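  // 64-bit case: split both operands into 32-bit halves, add the low halves,
  // chain the carry into the high halves (S_ADD_U32/S_ADDC_U32 on the SALU
  // path, V_ADD_CO_U32/V_ADDC_U32 on the VALU path), and recombine the two
  // results with a REG_SEQUENCE.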
  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI->createVirtualRegister(&HalfRC);
  Register DstHi = MRI->createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI->createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
  MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register Dst0Reg = I.getOperand(0).getReg();
  Register Dst1Reg = I.getOperand(1).getReg();
  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
                     I.getOpcode() == AMDGPU::G_UADDE;
  const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
                          I.getOpcode() == AMDGPU::G_USUBE;

  if (isVCC(Dst1Reg, *MRI)) {
    unsigned NoCarryOpc =
        IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
    unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
    I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    I.addOperand(*MF, MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

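  // Scalar path: the carry is modeled through SCC, so an incoming carry is
  // first copied into SCC and the resulting carry-out is copied back out of it.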
  Register Src0Reg = I.getOperand(2).getReg();
  Register Src1Reg = I.getOperand(3).getReg();

  if (HasCarryIn) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(I.getOperand(4).getReg());
  }

  unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
    .addReg(AMDGPU::SCC);

  if (!MRI->getRegClassOrNull(Dst1Reg))
    MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (HasCarryIn &&
      !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
                                    AMDGPU::SReg_32RegClass, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

// TODO: We should probably legalize these to only using 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();

  // TODO: Should handle any multiple of 32 offset.
  unsigned Offset = I.getOperand(2).getImm();
  if (Offset % 32 != 0 || DstSize > 128)
    return false;

  // 16-bit operations really use 32-bit registers.
  // FIXME: Probably should not allow 16-bit G_EXTRACT results.
  if (DstSize == 16)
    DstSize = 32;

  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC)
    return false;
  unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
                                                         DstSize / 32);
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
  if (!SrcRC)
    return false;

  SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
                                    *SrcRC, I.getOperand(1));
  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg, 0, SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return selectImpl(MI, *CoverageInfo);

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, *MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg0);
  LLT SrcTy = MRI->getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, 0, SubRegs[I]);

    // Make sure the subregister index is valid for the source register.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
    if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
      return false;

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

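// Select a G_BUILD_VECTOR_TRUNC of two s32 sources into a v2s16 SGPR result.
// Two constants fold into a single S_MOV_B32; otherwise the sources are packed
// with the S_PACK_{LL,LH,HH}_B32_B16 family, using the LH/HH forms when a
// source is already a 16-bit logical shift right.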
bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
  MachineInstr &MI) const {
  if (selectImpl(MI, *CoverageInfo))
    return true;

  const LLT S32 = LLT::scalar(32);
  const LLT V2S16 = LLT::fixed_vector(2, 16);

  Register Dst = MI.getOperand(0).getReg();
  if (MRI->getType(Dst) != V2S16)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstBank->getID() != AMDGPU::SGPRRegBankID)
    return false;

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  if (MRI->getType(Src0) != S32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
  if (ConstSrc1) {
    auto ConstSrc0 =
        getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
    if (ConstSrc0) {
      const int64_t K0 = ConstSrc0->Value.getSExtValue();
      const int64_t K1 = ConstSrc1->Value.getSExtValue();
      uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
      uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;

      BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
        .addImm(Lo16 | (Hi16 << 16));
      MI.eraseFromParent();
      return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
    }
  }

  // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
  MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
  if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
    MI.setDesc(TII.get(AMDGPU::COPY));
    MI.RemoveOperand(2);
    return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
           RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
  }

  Register ShiftSrc0;
  Register ShiftSrc1;

  // With multiple uses of the shift, this will duplicate the shift and
  // increase register pressure.
  //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
  //  => (S_PACK_HH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
  //  => (S_PACK_LH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, $src1)
  //  => (S_PACK_LL_B32_B16 $src0, $src1)

  bool Shift0 = mi_match(
      Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));

  bool Shift1 = mi_match(
      Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));

  unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
  if (Shift0 && Shift1) {
    Opc = AMDGPU::S_PACK_HH_B32_B16;
    MI.getOperand(1).setReg(ShiftSrc0);
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift1) {
    Opc = AMDGPU::S_PACK_LH_B32_B16;
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
    // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
    auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
      .addReg(ShiftSrc0)
      .addImm(16);

    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  MI.setDesc(TII.get(Opc));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

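// Select G_INSERT as an INSERT_SUBREG. Only insertions at a 32-bit aligned
// offset with a 32-bit multiple size are handled here.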
bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register Src0Reg = I.getOperand(1).getReg();
  Register Src1Reg = I.getOperand(2).getReg();
  LLT Src1Ty = MRI->getType(Src1Reg);

  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
  unsigned InsSize = Src1Ty.getSizeInBits();

  int64_t Offset = I.getOperand(3).getImm();

  // FIXME: These cases should have been illegal and unnecessary to check here.
  if (Offset % 32 != 0 || InsSize % 32 != 0)
    return false;

  // Currently not handled by getSubRegFromChannel.
  if (InsSize > 128)
    return false;

  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
  if (SubReg == AMDGPU::NoSubRegister)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
  const TargetRegisterClass *Src0RC =
    TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
  const TargetRegisterClass *Src1RC =
    TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);

  // Deal with weird cases where the class only partially supports the subreg
  // index.
  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
  if (!Src0RC || !Src1RC)
    return false;

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
    .addReg(Src0Reg)
    .addReg(Src1Reg)
    .addImm(SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register OffsetReg = MI.getOperand(2).getReg();
  Register WidthReg = MI.getOperand(3).getReg();

  assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
         "scalar BFX instructions are expanded in regbankselect");
  assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
         "64-bit vector BFX instructions are expanded in regbankselect");

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
  unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
                 .addReg(SrcReg)
                 .addReg(OffsetReg)
                 .addReg(WidthReg);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
  if (STI.getLDSBankCount() != 16)
    return selectImpl(MI, *CoverageInfo);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register M0Val = MI.getOperand(6).getReg();
  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
    return false;

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.

  Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
    .addImm(2)
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm()); // $attrchan

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
    .addImm(0)                          // $src0_modifiers
    .addReg(Src0)                       // $src0
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm())  // $attrchan
    .addImm(0)                          // $src2_modifiers
    .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
    .addImm(MI.getOperand(5).getImm())  // $high
    .addImm(0)                          // $clamp
    .addImm(0);                         // $omod

  MI.eraseFromParent();
  return true;
}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it is
// still required to abide by the 1 SGPR rule. Fix this up if we might have
// multiple SGPRs.
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
  // With a constant bus limit of at least 2, there's no issue.
  if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
    return selectImpl(MI, *CoverageInfo);

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register VDst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(2).getReg();
  Register LaneSelect = MI.getOperand(3).getReg();
  Register VDstIn = MI.getOperand(4).getReg();

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);

  Optional<ValueAndVReg> ConstSelect =
      getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
  if (ConstSelect) {
    // The selector has to be an inline immediate, so we can use whatever for
    // the other operands.
    MIB.addReg(Val);
    MIB.addImm(ConstSelect->Value.getSExtValue() &
               maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
  } else {
    Optional<ValueAndVReg> ConstVal =
        getIConstantVRegValWithLookThrough(Val, *MRI);

    // If the value written is an inline immediate, we can get away without a
    // copy to m0.
    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
                                                 STI.hasInv2PiInlineImm())) {
      MIB.addImm(ConstVal->Value.getSExtValue());
      MIB.addReg(LaneSelect);
    } else {
      MIB.addReg(Val);

      // If the lane selector was originally in a VGPR and copied with
      // readfirstlane, there's a hazard to read the same SGPR from the
      // VALU. Constrain to a different SGPR to help avoid needing a nop later.
      RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);

      BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
        .addReg(LaneSelect);
      MIB.addReg(AMDGPU::M0);
    }
  }

  MIB.addReg(VDstIn);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();

  LLT Ty = MRI->getType(Dst0);
  unsigned Opc;
  if (Ty == LLT::scalar(32))
    Opc = AMDGPU::V_DIV_SCALE_F32_e64;
  else if (Ty == LLT::scalar(64))
    Opc = AMDGPU::V_DIV_SCALE_F64_e64;
  else
    return false;

  // TODO: Match source modifiers.

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  Register Numer = MI.getOperand(3).getReg();
  Register Denom = MI.getOperand(4).getReg();
  unsigned ChooseDenom = MI.getOperand(5).getImm();

  Register Src0 = ChooseDenom != 0 ? Numer : Denom;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
    .addDef(Dst1)
    .addImm(0)     // $src0_modifiers
    .addUse(Src0)  // $src0
    .addImm(0)     // $src1_modifiers
    .addUse(Denom) // $src1
    .addImm(0)     // $src2_modifiers
    .addUse(Numer) // $src2
    .addImm(0)     // $clamp
    .addImm(0);    // $omod

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg })
      MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

    return true;
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_strict_wwm:
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
  case Intrinsic::amdgcn_strict_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
  case Intrinsic::amdgcn_writelane:
    return selectWritelane(I);
  case Intrinsic::amdgcn_div_scale:
    return selectDivScale(I);
  case Intrinsic::amdgcn_icmp:
    return selectIntrinsicIcmp(I);
  case Intrinsic::amdgcn_ballot:
    return selectBallot(I);
  case Intrinsic::amdgcn_reloc_constant:
    return selectRelocConstant(I);
  case Intrinsic::amdgcn_groupstaticsize:
    return selectGroupStaticSize(I);
  case Intrinsic::returnaddress:
    return selectReturnAddress(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

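// Map an integer predicate to the VALU compare (VOPC _e64 form) that writes a
// wave-mask result for a 32- or 64-bit comparison; returns -1 for unsupported
// sizes.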
static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  Register CCReg = I.getOperand(0).getReg();
  if (!isVCC(CCReg, *MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
            .add(I.getOperand(2))
            .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
            I.getOperand(0).getReg())
            .add(I.getOperand(2))
            .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

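// amdgcn.icmp produces a full wave-mask result in SGPRs rather than a VCC-bank
// boolean. A non-integer predicate leaves the result undefined, so it selects
// to an IMPLICIT_DEF.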
bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
  Register Dst = I.getOperand(0).getReg();
  if (isVCC(Dst, *MRI))
    return false;

  if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
  if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
    MachineInstr *ICmp =
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);

    if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                                      *TRI.getBoolRC(), *MRI))
      return false;
    I.eraseFromParent();
    return true;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
                           .add(I.getOperand(2))
                           .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
                               *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

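// amdgcn.ballot with a known constant argument folds away: false becomes a
// zero mask and true becomes a copy of EXEC. Otherwise the compare result
// being balloted is already the wanted mask, so a plain copy suffices.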
bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register DstReg = I.getOperand(0).getReg();
  const unsigned Size = MRI->getType(DstReg).getSizeInBits();
  const bool Is64 = Size == 64;

  if (Size != STI.getWavefrontSize())
    return false;

  Optional<ValueAndVReg> Arg =
      getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);

  if (Arg.hasValue()) {
    const int64_t Value = Arg.getValue().Value.getSExtValue();
    if (Value == 0) {
      unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
      BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
    } else if (Value == -1) { // all ones
      Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
    } else
      return false;
  } else {
    Register SrcReg = I.getOperand(2).getReg();
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;

  Module *M = MF->getFunction().getParent();
  const MDNode *Metadata = I.getOperand(2).getMetadata();
  auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
  auto RelocSymbol = cast<GlobalVariable>(
    M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));

  MachineBasicBlock *BB = I.getParent();
  BuildMI(*BB, &I, I.getDebugLoc(),
          TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
    .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);

  I.eraseFromParent();
  return true;
}

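// The LDS size is known at compile time for AMDHSA and AMDPAL, so it is
// emitted directly as an immediate; other targets materialize it through an
// absolute relocation against the groupstaticsize symbol.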
bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
  Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();

  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
    AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;

  MachineBasicBlock *MBB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);

  if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    MIB.addImm(MFI->getLDSSize());
  } else {
    Module *M = MF->getFunction().getParent();
    const GlobalValue *GV
      = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
    MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
  MachineBasicBlock *MBB = I.getParent();
  MachineFunction &MF = *MBB->getParent();
  const DebugLoc &DL = I.getDebugLoc();

  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  unsigned Depth = I.getOperand(2).getImm();

  const TargetRegisterClass *RC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
      !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
    return false;

  // Check for kernel and shader functions
  if (Depth != 0 ||
      MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(0);
    I.eraseFromParent();
    return true;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  // Get the return address reg and mark it as an implicit live-in
  Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
  Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
                                             AMDGPU::SReg_64RegClass, DL);
  BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(LiveIn);
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
  // SelectionDAG uses for wave32 vs wave64.
  MachineBasicBlock *BB = MI.getParent();
  BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
      .add(MI.getOperand(1));

  Register Reg = MI.getOperand(1).getReg();
  MI.eraseFromParent();

  if (!MRI->getRegClassOrNull(Reg))
    MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
  return true;
}

bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned IndexOperand = MI.getOperand(7).getImm();
  bool WaveRelease = MI.getOperand(8).getImm() != 0;
  bool WaveDone = MI.getOperand(9).getImm() != 0;

  if (WaveDone && !WaveRelease)
    report_fatal_error("ds_ordered_count: wave_done requires wave_release");

  unsigned OrderedCountIndex = IndexOperand & 0x3f;
  IndexOperand &= ~0x3f;
  unsigned CountDw = 0;

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
    CountDw = (IndexOperand >> 24) & 0xf;
    IndexOperand &= ~(0xf << 24);

    if (CountDw < 1 || CountDw > 4) {
      report_fatal_error(
        "ds_ordered_count: dword count must be between 1 and 4");
    }
  }

  if (IndexOperand)
    report_fatal_error("ds_ordered_count: bad index operand");

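  // Pack the DS_ORDERED_COUNT controls into the 16-bit offset field: offset0
  // (the low byte) holds the ordered-count index shifted left by 2; offset1
  // (the high byte) holds wave_release (bit 0), wave_done (bit 1), the shader
  // type (bits 3:2), the instruction kind (bit 4), and on GFX10+ the dword
  // count minus one (bits 7:6).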
  unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
  unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);

  unsigned Offset0 = OrderedCountIndex << 2;
  unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
                     (Instruction << 4);

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
    Offset1 |= (CountDw - 1) << 6;

  unsigned Offset = Offset0 | (Offset1 << 8);

  Register M0Val = MI.getOperand(2).getReg();
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);

  Register DstReg = MI.getOperand(0).getReg();
  Register ValReg = MI.getOperand(3).getReg();
  MachineInstrBuilder DS =
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
      .addReg(ValReg)
      .addImm(Offset)
      .cloneMemRefs(MI);

  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
  MI.eraseFromParent();
  return Ret;
}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
  default:
    llvm_unreachable("not a gws intrinsic");
  }
}

bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
                                                     Intrinsic::ID IID) const {
  if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !STI.hasGWSSemaReleaseAll())
    return false;

  // intrinsic ID, vsrc, offset
  const bool HasVSrc = MI.getNumOperands() == 3;
  assert(HasVSrc || MI.getNumOperands() == 2);

  Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
  const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
  if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  assert(OffsetDef);

  unsigned ImmOffset;

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *Readfirstlane = nullptr;

  // If we legalized the VGPR input, strip out the readfirstlane to analyze the
  // incoming offset, in case there's an add of a constant. We'll have to put it
  // back later.
  if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
    Readfirstlane = OffsetDef;
    BaseOffset = OffsetDef->getOperand(1).getReg();
    OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  }

  if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.

    ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(0);
  } else {
    std::tie(BaseOffset, ImmOffset) =
        AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);

    if (Readfirstlane) {
      // We have the constant offset now, so put the readfirstlane back on the
      // variable component.
      if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
        return false;

      Readfirstlane->getOperand(1).setReg(BaseOffset);
      BaseOffset = Readfirstlane->getOperand(0).getReg();
    } else {
      if (!RBI.constrainGenericRegister(BaseOffset,
                                        AMDGPU::SReg_32RegClass, *MRI))
        return false;
    }

    Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
      .addReg(BaseOffset)
      .addImm(16);

    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0Base);
  }

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));

  if (HasVSrc) {
    Register VSrc = MI.getOperand(1).getReg();

    if (STI.needsAlignedVGPRs()) {
      // Add implicit aligned super-reg to force alignment on the data operand.
      Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
      Register NewVR =
          MRI->createVirtualRegister(&AMDGPU::VReg_64_Align2RegClass);
      BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), NewVR)
          .addReg(VSrc, 0, MI.getOperand(1).getSubReg())
          .addImm(AMDGPU::sub0)
          .addReg(Undef)
          .addImm(AMDGPU::sub1);
      MIB.addReg(NewVR, 0, AMDGPU::sub0);
      MIB.addReg(NewVR, RegState::Implicit);
    } else {
      MIB.addReg(VSrc);
    }

    if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
      return false;
  }

  MIB.addImm(ImmOffset)
     .cloneMemRefs(MI);

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
                                                      bool IsAppend) const {
  Register PtrBase = MI.getOperand(2).getReg();
  LLT PtrTy = MRI->getType(PtrBase);
  bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  unsigned Offset;
  std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));

  // TODO: Should this try to look through readfirstlane like GWS?
  if (!isDSOffsetLegal(PtrBase, Offset)) {
    PtrBase = MI.getOperand(2).getReg();
    Offset = 0;
  }

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(PtrBase);
  if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
    .addImm(Offset)
    .addImm(IsGDS ? -1 : 0)
    .cloneMemRefs(MI);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

1488 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1489   if (TM.getOptLevel() > CodeGenOpt::None) {
1490     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1491     if (WGSize <= STI.getWavefrontSize()) {
1492       MachineBasicBlock *MBB = MI.getParent();
1493       const DebugLoc &DL = MI.getDebugLoc();
1494       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1495       MI.eraseFromParent();
1496       return true;
1497     }
1498   }
1499   return selectImpl(MI, *CoverageInfo);
1500 }
1501 
1502 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1503                          bool &IsTexFail) {
1504   if (TexFailCtrl)
1505     IsTexFail = true;
1506 
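       // Bit 0 is TFE and bit 1 is LWE; any other set bit is an unsupported
       // value and makes this return false. For example, TexFailCtrl == 0x3
       // sets both TFE and LWE, while TexFailCtrl == 0x4 is rejected.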
1507   TFE = (TexFailCtrl & 0x1) != 0;
1508   TexFailCtrl &= ~(uint64_t)0x1;
1509   LWE = (TexFailCtrl & 0x2) != 0;
1510   TexFailCtrl &= ~(uint64_t)0x2;
1511 
1512   return TexFailCtrl == 0;
1513 }
1514 
1515 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1516   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1517   MachineBasicBlock *MBB = MI.getParent();
1518   const DebugLoc &DL = MI.getDebugLoc();
1519 
1520   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1521     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1522 
1523   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1524   unsigned IntrOpcode = Intr->BaseOpcode;
1525   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1526 
1527   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1528 
1529   Register VDataIn, VDataOut;
1530   LLT VDataTy;
1531   int NumVDataDwords = -1;
1532   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1533                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1534 
1535   bool Unorm;
1536   if (!BaseOpcode->Sampler)
1537     Unorm = true;
1538   else
1539     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1540 
1541   bool TFE;
1542   bool LWE;
1543   bool IsTexFail = false;
1544   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1545                     TFE, LWE, IsTexFail))
1546     return false;
1547 
1548   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1549   const bool IsA16 = (Flags & 1) != 0;
1550   const bool IsG16 = (Flags & 2) != 0;
1551 
1552   // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1553   if (IsA16 && !STI.hasG16() && !IsG16)
1554     return false;
1555 
1556   unsigned DMask = 0;
1557   unsigned DMaskLanes = 0;
1558 
1559   if (BaseOpcode->Atomic) {
1560     VDataOut = MI.getOperand(0).getReg();
1561     VDataIn = MI.getOperand(2).getReg();
1562     LLT Ty = MRI->getType(VDataIn);
1563 
1564     // Be careful to allow atomic swap on 16-bit element vectors.
1565     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1566       Ty.getSizeInBits() == 128 :
1567       Ty.getSizeInBits() == 64;
1568 
1569     if (BaseOpcode->AtomicX2) {
1570       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1571 
1572       DMask = Is64Bit ? 0xf : 0x3;
1573       NumVDataDwords = Is64Bit ? 4 : 2;
1574     } else {
1575       DMask = Is64Bit ? 0x3 : 0x1;
1576       NumVDataDwords = Is64Bit ? 2 : 1;
1577     }
1578   } else {
1579     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1580     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1581 
1582     if (BaseOpcode->Store) {
1583       VDataIn = MI.getOperand(1).getReg();
1584       VDataTy = MRI->getType(VDataIn);
1585       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1586     } else {
1587       VDataOut = MI.getOperand(0).getReg();
1588       VDataTy = MRI->getType(VDataOut);
1589       NumVDataDwords = DMaskLanes;
1590 
1591       if (IsD16 && !STI.hasUnpackedD16VMem())
1592         NumVDataDwords = (DMaskLanes + 1) / 2;
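           // e.g. three packed D16 lanes occupy (3 + 1) / 2 == 2 dwords.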
1593     }
1594   }
1595 
1596   // Set G16 opcode
1597   if (IsG16 && !IsA16) {
1598     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1599         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1600     assert(G16MappingInfo);
1601     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1602   }
1603 
1604   // TODO: Check this in verifier.
1605   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1606 
1607   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1608   if (BaseOpcode->Atomic)
1609     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1610   if (CPol & ~AMDGPU::CPol::ALL)
1611     return false;
1612 
1613   int NumVAddrRegs = 0;
1614   int NumVAddrDwords = 0;
1615   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1616     // Skip the $noregs and 0s inserted during legalization.
1617     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1618     if (!AddrOp.isReg())
1619       continue; // XXX - Break?
1620 
1621     Register Addr = AddrOp.getReg();
1622     if (!Addr)
1623       break;
1624 
1625     ++NumVAddrRegs;
1626     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1627   }
1628 
1629   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1630   // NSA, these should have been packed into a single value in the first
1631   // address register.
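       // For example, three 32-bit address registers give NumVAddrDwords ==
       // NumVAddrRegs == 3 and select the NSA encoding; a single 96-bit
       // register would carry the same dwords in the packed (non-NSA) form.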
1632   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1633   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1634     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1635     return false;
1636   }
1637 
1638   if (IsTexFail)
1639     ++NumVDataDwords;
1640 
1641   int Opcode = -1;
1642   if (IsGFX10Plus) {
1643     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1644                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1645                                           : AMDGPU::MIMGEncGfx10Default,
1646                                    NumVDataDwords, NumVAddrDwords);
1647   } else {
1648     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1649       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1650                                      NumVDataDwords, NumVAddrDwords);
1651     if (Opcode == -1)
1652       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1653                                      NumVDataDwords, NumVAddrDwords);
1654   }
1655   assert(Opcode != -1);
1656 
1657   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1658     .cloneMemRefs(MI);
1659 
1660   if (VDataOut) {
1661     if (BaseOpcode->AtomicX2) {
1662       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1663 
1664       Register TmpReg = MRI->createVirtualRegister(
1665         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1666       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1667 
1668       MIB.addDef(TmpReg);
1669       if (!MRI->use_empty(VDataOut)) {
1670         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1671             .addReg(TmpReg, RegState::Kill, SubReg);
1672       }
1673 
1674     } else {
1675       MIB.addDef(VDataOut); // vdata output
1676     }
1677   }
1678 
1679   if (VDataIn)
1680     MIB.addReg(VDataIn); // vdata input
1681 
1682   for (int I = 0; I != NumVAddrRegs; ++I) {
1683     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1684     if (SrcOp.isReg()) {
1685       assert(SrcOp.getReg() != 0);
1686       MIB.addReg(SrcOp.getReg());
1687     }
1688   }
1689 
1690   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1691   if (BaseOpcode->Sampler)
1692     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1693 
1694   MIB.addImm(DMask); // dmask
1695 
1696   if (IsGFX10Plus)
1697     MIB.addImm(DimInfo->Encoding);
1698   MIB.addImm(Unorm);
1699 
1700   MIB.addImm(CPol);
1701   MIB.addImm(IsA16 &&  // a16 or r128
1702              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1703   if (IsGFX10Plus)
1704     MIB.addImm(IsA16 ? -1 : 0);
1705 
1706   MIB.addImm(TFE); // tfe
1707   MIB.addImm(LWE); // lwe
1708   if (!IsGFX10Plus)
1709     MIB.addImm(DimInfo->DA ? -1 : 0);
1710   if (BaseOpcode->HasD16)
1711     MIB.addImm(IsD16 ? -1 : 0);
1712 
1713   if (IsTexFail) {
1714     // An image load instruction with TFE/LWE only conditionally writes to its
1715     // result registers. Initialize them to zero so that we always get well
1716     // defined result values.
1717     assert(VDataOut && !VDataIn);
1718     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1719     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1720     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1721       .addImm(0);
1722     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1723     if (STI.usePRTStrictNull()) {
1724       // With enable-prt-strict-null enabled, initialize all result registers to
1725       // zero.
1726       auto RegSeq =
1727           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1728       for (auto Sub : Parts)
1729         RegSeq.addReg(Zero).addImm(Sub);
1730     } else {
1731       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1732       // result register.
1733       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1734       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1735       auto RegSeq =
1736           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1737       for (auto Sub : Parts.drop_back(1))
1738         RegSeq.addReg(Undef).addImm(Sub);
1739       RegSeq.addReg(Zero).addImm(Parts.back());
1740     }
1741     MIB.addReg(Tied, RegState::Implicit);
1742     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1743   }
1744 
1745   MI.eraseFromParent();
1746   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1747 }
1748 
1749 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1750     MachineInstr &I) const {
1751   unsigned IntrinsicID = I.getIntrinsicID();
1752   switch (IntrinsicID) {
1753   case Intrinsic::amdgcn_end_cf:
1754     return selectEndCfIntrinsic(I);
1755   case Intrinsic::amdgcn_ds_ordered_add:
1756   case Intrinsic::amdgcn_ds_ordered_swap:
1757     return selectDSOrderedIntrinsic(I, IntrinsicID);
1758   case Intrinsic::amdgcn_ds_gws_init:
1759   case Intrinsic::amdgcn_ds_gws_barrier:
1760   case Intrinsic::amdgcn_ds_gws_sema_v:
1761   case Intrinsic::amdgcn_ds_gws_sema_br:
1762   case Intrinsic::amdgcn_ds_gws_sema_p:
1763   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1764     return selectDSGWSIntrinsic(I, IntrinsicID);
1765   case Intrinsic::amdgcn_ds_append:
1766     return selectDSAppendConsume(I, true);
1767   case Intrinsic::amdgcn_ds_consume:
1768     return selectDSAppendConsume(I, false);
1769   case Intrinsic::amdgcn_s_barrier:
1770     return selectSBarrier(I);
1771   case Intrinsic::amdgcn_global_atomic_fadd:
1772     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1773   default: {
1774     return selectImpl(I, *CoverageInfo);
1775   }
1776   }
1777 }
1778 
1779 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1780   if (selectImpl(I, *CoverageInfo))
1781     return true;
1782 
1783   MachineBasicBlock *BB = I.getParent();
1784   const DebugLoc &DL = I.getDebugLoc();
1785 
1786   Register DstReg = I.getOperand(0).getReg();
1787   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1788   assert(Size <= 32 || Size == 64);
1789   const MachineOperand &CCOp = I.getOperand(1);
1790   Register CCReg = CCOp.getReg();
1791   if (!isVCC(CCReg, *MRI)) {
1792     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1793                                          AMDGPU::S_CSELECT_B32;
1794     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1795             .addReg(CCReg);
1796 
1797     // The generic constrainSelectedInstRegOperands doesn't work for the scc
1798     // register bank, because it does not cover the register class that we use
1799     // to represent it. So we need to set the register class manually here.
1800     if (!MRI->getRegClassOrNull(CCReg))
1801         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1802     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1803             .add(I.getOperand(2))
1804             .add(I.getOperand(3));
1805 
1806     bool Ret = false;
1807     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1808     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1809     I.eraseFromParent();
1810     return Ret;
1811   }
1812 
1813   // Wide VGPR select should have been split in RegBankSelect.
1814   if (Size > 32)
1815     return false;
1816 
1817   MachineInstr *Select =
1818       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1819               .addImm(0)
1820               .add(I.getOperand(3))
1821               .addImm(0)
1822               .add(I.getOperand(2))
1823               .add(I.getOperand(1));
1824 
1825   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1826   I.eraseFromParent();
1827   return Ret;
1828 }
1829 
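     // Map a scalar size in bits to the subregister index spanning it, e.g.
     // 64 -> sub0_sub1. Sizes below 32 still map to sub0, sizes without a
     // direct entry round up to the next power of two, and sizes above 256
     // are unsupported (-1).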
1830 static int sizeToSubRegIndex(unsigned Size) {
1831   switch (Size) {
1832   case 32:
1833     return AMDGPU::sub0;
1834   case 64:
1835     return AMDGPU::sub0_sub1;
1836   case 96:
1837     return AMDGPU::sub0_sub1_sub2;
1838   case 128:
1839     return AMDGPU::sub0_sub1_sub2_sub3;
1840   case 256:
1841     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1842   default:
1843     if (Size < 32)
1844       return AMDGPU::sub0;
1845     if (Size > 256)
1846       return -1;
1847     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1848   }
1849 }
1850 
1851 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1852   Register DstReg = I.getOperand(0).getReg();
1853   Register SrcReg = I.getOperand(1).getReg();
1854   const LLT DstTy = MRI->getType(DstReg);
1855   const LLT SrcTy = MRI->getType(SrcReg);
1856   const LLT S1 = LLT::scalar(1);
1857 
1858   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1859   const RegisterBank *DstRB;
1860   if (DstTy == S1) {
1861     // This is a special case. For legalization artifacts we don't treat s1
1862     // as a vcc boolean.
1863     DstRB = SrcRB;
1864   } else {
1865     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1866     if (SrcRB != DstRB)
1867       return false;
1868   }
1869 
1870   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1871 
1872   unsigned DstSize = DstTy.getSizeInBits();
1873   unsigned SrcSize = SrcTy.getSizeInBits();
1874 
1875   const TargetRegisterClass *SrcRC
1876     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1877   const TargetRegisterClass *DstRC
1878     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1879   if (!SrcRC || !DstRC)
1880     return false;
1881 
1882   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1883       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1884     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1885     return false;
1886   }
1887 
1888   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1889     MachineBasicBlock *MBB = I.getParent();
1890     const DebugLoc &DL = I.getDebugLoc();
1891 
1892     Register LoReg = MRI->createVirtualRegister(DstRC);
1893     Register HiReg = MRI->createVirtualRegister(DstRC);
1894     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1895       .addReg(SrcReg, 0, AMDGPU::sub0);
1896     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1897       .addReg(SrcReg, 0, AMDGPU::sub1);
1898 
1899     if (IsVALU && STI.hasSDWA()) {
1900       // Write the low 16-bits of the high element into the high 16-bits of the
1901       // low element.
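           // dst_sel WORD_1 with dst_unused UNUSED_PRESERVE writes only the
           // high word of DstReg; the low word is preserved from the tied
           // implicit use of LoReg.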
1902       MachineInstr *MovSDWA =
1903         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1904         .addImm(0)                             // $src0_modifiers
1905         .addReg(HiReg)                         // $src0
1906         .addImm(0)                             // $clamp
1907         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1908         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1909         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1910         .addReg(LoReg, RegState::Implicit);
1911       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1912     } else {
1913       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1914       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1915       Register ImmReg = MRI->createVirtualRegister(DstRC);
1916       if (IsVALU) {
1917         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1918           .addImm(16)
1919           .addReg(HiReg);
1920       } else {
1921         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1922           .addReg(HiReg)
1923           .addImm(16);
1924       }
1925 
1926       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1927       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1928       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1929 
1930       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1931         .addImm(0xffff);
1932       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1933         .addReg(LoReg)
1934         .addReg(ImmReg);
1935       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1936         .addReg(TmpReg0)
1937         .addReg(TmpReg1);
1938     }
1939 
1940     I.eraseFromParent();
1941     return true;
1942   }
1943 
1944   if (!DstTy.isScalar())
1945     return false;
1946 
1947   if (SrcSize > 32) {
1948     int SubRegIdx = sizeToSubRegIndex(DstSize);
1949     if (SubRegIdx == -1)
1950       return false;
1951 
1952     // Deal with weird cases where the class only partially supports the subreg
1953     // index.
1954     const TargetRegisterClass *SrcWithSubRC
1955       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1956     if (!SrcWithSubRC)
1957       return false;
1958 
1959     if (SrcWithSubRC != SrcRC) {
1960       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1961         return false;
1962     }
1963 
1964     I.getOperand(1).setSubReg(SubRegIdx);
1965   }
1966 
1967   I.setDesc(TII.get(TargetOpcode::COPY));
1968   return true;
1969 }
1970 
1971 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
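     /// For example, Size == 4 gives Mask == 0xf (15, inline) and Size == 32
     /// gives Mask == 0xffffffff (-1 when signed, also inline), while
     /// Size == 16 gives Mask == 0xffff (65535, not an inline immediate).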
1972 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1973   Mask = maskTrailingOnes<unsigned>(Size);
1974   int SignedMask = static_cast<int>(Mask);
1975   return SignedMask >= -16 && SignedMask <= 64;
1976 }
1977 
1978 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1979 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1980   Register Reg, const MachineRegisterInfo &MRI,
1981   const TargetRegisterInfo &TRI) const {
1982   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1983   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1984     return RB;
1985 
1986   // Ignore the type, since we don't use vcc in artifacts.
1987   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1988     return &RBI.getRegBankFromRegClass(*RC, LLT());
1989   return nullptr;
1990 }
1991 
1992 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1993   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1994   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1995   const DebugLoc &DL = I.getDebugLoc();
1996   MachineBasicBlock &MBB = *I.getParent();
1997   const Register DstReg = I.getOperand(0).getReg();
1998   const Register SrcReg = I.getOperand(1).getReg();
1999 
2000   const LLT DstTy = MRI->getType(DstReg);
2001   const LLT SrcTy = MRI->getType(SrcReg);
2002   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2003     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2004   const unsigned DstSize = DstTy.getSizeInBits();
2005   if (!DstTy.isScalar())
2006     return false;
2007 
2008   // Artifact casts should never use vcc.
2009   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2010 
2011   // FIXME: This should probably be illegal and split earlier.
2012   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2013     if (DstSize <= 32)
2014       return selectCOPY(I);
2015 
2016     const TargetRegisterClass *SrcRC =
2017         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
2018     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2019     const TargetRegisterClass *DstRC =
2020         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
2021 
2022     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2023     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2024     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2025       .addReg(SrcReg)
2026       .addImm(AMDGPU::sub0)
2027       .addReg(UndefReg)
2028       .addImm(AMDGPU::sub1);
2029     I.eraseFromParent();
2030 
2031     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2032            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2033   }
2034 
2035   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2036     // 64-bit should have been split up in RegBankSelect
2037 
2038     // Try to use an and with a mask if it will save code size.
2039     unsigned Mask;
2040     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2041       MachineInstr *ExtI =
2042       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2043         .addImm(Mask)
2044         .addReg(SrcReg);
2045       I.eraseFromParent();
2046       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2047     }
2048 
2049     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2050     MachineInstr *ExtI =
2051       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2052       .addReg(SrcReg)
2053       .addImm(0) // Offset
2054       .addImm(SrcSize); // Width
2055     I.eraseFromParent();
2056     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2057   }
2058 
2059   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2060     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2061       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2062     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2063       return false;
2064 
2065     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2066       const unsigned SextOpc = SrcSize == 8 ?
2067         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2068       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2069         .addReg(SrcReg);
2070       I.eraseFromParent();
2071       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2072     }
2073 
2074     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2075     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2076 
2077     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
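         // e.g. a 16-bit source encodes as 16 << 16 == 0x100000: offset 0,
         // width 16.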
2078     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2079       // We need a 64-bit register source, but the high bits don't matter.
2080       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2081       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2082       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2083 
2084       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2085       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2086         .addReg(SrcReg, 0, SubReg)
2087         .addImm(AMDGPU::sub0)
2088         .addReg(UndefReg)
2089         .addImm(AMDGPU::sub1);
2090 
2091       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2092         .addReg(ExtReg)
2093         .addImm(SrcSize << 16);
2094 
2095       I.eraseFromParent();
2096       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2097     }
2098 
2099     unsigned Mask;
2100     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2101       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2102         .addReg(SrcReg)
2103         .addImm(Mask);
2104     } else {
2105       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2106         .addReg(SrcReg)
2107         .addImm(SrcSize << 16);
2108     }
2109 
2110     I.eraseFromParent();
2111     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2112   }
2113 
2114   return false;
2115 }
2116 
2117 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2118   MachineBasicBlock *BB = I.getParent();
2119   MachineOperand &ImmOp = I.getOperand(1);
2120   Register DstReg = I.getOperand(0).getReg();
2121   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2122 
2123   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2124   if (ImmOp.isFPImm()) {
2125     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2126     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2127   } else if (ImmOp.isCImm()) {
2128     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2129   } else {
2130     llvm_unreachable("Not supported by g_constants");
2131   }
2132 
2133   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2134   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2135 
2136   unsigned Opcode;
2137   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2138     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2139   } else {
2140     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2141 
2142     // We should never produce s1 values on banks other than VCC. If the user of
2143     // this already constrained the register, we may incorrectly think it's VCC
2144     // if it wasn't originally.
2145     if (Size == 1)
2146       return false;
2147   }
2148 
2149   if (Size != 64) {
2150     I.setDesc(TII.get(Opcode));
2151     I.addImplicitDefUseOperands(*MF);
2152     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2153   }
2154 
2155   const DebugLoc &DL = I.getDebugLoc();
2156 
2157   APInt Imm(Size, I.getOperand(1).getImm());
2158 
2159   MachineInstr *ResInst;
2160   if (IsSgpr && TII.isInlineConstant(Imm)) {
2161     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2162       .addImm(I.getOperand(1).getImm());
2163   } else {
2164     const TargetRegisterClass *RC = IsSgpr ?
2165       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2166     Register LoReg = MRI->createVirtualRegister(RC);
2167     Register HiReg = MRI->createVirtualRegister(RC);
2168 
2169     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2170       .addImm(Imm.trunc(32).getZExtValue());
2171 
2172     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2173       .addImm(Imm.ashr(32).getZExtValue());
2174 
2175     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2176       .addReg(LoReg)
2177       .addImm(AMDGPU::sub0)
2178       .addReg(HiReg)
2179       .addImm(AMDGPU::sub1);
2180   }
2181 
2182   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2183   // work for target-independent opcodes.
2184   I.eraseFromParent();
2185   const TargetRegisterClass *DstRC =
2186     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2187   if (!DstRC)
2188     return true;
2189   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2190 }
2191 
2192 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2193   // Only manually handle the f64 SGPR case.
2194   //
2195   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2196   // the bit ops theoretically have a second result due to the implicit def of
2197   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2198   // that is easy by disabling the check. The result works, but uses a
2199   // nonsensical sreg32orlds_and_sreg_1 regclass.
2200   //
2201   // The DAG emitter is more problematic, and incorrectly adds both results of
2202   // the S_XOR_B32 to the variadic REG_SEQUENCE operands.
2203 
2204   Register Dst = MI.getOperand(0).getReg();
2205   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2206   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2207       MRI->getType(Dst) != LLT::scalar(64))
2208     return false;
2209 
2210   Register Src = MI.getOperand(1).getReg();
2211   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2212   if (Fabs)
2213     Src = Fabs->getOperand(1).getReg();
2214 
2215   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2216       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2217     return false;
2218 
2219   MachineBasicBlock *BB = MI.getParent();
2220   const DebugLoc &DL = MI.getDebugLoc();
2221   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2222   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2223   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2224   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2225 
2226   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2227     .addReg(Src, 0, AMDGPU::sub0);
2228   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2229     .addReg(Src, 0, AMDGPU::sub1);
2230   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2231     .addImm(0x80000000);
2232 
2233   // Set or toggle sign bit.
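       // A plain fneg flips bit 63 of the f64 (XOR on the high word); a folded
       // fneg(fabs(x)) must force the bit to 1 instead, hence the OR.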
2234   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2235   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2236     .addReg(HiReg)
2237     .addReg(ConstReg);
2238   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2239     .addReg(LoReg)
2240     .addImm(AMDGPU::sub0)
2241     .addReg(OpReg)
2242     .addImm(AMDGPU::sub1);
2243   MI.eraseFromParent();
2244   return true;
2245 }
2246 
2247 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2248 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2249   Register Dst = MI.getOperand(0).getReg();
2250   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2251   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2252       MRI->getType(Dst) != LLT::scalar(64))
2253     return false;
2254 
2255   Register Src = MI.getOperand(1).getReg();
2256   MachineBasicBlock *BB = MI.getParent();
2257   const DebugLoc &DL = MI.getDebugLoc();
2258   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2259   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2260   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2261   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2262 
2263   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2264       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2265     return false;
2266 
2267   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2268     .addReg(Src, 0, AMDGPU::sub0);
2269   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2270     .addReg(Src, 0, AMDGPU::sub1);
2271   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2272     .addImm(0x7fffffff);
2273 
2274   // Clear sign bit.
2275   // TODO: Should this use S_BITSET0_*?
2276   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2277     .addReg(HiReg)
2278     .addReg(ConstReg);
2279   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2280     .addReg(LoReg)
2281     .addImm(AMDGPU::sub0)
2282     .addReg(OpReg)
2283     .addImm(AMDGPU::sub1);
2284 
2285   MI.eraseFromParent();
2286   return true;
2287 }
2288 
2289 static bool isConstant(const MachineInstr &MI) {
2290   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2291 }
2292 
2293 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2294     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2295 
2296   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2297 
2298   assert(PtrMI);
2299 
2300   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2301     return;
2302 
2303   GEPInfo GEPInfo(*PtrMI);
2304 
2305   for (unsigned i = 1; i != 3; ++i) {
2306     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2307     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2308     assert(OpDef);
2309     if (i == 2 && isConstant(*OpDef)) {
2310       // TODO: Could handle constant base + variable offset, but a combine
2311       // probably should have commuted it.
2312       assert(GEPInfo.Imm == 0);
2313       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2314       continue;
2315     }
2316     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2317     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2318       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2319     else
2320       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2321   }
2322 
2323   AddrInfo.push_back(GEPInfo);
2324   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2325 }
2326 
2327 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2328   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2329 }
2330 
2331 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2332   if (!MI.hasOneMemOperand())
2333     return false;
2334 
2335   const MachineMemOperand *MMO = *MI.memoperands_begin();
2336   const Value *Ptr = MMO->getValue();
2337 
2338   // UndefValue means this is a load of a kernel input.  These are uniform.
2339   // Sometimes LDS instructions have constant pointers.
2340   // If Ptr is null, then that means this mem operand contains a
2341   // PseudoSourceValue like GOT.
2342   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2343       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2344     return true;
2345 
2346   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2347     return true;
2348 
2349   const Instruction *I = dyn_cast<Instruction>(Ptr);
2350   return I && I->getMetadata("amdgpu.uniform");
2351 }
2352 
2353 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2354   for (const GEPInfo &GEPInfo : AddrInfo) {
2355     if (!GEPInfo.VgprParts.empty())
2356       return true;
2357   }
2358   return false;
2359 }
2360 
2361 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2362   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2363   unsigned AS = PtrTy.getAddressSpace();
2364   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2365       STI.ldsRequiresM0Init()) {
2366     MachineBasicBlock *BB = I.getParent();
2367 
2368     // If DS instructions require M0 initialization, insert it before selecting.
2369     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2370       .addImm(-1);
2371   }
2372 }
2373 
2374 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2375   MachineInstr &I) const {
2376   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2377     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2378     unsigned AS = PtrTy.getAddressSpace();
2379     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2380       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2381   }
2382 
2383   initM0(I);
2384   return selectImpl(I, *CoverageInfo);
2385 }
2386 
2387 // TODO: No rtn optimization.
2388 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2389   MachineInstr &MI) const {
2390   Register PtrReg = MI.getOperand(1).getReg();
2391   const LLT PtrTy = MRI->getType(PtrReg);
2392   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2393       STI.useFlatForGlobal())
2394     return selectImpl(MI, *CoverageInfo);
2395 
2396   Register DstReg = MI.getOperand(0).getReg();
2397   const LLT Ty = MRI->getType(DstReg);
2398   const bool Is64 = Ty.getSizeInBits() == 64;
2399   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2400   Register TmpReg = MRI->createVirtualRegister(
2401     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
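       // The RTN form writes the pre-swap memory value into the low half of
       // the {data, cmp} pair, so the copy below extracts it via SubReg.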
2402 
2403   const DebugLoc &DL = MI.getDebugLoc();
2404   MachineBasicBlock *BB = MI.getParent();
2405 
2406   Register VAddr, RSrcReg, SOffset;
2407   int64_t Offset = 0;
2408 
2409   unsigned Opcode;
2410   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2411     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2412                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2413   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2414                                    RSrcReg, SOffset, Offset)) {
2415     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2416                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2417   } else
2418     return selectImpl(MI, *CoverageInfo);
2419 
2420   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2421     .addReg(MI.getOperand(2).getReg());
2422 
2423   if (VAddr)
2424     MIB.addReg(VAddr);
2425 
2426   MIB.addReg(RSrcReg);
2427   if (SOffset)
2428     MIB.addReg(SOffset);
2429   else
2430     MIB.addImm(0);
2431 
2432   MIB.addImm(Offset);
2433   MIB.addImm(AMDGPU::CPol::GLC);
2434   MIB.cloneMemRefs(MI);
2435 
2436   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2437     .addReg(TmpReg, RegState::Kill, SubReg);
2438 
2439   MI.eraseFromParent();
2440 
2441   MRI->setRegClass(
2442     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2443   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2444 }
2445 
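     // Returns true if \p Reg is known to hold a wave-wide compare result (a
     // V_CMP, an amdgcn.class intrinsic, or a bitwise combination of such),
     // in which case selectG_BRCOND can skip ANDing the condition with exec.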
2446 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2447   if (Reg.isPhysical())
2448     return false;
2449 
2450   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2451   const unsigned Opcode = MI.getOpcode();
2452 
2453   if (Opcode == AMDGPU::COPY)
2454     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2455 
2456   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2457       Opcode == AMDGPU::G_XOR)
2458     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2459            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2460 
2461   if (Opcode == TargetOpcode::G_INTRINSIC)
2462     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2463 
2464   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2465 }
2466 
2467 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2468   MachineBasicBlock *BB = I.getParent();
2469   MachineOperand &CondOp = I.getOperand(0);
2470   Register CondReg = CondOp.getReg();
2471   const DebugLoc &DL = I.getDebugLoc();
2472 
2473   unsigned BrOpcode;
2474   Register CondPhysReg;
2475   const TargetRegisterClass *ConstrainRC;
2476 
2477   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2478   // whether the branch is uniform when selecting the instruction. In
2479   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2480   // RegBankSelect knows what it's doing if the branch condition is scc, even
2481   // though it currently does not.
2482   if (!isVCC(CondReg, *MRI)) {
2483     if (MRI->getType(CondReg) != LLT::scalar(32))
2484       return false;
2485 
2486     CondPhysReg = AMDGPU::SCC;
2487     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2488     ConstrainRC = &AMDGPU::SReg_32RegClass;
2489   } else {
2490     // FIXME: Should scc->vcc copies be ANDed with exec?
2491 
2492     // Unless the value of CondReg is a result of a V_CMP* instruction, we
2493     // need to insert an AND with exec.
2494     if (!isVCmpResult(CondReg, *MRI)) {
2495       const bool Is64 = STI.isWave64();
2496       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2497       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2498 
2499       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2500       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2501           .addReg(CondReg)
2502           .addReg(Exec);
2503       CondReg = TmpReg;
2504     }
2505 
2506     CondPhysReg = TRI.getVCC();
2507     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2508     ConstrainRC = TRI.getBoolRC();
2509   }
2510 
2511   if (!MRI->getRegClassOrNull(CondReg))
2512     MRI->setRegClass(CondReg, ConstrainRC);
2513 
2514   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2515     .addReg(CondReg);
2516   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2517     .addMBB(I.getOperand(1).getMBB());
2518 
2519   I.eraseFromParent();
2520   return true;
2521 }
2522 
2523 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2524   MachineInstr &I) const {
2525   Register DstReg = I.getOperand(0).getReg();
2526   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2527   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2528   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2529   if (IsVGPR)
2530     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2531 
2532   return RBI.constrainGenericRegister(
2533     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2534 }
2535 
2536 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2537   Register DstReg = I.getOperand(0).getReg();
2538   Register SrcReg = I.getOperand(1).getReg();
2539   Register MaskReg = I.getOperand(2).getReg();
2540   LLT Ty = MRI->getType(DstReg);
2541   LLT MaskTy = MRI->getType(MaskReg);
2542   MachineBasicBlock *BB = I.getParent();
2543   const DebugLoc &DL = I.getDebugLoc();
2544 
2545   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2546   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2547   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2548   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2549   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2550     return false;
2551 
2552   // Try to avoid emitting a bit operation when we only need to touch half of
2553   // the 64-bit pointer.
2554   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2555   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2556   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2557 
2558   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2559   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
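       // e.g. a mask known to be 0xffffffff00000000 copies the high half of
       // the pointer unchanged and only emits a 32-bit AND for the low half.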
2560 
2561   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2562       !CanCopyLow32 && !CanCopyHi32) {
2563     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2564       .addReg(SrcReg)
2565       .addReg(MaskReg);
2566     I.eraseFromParent();
2567     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2568   }
2569 
2570   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2571   const TargetRegisterClass &RegRC
2572     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2573 
2574   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2575                                                                   *MRI);
2576   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2577                                                                   *MRI);
2578   const TargetRegisterClass *MaskRC =
2579       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2580 
2581   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2582       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2583       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2584     return false;
2585 
2586   if (Ty.getSizeInBits() == 32) {
2587     assert(MaskTy.getSizeInBits() == 32 &&
2588            "ptrmask should have been narrowed during legalize");
2589 
2590     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2591       .addReg(SrcReg)
2592       .addReg(MaskReg);
2593     I.eraseFromParent();
2594     return true;
2595   }
2596 
2597   Register HiReg = MRI->createVirtualRegister(&RegRC);
2598   Register LoReg = MRI->createVirtualRegister(&RegRC);
2599 
2600   // Extract the subregisters from the source pointer.
2601   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2602     .addReg(SrcReg, 0, AMDGPU::sub0);
2603   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2604     .addReg(SrcReg, 0, AMDGPU::sub1);
2605 
2606   Register MaskedLo, MaskedHi;
2607 
2608   if (CanCopyLow32) {
2609     // If all the bits in the low half are 1, we only need a copy for it.
2610     MaskedLo = LoReg;
2611   } else {
2612     // Extract the mask subregister and apply the and.
2613     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2614     MaskedLo = MRI->createVirtualRegister(&RegRC);
2615 
2616     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2617       .addReg(MaskReg, 0, AMDGPU::sub0);
2618     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2619       .addReg(LoReg)
2620       .addReg(MaskLo);
2621   }
2622 
2623   if (CanCopyHi32) {
2624     // If all the bits in the high half are 1, we only need a copy for it.
2625     MaskedHi = HiReg;
2626   } else {
2627     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2628     MaskedHi = MRI->createVirtualRegister(&RegRC);
2629 
2630     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2631       .addReg(MaskReg, 0, AMDGPU::sub1);
2632     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2633       .addReg(HiReg)
2634       .addReg(MaskHi);
2635   }
2636 
2637   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2638     .addReg(MaskedLo)
2639     .addImm(AMDGPU::sub0)
2640     .addReg(MaskedHi)
2641     .addImm(AMDGPU::sub1);
2642   I.eraseFromParent();
2643   return true;
2644 }
2645 
2646 /// Return the register to use for the index value, and the subregister to use
2647 /// for the indirectly accessed register.
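     /// For example, indexing 32-bit elements (EltSize == 4) with \p IdxReg
     /// defined as (G_ADD %base, 2) yields {%base, sub2}, provided sub2 is in
     /// bounds for the super-register class.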
2648 static std::pair<Register, unsigned>
2649 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2650                         const SIRegisterInfo &TRI,
2651                         const TargetRegisterClass *SuperRC,
2652                         Register IdxReg,
2653                         unsigned EltSize) {
2654   Register IdxBaseReg;
2655   int Offset;
2656 
2657   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2658   if (IdxBaseReg == AMDGPU::NoRegister) {
2659     // This will happen if the index is a known constant. This should ordinarily
2660     // be legalized out, but handle it as a register just in case.
2661     assert(Offset == 0);
2662     IdxBaseReg = IdxReg;
2663   }
2664 
2665   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2666 
2667   // Skip out of bounds offsets, or else we would end up using an undefined
2668   // register.
2669   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2670     return std::make_pair(IdxReg, SubRegs[0]);
2671   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2672 }
2673 
2674 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2675   MachineInstr &MI) const {
2676   Register DstReg = MI.getOperand(0).getReg();
2677   Register SrcReg = MI.getOperand(1).getReg();
2678   Register IdxReg = MI.getOperand(2).getReg();
2679 
2680   LLT DstTy = MRI->getType(DstReg);
2681   LLT SrcTy = MRI->getType(SrcReg);
2682 
2683   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2684   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2685   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2686 
2687   // The index must be scalar. If it wasn't, RegBankSelect should have moved
2688   // this into a waterfall loop.
2689   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2690     return false;
2691 
2692   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2693                                                                   *MRI);
2694   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2695                                                                   *MRI);
2696   if (!SrcRC || !DstRC)
2697     return false;
2698   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2699       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2700       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2701     return false;
2702 
2703   MachineBasicBlock *BB = MI.getParent();
2704   const DebugLoc &DL = MI.getDebugLoc();
2705   const bool Is64 = DstTy.getSizeInBits() == 64;
2706 
2707   unsigned SubReg;
2708   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2709                                                      DstTy.getSizeInBits() / 8);
2710 
2711   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2712     if (DstTy.getSizeInBits() != 32 && !Is64)
2713       return false;
2714 
2715     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2716       .addReg(IdxReg);
2717 
2718     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2719     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2720       .addReg(SrcReg, 0, SubReg)
2721       .addReg(SrcReg, RegState::Implicit);
2722     MI.eraseFromParent();
2723     return true;
2724   }
2725 
2726   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2727     return false;
2728 
2729   if (!STI.useVGPRIndexMode()) {
2730     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2731       .addReg(IdxReg);
2732     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2733       .addReg(SrcReg, 0, SubReg)
2734       .addReg(SrcReg, RegState::Implicit);
2735     MI.eraseFromParent();
2736     return true;
2737   }
2738 
2739   const MCInstrDesc &GPRIDXDesc =
2740       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2741   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2742       .addReg(SrcReg)
2743       .addReg(IdxReg)
2744       .addImm(SubReg);
2745 
2746   MI.eraseFromParent();
2747   return true;
2748 }
2749 
2750 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2751 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2752   MachineInstr &MI) const {
2753   Register DstReg = MI.getOperand(0).getReg();
2754   Register VecReg = MI.getOperand(1).getReg();
2755   Register ValReg = MI.getOperand(2).getReg();
2756   Register IdxReg = MI.getOperand(3).getReg();
2757 
2758   LLT VecTy = MRI->getType(DstReg);
2759   LLT ValTy = MRI->getType(ValReg);
2760   unsigned VecSize = VecTy.getSizeInBits();
2761   unsigned ValSize = ValTy.getSizeInBits();
2762 
2763   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2764   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2765   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2766 
2767   assert(VecTy.getElementType() == ValTy);
2768 
2769   // The index must be scalar. If it wasn't, RegBankSelect should have moved
2770   // this into a waterfall loop.
2771   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2772     return false;
2773 
2774   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2775                                                                   *MRI);
2776   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2777                                                                   *MRI);
2778 
2779   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2780       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2781       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2782       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2783     return false;
2784 
2785   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2786     return false;
2787 
2788   unsigned SubReg;
2789   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2790                                                      ValSize / 8);
2791 
2792   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2793                          STI.useVGPRIndexMode();
2794 
2795   MachineBasicBlock *BB = MI.getParent();
2796   const DebugLoc &DL = MI.getDebugLoc();
2797 
2798   if (!IndexMode) {
2799     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2800       .addReg(IdxReg);
2801 
2802     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2803         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2804     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2805         .addReg(VecReg)
2806         .addReg(ValReg)
2807         .addImm(SubReg);
2808     MI.eraseFromParent();
2809     return true;
2810   }
2811 
2812   const MCInstrDesc &GPRIDXDesc =
2813       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2814   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2815       .addReg(VecReg)
2816       .addReg(ValReg)
2817       .addReg(IdxReg)
2818       .addImm(SubReg);
2819 
2820   MI.eraseFromParent();
2821   return true;
2822 }
2823 
2824 static bool isZeroOrUndef(int X) {
2825   return X == 0 || X == -1;
2826 }
2827 
2828 static bool isOneOrUndef(int X) {
2829   return X == 1 || X == -1;
2830 }
2831 
2832 static bool isZeroOrOneOrUndef(int X) {
2833   return X == 0 || X == 1 || X == -1;
2834 }
2835 
2836 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2837 // 32-bit register.
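     // For example, the mask {3, 2} reads only Src1, so it is rewritten as the
     // Src1-relative mask {1, 0}; a mask of {0, 1} is already Src0-relative
     // and Src0 is returned unchanged.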
2838 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2839                                    ArrayRef<int> Mask) {
2840   NewMask[0] = Mask[0];
2841   NewMask[1] = Mask[1];
2842   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2843     return Src0;
2844 
2845   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2846   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2847 
2848   // Shift the mask inputs to be 0/1.
2849   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2850   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2851   return Src1;
2852 }
2853 
2854 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2855 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2856   MachineInstr &MI) const {
2857   Register DstReg = MI.getOperand(0).getReg();
2858   Register Src0Reg = MI.getOperand(1).getReg();
2859   Register Src1Reg = MI.getOperand(2).getReg();
2860   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2861 
2862   const LLT V2S16 = LLT::fixed_vector(2, 16);
2863   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2864     return false;
2865 
2866   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2867     return false;
2868 
2869   assert(ShufMask.size() == 2);
2870   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2871 
2872   MachineBasicBlock *MBB = MI.getParent();
2873   const DebugLoc &DL = MI.getDebugLoc();
2874 
2875   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2876   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2877   const TargetRegisterClass &RC = IsVALU ?
2878     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2879 
  // Handle the degenerate case, which should have been folded out.
2881   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2882     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2883 
2884     MI.eraseFromParent();
2885     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2886   }
2887 
2888   // A legal VOP3P mask only reads one of the sources.
2889   int Mask[2];
2890   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2891 
2892   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2893       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2894     return false;
2895 
  // TODO: This also should have been folded out.
2897   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2898     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2899       .addReg(SrcVec);
2900 
2901     MI.eraseFromParent();
2902     return true;
2903   }
2904 
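  // Each remaining legal mask permutes the two halves of the 32-bit source:
  // <1, undef> is a 16-bit right shift, <undef, 0> a left shift, <0, 0> and
  // <1, 1> broadcast one half, and <1, 0> swaps the halves.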
2905   if (Mask[0] == 1 && Mask[1] == -1) {
2906     if (IsVALU) {
2907       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2908         .addImm(16)
2909         .addReg(SrcVec);
2910     } else {
2911       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2912         .addReg(SrcVec)
2913         .addImm(16);
2914     }
2915   } else if (Mask[0] == -1 && Mask[1] == 0) {
2916     if (IsVALU) {
2917       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2918         .addImm(16)
2919         .addReg(SrcVec);
2920     } else {
2921       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2922         .addReg(SrcVec)
2923         .addImm(16);
2924     }
2925   } else if (Mask[0] == 0 && Mask[1] == 0) {
2926     if (IsVALU) {
2927       // Write low half of the register into the high half.
2928       MachineInstr *MovSDWA =
2929         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2930         .addImm(0)                             // $src0_modifiers
2931         .addReg(SrcVec)                        // $src0
2932         .addImm(0)                             // $clamp
2933         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2934         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2935         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2936         .addReg(SrcVec, RegState::Implicit);
2937       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2938     } else {
2939       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2940         .addReg(SrcVec)
2941         .addReg(SrcVec);
2942     }
2943   } else if (Mask[0] == 1 && Mask[1] == 1) {
2944     if (IsVALU) {
2945       // Write high half of the register into the low half.
2946       MachineInstr *MovSDWA =
2947         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2948         .addImm(0)                             // $src0_modifiers
2949         .addReg(SrcVec)                        // $src0
2950         .addImm(0)                             // $clamp
2951         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2952         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2953         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2954         .addReg(SrcVec, RegState::Implicit);
2955       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2956     } else {
2957       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2958         .addReg(SrcVec)
2959         .addReg(SrcVec);
2960     }
2961   } else if (Mask[0] == 1 && Mask[1] == 0) {
2962     if (IsVALU) {
2963       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2964         .addReg(SrcVec)
2965         .addReg(SrcVec)
2966         .addImm(16);
2967     } else {
2968       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2969       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2970         .addReg(SrcVec)
2971         .addImm(16);
2972       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2973         .addReg(TmpReg)
2974         .addReg(SrcVec);
2975     }
2976   } else
2977     llvm_unreachable("all shuffle masks should be handled");
2978 
2979   MI.eraseFromParent();
2980   return true;
2981 }
2982 
2983 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2984   MachineInstr &MI) const {
2985   if (STI.hasGFX90AInsts())
2986     return selectImpl(MI, *CoverageInfo);
2987 
2988   MachineBasicBlock *MBB = MI.getParent();
2989   const DebugLoc &DL = MI.getDebugLoc();
2990 
2991   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2992     Function &F = MBB->getParent()->getFunction();
2993     DiagnosticInfoUnsupported
2994       NoFpRet(F, "return versions of fp atomics not supported",
2995               MI.getDebugLoc(), DS_Error);
2996     F.getContext().diagnose(NoFpRet);
2997     return false;
2998   }
2999 
  // FIXME: This is only needed because TableGen requires the number of dst
  // operands in the match and replace patterns to be the same. Otherwise the
  // patterns could be exported from the SDag path.
3003   MachineOperand &VDataIn = MI.getOperand(1);
3004   MachineOperand &VIndex = MI.getOperand(3);
3005   MachineOperand &VOffset = MI.getOperand(4);
3006   MachineOperand &SOffset = MI.getOperand(5);
3007   int16_t Offset = MI.getOperand(6).getImm();
3008 
3009   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
3010   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
3011 
3012   unsigned Opcode;
3013   if (HasVOffset) {
3014     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
3015                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
3016   } else {
3017     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
3018                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
3019   }
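  // The MUBUF suffix encodes which VGPR address operands are present:
  // OFFEN = voffset, IDXEN = vindex, BOTHEN = both (packed into a 64-bit
  // vaddr below), OFFSET = neither.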
3020 
3021   if (MRI->getType(VDataIn.getReg()).isVector()) {
3022     switch (Opcode) {
3023     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
3024       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
3025       break;
3026     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
3027       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3028       break;
3029     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3030       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3031       break;
3032     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3033       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3034       break;
3035     }
3036   }
3037 
3038   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3039   I.add(VDataIn);
3040 
3041   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3042       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3043     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3044     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3045       .addReg(VIndex.getReg())
3046       .addImm(AMDGPU::sub0)
3047       .addReg(VOffset.getReg())
3048       .addImm(AMDGPU::sub1);
3049 
3050     I.addReg(IdxReg);
3051   } else if (HasVIndex) {
3052     I.add(VIndex);
3053   } else if (HasVOffset) {
3054     I.add(VOffset);
3055   }
3056 
3057   I.add(MI.getOperand(2)); // rsrc
3058   I.add(SOffset);
3059   I.addImm(Offset);
3060   I.addImm(MI.getOperand(7).getImm()); // cpol
3061   I.cloneMemRefs(MI);
3062 
3063   MI.eraseFromParent();
3064 
3065   return true;
3066 }
3067 
3068 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3069   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3070 
3071   if (STI.hasGFX90AInsts()) {
    // gfx90a adds return versions of the global atomic fadd instructions, so
    // no special handling is required.
3074     return selectImpl(MI, *CoverageInfo);
3075   }
3076 
3077   MachineBasicBlock *MBB = MI.getParent();
3078   const DebugLoc &DL = MI.getDebugLoc();
3079 
3080   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3081     Function &F = MBB->getParent()->getFunction();
3082     DiagnosticInfoUnsupported
3083       NoFpRet(F, "return versions of fp atomics not supported",
3084               MI.getDebugLoc(), DS_Error);
3085     F.getContext().diagnose(NoFpRet);
3086     return false;
3087   }
3088 
  // FIXME: This is only needed because TableGen requires the number of dst
  // operands in the match and replace patterns to be the same. Otherwise the
  // patterns could be exported from the SDag path.
3092   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3093 
3094   Register Data = DataOp.getReg();
3095   const unsigned Opc = MRI->getType(Data).isVector() ?
3096     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3097   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3098     .addReg(Addr.first)
3099     .addReg(Data)
3100     .addImm(Addr.second)
3101     .addImm(0) // cpol
3102     .cloneMemRefs(MI);
3103 
3104   MI.eraseFromParent();
3105   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3106 }
3107 
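// For the BVH intersect_ray pseudo, operand 1 holds the concrete target
// opcode as an immediate (chosen earlier in the pipeline), so selection only
// rewrites the instruction in place and drops that operand.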
bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3109   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3110   MI.RemoveOperand(1);
3111   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3112   return true;
3113 }
3114 
3115 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3116   Register DstReg = MI.getOperand(0).getReg();
3117   Register SrcReg = MI.getOperand(1).getReg();
3118   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3119   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3120   MachineBasicBlock *MBB = MI.getParent();
3121   const DebugLoc &DL = MI.getDebugLoc();
3122 
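  // Convert the source to a wave-level address by dividing out the wavefront
  // size (a right shift by log2(wavesize)), using a VALU or SALU shift
  // depending on the destination bank.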
3123   if (IsVALU) {
3124     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3125       .addImm(Subtarget->getWavefrontSizeLog2())
3126       .addReg(SrcReg);
3127   } else {
3128     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3129       .addReg(SrcReg)
3130       .addImm(Subtarget->getWavefrontSizeLog2());
3131   }
3132 
3133   const TargetRegisterClass &RC =
3134       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3135   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3136     return false;
3137 
3138   MI.eraseFromParent();
3139   return true;
3140 }
3141 
3142 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3143   if (I.isPHI())
3144     return selectPHI(I);
3145 
3146   if (!I.isPreISelOpcode()) {
3147     if (I.isCopy())
3148       return selectCOPY(I);
3149     return true;
3150   }
3151 
3152   switch (I.getOpcode()) {
3153   case TargetOpcode::G_AND:
3154   case TargetOpcode::G_OR:
3155   case TargetOpcode::G_XOR:
3156     if (selectImpl(I, *CoverageInfo))
3157       return true;
3158     return selectG_AND_OR_XOR(I);
3159   case TargetOpcode::G_ADD:
3160   case TargetOpcode::G_SUB:
3161     if (selectImpl(I, *CoverageInfo))
3162       return true;
3163     return selectG_ADD_SUB(I);
3164   case TargetOpcode::G_UADDO:
3165   case TargetOpcode::G_USUBO:
3166   case TargetOpcode::G_UADDE:
3167   case TargetOpcode::G_USUBE:
3168     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3169   case TargetOpcode::G_INTTOPTR:
3170   case TargetOpcode::G_BITCAST:
3171   case TargetOpcode::G_PTRTOINT:
3172     return selectCOPY(I);
3173   case TargetOpcode::G_CONSTANT:
3174   case TargetOpcode::G_FCONSTANT:
3175     return selectG_CONSTANT(I);
3176   case TargetOpcode::G_FNEG:
3177     if (selectImpl(I, *CoverageInfo))
3178       return true;
3179     return selectG_FNEG(I);
3180   case TargetOpcode::G_FABS:
3181     if (selectImpl(I, *CoverageInfo))
3182       return true;
3183     return selectG_FABS(I);
3184   case TargetOpcode::G_EXTRACT:
3185     return selectG_EXTRACT(I);
3186   case TargetOpcode::G_MERGE_VALUES:
3187   case TargetOpcode::G_BUILD_VECTOR:
3188   case TargetOpcode::G_CONCAT_VECTORS:
3189     return selectG_MERGE_VALUES(I);
3190   case TargetOpcode::G_UNMERGE_VALUES:
3191     return selectG_UNMERGE_VALUES(I);
3192   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3193     return selectG_BUILD_VECTOR_TRUNC(I);
3194   case TargetOpcode::G_PTR_ADD:
3195     return selectG_PTR_ADD(I);
3196   case TargetOpcode::G_IMPLICIT_DEF:
3197     return selectG_IMPLICIT_DEF(I);
3198   case TargetOpcode::G_FREEZE:
3199     return selectCOPY(I);
3200   case TargetOpcode::G_INSERT:
3201     return selectG_INSERT(I);
3202   case TargetOpcode::G_INTRINSIC:
3203     return selectG_INTRINSIC(I);
3204   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3205     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3206   case TargetOpcode::G_ICMP:
3207     if (selectG_ICMP(I))
3208       return true;
3209     return selectImpl(I, *CoverageInfo);
3210   case TargetOpcode::G_LOAD:
3211   case TargetOpcode::G_STORE:
3212   case TargetOpcode::G_ATOMIC_CMPXCHG:
3213   case TargetOpcode::G_ATOMICRMW_XCHG:
3214   case TargetOpcode::G_ATOMICRMW_ADD:
3215   case TargetOpcode::G_ATOMICRMW_SUB:
3216   case TargetOpcode::G_ATOMICRMW_AND:
3217   case TargetOpcode::G_ATOMICRMW_OR:
3218   case TargetOpcode::G_ATOMICRMW_XOR:
3219   case TargetOpcode::G_ATOMICRMW_MIN:
3220   case TargetOpcode::G_ATOMICRMW_MAX:
3221   case TargetOpcode::G_ATOMICRMW_UMIN:
3222   case TargetOpcode::G_ATOMICRMW_UMAX:
3223   case TargetOpcode::G_ATOMICRMW_FADD:
3224   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3225   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3226   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3227   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3228     return selectG_LOAD_STORE_ATOMICRMW(I);
3229   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3230     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3231   case TargetOpcode::G_SELECT:
3232     return selectG_SELECT(I);
3233   case TargetOpcode::G_TRUNC:
3234     return selectG_TRUNC(I);
3235   case TargetOpcode::G_SEXT:
3236   case TargetOpcode::G_ZEXT:
3237   case TargetOpcode::G_ANYEXT:
3238   case TargetOpcode::G_SEXT_INREG:
3239     if (selectImpl(I, *CoverageInfo))
3240       return true;
3241     return selectG_SZA_EXT(I);
3242   case TargetOpcode::G_BRCOND:
3243     return selectG_BRCOND(I);
3244   case TargetOpcode::G_GLOBAL_VALUE:
3245     return selectG_GLOBAL_VALUE(I);
3246   case TargetOpcode::G_PTRMASK:
3247     return selectG_PTRMASK(I);
3248   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3249     return selectG_EXTRACT_VECTOR_ELT(I);
3250   case TargetOpcode::G_INSERT_VECTOR_ELT:
3251     return selectG_INSERT_VECTOR_ELT(I);
3252   case TargetOpcode::G_SHUFFLE_VECTOR:
3253     return selectG_SHUFFLE_VECTOR(I);
3254   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3255   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3256   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3257   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3258     const AMDGPU::ImageDimIntrinsicInfo *Intr
3259       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3260     assert(Intr && "not an image intrinsic with image pseudo");
3261     return selectImageIntrinsic(I, Intr);
3262   }
3263   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3264     return selectBVHIntrinsic(I);
3265   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3266     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3267   case AMDGPU::G_SBFX:
3268   case AMDGPU::G_UBFX:
3269     return selectG_SBFX_UBFX(I);
3270   case AMDGPU::G_SI_CALL:
3271     I.setDesc(TII.get(AMDGPU::SI_CALL));
3272     return true;
3273   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3274     return selectWaveAddress(I);
3275   default:
3276     return selectImpl(I, *CoverageInfo);
3277   }
3278   return false;
3279 }
3280 
3281 InstructionSelector::ComplexRendererFns
3282 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3283   return {{
3284       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}
3288 
3289 std::pair<Register, unsigned>
3290 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3291                                               bool AllowAbs) const {
3292   Register Src = Root.getReg();
3293   Register OrigSrc = Src;
3294   unsigned Mods = 0;
3295   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3296 
3297   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3298     Src = MI->getOperand(1).getReg();
3299     Mods |= SISrcMods::NEG;
3300     MI = getDefIgnoringCopies(Src, *MRI);
3301   }
3302 
3303   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3304     Src = MI->getOperand(1).getReg();
3305     Mods |= SISrcMods::ABS;
3306   }
3307 
3308   if (Mods != 0 &&
3309       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3310     MachineInstr *UseMI = Root.getParent();
3311 
3312     // If we looked through copies to find source modifiers on an SGPR operand,
3313     // we now have an SGPR register source. To avoid potentially violating the
3314     // constant bus restriction, we need to insert a copy to a VGPR.
3315     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3316     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3317             TII.get(AMDGPU::COPY), VGPRSrc)
3318       .addReg(Src);
3319     Src = VGPRSrc;
3320   }
3321 
3322   return std::make_pair(Src, Mods);
3323 }
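
// For example, given:
//   %a:vgpr = G_FABS %x
//   %r:vgpr = G_FNEG %a
// selectVOP3ModsImpl(%r) returns {%x, SISrcMods::NEG | SISrcMods::ABS},
// folding both source modifiers into the VOP3 operand.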
3324 
3325 ///
3326 /// This will select either an SGPR or VGPR operand and will save us from
3327 /// having to write an extra tablegen pattern.
3328 InstructionSelector::ComplexRendererFns
3329 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3330   return {{
3331       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3332   }};
3333 }
3334 
3335 InstructionSelector::ComplexRendererFns
3336 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3337   Register Src;
3338   unsigned Mods;
3339   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3340 
3341   return {{
3342       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3343       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3344       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3345       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3346   }};
3347 }
3348 
3349 InstructionSelector::ComplexRendererFns
3350 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3351   Register Src;
3352   unsigned Mods;
3353   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3354 
3355   return {{
3356       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3357       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3358       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3359       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3360   }};
3361 }
3362 
3363 InstructionSelector::ComplexRendererFns
3364 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3365   return {{
3366       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3367       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3368       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3369   }};
3370 }
3371 
3372 InstructionSelector::ComplexRendererFns
3373 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3374   Register Src;
3375   unsigned Mods;
3376   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3377 
3378   return {{
3379       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3380       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3381   }};
3382 }
3383 
3384 InstructionSelector::ComplexRendererFns
3385 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3386   Register Src;
3387   unsigned Mods;
3388   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3389 
3390   return {{
3391       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3392       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3393   }};
3394 }
3395 
3396 InstructionSelector::ComplexRendererFns
3397 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3398   Register Reg = Root.getReg();
3399   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3400   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3401               Def->getOpcode() == AMDGPU::G_FABS))
3402     return {};
3403   return {{
3404       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3405   }};
3406 }
3407 
3408 std::pair<Register, unsigned>
3409 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3410   Register Src, const MachineRegisterInfo &MRI) const {
3411   unsigned Mods = 0;
3412   MachineInstr *MI = MRI.getVRegDef(Src);
3413 
3414   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3415       // It's possible to see an f32 fneg here, but unlikely.
3416       // TODO: Treat f32 fneg as only high bit.
3417       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3418     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3419     Src = MI->getOperand(1).getReg();
3420     MI = MRI.getVRegDef(Src);
3421   }
3422 
3423   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3424 
3425   // Packed instructions do not have abs modifiers.
3426   Mods |= SISrcMods::OP_SEL_1;
3427 
3428   return std::make_pair(Src, Mods);
3429 }
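
// For example, for %r = G_FNEG %x with %x of type v2s16, this returns
// {%x, SISrcMods::NEG | SISrcMods::NEG_HI | SISrcMods::OP_SEL_1}: the packed
// negate flips the sign of both 16-bit halves.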
3430 
3431 InstructionSelector::ComplexRendererFns
3432 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3433   MachineRegisterInfo &MRI
3434     = Root.getParent()->getParent()->getParent()->getRegInfo();
3435 
3436   Register Src;
3437   unsigned Mods;
3438   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3439 
3440   return {{
3441       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3442       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3443   }};
3444 }
3445 
3446 InstructionSelector::ComplexRendererFns
3447 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3448   Register Src;
3449   unsigned Mods;
3450   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3451   if (!isKnownNeverNaN(Src, *MRI))
3452     return None;
3453 
3454   return {{
3455       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3456       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3457   }};
3458 }
3459 
3460 InstructionSelector::ComplexRendererFns
3461 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3462   // FIXME: Handle op_sel
3463   return {{
3464       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3465       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3466   }};
3467 }
3468 
3469 InstructionSelector::ComplexRendererFns
3470 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3471   SmallVector<GEPInfo, 4> AddrInfo;
3472   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3473 
3474   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3475     return None;
3476 
3477   const GEPInfo &GEPInfo = AddrInfo[0];
3478   Optional<int64_t> EncodedImm =
3479       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3480   if (!EncodedImm)
3481     return None;
3482 
  Register PtrReg = GEPInfo.SgprParts[0];
3484   return {{
3485     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3486     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3487   }};
3488 }
3489 
3490 InstructionSelector::ComplexRendererFns
3491 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3492   SmallVector<GEPInfo, 4> AddrInfo;
3493   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3494 
3495   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3496     return None;
3497 
3498   const GEPInfo &GEPInfo = AddrInfo[0];
3499   Register PtrReg = GEPInfo.SgprParts[0];
3500   Optional<int64_t> EncodedImm =
3501       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3502   if (!EncodedImm)
3503     return None;
3504 
3505   return {{
3506     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3507     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3508   }};
3509 }
3510 
3511 InstructionSelector::ComplexRendererFns
3512 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3513   MachineInstr *MI = Root.getParent();
3514   MachineBasicBlock *MBB = MI->getParent();
3515 
3516   SmallVector<GEPInfo, 4> AddrInfo;
3517   getAddrModeInfo(*MI, *MRI, AddrInfo);
3518 
  // FIXME: We should shrink the GEP if the offset is known to fit in 32 bits;
  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
3521   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3522     return None;
3523 
3524   const GEPInfo &GEPInfo = AddrInfo[0];
3525   // SGPR offset is unsigned.
3526   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3527     return None;
3528 
  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
3533   Register PtrReg = GEPInfo.SgprParts[0];
3534   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3535   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3536           .addImm(GEPInfo.Imm);
3537   return {{
3538     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3539     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3540   }};
3541 }
3542 
3543 std::pair<Register, int>
3544 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3545                                                 uint64_t FlatVariant) const {
3546   MachineInstr *MI = Root.getParent();
3547 
3548   auto Default = std::make_pair(Root.getReg(), 0);
3549 
3550   if (!STI.hasFlatInstOffsets())
3551     return Default;
3552 
3553   Register PtrBase;
3554   int64_t ConstOffset;
3555   std::tie(PtrBase, ConstOffset) =
3556       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3557   if (ConstOffset == 0)
3558     return Default;
3559 
3560   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3561   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3562     return Default;
3563 
3564   return std::make_pair(PtrBase, ConstOffset);
3565 }
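
// For example, for %p = G_PTR_ADD %base, (G_CONSTANT 40), this returns
// {%base, 40} when 40 is a legal immediate offset for the given FLAT variant
// and address space, and {%p, 0} otherwise.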
3566 
3567 InstructionSelector::ComplexRendererFns
3568 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3569   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3570 
3571   return {{
3572       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3573       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3574     }};
3575 }
3576 
3577 InstructionSelector::ComplexRendererFns
3578 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3579   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3580 
3581   return {{
3582       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3583       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3584   }};
3585 }
3586 
3587 InstructionSelector::ComplexRendererFns
3588 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3589   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3590 
3591   return {{
3592       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3593       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3594     }};
3595 }
3596 
/// Match a zero extend from a 32-bit value to 64 bits.
3598 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3599   Register ZExtSrc;
3600   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3601     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3602 
3603   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3604   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3605   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
    return Register();
3607 
  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt()))
    return Def->getOperand(1).getReg();
3611 
3612   return Register();
3613 }
3614 
3615 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3616 InstructionSelector::ComplexRendererFns
3617 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3618   Register Addr = Root.getReg();
3619   Register PtrBase;
3620   int64_t ConstOffset;
3621   int64_t ImmOffset = 0;
3622 
3623   // Match the immediate offset first, which canonically is moved as low as
3624   // possible.
3625   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3626 
3627   if (ConstOffset != 0) {
3628     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3629                               SIInstrFlags::FlatGlobal)) {
3630       Addr = PtrBase;
3631       ImmOffset = ConstOffset;
3632     } else {
3633       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3634       if (!PtrBaseDef)
3635         return None;
3636 
3637       if (isSGPR(PtrBaseDef->Reg)) {
3638         if (ConstOffset > 0) {
3639           // Offset is too large.
3640           //
3641           // saddr + large_offset -> saddr +
3642           //                         (voffset = large_offset & ~MaxOffset) +
3643           //                         (large_offset & MaxOffset);
3644           int64_t SplitImmOffset, RemainderOffset;
3645           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3646               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3647 
3648           if (isUInt<32>(RemainderOffset)) {
3649             MachineInstr *MI = Root.getParent();
3650             MachineBasicBlock *MBB = MI->getParent();
3651             Register HighBits =
3652                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3653 
3654             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3655                     HighBits)
3656                 .addImm(RemainderOffset);
3657 
3658             return {{
3659                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3660                 [=](MachineInstrBuilder &MIB) {
3661                   MIB.addReg(HighBits);
3662                 }, // voffset
3663                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3664             }};
3665           }
3666         }
3667 
        // We are adding a 64-bit SGPR and a constant. If the constant bus
        // limit is 1, we would need 1 or 2 extra moves for each half of the
        // constant, and it is better to do a scalar add and then issue a
        // single VALU instruction to materialize zero. Otherwise it takes
        // fewer instructions to perform VALU adds with immediates or inline
        // literals.
3673         unsigned NumLiterals =
3674             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3675             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3676         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3677           return None;
3678       }
3679     }
3680   }
3681 
3682   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3683   if (!AddrDef)
3684     return None;
3685 
3686   // Match the variable offset.
3687   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3688     // Look through the SGPR->VGPR copy.
3689     Register SAddr =
3690         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3691 
3692     if (SAddr && isSGPR(SAddr)) {
3693       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3694 
3695       // It's possible voffset is an SGPR here, but the copy to VGPR will be
3696       // inserted later.
3697       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3698         return {{[=](MachineInstrBuilder &MIB) { // saddr
3699                    MIB.addReg(SAddr);
3700                  },
3701                  [=](MachineInstrBuilder &MIB) { // voffset
3702                    MIB.addReg(VOffset);
3703                  },
3704                  [=](MachineInstrBuilder &MIB) { // offset
3705                    MIB.addImm(ImmOffset);
3706                  }}};
3707       }
3708     }
3709   }
3710 
3711   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3712   // drop this.
3713   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3714       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3715     return None;
3716 
3717   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3718   // moves required to copy a 64-bit SGPR to VGPR.
3719   MachineInstr *MI = Root.getParent();
3720   MachineBasicBlock *MBB = MI->getParent();
3721   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3722 
3723   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3724       .addImm(0);
3725 
3726   return {{
3727       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
3728       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
3729       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
3730   }};
3731 }
3732 
3733 InstructionSelector::ComplexRendererFns
3734 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3735   Register Addr = Root.getReg();
3736   Register PtrBase;
3737   int64_t ConstOffset;
3738   int64_t ImmOffset = 0;
3739 
3740   // Match the immediate offset first, which canonically is moved as low as
3741   // possible.
3742   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3743 
3744   if (ConstOffset != 0 &&
3745       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
3746                             SIInstrFlags::FlatScratch)) {
3747     Addr = PtrBase;
3748     ImmOffset = ConstOffset;
3749   }
3750 
3751   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3752   if (!AddrDef)
3753     return None;
3754 
3755   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3756     int FI = AddrDef->MI->getOperand(1).getIndex();
3757     return {{
3758         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3759         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3760     }};
3761   }
3762 
3763   Register SAddr = AddrDef->Reg;
3764 
3765   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3766     Register LHS = AddrDef->MI->getOperand(1).getReg();
3767     Register RHS = AddrDef->MI->getOperand(2).getReg();
3768     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3769     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3770 
3771     if (LHSDef && RHSDef &&
3772         LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3773         isSGPR(RHSDef->Reg)) {
3774       int FI = LHSDef->MI->getOperand(1).getIndex();
3775       MachineInstr &I = *Root.getParent();
3776       MachineBasicBlock *BB = I.getParent();
3777       const DebugLoc &DL = I.getDebugLoc();
3778       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3779 
3780       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
3781           .addFrameIndex(FI)
3782           .addReg(RHSDef->Reg);
3783     }
3784   }
3785 
3786   if (!isSGPR(SAddr))
3787     return None;
3788 
3789   return {{
3790       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3791       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3792   }};
3793 }
3794 
3795 InstructionSelector::ComplexRendererFns
3796 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3797   MachineInstr *MI = Root.getParent();
3798   MachineBasicBlock *MBB = MI->getParent();
3799   MachineFunction *MF = MBB->getParent();
3800   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3801 
3802   int64_t Offset = 0;
3803   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3804       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3805     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3806 
3807     // TODO: Should this be inside the render function? The iterator seems to
3808     // move.
3809     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3810             HighBits)
3811       .addImm(Offset & ~4095);
3812 
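
    // The low 12 bits of the constant stay in the immediate offset field;
    // the remaining high bits were materialized into HighBits above and are
    // used as the VGPR address.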
3813     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3814                MIB.addReg(Info->getScratchRSrcReg());
3815              },
3816              [=](MachineInstrBuilder &MIB) { // vaddr
3817                MIB.addReg(HighBits);
3818              },
3819              [=](MachineInstrBuilder &MIB) { // soffset
3820                // Use constant zero for soffset and rely on eliminateFrameIndex
3821                // to choose the appropriate frame register if need be.
3822                MIB.addImm(0);
3823              },
3824              [=](MachineInstrBuilder &MIB) { // offset
3825                MIB.addImm(Offset & 4095);
3826              }}};
3827   }
3828 
3829   assert(Offset == 0 || Offset == -1);
3830 
3831   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3832   // offsets.
3833   Optional<int> FI;
3834   Register VAddr = Root.getReg();
3835   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3836     Register PtrBase;
3837     int64_t ConstOffset;
3838     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
3839     if (ConstOffset != 0) {
3840       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
3841           (!STI.privateMemoryResourceIsRangeChecked() ||
3842            KnownBits->signBitIsZero(PtrBase))) {
3843         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
3844         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3845           FI = PtrBaseDef->getOperand(1).getIndex();
3846         else
3847           VAddr = PtrBase;
3848         Offset = ConstOffset;
3849       }
3850     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3851       FI = RootDef->getOperand(1).getIndex();
3852     }
3853   }
3854 
3855   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3856              MIB.addReg(Info->getScratchRSrcReg());
3857            },
3858            [=](MachineInstrBuilder &MIB) { // vaddr
3859              if (FI.hasValue())
3860                MIB.addFrameIndex(FI.getValue());
3861              else
3862                MIB.addReg(VAddr);
3863            },
3864            [=](MachineInstrBuilder &MIB) { // soffset
3865              // Use constant zero for soffset and rely on eliminateFrameIndex
3866              // to choose the appropriate frame register if need be.
3867              MIB.addImm(0);
3868            },
3869            [=](MachineInstrBuilder &MIB) { // offset
3870              MIB.addImm(Offset);
3871            }}};
3872 }
3873 
3874 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3875                                                 int64_t Offset) const {
3876   if (!isUInt<16>(Offset))
3877     return false;
3878 
3879   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3880     return true;
3881 
  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
3884   return KnownBits->signBitIsZero(Base);
3885 }
3886 
3887 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3888                                                  int64_t Offset1,
3889                                                  unsigned Size) const {
3890   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3891     return false;
3892   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3893     return false;
3894 
3895   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3896     return true;
3897 
  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
3900   return KnownBits->signBitIsZero(Base);
3901 }
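
// For example, with Size = 4 (e.g. ds_read2_b32), byte offsets 0 and 1020
// are legal: both are 4-byte aligned, and 0/4 = 0 and 1020/4 = 255 fit in
// the 8-bit offset fields.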
3902 
3903 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
3904                                                     unsigned ShAmtBits) const {
3905   assert(MI.getOpcode() == TargetOpcode::G_AND);
3906 
3907   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
3908   if (!RHS)
3909     return false;
3910 
3911   if (RHS->countTrailingOnes() >= ShAmtBits)
3912     return true;
3913 
3914   const APInt &LHSKnownZeros =
3915       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
3916   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
3917 }
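
// For example, for a 32-bit shift amount (ShAmtBits = 5), an AND with 31
// leaves the low five bits intact, so the mask cannot change the shift
// amount and the G_AND can be looked through.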
3918 
// Return the wave-level SGPR base address if this is a wave address.
3920 static Register getWaveAddress(const MachineInstr *Def) {
3921   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
3922              ? Def->getOperand(1).getReg()
3923              : Register();
3924 }
3925 
3926 InstructionSelector::ComplexRendererFns
3927 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3928     MachineOperand &Root) const {
3929   Register Reg = Root.getReg();
3930   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3931 
3932   const MachineInstr *Def = MRI->getVRegDef(Reg);
3933   if (Register WaveBase = getWaveAddress(Def)) {
3934     return {{
3935         [=](MachineInstrBuilder &MIB) { // rsrc
3936           MIB.addReg(Info->getScratchRSrcReg());
3937         },
3938         [=](MachineInstrBuilder &MIB) { // soffset
3939           MIB.addReg(WaveBase);
3940         },
3941         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
3942     }};
3943   }
3944 
3945   int64_t Offset = 0;
3946 
3947   // FIXME: Copy check is a hack
3948   Register BasePtr;
3949   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
3950     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3951       return {};
3952     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
3953     Register WaveBase = getWaveAddress(BasePtrDef);
3954     if (!WaveBase)
3955       return {};
3956 
3957     return {{
3958         [=](MachineInstrBuilder &MIB) { // rsrc
3959           MIB.addReg(Info->getScratchRSrcReg());
3960         },
3961         [=](MachineInstrBuilder &MIB) { // soffset
3962           MIB.addReg(WaveBase);
3963         },
3964         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3965     }};
3966   }
3967 
3968   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3969       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3970     return {};
3971 
3972   return {{
3973       [=](MachineInstrBuilder &MIB) { // rsrc
3974         MIB.addReg(Info->getScratchRSrcReg());
3975       },
3976       [=](MachineInstrBuilder &MIB) { // soffset
3977         MIB.addImm(0);
3978       },
3979       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3980   }};
3981 }
3982 
3983 std::pair<Register, unsigned>
3984 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3985   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3986   if (!RootDef)
3987     return std::make_pair(Root.getReg(), 0);
3988 
3989   int64_t ConstAddr = 0;
3990 
3991   Register PtrBase;
3992   int64_t Offset;
3993   std::tie(PtrBase, Offset) =
3994     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3995 
3996   if (Offset) {
3997     if (isDSOffsetLegal(PtrBase, Offset)) {
3998       // (add n0, c0)
3999       return std::make_pair(PtrBase, Offset);
4000     }
4001   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO
  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO
4008   }
4009 
4010   return std::make_pair(Root.getReg(), 0);
4011 }
4012 
4013 InstructionSelector::ComplexRendererFns
4014 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4015   Register Reg;
4016   unsigned Offset;
4017   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4018   return {{
4019       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4020       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4021     }};
4022 }
4023 
4024 InstructionSelector::ComplexRendererFns
4025 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4026   return selectDSReadWrite2(Root, 4);
4027 }
4028 
4029 InstructionSelector::ComplexRendererFns
4030 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4031   return selectDSReadWrite2(Root, 8);
4032 }
4033 
4034 InstructionSelector::ComplexRendererFns
4035 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4036                                               unsigned Size) const {
4037   Register Reg;
4038   unsigned Offset;
4039   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4040   return {{
4041       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4042       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4043       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4044     }};
4045 }
4046 
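// For example, a Size = 4 access at byte offset 32 yields {base, 8} from the
// implementation below, rendered above as offset0 = 8 and offset1 = 9, both
// in Size-byte units.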
4047 std::pair<Register, unsigned>
4048 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4049                                                   unsigned Size) const {
4050   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4051   if (!RootDef)
4052     return std::make_pair(Root.getReg(), 0);
4053 
4054   int64_t ConstAddr = 0;
4055 
4056   Register PtrBase;
4057   int64_t Offset;
4058   std::tie(PtrBase, Offset) =
4059     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4060 
4061   if (Offset) {
4062     int64_t OffsetValue0 = Offset;
4063     int64_t OffsetValue1 = Offset + Size;
4064     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4065       // (add n0, c0)
4066       return std::make_pair(PtrBase, OffsetValue0 / Size);
4067     }
4068   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO
  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO
4074   }
4075 
4076   return std::make_pair(Root.getReg(), 0);
4077 }
4078 
4079 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4080 /// the base value with the constant offset. There may be intervening copies
4081 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4082 /// not match the pattern.
4083 std::pair<Register, int64_t>
4084 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4085   Register Root, const MachineRegisterInfo &MRI) const {
4086   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4087   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4088     return {Root, 0};
4089 
4090   MachineOperand &RHS = RootI->getOperand(2);
4091   Optional<ValueAndVReg> MaybeOffset =
4092       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4093   if (!MaybeOffset)
4094     return {Root, 0};
4095   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4096 }
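
// For example, for %p = G_PTR_ADD %base, %c with %c = G_CONSTANT i64 16,
// this returns {%base, 16}.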
4097 
4098 static void addZeroImm(MachineInstrBuilder &MIB) {
4099   MIB.addImm(0);
4100 }
4101 
4102 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4103 /// BasePtr is not valid, a null base pointer will be used.
4104 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4105                           uint32_t FormatLo, uint32_t FormatHi,
4106                           Register BasePtr) {
4107   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4108   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4109   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4110   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4111 
4112   B.buildInstr(AMDGPU::S_MOV_B32)
4113     .addDef(RSrc2)
4114     .addImm(FormatLo);
4115   B.buildInstr(AMDGPU::S_MOV_B32)
4116     .addDef(RSrc3)
4117     .addImm(FormatHi);
4118 
  // Build the 64-bit subregister holding the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this allows CSE of the two-component register.
4122   B.buildInstr(AMDGPU::REG_SEQUENCE)
4123     .addDef(RSrcHi)
4124     .addReg(RSrc2)
4125     .addImm(AMDGPU::sub0)
4126     .addReg(RSrc3)
4127     .addImm(AMDGPU::sub1);
4128 
4129   Register RSrcLo = BasePtr;
4130   if (!BasePtr) {
4131     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4132     B.buildInstr(AMDGPU::S_MOV_B64)
4133       .addDef(RSrcLo)
4134       .addImm(0);
4135   }
4136 
4137   B.buildInstr(AMDGPU::REG_SEQUENCE)
4138     .addDef(RSrc)
4139     .addReg(RSrcLo)
4140     .addImm(AMDGPU::sub0_sub1)
4141     .addReg(RSrcHi)
4142     .addImm(AMDGPU::sub2_sub3);
4143 
4144   return RSrc;
4145 }
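
// In the descriptor built above, sub0_sub1 holds the 64-bit base pointer (or
// zero), sub2 holds FormatLo (roughly the num_records word: 0 for addr64
// mode, ~0u for offset mode), and sub3 holds FormatHi (the default data
// format bits).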
4146 
4147 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4148                                 const SIInstrInfo &TII, Register BasePtr) {
4149   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4150 
4151   // FIXME: Why are half the "default" bits ignored based on the addressing
4152   // mode?
4153   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4154 }
4155 
4156 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4157                                const SIInstrInfo &TII, Register BasePtr) {
4158   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4159 
4160   // FIXME: Why are half the "default" bits ignored based on the addressing
4161   // mode?
4162   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4163 }
4164 
4165 AMDGPUInstructionSelector::MUBUFAddressData
4166 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4167   MUBUFAddressData Data;
4168   Data.N0 = Src;
4169 
4170   Register PtrBase;
4171   int64_t Offset;
4172 
4173   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4174   if (isUInt<32>(Offset)) {
4175     Data.N0 = PtrBase;
4176     Data.Offset = Offset;
4177   }
4178 
4179   if (MachineInstr *InputAdd
4180       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4181     Data.N2 = InputAdd->getOperand(1).getReg();
4182     Data.N3 = InputAdd->getOperand(2).getReg();
4183 
    // FIXME: Need to fix the extra SGPR->VGPR copies that get inserted.
    // FIXME: We don't know that the value was defined by operand 0.
4186     //
4187     // TODO: Remove this when we have copy folding optimizations after
4188     // RegBankSelect.
4189     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4190     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4191   }
4192 
4193   return Data;
4194 }
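
// The N0/N2/N3 naming follows the SelectionDAG MUBUF matcher: N0 is the
// (possibly rewritten) base address, optionally decomposed as
// (ptr_add N2, N3), with any 32-bit constant offset split out into Offset.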
4195 
/// Return true if the addr64 MUBUF mode should be used for the given address.
4197 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4198   // (ptr_add N2, N3) -> addr64, or
4199   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4200   if (Addr.N2)
4201     return true;
4202 
4203   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4204   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4205 }
4206 
4207 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4208 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4209 /// component.
4210 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4211   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4212   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4213     return;
4214 
4215   // Illegal offset, store it in soffset.
4216   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4217   B.buildInstr(AMDGPU::S_MOV_B32)
4218     .addDef(SOffset)
4219     .addImm(ImmOffset);
4220   ImmOffset = 0;
4221 }
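
// For example, an offset of 0x12345 does not fit in the 12-bit MUBUF
// immediate field, so the whole value is moved into soffset with an
// S_MOV_B32 and ImmOffset becomes 0.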
4222 
4223 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4224   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4225   Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // The addr64 bit was removed for Volcanic Islands.
4228   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4229     return false;
4230 
4231   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4232   if (!shouldUseAddr64(AddrData))
4233     return false;
4234 
4235   Register N0 = AddrData.N0;
4236   Register N2 = AddrData.N2;
4237   Register N3 = AddrData.N3;
4238   Offset = AddrData.Offset;
4239 
4240   // Base pointer for the SRD.
4241   Register SRDPtr;
4242 
4243   if (N2) {
4244     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4245       assert(N3);
4246       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4247         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4248         // addr64, and construct the default resource from a 0 address.
4249         VAddr = N0;
4250       } else {
4251         SRDPtr = N3;
4252         VAddr = N2;
4253       }
4254     } else {
4255       // N2 is not divergent.
4256       SRDPtr = N2;
4257       VAddr = N3;
4258     }
4259   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4260     // Use the default null pointer in the resource
4261     VAddr = N0;
4262   } else {
4263     // N0 -> offset, or
4264     // (N0 + C1) -> offset
4265     SRDPtr = N0;
4266   }
4267 
4268   MachineIRBuilder B(*Root.getParent());
4269   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4270   splitIllegalMUBUFOffset(B, SOffset, Offset);
4271   return true;
4272 }
4273 
4274 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4275   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4276   int64_t &Offset) const {
4277 
4278   // FIXME: Pattern should not reach here.
4279   if (STI.useFlatForGlobal())
4280     return false;
4281 
4282   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4283   if (shouldUseAddr64(AddrData))
4284     return false;
4285 
4286   // N0 -> offset, or
4287   // (N0 + C1) -> offset
4288   Register SRDPtr = AddrData.N0;
4289   Offset = AddrData.Offset;
4290 
4291   // TODO: Look through extensions for 32-bit soffset.
4292   MachineIRBuilder B(*Root.getParent());
4293 
4294   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4295   splitIllegalMUBUFOffset(B, SOffset, Offset);
4296   return true;
4297 }
4298 
4299 InstructionSelector::ComplexRendererFns
4300 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4301   Register VAddr;
4302   Register RSrcReg;
4303   Register SOffset;
4304   int64_t Offset = 0;
4305 
4306   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4307     return {};
4308 
4309   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4310   // pattern.
4311   return {{
4312       [=](MachineInstrBuilder &MIB) {  // rsrc
4313         MIB.addReg(RSrcReg);
4314       },
4315       [=](MachineInstrBuilder &MIB) { // vaddr
4316         MIB.addReg(VAddr);
4317       },
4318       [=](MachineInstrBuilder &MIB) { // soffset
4319         if (SOffset)
4320           MIB.addReg(SOffset);
4321         else
4322           MIB.addImm(0);
4323       },
4324       [=](MachineInstrBuilder &MIB) { // offset
4325         MIB.addImm(Offset);
4326       },
4327       addZeroImm, //  cpol
4328       addZeroImm, //  tfe
4329       addZeroImm  //  swz
4330     }};
4331 }
4332 
4333 InstructionSelector::ComplexRendererFns
4334 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4335   Register RSrcReg;
4336   Register SOffset;
4337   int64_t Offset = 0;
4338 
4339   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4340     return {};
4341 
4342   return {{
4343       [=](MachineInstrBuilder &MIB) {  // rsrc
4344         MIB.addReg(RSrcReg);
4345       },
4346       [=](MachineInstrBuilder &MIB) { // soffset
4347         if (SOffset)
4348           MIB.addReg(SOffset);
4349         else
4350           MIB.addImm(0);
4351       },
4352       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4353       addZeroImm, //  cpol
4354       addZeroImm, //  tfe
4355       addZeroImm, //  swz
4356     }};
4357 }

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      [=](MachineInstrBuilder &MIB) { // cpol
        MIB.addImm(AMDGPU::CPol::GLC);
      }
    }};
}
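
// Note: this atomic variant (and selectMUBUFOffsetAtomic below) differs from
// the plain selector only in the cache-policy operand, which is hard-coded to
// AMDGPU::CPol::GLC. On AMDGPU buffer atomics, setting GLC is what requests
// that the pre-operation value be returned, i.e. the "atomic with return"
// form.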

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
    }};
}

/// Get an immediate that must be 32 bits, and treated as zero extended.
static Optional<uint64_t> getConstantZext32Val(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  // getIConstantVRegSExtVal sign-extends the constant, so check that the
  // result still fits in 32 bits before truncating to the low half.
  Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return None;
  return Lo_32(*OffsetVal);
}
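
// Worked example for the helper above: an i32 G_CONSTANT of -1 sign-extends
// to the int64_t -1, which passes the isInt<32> check, and Lo_32 then yields
// the zero-extended 32-bit value 0xFFFFFFFF. A 64-bit constant such as
// 0x100000000 fails the isInt<32> check and returns None.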

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }}};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }}};
}
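
// A note on the two selectors above: selectSMRDBufferImm uses the subtarget's
// regular SMRD immediate encoding via getSMRDEncodedOffset, while the Imm32
// variant uses the 32-bit literal offset encoding that, as the assertion
// documents, only SEA_ISLANDS supports. On the oldest generations the regular
// encoding is dword-scaled and narrow, which is presumably why the separate
// literal form exists for that generation.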

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT) {
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  } else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}
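
// Worked example for renderPopcntImm above: a G_CONSTANT with value 0xF0
// renders the immediate 4, since countPopulation counts the number of set
// bits in the constant.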

/// This only really exists to satisfy the DAG type-checking machinery, so it
/// is a no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}
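
// The shift-and-mask in renderExtractSWZ assumes the swizzle flag occupies
// bit 3 of the combined cache-policy immediate (i.e. AMDGPU::CPol::SWZ ==
// 1 << 3), peeling that single bit off into its own operand;
// renderExtractCPol above instead keeps all policy bits by masking with
// AMDGPU::CPol::ALL.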

void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex(MI.getOperand(1).getIndex());
}

bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}
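
// The isInlineImmediate* helpers above answer whether a value can be encoded
// directly in the instruction word rather than as a separate literal;
// typically this covers small integers and a handful of floating-point
// values (0.0, +/-0.5, +/-1.0, +/-2.0, +/-4.0, plus 1/(2*pi) when the
// subtarget reports hasInv2PiInlineImm).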