//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

static cl::opt<bool> AllowRiskySelect(
  "amdgpu-global-isel-risky-select",
  cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
  cl::init(false),
  cl::ReallyHidden);

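// Pull in the TableGen'erated selector implementation. AMDGPUSubtarget is
// temporarily #defined to GCNSubtarget so the generated predicates bind
// against the GCN subtarget.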
#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                        CodeGenCoverage &CoverageInfo,
                                        ProfileSummaryInfo *PSI,
                                        BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  // The verifier is oblivious to s1 being a valid value for wavesize registers.
  if (Reg.isPhysical())
    return false;

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    return RC->hasSuperClassEq(TRI.getBoolRC()) &&
           Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
  MI.setDesc(TII.get(NewOpc));
  MI.RemoveOperand(1); // Remove intrinsic ID.
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, *MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
    }

    if (!isVCC(SrcReg, *MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
        return false;

      const TargetRegisterClass *SrcRC
        = TRI.getConstrainedRegClassForOperand(Src, *MRI);

      Optional<ValueAndVReg> ConstVal =
          getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
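      // A constant boolean source can be materialized directly as an all-ones
      // or all-zeros wave mask.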
      if (ConstVal) {
        unsigned MovOpc =
            STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
        BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
            .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
      } else {
        Register MaskedReg = MRI->createVirtualRegister(SrcRC);

        // We can't trust the high bits at this point, so clear them.

        // TODO: Skip masking high bits if def is known boolean.

        unsigned AndOpc =
            TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
        BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
            .addImm(1)
            .addReg(SrcReg);
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
            .addImm(0)
            .addReg(MaskedReg);
      }

      if (!MRI->getRegClassOrNull(SrcReg))
        MRI->setRegClass(SrcReg, SrcRC);
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
      return false;

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (MO.getReg().isPhysical())
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, *MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
  }
  return true;
}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);
  if (DefTy == LLT::scalar(1)) {
    if (!AllowRiskySelect) {
      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
  }

  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI->getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  // TODO: Verify that all registers have the same bank
  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}

MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  Register DstReg = MRI->createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    Register Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

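  // Immediate operand: return the requested sign-extended 32-bit half of the
  // 64-bit value.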
  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("don't know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
      DstRB->getID() != AMDGPU::VCCRegBankID)
    return false;

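  // Lane-mask (VCC bank) values are wave-mask wide, so wave64 needs the 64-bit
  // opcode even for s1 values.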
  bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
                            STI.isWave64());
  I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));

  // Dead implicit-def of scc
  I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                         true, // isImp
                                         false, // isKill
                                         true)); // isDead
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  LLT Ty = MRI->getType(DstReg);
  if (Ty.isVector())
    return false;

  unsigned Size = Ty.getSizeInBits();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;

    Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

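  // 64-bit add: split both sources into 32-bit halves and chain the carry from
  // the low half into the high half.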
  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI->createVirtualRegister(&HalfRC);
  Register DstHi = MRI->createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI->createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
  MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register Dst0Reg = I.getOperand(0).getReg();
  Register Dst1Reg = I.getOperand(1).getReg();
  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
                     I.getOpcode() == AMDGPU::G_UADDE;
  const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
                          I.getOpcode() == AMDGPU::G_USUBE;

  if (isVCC(Dst1Reg, *MRI)) {
    unsigned NoCarryOpc =
        IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
    unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
    I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    I.addOperand(*MF, MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  Register Src0Reg = I.getOperand(2).getReg();
  Register Src1Reg = I.getOperand(3).getReg();

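  // SALU path: the carry is modeled in SCC, so materialize any incoming carry
  // into SCC before the add/sub and copy SCC back out afterwards.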
  if (HasCarryIn) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(I.getOperand(4).getReg());
  }

  unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
    .addReg(AMDGPU::SCC);

  if (!MRI->getRegClassOrNull(Dst1Reg))
    MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (HasCarryIn &&
      !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
                                    AMDGPU::SReg_32RegClass, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

// TODO: We should probably legalize these to use only 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();

  // TODO: Should handle any multiple of 32 offset.
  unsigned Offset = I.getOperand(2).getImm();
  if (Offset % 32 != 0 || DstSize > 128)
    return false;

  // 16-bit operations really use 32-bit registers.
  // FIXME: Probably should not allow 16-bit G_EXTRACT results.
  if (DstSize == 16)
    DstSize = 32;

  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC)
    return false;
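  // Map the constant bit offset to the equivalent 32-bit-channel subregister
  // index and select a plain subregister copy.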
  unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
                                                         DstSize / 32);
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
  if (!SrcRC)
    return false;

  SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
                                    *SrcRC, I.getOperand(1));
  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg, 0, SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return selectImpl(MI, *CoverageInfo);

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, *MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg0);
  LLT SrcTy = MRI->getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, 0, SubRegs[I]);

    // Make sure the subregister index is valid for the source register.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
    if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
      return false;

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
  MachineInstr &MI) const {
  if (selectImpl(MI, *CoverageInfo))
    return true;

  const LLT S32 = LLT::scalar(32);
  const LLT V2S16 = LLT::fixed_vector(2, 16);

  Register Dst = MI.getOperand(0).getReg();
  if (MRI->getType(Dst) != V2S16)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstBank->getID() != AMDGPU::SGPRRegBankID)
    return false;

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  if (MRI->getType(Src0) != S32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
  if (ConstSrc1) {
    auto ConstSrc0 =
        getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
    if (ConstSrc0) {
      const int64_t K0 = ConstSrc0->Value.getSExtValue();
      const int64_t K1 = ConstSrc1->Value.getSExtValue();
      uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
      uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;

      BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
        .addImm(Lo16 | (Hi16 << 16));
      MI.eraseFromParent();
      return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
    }
  }

  // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
  MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
  if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
    MI.setDesc(TII.get(AMDGPU::COPY));
    MI.RemoveOperand(2);
    return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
           RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
  }

  Register ShiftSrc0;
  Register ShiftSrc1;

  // With multiple uses of the shift, this will duplicate the shift and
  // increase register pressure.
  //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
  //  => (S_PACK_HH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
  //  => (S_PACK_LH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, $src1)
  //  => (S_PACK_LL_B32_B16 $src0, $src1)

  bool Shift0 = mi_match(
      Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));

  bool Shift1 = mi_match(
      Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));

  unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
  if (Shift0 && Shift1) {
    Opc = AMDGPU::S_PACK_HH_B32_B16;
    MI.getOperand(1).setReg(ShiftSrc0);
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift1) {
    Opc = AMDGPU::S_PACK_LH_B32_B16;
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
    // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
    auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
      .addReg(ShiftSrc0)
      .addImm(16);

    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  MI.setDesc(TII.get(Opc));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register Src0Reg = I.getOperand(1).getReg();
  Register Src1Reg = I.getOperand(2).getReg();
  LLT Src1Ty = MRI->getType(Src1Reg);

  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
  unsigned InsSize = Src1Ty.getSizeInBits();

  int64_t Offset = I.getOperand(3).getImm();

  // FIXME: These cases should have been illegal and unnecessary to check here.
  if (Offset % 32 != 0 || InsSize % 32 != 0)
    return false;

  // Currently not handled by getSubRegFromChannel.
  if (InsSize > 128)
    return false;

  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
  if (SubReg == AMDGPU::NoSubRegister)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
  const TargetRegisterClass *Src0RC =
    TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
  const TargetRegisterClass *Src1RC =
    TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);

  // Deal with weird cases where the class only partially supports the subreg
  // index.
  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
  if (!Src0RC || !Src1RC)
    return false;

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
    .addReg(Src0Reg)
    .addReg(Src1Reg)
    .addImm(SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register OffsetReg = MI.getOperand(2).getReg();
  Register WidthReg = MI.getOperand(3).getReg();

  assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
         "scalar BFX instructions are expanded in regbankselect");
  assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
         "64-bit vector BFX instructions are expanded in regbankselect");

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
  unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
                 .addReg(SrcReg)
                 .addReg(OffsetReg)
                 .addReg(WidthReg);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
  if (STI.getLDSBankCount() != 16)
    return selectImpl(MI, *CoverageInfo);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register M0Val = MI.getOperand(6).getReg();
  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
    return false;

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.

  Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
    .addImm(2)
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm()); // $attrchan

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
    .addImm(0)                          // $src0_modifiers
    .addReg(Src0)                       // $src0
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm())  // $attrchan
    .addImm(0)                          // $src2_modifiers
    .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
    .addImm(MI.getOperand(5).getImm())  // $high
    .addImm(0)                          // $clamp
    .addImm(0);                         // $omod

  MI.eraseFromParent();
  return true;
}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it is
// still required to abide by the 1 SGPR rule. Fix this up if we might have
// multiple SGPRs.
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
  // With a constant bus limit of at least 2, there's no issue.
  if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
    return selectImpl(MI, *CoverageInfo);

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register VDst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(2).getReg();
  Register LaneSelect = MI.getOperand(3).getReg();
  Register VDstIn = MI.getOperand(4).getReg();

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);

  Optional<ValueAndVReg> ConstSelect =
      getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
  if (ConstSelect) {
    // The selector has to be an inline immediate, so we can use whatever for
    // the other operands.
    MIB.addReg(Val);
    MIB.addImm(ConstSelect->Value.getSExtValue() &
               maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
  } else {
    Optional<ValueAndVReg> ConstVal =
        getIConstantVRegValWithLookThrough(Val, *MRI);

    // If the value written is an inline immediate, we can get away without a
    // copy to m0.
    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
                                                 STI.hasInv2PiInlineImm())) {
      MIB.addImm(ConstVal->Value.getSExtValue());
      MIB.addReg(LaneSelect);
    } else {
      MIB.addReg(Val);

      // If the lane selector was originally in a VGPR and copied with
      // readfirstlane, there's a hazard to read the same SGPR from the
      // VALU. Constrain to a different SGPR to help avoid needing a nop later.
      RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);

      BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
        .addReg(LaneSelect);
      MIB.addReg(AMDGPU::M0);
    }
  }

  MIB.addReg(VDstIn);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();

  LLT Ty = MRI->getType(Dst0);
  unsigned Opc;
  if (Ty == LLT::scalar(32))
    Opc = AMDGPU::V_DIV_SCALE_F32_e64;
  else if (Ty == LLT::scalar(64))
    Opc = AMDGPU::V_DIV_SCALE_F64_e64;
  else
    return false;

  // TODO: Match source modifiers.

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  Register Numer = MI.getOperand(3).getReg();
  Register Denom = MI.getOperand(4).getReg();
  unsigned ChooseDenom = MI.getOperand(5).getImm();

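  // The immediate operand picks which value feeds src0: the numerator when it
  // is nonzero, the denominator otherwise.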
  Register Src0 = ChooseDenom != 0 ? Numer : Denom;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
    .addDef(Dst1)
    .addImm(0)     // $src0_modifiers
    .addUse(Src0)  // $src0
    .addImm(0)     // $src1_modifiers
    .addUse(Denom) // $src1
    .addImm(0)     // $src2_modifiers
    .addUse(Numer) // $src2
    .addImm(0)     // $clamp
    .addImm(0);    // $omod

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg })
      MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

    return true;
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_strict_wwm:
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
  case Intrinsic::amdgcn_strict_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
  case Intrinsic::amdgcn_writelane:
    return selectWritelane(I);
  case Intrinsic::amdgcn_div_scale:
    return selectDivScale(I);
  case Intrinsic::amdgcn_icmp:
    return selectIntrinsicIcmp(I);
  case Intrinsic::amdgcn_ballot:
    return selectBallot(I);
  case Intrinsic::amdgcn_reloc_constant:
    return selectRelocConstant(I);
  case Intrinsic::amdgcn_groupstaticsize:
    return selectGroupStaticSize(I);
  case Intrinsic::returnaddress:
    return selectReturnAddress(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

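  // A result outside the VCC bank is a scalar boolean: compare with S_CMP and
  // copy SCC into the 32-bit result.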
  Register CCReg = I.getOperand(0).getReg();
  if (!isVCC(CCReg, *MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
            .add(I.getOperand(2))
            .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
            I.getOperand(0).getReg())
            .add(I.getOperand(2))
            .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
  Register Dst = I.getOperand(0).getReg();
  if (isVCC(Dst, *MRI))
    return false;

  if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
  auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
                           .add(I.getOperand(2))
                           .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
                               *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register DstReg = I.getOperand(0).getReg();
  const unsigned Size = MRI->getType(DstReg).getSizeInBits();
  const bool Is64 = Size == 64;

  if (Size != STI.getWavefrontSize())
    return false;

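  // A constant ballot argument folds: false produces a zero mask, true a copy
  // of exec.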
  Optional<ValueAndVReg> Arg =
      getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);

  if (Arg.hasValue()) {
    const int64_t Value = Arg.getValue().Value.getSExtValue();
    if (Value == 0) {
      unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
      BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
    } else if (Value == -1) { // all ones
      Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
    } else
      return false;
  } else {
    Register SrcReg = I.getOperand(2).getReg();
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;

  Module *M = MF->getFunction().getParent();
  const MDNode *Metadata = I.getOperand(2).getMetadata();
  auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
  auto RelocSymbol = cast<GlobalVariable>(
    M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));

  MachineBasicBlock *BB = I.getParent();
  BuildMI(*BB, &I, I.getDebugLoc(),
          TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
    .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
  Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();

  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
    AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;

  MachineBasicBlock *MBB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);

  if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    MIB.addImm(MFI->getLDSSize());
  } else {
    Module *M = MF->getFunction().getParent();
    const GlobalValue *GV
      = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
    MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
  MachineBasicBlock *MBB = I.getParent();
  MachineFunction &MF = *MBB->getParent();
  const DebugLoc &DL = I.getDebugLoc();

  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  unsigned Depth = I.getOperand(2).getImm();

  const TargetRegisterClass *RC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
      !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
    return false;

  // Check for kernel and shader functions
  if (Depth != 0 ||
      MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(0);
    I.eraseFromParent();
    return true;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  // Get the return address reg and mark it as an implicit live-in
  Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
  Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
                                             AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(LiveIn);
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
  // SelectionDAG uses for wave32 vs wave64.
  MachineBasicBlock *BB = MI.getParent();
  BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
      .add(MI.getOperand(1));

  Register Reg = MI.getOperand(1).getReg();
  MI.eraseFromParent();

  if (!MRI->getRegClassOrNull(Reg))
    MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
  return true;
}

bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned IndexOperand = MI.getOperand(7).getImm();
  bool WaveRelease = MI.getOperand(8).getImm() != 0;
  bool WaveDone = MI.getOperand(9).getImm() != 0;

  if (WaveDone && !WaveRelease)
    report_fatal_error("ds_ordered_count: wave_done requires wave_release");

  unsigned OrderedCountIndex = IndexOperand & 0x3f;
  IndexOperand &= ~0x3f;
  unsigned CountDw = 0;

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
    CountDw = (IndexOperand >> 24) & 0xf;
    IndexOperand &= ~(0xf << 24);

    if (CountDw < 1 || CountDw > 4) {
      report_fatal_error(
        "ds_ordered_count: dword count must be between 1 and 4");
    }
  }

  if (IndexOperand)
    report_fatal_error("ds_ordered_count: bad index operand");

  unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
  unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);

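  // Pack the DS_ORDERED_COUNT control word: offset0 carries the ordered-count
  // index; offset1 carries wave_release (bit 0), wave_done (bit 1), the shader
  // type (from bit 2), the instruction select (bit 4) and, on gfx10+, the
  // dword count minus 1 (from bit 6).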
  unsigned Offset0 = OrderedCountIndex << 2;
  unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
                     (Instruction << 4);

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
    Offset1 |= (CountDw - 1) << 6;

  unsigned Offset = Offset0 | (Offset1 << 8);

  Register M0Val = MI.getOperand(2).getReg();
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);

  Register DstReg = MI.getOperand(0).getReg();
  Register ValReg = MI.getOperand(3).getReg();
  MachineInstrBuilder DS =
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
      .addReg(ValReg)
      .addImm(Offset)
      .cloneMemRefs(MI);

  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
  MI.eraseFromParent();
  return Ret;
}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
  default:
    llvm_unreachable("not a gws intrinsic");
  }
}

bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
                                                     Intrinsic::ID IID) const {
  if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !STI.hasGWSSemaReleaseAll())
    return false;

  // intrinsic ID, vsrc, offset
  const bool HasVSrc = MI.getNumOperands() == 3;
  assert(HasVSrc || MI.getNumOperands() == 2);

  Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
  const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
  if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  assert(OffsetDef);

  unsigned ImmOffset;

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *Readfirstlane = nullptr;

  // If we legalized the VGPR input, strip out the readfirstlane to analyze the
  // incoming offset, in case there's an add of a constant. We'll have to put it
  // back later.
  if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
    Readfirstlane = OffsetDef;
    BaseOffset = OffsetDef->getOperand(1).getReg();
    OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  }

  if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.

    ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(0);
  } else {
    std::tie(BaseOffset, ImmOffset) =
        AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);

    if (Readfirstlane) {
      // We have the constant offset now, so put the readfirstlane back on the
      // variable component.
      if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
        return false;

      Readfirstlane->getOperand(1).setReg(BaseOffset);
      BaseOffset = Readfirstlane->getOperand(0).getReg();
    } else {
      if (!RBI.constrainGenericRegister(BaseOffset,
                                        AMDGPU::SReg_32RegClass, *MRI))
        return false;
    }

    Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
      .addReg(BaseOffset)
      .addImm(16);

    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0Base);
  }

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));

  if (HasVSrc) {
    Register VSrc = MI.getOperand(1).getReg();

    if (STI.needsAlignedVGPRs()) {
      // Add implicit aligned super-reg to force alignment on the data operand.
      Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
      Register NewVR =
          MRI->createVirtualRegister(&AMDGPU::VReg_64_Align2RegClass);
      BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), NewVR)
          .addReg(VSrc, 0, MI.getOperand(1).getSubReg())
          .addImm(AMDGPU::sub0)
          .addReg(Undef)
          .addImm(AMDGPU::sub1);
      MIB.addReg(NewVR, 0, AMDGPU::sub0);
      MIB.addReg(NewVR, RegState::Implicit);
    } else {
      MIB.addReg(VSrc);
    }

    if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
      return false;
  }

  MIB.addImm(ImmOffset)
     .cloneMemRefs(MI);

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
                                                      bool IsAppend) const {
  Register PtrBase = MI.getOperand(2).getReg();
  LLT PtrTy = MRI->getType(PtrBase);
  bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  unsigned Offset;
  std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));

  // TODO: Should this try to look through readfirstlane like GWS?
  if (!isDSOffsetLegal(PtrBase, Offset)) {
    PtrBase = MI.getOperand(2).getReg();
    Offset = 0;
  }

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(PtrBase);
  if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
    .addImm(Offset)
    .addImm(IsGDS ? -1 : 0)
    .cloneMemRefs(MI);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

1478 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
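  // If the whole workgroup fits in one wave, any lane reaching the barrier
  // implies all lanes have reached it, so s_barrier is a no-op; WAVE_BARRIER
  // emits no instruction but still blocks code motion across this point.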
1479   if (TM.getOptLevel() > CodeGenOpt::None) {
1480     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1481     if (WGSize <= STI.getWavefrontSize()) {
1482       MachineBasicBlock *MBB = MI.getParent();
1483       const DebugLoc &DL = MI.getDebugLoc();
1484       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1485       MI.eraseFromParent();
1486       return true;
1487     }
1488   }
1489   return selectImpl(MI, *CoverageInfo);
1490 }
1491 
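// Decode the texfailctrl immediate into its TFE (bit 0) and LWE (bit 1)
// components, returning false if any unknown bits are set. For example
// (illustrative): TexFailCtrl = 3 yields TFE = true, LWE = true and
// IsTexFail = true.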
1492 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1493                          bool &IsTexFail) {
1494   if (TexFailCtrl)
1495     IsTexFail = true;
1496 
1497   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1498   TexFailCtrl &= ~(uint64_t)0x1;
1499   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1500   TexFailCtrl &= ~(uint64_t)0x2;
1501 
1502   return TexFailCtrl == 0;
1503 }
1504 
1505 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1506   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1507   MachineBasicBlock *MBB = MI.getParent();
1508   const DebugLoc &DL = MI.getDebugLoc();
1509 
1510   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1511     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1512 
1513   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1514   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1515       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1516   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1517       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1518   unsigned IntrOpcode = Intr->BaseOpcode;
1519   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1520 
1521   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1522 
1523   Register VDataIn, VDataOut;
1524   LLT VDataTy;
1525   int NumVDataDwords = -1;
1526   bool IsD16 = false;
1527 
1528   bool Unorm;
1529   if (!BaseOpcode->Sampler)
1530     Unorm = true;
1531   else
1532     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1533 
1534   bool TFE;
1535   bool LWE;
1536   bool IsTexFail = false;
1537   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1538                     TFE, LWE, IsTexFail))
1539     return false;
1540 
1541   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1542   const bool IsA16 = (Flags & 1) != 0;
1543   const bool IsG16 = (Flags & 2) != 0;
1544 
1545   // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1546   if (IsA16 && !STI.hasG16() && !IsG16)
1547     return false;
1548 
1549   unsigned DMask = 0;
1550   unsigned DMaskLanes = 0;
1551 
1552   if (BaseOpcode->Atomic) {
1553     VDataOut = MI.getOperand(0).getReg();
1554     VDataIn = MI.getOperand(2).getReg();
1555     LLT Ty = MRI->getType(VDataIn);
1556 
1557     // Be careful to allow atomic swap on 16-bit element vectors.
1558     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1559       Ty.getSizeInBits() == 128 :
1560       Ty.getSizeInBits() == 64;
1561 
1562     if (BaseOpcode->AtomicX2) {
1563       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1564 
1565       DMask = Is64Bit ? 0xf : 0x3;
1566       NumVDataDwords = Is64Bit ? 4 : 2;
1567     } else {
1568       DMask = Is64Bit ? 0x3 : 0x1;
1569       NumVDataDwords = Is64Bit ? 2 : 1;
1570     }
1571   } else {
1572     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1573     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1574 
1575     // One memoperand is mandatory, except for getresinfo.
1576     // FIXME: Check this in verifier.
1577     if (!MI.memoperands_empty()) {
1578       const MachineMemOperand *MMO = *MI.memoperands_begin();
1579 
1580       // Infer d16 from the memory size, as the register type will be mangled by
1581       // unpacked subtargets, or by TFE.
1582       IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1583     }
1584 
1585     if (BaseOpcode->Store) {
1586       VDataIn = MI.getOperand(1).getReg();
1587       VDataTy = MRI->getType(VDataIn);
1588       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1589     } else {
1590       VDataOut = MI.getOperand(0).getReg();
1591       VDataTy = MRI->getType(VDataOut);
1592       NumVDataDwords = DMaskLanes;
1593 
1594       if (IsD16 && !STI.hasUnpackedD16VMem())
1595         NumVDataDwords = (DMaskLanes + 1) / 2;
1596     }
1597   }
1598 
1599   // Optimize _L to _LZ when the LOD argument is zero.
1600   if (LZMappingInfo) {
1601     // The legalizer replaced the register with an immediate 0 if we need to
1602     // change the opcode.
1603     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
1604     if (Lod.isImm()) {
1605       assert(Lod.getImm() == 0);
1606       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1607     }
1608   }
1609 
1610   // Optimize _mip away when 'lod' is zero.
1611   if (MIPMappingInfo) {
1612     const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
1613     if (Lod.isImm()) {
1614       assert(Lod.getImm() == 0);
1615       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1616     }
1617   }
1618 
1619   // Set G16 opcode
1620   if (IsG16 && !IsA16) {
1621     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1622         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1623     assert(G16MappingInfo);
1624     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1625   }
1626 
1627   // TODO: Check this in verifier.
1628   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1629 
1630   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1631   if (BaseOpcode->Atomic)
1632     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1633   if (CPol & ~AMDGPU::CPol::ALL)
1634     return false;
1635 
1636   int NumVAddrRegs = 0;
1637   int NumVAddrDwords = 0;
1638   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1639     // Skip the $noregs and 0s inserted during legalization.
1640     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1641     if (!AddrOp.isReg())
1642       continue; // XXX - Break?
1643 
1644     Register Addr = AddrOp.getReg();
1645     if (!Addr)
1646       break;
1647 
1648     ++NumVAddrRegs;
1649     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1650   }
1651 
1652   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1653   // NSA, these should have been packed into a single value in the first
1654   // address register.
1655   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1656   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1657     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1658     return false;
1659   }
1660 
1661   if (IsTexFail)
1662     ++NumVDataDwords;
1663 
1664   int Opcode = -1;
1665   if (IsGFX10Plus) {
1666     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1667                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1668                                           : AMDGPU::MIMGEncGfx10Default,
1669                                    NumVDataDwords, NumVAddrDwords);
1670   } else {
1671     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1672       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1673                                      NumVDataDwords, NumVAddrDwords);
1674     if (Opcode == -1)
1675       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1676                                      NumVDataDwords, NumVAddrDwords);
1677   }
1678   assert(Opcode != -1);
1679 
1680   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1681     .cloneMemRefs(MI);
1682 
1683   if (VDataOut) {
1684     if (BaseOpcode->AtomicX2) {
1685       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1686 
1687       Register TmpReg = MRI->createVirtualRegister(
1688         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1689       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1690 
1691       MIB.addDef(TmpReg);
1692       if (!MRI->use_empty(VDataOut)) {
1693         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1694             .addReg(TmpReg, RegState::Kill, SubReg);
1695       }
1696 
1697     } else {
1698       MIB.addDef(VDataOut); // vdata output
1699     }
1700   }
1701 
1702   if (VDataIn)
1703     MIB.addReg(VDataIn); // vdata input
1704 
1705   for (int I = 0; I != NumVAddrRegs; ++I) {
1706     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1707     if (SrcOp.isReg()) {
1708       assert(SrcOp.getReg() != 0);
1709       MIB.addReg(SrcOp.getReg());
1710     }
1711   }
1712 
1713   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1714   if (BaseOpcode->Sampler)
1715     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1716 
1717   MIB.addImm(DMask); // dmask
1718 
1719   if (IsGFX10Plus)
1720     MIB.addImm(DimInfo->Encoding);
1721   MIB.addImm(Unorm);
1722 
1723   MIB.addImm(CPol);
1724   MIB.addImm(IsA16 &&  // a16 or r128
1725              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1726   if (IsGFX10Plus)
1727     MIB.addImm(IsA16 ? -1 : 0);
1728 
1729   MIB.addImm(TFE); // tfe
1730   MIB.addImm(LWE); // lwe
1731   if (!IsGFX10Plus)
1732     MIB.addImm(DimInfo->DA ? -1 : 0);
1733   if (BaseOpcode->HasD16)
1734     MIB.addImm(IsD16 ? -1 : 0);
1735 
1736   if (IsTexFail) {
1737     // An image load instruction with TFE/LWE only conditionally writes to its
1738   // result registers. Initialize them to zero so that we always get
1739   // well-defined result values.
1740     assert(VDataOut && !VDataIn);
1741     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1742     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1743     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1744       .addImm(0);
1745     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1746     if (STI.usePRTStrictNull()) {
1747       // With enable-prt-strict-null enabled, initialize all result registers to
1748       // zero.
1749       auto RegSeq =
1750           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1751       for (auto Sub : Parts)
1752         RegSeq.addReg(Zero).addImm(Sub);
1753     } else {
1754       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1755       // result register.
1756       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1757       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1758       auto RegSeq =
1759           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1760       for (auto Sub : Parts.drop_back(1))
1761         RegSeq.addReg(Undef).addImm(Sub);
1762       RegSeq.addReg(Zero).addImm(Parts.back());
1763     }
1764     MIB.addReg(Tied, RegState::Implicit);
1765     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1766   }
1767 
1768   MI.eraseFromParent();
1769   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1770 }
1771 
1772 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1773     MachineInstr &I) const {
1774   unsigned IntrinsicID = I.getIntrinsicID();
1775   switch (IntrinsicID) {
1776   case Intrinsic::amdgcn_end_cf:
1777     return selectEndCfIntrinsic(I);
1778   case Intrinsic::amdgcn_ds_ordered_add:
1779   case Intrinsic::amdgcn_ds_ordered_swap:
1780     return selectDSOrderedIntrinsic(I, IntrinsicID);
1781   case Intrinsic::amdgcn_ds_gws_init:
1782   case Intrinsic::amdgcn_ds_gws_barrier:
1783   case Intrinsic::amdgcn_ds_gws_sema_v:
1784   case Intrinsic::amdgcn_ds_gws_sema_br:
1785   case Intrinsic::amdgcn_ds_gws_sema_p:
1786   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1787     return selectDSGWSIntrinsic(I, IntrinsicID);
1788   case Intrinsic::amdgcn_ds_append:
1789     return selectDSAppendConsume(I, true);
1790   case Intrinsic::amdgcn_ds_consume:
1791     return selectDSAppendConsume(I, false);
1792   case Intrinsic::amdgcn_s_barrier:
1793     return selectSBarrier(I);
1794   case Intrinsic::amdgcn_global_atomic_fadd:
1795     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1796   default: {
1797     return selectImpl(I, *CoverageInfo);
1798   }
1799   }
1800 }
1801 
1802 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1803   if (selectImpl(I, *CoverageInfo))
1804     return true;
1805 
1806   MachineBasicBlock *BB = I.getParent();
1807   const DebugLoc &DL = I.getDebugLoc();
1808 
1809   Register DstReg = I.getOperand(0).getReg();
1810   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1811   assert(Size <= 32 || Size == 64);
1812   const MachineOperand &CCOp = I.getOperand(1);
1813   Register CCReg = CCOp.getReg();
1814   if (!isVCC(CCReg, *MRI)) {
1815     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1816                                          AMDGPU::S_CSELECT_B32;
1817     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1818             .addReg(CCReg);
1819 
1820     // The generic constrainSelectedInstRegOperands doesn't work for the scc
1821     // register bank, because it does not cover the register class we use to
1822     // represent it. So we need to set the register class manually here.
1823     if (!MRI->getRegClassOrNull(CCReg))
1824         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1825     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1826             .add(I.getOperand(2))
1827             .add(I.getOperand(3));
1828 
1829     bool Ret = false;
1830     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1831     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1832     I.eraseFromParent();
1833     return Ret;
1834   }
1835 
1836   // Wide VGPR select should have been split in RegBankSelect.
1837   if (Size > 32)
1838     return false;
1839 
1840   MachineInstr *Select =
1841       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1842               .addImm(0)
1843               .add(I.getOperand(3))
1844               .addImm(0)
1845               .add(I.getOperand(2))
1846               .add(I.getOperand(1));
1847 
1848   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1849   I.eraseFromParent();
1850   return Ret;
1851 }
1852 
1853 static int sizeToSubRegIndex(unsigned Size) {
1854   switch (Size) {
1855   case 32:
1856     return AMDGPU::sub0;
1857   case 64:
1858     return AMDGPU::sub0_sub1;
1859   case 96:
1860     return AMDGPU::sub0_sub1_sub2;
1861   case 128:
1862     return AMDGPU::sub0_sub1_sub2_sub3;
1863   case 256:
1864     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1865   default:
1866     if (Size < 32)
1867       return AMDGPU::sub0;
1868     if (Size > 256)
1869       return -1;
1870     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1871   }
1872 }
1873 
1874 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1875   Register DstReg = I.getOperand(0).getReg();
1876   Register SrcReg = I.getOperand(1).getReg();
1877   const LLT DstTy = MRI->getType(DstReg);
1878   const LLT SrcTy = MRI->getType(SrcReg);
1879   const LLT S1 = LLT::scalar(1);
1880 
1881   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1882   const RegisterBank *DstRB;
1883   if (DstTy == S1) {
1884     // This is a special case. We don't treat s1 for legalization artifacts as
1885     // vcc booleans.
1886     DstRB = SrcRB;
1887   } else {
1888     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1889     if (SrcRB != DstRB)
1890       return false;
1891   }
1892 
1893   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1894 
1895   unsigned DstSize = DstTy.getSizeInBits();
1896   unsigned SrcSize = SrcTy.getSizeInBits();
1897 
1898   const TargetRegisterClass *SrcRC
1899     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1900   const TargetRegisterClass *DstRC
1901     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1902   if (!SrcRC || !DstRC)
1903     return false;
1904 
1905   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1906       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1907     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1908     return false;
1909   }
1910 
1911   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1912     MachineBasicBlock *MBB = I.getParent();
1913     const DebugLoc &DL = I.getDebugLoc();
1914 
1915     Register LoReg = MRI->createVirtualRegister(DstRC);
1916     Register HiReg = MRI->createVirtualRegister(DstRC);
1917     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1918       .addReg(SrcReg, 0, AMDGPU::sub0);
1919     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1920       .addReg(SrcReg, 0, AMDGPU::sub1);
1921 
1922     if (IsVALU && STI.hasSDWA()) {
1923       // Write the low 16-bits of the high element into the high 16-bits of the
1924       // low element.
1925       MachineInstr *MovSDWA =
1926         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1927         .addImm(0)                             // $src0_modifiers
1928         .addReg(HiReg)                         // $src0
1929         .addImm(0)                             // $clamp
1930         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1931         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1932         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1933         .addReg(LoReg, RegState::Implicit);
1934       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1935     } else {
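      // Without SDWA, expand to Dst = (Hi << 16) | (Lo & 0xffff), using the
      // VALU or SALU forms of the shift/and/or depending on the bank.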
1936       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1937       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1938       Register ImmReg = MRI->createVirtualRegister(DstRC);
1939       if (IsVALU) {
1940         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1941           .addImm(16)
1942           .addReg(HiReg);
1943       } else {
1944         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1945           .addReg(HiReg)
1946           .addImm(16);
1947       }
1948 
1949       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1950       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1951       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1952 
1953       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1954         .addImm(0xffff);
1955       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1956         .addReg(LoReg)
1957         .addReg(ImmReg);
1958       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1959         .addReg(TmpReg0)
1960         .addReg(TmpReg1);
1961     }
1962 
1963     I.eraseFromParent();
1964     return true;
1965   }
1966 
1967   if (!DstTy.isScalar())
1968     return false;
1969 
1970   if (SrcSize > 32) {
1971     int SubRegIdx = sizeToSubRegIndex(DstSize);
1972     if (SubRegIdx == -1)
1973       return false;
1974 
1975     // Deal with weird cases where the class only partially supports the subreg
1976     // index.
1977     const TargetRegisterClass *SrcWithSubRC
1978       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1979     if (!SrcWithSubRC)
1980       return false;
1981 
1982     if (SrcWithSubRC != SrcRC) {
1983       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1984         return false;
1985     }
1986 
1987     I.getOperand(1).setSubReg(SubRegIdx);
1988   }
1989 
1990   I.setDesc(TII.get(TargetOpcode::COPY));
1991   return true;
1992 }
1993 
1994 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
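/// For example (illustrative): Size = 4 gives Mask = 0xf = 15, an inline
/// immediate, so the AND form is profitable; Size = 16 gives 0xffff, which is
/// not inline, so a BFE is used instead.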
1995 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1996   Mask = maskTrailingOnes<unsigned>(Size);
1997   int SignedMask = static_cast<int>(Mask);
1998   return SignedMask >= -16 && SignedMask <= 64;
1999 }
2000 
2001 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2002 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2003   Register Reg, const MachineRegisterInfo &MRI,
2004   const TargetRegisterInfo &TRI) const {
2005   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2006   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2007     return RB;
2008 
2009   // Ignore the type, since we don't use vcc in artifacts.
2010   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2011     return &RBI.getRegBankFromRegClass(*RC, LLT());
2012   return nullptr;
2013 }
2014 
2015 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2016   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2017   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2018   const DebugLoc &DL = I.getDebugLoc();
2019   MachineBasicBlock &MBB = *I.getParent();
2020   const Register DstReg = I.getOperand(0).getReg();
2021   const Register SrcReg = I.getOperand(1).getReg();
2022 
2023   const LLT DstTy = MRI->getType(DstReg);
2024   const LLT SrcTy = MRI->getType(SrcReg);
2025   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2026     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2027   const unsigned DstSize = DstTy.getSizeInBits();
2028   if (!DstTy.isScalar())
2029     return false;
2030 
2031   // Artifact casts should never use vcc.
2032   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2033 
2034   // FIXME: This should probably be illegal and split earlier.
2035   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2036     if (DstSize <= 32)
2037       return selectCOPY(I);
2038 
2039     const TargetRegisterClass *SrcRC =
2040         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI);
2041     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2042     const TargetRegisterClass *DstRC =
2043         TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
2044 
2045     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2046     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2047     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2048       .addReg(SrcReg)
2049       .addImm(AMDGPU::sub0)
2050       .addReg(UndefReg)
2051       .addImm(AMDGPU::sub1);
2052     I.eraseFromParent();
2053 
2054     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2055            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2056   }
2057 
2058   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2059     // 64-bit should have been split up in RegBankSelect
2060 
2061     // Try to use an and with a mask if it will save code size.
2062     unsigned Mask;
2063     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2064       MachineInstr *ExtI =
2065       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2066         .addImm(Mask)
2067         .addReg(SrcReg);
2068       I.eraseFromParent();
2069       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2070     }
2071 
2072     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2073     MachineInstr *ExtI =
2074       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2075       .addReg(SrcReg)
2076       .addImm(0) // Offset
2077       .addImm(SrcSize); // Width
2078     I.eraseFromParent();
2079     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2080   }
2081 
2082   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2083     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2084       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2085     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2086       return false;
2087 
2088     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2089       const unsigned SextOpc = SrcSize == 8 ?
2090         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2091       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2092         .addReg(SrcReg);
2093       I.eraseFromParent();
2094       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2095     }
2096 
2097     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2098     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2099 
2100     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
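    // For example (illustrative): a scalar zext from 8 bits selects to
    // S_BFE_U32 with immediate (8 << 16) = 0x80000, i.e. offset 0, width 8.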
2101     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2102       // We need a 64-bit register source, but the high bits don't matter.
2103       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2104       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2105       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2106 
2107       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2108       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2109         .addReg(SrcReg, 0, SubReg)
2110         .addImm(AMDGPU::sub0)
2111         .addReg(UndefReg)
2112         .addImm(AMDGPU::sub1);
2113 
2114       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2115         .addReg(ExtReg)
2116         .addImm(SrcSize << 16);
2117 
2118       I.eraseFromParent();
2119       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2120     }
2121 
2122     unsigned Mask;
2123     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2124       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2125         .addReg(SrcReg)
2126         .addImm(Mask);
2127     } else {
2128       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2129         .addReg(SrcReg)
2130         .addImm(SrcSize << 16);
2131     }
2132 
2133     I.eraseFromParent();
2134     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2135   }
2136 
2137   return false;
2138 }
2139 
2140 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2141   MachineBasicBlock *BB = I.getParent();
2142   MachineOperand &ImmOp = I.getOperand(1);
2143   Register DstReg = I.getOperand(0).getReg();
2144   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2145 
2146   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2147   if (ImmOp.isFPImm()) {
2148     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2149     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2150   } else if (ImmOp.isCImm()) {
2151     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2152   } else {
2153     llvm_unreachable("Not supported by g_constants");
2154   }
2155 
2156   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2157   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2158 
2159   unsigned Opcode;
2160   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2161     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2162   } else {
2163     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2164 
2165     // We should never produce s1 values on banks other than VCC. If the user of
2166     // this already constrained the register, we may incorrectly think it's VCC
2167     // if it wasn't originally.
2168     if (Size == 1)
2169       return false;
2170   }
2171 
2172   if (Size != 64) {
2173     I.setDesc(TII.get(Opcode));
2174     I.addImplicitDefUseOperands(*MF);
2175     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2176   }
2177 
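  // 64-bit case: unless the value is an SGPR inline constant, split it into
  // two 32-bit moves and recombine the halves with a REG_SEQUENCE. For
  // example (illustrative), 0x100000000 splits into lo = 0 and hi = 1.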
2178   const DebugLoc &DL = I.getDebugLoc();
2179 
2180   APInt Imm(Size, I.getOperand(1).getImm());
2181 
2182   MachineInstr *ResInst;
2183   if (IsSgpr && TII.isInlineConstant(Imm)) {
2184     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2185       .addImm(I.getOperand(1).getImm());
2186   } else {
2187     const TargetRegisterClass *RC = IsSgpr ?
2188       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2189     Register LoReg = MRI->createVirtualRegister(RC);
2190     Register HiReg = MRI->createVirtualRegister(RC);
2191 
2192     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2193       .addImm(Imm.trunc(32).getZExtValue());
2194 
2195     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2196       .addImm(Imm.ashr(32).getZExtValue());
2197 
2198     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2199       .addReg(LoReg)
2200       .addImm(AMDGPU::sub0)
2201       .addReg(HiReg)
2202       .addImm(AMDGPU::sub1);
2203   }
2204 
2205   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2206   // work for target-independent opcodes.
2207   I.eraseFromParent();
2208   const TargetRegisterClass *DstRC =
2209     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2210   if (!DstRC)
2211     return true;
2212   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2213 }
2214 
2215 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2216   // Only manually handle the f64 SGPR case.
2217   //
2218   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2219   // the bit ops theoretically have a second result due to the implicit def of
2220   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2221   // that is easy by disabling the check. The result works, but uses a
2222   // nonsensical sreg32orlds_and_sreg_1 regclass.
2223   //
2224   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32
2225   // results to the variadic REG_SEQUENCE operands.
2226 
2227   Register Dst = MI.getOperand(0).getReg();
2228   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2229   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2230       MRI->getType(Dst) != LLT::scalar(64))
2231     return false;
2232 
2233   Register Src = MI.getOperand(1).getReg();
2234   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2235   if (Fabs)
2236     Src = Fabs->getOperand(1).getReg();
2237 
2238   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2239       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2240     return false;
2241 
2242   MachineBasicBlock *BB = MI.getParent();
2243   const DebugLoc &DL = MI.getDebugLoc();
2244   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2245   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2246   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2247   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2248 
2249   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2250     .addReg(Src, 0, AMDGPU::sub0);
2251   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2252     .addReg(Src, 0, AMDGPU::sub1);
2253   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2254     .addImm(0x80000000);
2255 
2256   // Set or toggle sign bit.
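  // XOR with 0x80000000 flips the sign bit for plain fneg; when a source fabs
  // is folded in, OR sets the bit unconditionally, matching fneg(fabs(x)).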
2257   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2258   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2259     .addReg(HiReg)
2260     .addReg(ConstReg);
2261   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2262     .addReg(LoReg)
2263     .addImm(AMDGPU::sub0)
2264     .addReg(OpReg)
2265     .addImm(AMDGPU::sub1);
2266   MI.eraseFromParent();
2267   return true;
2268 }
2269 
2270 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2271 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2272   Register Dst = MI.getOperand(0).getReg();
2273   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2274   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2275       MRI->getType(Dst) != LLT::scalar(64))
2276     return false;
2277 
2278   Register Src = MI.getOperand(1).getReg();
2279   MachineBasicBlock *BB = MI.getParent();
2280   const DebugLoc &DL = MI.getDebugLoc();
2281   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2282   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2283   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2284   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2285 
2286   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2287       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2288     return false;
2289 
2290   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2291     .addReg(Src, 0, AMDGPU::sub0);
2292   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2293     .addReg(Src, 0, AMDGPU::sub1);
2294   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2295     .addImm(0x7fffffff);
2296 
2297   // Clear sign bit.
2298   // TODO: Should this use S_BITSET0_*?
2299   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2300     .addReg(HiReg)
2301     .addReg(ConstReg);
2302   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2303     .addReg(LoReg)
2304     .addImm(AMDGPU::sub0)
2305     .addReg(OpReg)
2306     .addImm(AMDGPU::sub1);
2307 
2308   MI.eraseFromParent();
2309   return true;
2310 }
2311 
2312 static bool isConstant(const MachineInstr &MI) {
2313   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2314 }
2315 
2316 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2317     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2318 
2319   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2320 
2321   assert(PtrMI);
2322 
2323   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2324     return;
2325 
2326   GEPInfo GEPInfo(*PtrMI);
2327 
2328   for (unsigned i = 1; i != 3; ++i) {
2329     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2330     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2331     assert(OpDef);
2332     if (i == 2 && isConstant(*OpDef)) {
2333       // TODO: Could handle constant base + variable offset, but a combine
2334       // probably should have commuted it.
2335       assert(GEPInfo.Imm == 0);
2336       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2337       continue;
2338     }
2339     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2340     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2341       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2342     else
2343       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2344   }
2345 
2346   AddrInfo.push_back(GEPInfo);
2347   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2348 }
2349 
2350 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2351   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2352 }
2353 
2354 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2355   if (!MI.hasOneMemOperand())
2356     return false;
2357 
2358   const MachineMemOperand *MMO = *MI.memoperands_begin();
2359   const Value *Ptr = MMO->getValue();
2360 
2361   // UndefValue means this is a load of a kernel input.  These are uniform.
2362   // Sometimes LDS instructions have constant pointers.
2363   // If Ptr is null, then that means this mem operand contains a
2364   // PseudoSourceValue like GOT.
2365   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2366       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2367     return true;
2368 
2369   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2370     return true;
2371 
2372   const Instruction *I = dyn_cast<Instruction>(Ptr);
2373   return I && I->getMetadata("amdgpu.uniform");
2374 }
2375 
2376 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2377   for (const GEPInfo &GEPInfo : AddrInfo) {
2378     if (!GEPInfo.VgprParts.empty())
2379       return true;
2380   }
2381   return false;
2382 }
2383 
2384 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2385   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2386   unsigned AS = PtrTy.getAddressSpace();
2387   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2388       STI.ldsRequiresM0Init()) {
2389     MachineBasicBlock *BB = I.getParent();
2390 
2391     // If DS instructions require M0 initialization, insert it before selecting.
2392     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2393       .addImm(-1);
2394   }
2395 }
2396 
2397 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2398   MachineInstr &I) const {
2399   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2400     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2401     unsigned AS = PtrTy.getAddressSpace();
2402     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2403       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2404   }
2405 
2406   initM0(I);
2407   return selectImpl(I, *CoverageInfo);
2408 }
2409 
2410 // TODO: No rtn optimization.
2411 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2412   MachineInstr &MI) const {
2413   Register PtrReg = MI.getOperand(1).getReg();
2414   const LLT PtrTy = MRI->getType(PtrReg);
2415   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2416       STI.useFlatForGlobal())
2417     return selectImpl(MI, *CoverageInfo);
2418 
2419   Register DstReg = MI.getOperand(0).getReg();
2420   const LLT Ty = MRI->getType(DstReg);
2421   const bool Is64 = Ty.getSizeInBits() == 64;
2422   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2423   Register TmpReg = MRI->createVirtualRegister(
2424     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
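  // The cmpswap source operand packs {data, cmp} into one wide register; the
  // RTN form writes the old memory value into the low half of the wide
  // destination, so only sub0(_sub1) is copied out as the result.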
2425 
2426   const DebugLoc &DL = MI.getDebugLoc();
2427   MachineBasicBlock *BB = MI.getParent();
2428 
2429   Register VAddr, RSrcReg, SOffset;
2430   int64_t Offset = 0;
2431 
2432   unsigned Opcode;
2433   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2434     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2435                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2436   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2437                                    RSrcReg, SOffset, Offset)) {
2438     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2439                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2440   } else
2441     return selectImpl(MI, *CoverageInfo);
2442 
2443   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2444     .addReg(MI.getOperand(2).getReg());
2445 
2446   if (VAddr)
2447     MIB.addReg(VAddr);
2448 
2449   MIB.addReg(RSrcReg);
2450   if (SOffset)
2451     MIB.addReg(SOffset);
2452   else
2453     MIB.addImm(0);
2454 
2455   MIB.addImm(Offset);
2456   MIB.addImm(AMDGPU::CPol::GLC);
2457   MIB.cloneMemRefs(MI);
2458 
2459   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2460     .addReg(TmpReg, RegState::Kill, SubReg);
2461 
2462   MI.eraseFromParent();
2463 
2464   MRI->setRegClass(
2465     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2466   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2467 }
2468 
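// Return true if Reg is produced only by V_CMP-style results (possibly
// combined with and/or/xor). Such values already have their inactive lanes
// zeroed, so a conditional branch on them needs no extra AND with exec.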
2469 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2470   if (Reg.isPhysical())
2471     return false;
2472 
2473   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2474   const unsigned Opcode = MI.getOpcode();
2475 
2476   if (Opcode == AMDGPU::COPY)
2477     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2478 
2479   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2480       Opcode == AMDGPU::G_XOR)
2481     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2482            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2483 
2484   if (Opcode == TargetOpcode::G_INTRINSIC)
2485     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2486 
2487   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2488 }
2489 
2490 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2491   MachineBasicBlock *BB = I.getParent();
2492   MachineOperand &CondOp = I.getOperand(0);
2493   Register CondReg = CondOp.getReg();
2494   const DebugLoc &DL = I.getDebugLoc();
2495 
2496   unsigned BrOpcode;
2497   Register CondPhysReg;
2498   const TargetRegisterClass *ConstrainRC;
2499 
2500   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2501   // whether the branch is uniform when selecting the instruction. In
2502   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2503   // RegBankSelect knows what it's doing if the branch condition is scc, even
2504   // though it currently does not.
2505   if (!isVCC(CondReg, *MRI)) {
2506     if (MRI->getType(CondReg) != LLT::scalar(32))
2507       return false;
2508 
2509     CondPhysReg = AMDGPU::SCC;
2510     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2511     ConstrainRC = &AMDGPU::SReg_32RegClass;
2512   } else {
2513     // FIXME: Should scc->vcc copies be ANDed with exec?
2514 
2515     // Unless the value of CondReg is a result of a V_CMP* instruction, we
2516     // need to insert an AND with exec.
2517     if (!isVCmpResult(CondReg, *MRI)) {
2518       const bool Is64 = STI.isWave64();
2519       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2520       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2521 
2522       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2523       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2524           .addReg(CondReg)
2525           .addReg(Exec);
2526       CondReg = TmpReg;
2527     }
2528 
2529     CondPhysReg = TRI.getVCC();
2530     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2531     ConstrainRC = TRI.getBoolRC();
2532   }
2533 
2534   if (!MRI->getRegClassOrNull(CondReg))
2535     MRI->setRegClass(CondReg, ConstrainRC);
2536 
2537   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2538     .addReg(CondReg);
2539   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2540     .addMBB(I.getOperand(1).getMBB());
2541 
2542   I.eraseFromParent();
2543   return true;
2544 }
2545 
2546 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2547   MachineInstr &I) const {
2548   Register DstReg = I.getOperand(0).getReg();
2549   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2550   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2551   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2552   if (IsVGPR)
2553     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2554 
2555   return RBI.constrainGenericRegister(
2556     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2557 }
2558 
2559 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2560   Register DstReg = I.getOperand(0).getReg();
2561   Register SrcReg = I.getOperand(1).getReg();
2562   Register MaskReg = I.getOperand(2).getReg();
2563   LLT Ty = MRI->getType(DstReg);
2564   LLT MaskTy = MRI->getType(MaskReg);
2565 
2566   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2567   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2568   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2569   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2570   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2571     return false;
2572 
2573   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2574   const TargetRegisterClass &RegRC
2575     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2576 
2577   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2578                                                                   *MRI);
2579   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2580                                                                   *MRI);
2581   const TargetRegisterClass *MaskRC =
2582       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI);
2583 
2584   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2585       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2586       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2587     return false;
2588 
2589   MachineBasicBlock *BB = I.getParent();
2590   const DebugLoc &DL = I.getDebugLoc();
2591   if (Ty.getSizeInBits() == 32) {
2592     assert(MaskTy.getSizeInBits() == 32 &&
2593            "ptrmask should have been narrowed during legalize");
2594 
2595     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2596       .addReg(SrcReg)
2597       .addReg(MaskReg);
2598     I.eraseFromParent();
2599     return true;
2600   }
2601 
2602   Register HiReg = MRI->createVirtualRegister(&RegRC);
2603   Register LoReg = MRI->createVirtualRegister(&RegRC);
2604 
2605   // Extract the subregisters from the source pointer.
2606   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2607     .addReg(SrcReg, 0, AMDGPU::sub0);
2608   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2609     .addReg(SrcReg, 0, AMDGPU::sub1);
2610 
2611   Register MaskedLo, MaskedHi;
2612 
2613   // Try to avoid emitting a bit operation when we only need to touch half of
2614   // the 64-bit pointer.
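  // For example (illustrative): a constant mask of 0xfffffffffffff000 has all
  // high-half bits known set, so the high half is just copied and only the
  // low half needs an AND.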
2615   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64);
2616 
2617   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2618   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2619   if ((MaskOnes & MaskLo32) == MaskLo32) {
2620     // If all the bits in the low half are 1, we only need a copy for it.
2621     MaskedLo = LoReg;
2622   } else {
2623     // Extract the mask subregister and apply the and.
2624     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2625     MaskedLo = MRI->createVirtualRegister(&RegRC);
2626 
2627     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2628       .addReg(MaskReg, 0, AMDGPU::sub0);
2629     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2630       .addReg(LoReg)
2631       .addReg(MaskLo);
2632   }
2633 
2634   if ((MaskOnes & MaskHi32) == MaskHi32) {
2635     // If all the bits in the high half are 1, we only need a copy for it.
2636     MaskedHi = HiReg;
2637   } else {
2638     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2639     MaskedHi = MRI->createVirtualRegister(&RegRC);
2640 
2641     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2642       .addReg(MaskReg, 0, AMDGPU::sub1);
2643     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2644       .addReg(HiReg)
2645       .addReg(MaskHi);
2646   }
2647 
2648   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2649     .addReg(MaskedLo)
2650     .addImm(AMDGPU::sub0)
2651     .addReg(MaskedHi)
2652     .addImm(AMDGPU::sub1);
2653   I.eraseFromParent();
2654   return true;
2655 }
2656 
2657 /// Return the register to use for the index value, and the subregister to use
2658 /// for the indirectly accessed register.
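/// For example (illustrative): with a 128-bit superclass, EltSize = 4 and an
/// index defined by (G_ADD %base, 2), this returns {%base, sub2}, folding the
/// constant part of the index into the subregister.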
2659 static std::pair<Register, unsigned>
2660 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2661                         const SIRegisterInfo &TRI,
2662                         const TargetRegisterClass *SuperRC,
2663                         Register IdxReg,
2664                         unsigned EltSize) {
2665   Register IdxBaseReg;
2666   int Offset;
2667 
2668   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2669   if (IdxBaseReg == AMDGPU::NoRegister) {
2670     // This will happen if the index is a known constant. This should ordinarily
2671     // be legalized out, but handle it as a register just in case.
2672     assert(Offset == 0);
2673     IdxBaseReg = IdxReg;
2674   }
2675 
2676   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2677 
2678   // Skip out-of-bounds offsets, or else we would end up using an undefined
2679   // register.
2680   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2681     return std::make_pair(IdxReg, SubRegs[0]);
2682   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2683 }
2684 
2685 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2686   MachineInstr &MI) const {
2687   Register DstReg = MI.getOperand(0).getReg();
2688   Register SrcReg = MI.getOperand(1).getReg();
2689   Register IdxReg = MI.getOperand(2).getReg();
2690 
2691   LLT DstTy = MRI->getType(DstReg);
2692   LLT SrcTy = MRI->getType(SrcReg);
2693 
2694   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2695   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2696   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2697 
2698   // The index must be scalar. If it wasn't, RegBankSelect should have moved
2699   // this into a waterfall loop.
2700   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2701     return false;
2702 
2703   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2704                                                                   *MRI);
2705   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2706                                                                   *MRI);
2707   if (!SrcRC || !DstRC)
2708     return false;
2709   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2710       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2711       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2712     return false;
2713 
2714   MachineBasicBlock *BB = MI.getParent();
2715   const DebugLoc &DL = MI.getDebugLoc();
2716   const bool Is64 = DstTy.getSizeInBits() == 64;
2717 
2718   unsigned SubReg;
2719   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2720                                                      DstTy.getSizeInBits() / 8);
2721 
2722   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2723     if (DstTy.getSizeInBits() != 32 && !Is64)
2724       return false;
2725 
2726     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2727       .addReg(IdxReg);
2728 
2729     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2730     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2731       .addReg(SrcReg, 0, SubReg)
2732       .addReg(SrcReg, RegState::Implicit);
2733     MI.eraseFromParent();
2734     return true;
2735   }
2736 
2737   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2738     return false;
2739 
2740   if (!STI.useVGPRIndexMode()) {
2741     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2742       .addReg(IdxReg);
2743     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2744       .addReg(SrcReg, 0, SubReg)
2745       .addReg(SrcReg, RegState::Implicit);
2746     MI.eraseFromParent();
2747     return true;
2748   }
2749 
2750   const MCInstrDesc &GPRIDXDesc =
2751       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2752   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2753       .addReg(SrcReg)
2754       .addReg(IdxReg)
2755       .addImm(SubReg);
2756 
2757   MI.eraseFromParent();
2758   return true;
2759 }
2760 
2761 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2762 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2763   MachineInstr &MI) const {
2764   Register DstReg = MI.getOperand(0).getReg();
2765   Register VecReg = MI.getOperand(1).getReg();
2766   Register ValReg = MI.getOperand(2).getReg();
2767   Register IdxReg = MI.getOperand(3).getReg();
2768 
2769   LLT VecTy = MRI->getType(DstReg);
2770   LLT ValTy = MRI->getType(ValReg);
2771   unsigned VecSize = VecTy.getSizeInBits();
2772   unsigned ValSize = ValTy.getSizeInBits();
2773 
2774   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2775   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2776   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2777 
2778   assert(VecTy.getElementType() == ValTy);
2779 
2780   // The index must be scalar. If it wasn't, RegBankSelect should have moved
2781   // this into a waterfall loop.
2782   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2783     return false;
2784 
2785   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2786                                                                   *MRI);
2787   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2788                                                                   *MRI);
2789 
2790   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2791       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2792       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2793       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2794     return false;
2795 
2796   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2797     return false;
2798 
2799   unsigned SubReg;
2800   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2801                                                      ValSize / 8);
2802 
2803   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2804                          STI.useVGPRIndexMode();
2805 
2806   MachineBasicBlock *BB = MI.getParent();
2807   const DebugLoc &DL = MI.getDebugLoc();
2808 
2809   if (!IndexMode) {
2810     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2811       .addReg(IdxReg);
2812 
2813     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2814         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2815     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2816         .addReg(VecReg)
2817         .addReg(ValReg)
2818         .addImm(SubReg);
2819     MI.eraseFromParent();
2820     return true;
2821   }
2822 
2823   const MCInstrDesc &GPRIDXDesc =
2824       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2825   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2826       .addReg(VecReg)
2827       .addReg(ValReg)
2828       .addReg(IdxReg)
2829       .addImm(SubReg);
2830 
2831   MI.eraseFromParent();
2832   return true;
2833 }
2834 
2835 static bool isZeroOrUndef(int X) {
2836   return X == 0 || X == -1;
2837 }
2838 
2839 static bool isOneOrUndef(int X) {
2840   return X == 1 || X == -1;
2841 }
2842 
2843 static bool isZeroOrOneOrUndef(int X) {
2844   return X == 0 || X == 1 || X == -1;
2845 }
2846 
2847 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2848 // 32-bit register.
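// For example (illustrative): Mask = {3, -1} reads only Src1, so this returns
// Src1 with NewMask = {1, -1}.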
2849 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2850                                    ArrayRef<int> Mask) {
2851   NewMask[0] = Mask[0];
2852   NewMask[1] = Mask[1];
2853   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2854     return Src0;
2855 
2856   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2857   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2858 
2859   // Shift the mask inputs to be 0/1.
2860   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2861   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2862   return Src1;
2863 }
2864 
2865 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2866 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2867   MachineInstr &MI) const {
2868   Register DstReg = MI.getOperand(0).getReg();
2869   Register Src0Reg = MI.getOperand(1).getReg();
2870   Register Src1Reg = MI.getOperand(2).getReg();
2871   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2872 
2873   const LLT V2S16 = LLT::fixed_vector(2, 16);
2874   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2875     return false;
2876 
2877   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2878     return false;
2879 
2880   assert(ShufMask.size() == 2);
2881   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2882 
2883   MachineBasicBlock *MBB = MI.getParent();
2884   const DebugLoc &DL = MI.getDebugLoc();
2885 
2886   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2887   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2888   const TargetRegisterClass &RC = IsVALU ?
2889     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2890 
2891   // Handle the degenerate case that should have been folded out.
2892   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2893     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2894 
2895     MI.eraseFromParent();
2896     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2897   }
2898 
2899   // A legal VOP3P mask only reads one of the sources.
2900   int Mask[2];
2901   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2902 
2903   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2904       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2905     return false;
2906 
2907   // TODO: This also should have been folded out.
2908   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2909     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2910       .addReg(SrcVec);
2911 
2912     MI.eraseFromParent();
2913     return true;
2914   }
2915 
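       // <1, u>: read the high half into the low half with a 16-bit shift.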
2916   if (Mask[0] == 1 && Mask[1] == -1) {
2917     if (IsVALU) {
2918       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2919         .addImm(16)
2920         .addReg(SrcVec);
2921     } else {
2922       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2923         .addReg(SrcVec)
2924         .addImm(16);
2925     }
2926   } else if (Mask[0] == -1 && Mask[1] == 0) {
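         // <u, 0>: write the low half into the high half with a 16-bit shift.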
2927     if (IsVALU) {
2928       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2929         .addImm(16)
2930         .addReg(SrcVec);
2931     } else {
2932       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2933         .addReg(SrcVec)
2934         .addImm(16);
2935     }
2936   } else if (Mask[0] == 0 && Mask[1] == 0) {
2937     if (IsVALU) {
2938       // Write low half of the register into the high half.
2939       MachineInstr *MovSDWA =
2940         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2941         .addImm(0)                             // $src0_modifiers
2942         .addReg(SrcVec)                        // $src0
2943         .addImm(0)                             // $clamp
2944         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2945         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2946         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2947         .addReg(SrcVec, RegState::Implicit);
2948       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2949     } else {
2950       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2951         .addReg(SrcVec)
2952         .addReg(SrcVec);
2953     }
2954   } else if (Mask[0] == 1 && Mask[1] == 1) {
2955     if (IsVALU) {
2956       // Write high half of the register into the low half.
2957       MachineInstr *MovSDWA =
2958         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2959         .addImm(0)                             // $src0_modifiers
2960         .addReg(SrcVec)                        // $src0
2961         .addImm(0)                             // $clamp
2962         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2963         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2964         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2965         .addReg(SrcVec, RegState::Implicit);
2966       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2967     } else {
2968       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2969         .addReg(SrcVec)
2970         .addReg(SrcVec);
2971     }
2972   } else if (Mask[0] == 1 && Mask[1] == 0) {
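         // <1, 0>: swap the halves; on the VALU side a rotate by 16 via
         // v_alignbit does this in a single instruction.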
2973     if (IsVALU) {
2974       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2975         .addReg(SrcVec)
2976         .addReg(SrcVec)
2977         .addImm(16);
2978     } else {
2979       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2980       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2981         .addReg(SrcVec)
2982         .addImm(16);
2983       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2984         .addReg(TmpReg)
2985         .addReg(SrcVec);
2986     }
2987   } else
2988     llvm_unreachable("all shuffle masks should be handled");
2989 
2990   MI.eraseFromParent();
2991   return true;
2992 }
2993 
2994 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2995   MachineInstr &MI) const {
2996   if (STI.hasGFX90AInsts())
2997     return selectImpl(MI, *CoverageInfo);
2998 
2999   MachineBasicBlock *MBB = MI.getParent();
3000   const DebugLoc &DL = MI.getDebugLoc();
3001 
3002   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3003     Function &F = MBB->getParent()->getFunction();
3004     DiagnosticInfoUnsupported
3005       NoFpRet(F, "return versions of fp atomics not supported",
3006               MI.getDebugLoc(), DS_Error);
3007     F.getContext().diagnose(NoFpRet);
3008     return false;
3009   }
3010 
3011   // FIXME: This is only needed because tablegen requires the number of dst
3012   // operands in the match and replace patterns to be the same. Otherwise the
3013   // patterns could be exported from the SDag path.
3014   MachineOperand &VDataIn = MI.getOperand(1);
3015   MachineOperand &VIndex = MI.getOperand(3);
3016   MachineOperand &VOffset = MI.getOperand(4);
3017   MachineOperand &SOffset = MI.getOperand(5);
3018   int16_t Offset = MI.getOperand(6).getImm();
3019 
3020   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
3021   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
3022 
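       // Pick the MUBUF addressing variant from which of vindex/voffset are
       // actually used.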
3023   unsigned Opcode;
3024   if (HasVOffset) {
3025     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
3026                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
3027   } else {
3028     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
3029                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
3030   }
3031 
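       // Packed f16 data uses the PK_ADD_F16 form of the same addressing
       // variant.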
3032   if (MRI->getType(VDataIn.getReg()).isVector()) {
3033     switch (Opcode) {
3034     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
3035       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
3036       break;
3037     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
3038       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3039       break;
3040     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3041       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3042       break;
3043     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3044       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3045       break;
3046     }
3047   }
3048 
3049   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3050   I.add(VDataIn);
3051 
3052   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3053       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
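         // BOTHEN takes a single 64-bit vaddr; pack {vindex, voffset} into a
         // register pair.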
3054     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3055     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3056       .addReg(VIndex.getReg())
3057       .addImm(AMDGPU::sub0)
3058       .addReg(VOffset.getReg())
3059       .addImm(AMDGPU::sub1);
3060 
3061     I.addReg(IdxReg);
3062   } else if (HasVIndex) {
3063     I.add(VIndex);
3064   } else if (HasVOffset) {
3065     I.add(VOffset);
3066   }
3067 
3068   I.add(MI.getOperand(2)); // rsrc
3069   I.add(SOffset);
3070   I.addImm(Offset);
3071   I.addImm(MI.getOperand(7).getImm()); // cpol
3072   I.cloneMemRefs(MI);
3073 
3074   MI.eraseFromParent();
3075 
3076   return true;
3077 }
3078 
3079 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3080   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3081 
3082   if (STI.hasGFX90AInsts()) {
3083     // gfx90a adds return versions of the global atomic fadd instructions, so
3084     // no special handling is required.
3085     return selectImpl(MI, *CoverageInfo);
3086   }
3087 
3088   MachineBasicBlock *MBB = MI.getParent();
3089   const DebugLoc &DL = MI.getDebugLoc();
3090 
3091   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3092     Function &F = MBB->getParent()->getFunction();
3093     DiagnosticInfoUnsupported
3094       NoFpRet(F, "return versions of fp atomics not supported",
3095               MI.getDebugLoc(), DS_Error);
3096     F.getContext().diagnose(NoFpRet);
3097     return false;
3098   }
3099 
3100   // FIXME: This is only needed because tablegen requires the number of dst
3101   // operands in the match and replace patterns to be the same. Otherwise the
3102   // patterns could be exported from the SDag path.
3103   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3104 
3105   Register Data = DataOp.getReg();
3106   const unsigned Opc = MRI->getType(Data).isVector() ?
3107     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3108   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3109     .addReg(Addr.first)
3110     .addReg(Data)
3111     .addImm(Addr.second)
3112     .addImm(0) // cpol
3113     .cloneMemRefs(MI);
3114 
3115   MI.eraseFromParent();
3116   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3117 }
3118 
3119 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3120   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3121   MI.RemoveOperand(1);
3122   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3123   return true;
3124 }
3125 
3126 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3127   if (I.isPHI())
3128     return selectPHI(I);
3129 
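       // Anything that is not a generic pre-ISel opcode is already selected;
       // only COPYs still need their register classes constrained.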
3130   if (!I.isPreISelOpcode()) {
3131     if (I.isCopy())
3132       return selectCOPY(I);
3133     return true;
3134   }
3135 
3136   switch (I.getOpcode()) {
3137   case TargetOpcode::G_AND:
3138   case TargetOpcode::G_OR:
3139   case TargetOpcode::G_XOR:
3140     if (selectImpl(I, *CoverageInfo))
3141       return true;
3142     return selectG_AND_OR_XOR(I);
3143   case TargetOpcode::G_ADD:
3144   case TargetOpcode::G_SUB:
3145     if (selectImpl(I, *CoverageInfo))
3146       return true;
3147     return selectG_ADD_SUB(I);
3148   case TargetOpcode::G_UADDO:
3149   case TargetOpcode::G_USUBO:
3150   case TargetOpcode::G_UADDE:
3151   case TargetOpcode::G_USUBE:
3152     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3153   case TargetOpcode::G_INTTOPTR:
3154   case TargetOpcode::G_BITCAST:
3155   case TargetOpcode::G_PTRTOINT:
3156     return selectCOPY(I);
3157   case TargetOpcode::G_CONSTANT:
3158   case TargetOpcode::G_FCONSTANT:
3159     return selectG_CONSTANT(I);
3160   case TargetOpcode::G_FNEG:
3161     if (selectImpl(I, *CoverageInfo))
3162       return true;
3163     return selectG_FNEG(I);
3164   case TargetOpcode::G_FABS:
3165     if (selectImpl(I, *CoverageInfo))
3166       return true;
3167     return selectG_FABS(I);
3168   case TargetOpcode::G_EXTRACT:
3169     return selectG_EXTRACT(I);
3170   case TargetOpcode::G_MERGE_VALUES:
3171   case TargetOpcode::G_BUILD_VECTOR:
3172   case TargetOpcode::G_CONCAT_VECTORS:
3173     return selectG_MERGE_VALUES(I);
3174   case TargetOpcode::G_UNMERGE_VALUES:
3175     return selectG_UNMERGE_VALUES(I);
3176   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3177     return selectG_BUILD_VECTOR_TRUNC(I);
3178   case TargetOpcode::G_PTR_ADD:
3179     return selectG_PTR_ADD(I);
3180   case TargetOpcode::G_IMPLICIT_DEF:
3181     return selectG_IMPLICIT_DEF(I);
3182   case TargetOpcode::G_FREEZE:
3183     return selectCOPY(I);
3184   case TargetOpcode::G_INSERT:
3185     return selectG_INSERT(I);
3186   case TargetOpcode::G_INTRINSIC:
3187     return selectG_INTRINSIC(I);
3188   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3189     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3190   case TargetOpcode::G_ICMP:
3191     if (selectG_ICMP(I))
3192       return true;
3193     return selectImpl(I, *CoverageInfo);
3194   case TargetOpcode::G_LOAD:
3195   case TargetOpcode::G_STORE:
3196   case TargetOpcode::G_ATOMIC_CMPXCHG:
3197   case TargetOpcode::G_ATOMICRMW_XCHG:
3198   case TargetOpcode::G_ATOMICRMW_ADD:
3199   case TargetOpcode::G_ATOMICRMW_SUB:
3200   case TargetOpcode::G_ATOMICRMW_AND:
3201   case TargetOpcode::G_ATOMICRMW_OR:
3202   case TargetOpcode::G_ATOMICRMW_XOR:
3203   case TargetOpcode::G_ATOMICRMW_MIN:
3204   case TargetOpcode::G_ATOMICRMW_MAX:
3205   case TargetOpcode::G_ATOMICRMW_UMIN:
3206   case TargetOpcode::G_ATOMICRMW_UMAX:
3207   case TargetOpcode::G_ATOMICRMW_FADD:
3208   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3209   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3210   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3211   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3212     return selectG_LOAD_STORE_ATOMICRMW(I);
3213   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
3214     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
3215   case TargetOpcode::G_SELECT:
3216     return selectG_SELECT(I);
3217   case TargetOpcode::G_TRUNC:
3218     return selectG_TRUNC(I);
3219   case TargetOpcode::G_SEXT:
3220   case TargetOpcode::G_ZEXT:
3221   case TargetOpcode::G_ANYEXT:
3222   case TargetOpcode::G_SEXT_INREG:
3223     if (selectImpl(I, *CoverageInfo))
3224       return true;
3225     return selectG_SZA_EXT(I);
3226   case TargetOpcode::G_BRCOND:
3227     return selectG_BRCOND(I);
3228   case TargetOpcode::G_GLOBAL_VALUE:
3229     return selectG_GLOBAL_VALUE(I);
3230   case TargetOpcode::G_PTRMASK:
3231     return selectG_PTRMASK(I);
3232   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3233     return selectG_EXTRACT_VECTOR_ELT(I);
3234   case TargetOpcode::G_INSERT_VECTOR_ELT:
3235     return selectG_INSERT_VECTOR_ELT(I);
3236   case TargetOpcode::G_SHUFFLE_VECTOR:
3237     return selectG_SHUFFLE_VECTOR(I);
3238   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3239   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
3240     const AMDGPU::ImageDimIntrinsicInfo *Intr
3241       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3242     assert(Intr && "not an image intrinsic with image pseudo");
3243     return selectImageIntrinsic(I, Intr);
3244   }
3245   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3246     return selectBVHIntrinsic(I);
3247   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3248     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3249   case AMDGPU::G_SBFX:
3250   case AMDGPU::G_UBFX:
3251     return selectG_SBFX_UBFX(I);
3252   case AMDGPU::G_SI_CALL:
3253     I.setDesc(TII.get(AMDGPU::SI_CALL));
3254     return true;
3255   default:
3256     return selectImpl(I, *CoverageInfo);
3257   }
3258   return false;
3259 }
3260 
3261 InstructionSelector::ComplexRendererFns
3262 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3263   return {{
3264       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3265   }};
3266 
3267 }
3268 
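     /// Strip fneg/fabs source modifiers from the def chain of \p Root,
     /// returning the underlying source register and the accumulated SISrcMods;
     /// copies the source to a VGPR if needed to respect the constant bus limit.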
3269 std::pair<Register, unsigned>
3270 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3271                                               bool AllowAbs) const {
3272   Register Src = Root.getReg();
3273   Register OrigSrc = Src;
3274   unsigned Mods = 0;
3275   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3276 
3277   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3278     Src = MI->getOperand(1).getReg();
3279     Mods |= SISrcMods::NEG;
3280     MI = getDefIgnoringCopies(Src, *MRI);
3281   }
3282 
3283   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3284     Src = MI->getOperand(1).getReg();
3285     Mods |= SISrcMods::ABS;
3286   }
3287 
3288   if (Mods != 0 &&
3289       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3290     MachineInstr *UseMI = Root.getParent();
3291 
3292     // If we looked through copies to find source modifiers on an SGPR operand,
3293     // we now have an SGPR register source. To avoid potentially violating the
3294     // constant bus restriction, we need to insert a copy to a VGPR.
3295     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3296     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3297             TII.get(AMDGPU::COPY), VGPRSrc)
3298       .addReg(Src);
3299     Src = VGPRSrc;
3300   }
3301 
3302   return std::make_pair(Src, Mods);
3303 }
3304 
3305 ///
3306 /// This will select either an SGPR or VGPR operand and will save us from
3307 /// having to write an extra tablegen pattern.
3308 InstructionSelector::ComplexRendererFns
3309 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3310   return {{
3311       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3312   }};
3313 }
3314 
3315 InstructionSelector::ComplexRendererFns
3316 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3317   Register Src;
3318   unsigned Mods;
3319   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3320 
3321   return {{
3322       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3323       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3324       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3325       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3326   }};
3327 }
3328 
3329 InstructionSelector::ComplexRendererFns
3330 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3331   Register Src;
3332   unsigned Mods;
3333   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3334 
3335   return {{
3336       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3337       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3338       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3339       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3340   }};
3341 }
3342 
3343 InstructionSelector::ComplexRendererFns
3344 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3345   return {{
3346       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3347       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3348       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3349   }};
3350 }
3351 
3352 InstructionSelector::ComplexRendererFns
3353 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3354   Register Src;
3355   unsigned Mods;
3356   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3357 
3358   return {{
3359       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3360       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3361   }};
3362 }
3363 
3364 InstructionSelector::ComplexRendererFns
3365 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3366   Register Src;
3367   unsigned Mods;
3368   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3369 
3370   return {{
3371       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3372       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3373   }};
3374 }
3375 
3376 InstructionSelector::ComplexRendererFns
3377 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3378   Register Reg = Root.getReg();
3379   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3380   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3381               Def->getOpcode() == AMDGPU::G_FABS))
3382     return {};
3383   return {{
3384       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3385   }};
3386 }
3387 
3388 std::pair<Register, unsigned>
3389 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3390   Register Src, const MachineRegisterInfo &MRI) const {
3391   unsigned Mods = 0;
3392   MachineInstr *MI = MRI.getVRegDef(Src);
3393 
3394   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3395       // It's possible to see an f32 fneg here, but unlikely.
3396       // TODO: Treat f32 fneg as only high bit.
3397       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3398     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3399     Src = MI->getOperand(1).getReg();
3400     MI = MRI.getVRegDef(Src);
3401   }
3402 
3403   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3404 
3405   // Packed instructions do not have abs modifiers.
3406   Mods |= SISrcMods::OP_SEL_1;
3407 
3408   return std::make_pair(Src, Mods);
3409 }
3410 
3411 InstructionSelector::ComplexRendererFns
3412 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3413   MachineRegisterInfo &MRI
3414     = Root.getParent()->getParent()->getParent()->getRegInfo();
3415 
3416   Register Src;
3417   unsigned Mods;
3418   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3419 
3420   return {{
3421       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3422       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3423   }};
3424 }
3425 
3426 InstructionSelector::ComplexRendererFns
3427 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3428   Register Src;
3429   unsigned Mods;
3430   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3431   if (!isKnownNeverNaN(Src, *MRI))
3432     return None;
3433 
3434   return {{
3435       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3436       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3437   }};
3438 }
3439 
3440 InstructionSelector::ComplexRendererFns
3441 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3442   // FIXME: Handle op_sel
3443   return {{
3444       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3445       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3446   }};
3447 }
3448 
3449 InstructionSelector::ComplexRendererFns
3450 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3451   SmallVector<GEPInfo, 4> AddrInfo;
3452   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3453 
3454   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3455     return None;
3456 
3457   const GEPInfo &GEPInfo = AddrInfo[0];
3458   Optional<int64_t> EncodedImm =
3459       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3460   if (!EncodedImm)
3461     return None;
3462 
3463   unsigned PtrReg = GEPInfo.SgprParts[0];
3464   return {{
3465     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3466     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3467   }};
3468 }
3469 
3470 InstructionSelector::ComplexRendererFns
3471 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3472   SmallVector<GEPInfo, 4> AddrInfo;
3473   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3474 
3475   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3476     return None;
3477 
3478   const GEPInfo &GEPInfo = AddrInfo[0];
3479   Register PtrReg = GEPInfo.SgprParts[0];
3480   Optional<int64_t> EncodedImm =
3481       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3482   if (!EncodedImm)
3483     return None;
3484 
3485   return {{
3486     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3487     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3488   }};
3489 }
3490 
3491 InstructionSelector::ComplexRendererFns
3492 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3493   MachineInstr *MI = Root.getParent();
3494   MachineBasicBlock *MBB = MI->getParent();
3495 
3496   SmallVector<GEPInfo, 4> AddrInfo;
3497   getAddrModeInfo(*MI, *MRI, AddrInfo);
3498 
3499   // FIXME: We should shrink the GEP if the offset is known to be <= 32 bits;
3500   // then we can select all ptr + 32-bit offsets, not just immediate offsets.
3501   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3502     return None;
3503 
3504   const GEPInfo &GEPInfo = AddrInfo[0];
3505   // SGPR offset is unsigned.
3506   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3507     return None;
3508 
3509   // If we make it this far we have a load with a 32-bit immediate offset.
3510   // It is OK to select this using an SGPR offset, because we have already
3511   // failed trying to select this load into one of the _IMM variants since
3512   // the _IMM patterns are considered before the _SGPR patterns.
3513   Register PtrReg = GEPInfo.SgprParts[0];
3514   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3515   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3516           .addImm(GEPInfo.Imm);
3517   return {{
3518     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3519     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3520   }};
3521 }
3522 
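     /// Try to peel a constant offset that is legal for \p FlatVariant off the
     /// address in \p Root; returns the base pointer and offset, or {Root, 0}.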
3523 std::pair<Register, int>
3524 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3525                                                 uint64_t FlatVariant) const {
3526   MachineInstr *MI = Root.getParent();
3527 
3528   auto Default = std::make_pair(Root.getReg(), 0);
3529 
3530   if (!STI.hasFlatInstOffsets())
3531     return Default;
3532 
3533   Register PtrBase;
3534   int64_t ConstOffset;
3535   std::tie(PtrBase, ConstOffset) =
3536       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3537   if (ConstOffset == 0)
3538     return Default;
3539 
3540   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3541   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3542     return Default;
3543 
3544   return std::make_pair(PtrBase, ConstOffset);
3545 }
3546 
3547 InstructionSelector::ComplexRendererFns
3548 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3549   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3550 
3551   return {{
3552       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3553       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3554     }};
3555 }
3556 
3557 InstructionSelector::ComplexRendererFns
3558 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3559   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3560 
3561   return {{
3562       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3563       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3564   }};
3565 }
3566 
3567 InstructionSelector::ComplexRendererFns
3568 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3569   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3570 
3571   return {{
3572       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3573       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3574     }};
3575 }
3576 
3577 /// Match a zero extend from a 32-bit value to 64 bits.
3578 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3579   Register ZExtSrc;
3580   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3581     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3582 
3583   // Match the legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0).
3584   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3585   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3586     return Register();
3587 
3588   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3589     return Def->getOperand(1).getReg();
3590   }
3591 
3592   return Register();
3593 }
3594 
3595 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3596 InstructionSelector::ComplexRendererFns
3597 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3598   Register Addr = Root.getReg();
3599   Register PtrBase;
3600   int64_t ConstOffset;
3601   int64_t ImmOffset = 0;
3602 
3603   // Match the immediate offset first, which is canonically moved as low as
3604   // possible.
3605   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3606 
3607   if (ConstOffset != 0) {
3608     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3609                               SIInstrFlags::FlatGlobal)) {
3610       Addr = PtrBase;
3611       ImmOffset = ConstOffset;
3612     } else {
3613       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3614       if (!PtrBaseDef)
3615         return None;
3616 
3617       if (isSGPR(PtrBaseDef->Reg)) {
3618         if (ConstOffset > 0) {
3619           // Offset is too large.
3620           //
3621           // saddr + large_offset -> saddr +
3622           //                         (voffset = large_offset & ~MaxOffset) +
3623           //                         (large_offset & MaxOffset);
3624           int64_t SplitImmOffset, RemainderOffset;
3625           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3626               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3627 
3628           if (isUInt<32>(RemainderOffset)) {
3629             MachineInstr *MI = Root.getParent();
3630             MachineBasicBlock *MBB = MI->getParent();
3631             Register HighBits =
3632                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3633 
3634             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3635                     HighBits)
3636                 .addImm(RemainderOffset);
3637 
3638             return {{
3639                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3640                 [=](MachineInstrBuilder &MIB) {
3641                   MIB.addReg(HighBits);
3642                 }, // voffset
3643                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3644             }};
3645           }
3646         }
3647 
3648         // We are adding a 64-bit SGPR and a constant. If the constant bus
3649         // limit is 1, we would need to perform 1 or 2 extra moves for each
3650         // half of the constant, so it is better to do a scalar add and then
3651         // issue a single VALU instruction to materialize zero. Otherwise it
3652         // takes fewer instructions to perform VALU adds with immediates or
3653         // inline literals.
3653         unsigned NumLiterals =
3654             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3655             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3656         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3657           return None;
3658       }
3659     }
3660   }
3661 
3662   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3663   if (!AddrDef)
3664     return None;
3665 
3666   // Match the variable offset.
3667   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3668     // Look through the SGPR->VGPR copy.
3669     Register SAddr =
3670         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3671 
3672     if (SAddr && isSGPR(SAddr)) {
3673       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3674 
3675       // It's possible that voffset is an SGPR here, but the copy to a VGPR
3676       // will be inserted later.
3677       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3678         return {{[=](MachineInstrBuilder &MIB) { // saddr
3679                    MIB.addReg(SAddr);
3680                  },
3681                  [=](MachineInstrBuilder &MIB) { // voffset
3682                    MIB.addReg(VOffset);
3683                  },
3684                  [=](MachineInstrBuilder &MIB) { // offset
3685                    MIB.addImm(ImmOffset);
3686                  }}};
3687       }
3688     }
3689   }
3690 
3691   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3692   // drop this.
3693   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3694       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3695     return None;
3696 
3697   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3698   // moves required to copy a 64-bit SGPR to a VGPR.
3699   MachineInstr *MI = Root.getParent();
3700   MachineBasicBlock *MBB = MI->getParent();
3701   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3702 
3703   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3704       .addImm(0);
3705 
3706   return {{
3707       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
3708       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
3709       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
3710   }};
3711 }
3712 
3713 InstructionSelector::ComplexRendererFns
3714 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3715   Register Addr = Root.getReg();
3716   Register PtrBase;
3717   int64_t ConstOffset;
3718   int64_t ImmOffset = 0;
3719 
3720   // Match the immediate offset first, which canonically is moved as low as
3721   // possible.
3722   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3723 
3724   if (ConstOffset != 0 &&
3725       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
3726                             SIInstrFlags::FlatScratch)) {
3727     Addr = PtrBase;
3728     ImmOffset = ConstOffset;
3729   }
3730 
3731   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3732   if (!AddrDef)
3733     return None;
3734 
3735   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3736     int FI = AddrDef->MI->getOperand(1).getIndex();
3737     return {{
3738         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3739         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3740     }};
3741   }
3742 
3743   Register SAddr = AddrDef->Reg;
3744 
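       // Fold (frame_index + sgpr) by emitting the add as a scalar S_ADD_I32 so
       // the sum can be used directly as saddr.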
3745   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3746     Register LHS = AddrDef->MI->getOperand(1).getReg();
3747     Register RHS = AddrDef->MI->getOperand(2).getReg();
3748     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3749     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3750 
3751     if (LHSDef && RHSDef &&
3752         LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3753         isSGPR(RHSDef->Reg)) {
3754       int FI = LHSDef->MI->getOperand(1).getIndex();
3755       MachineInstr &I = *Root.getParent();
3756       MachineBasicBlock *BB = I.getParent();
3757       const DebugLoc &DL = I.getDebugLoc();
3758       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3759 
3760       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
3761           .addFrameIndex(FI)
3762           .addReg(RHSDef->Reg);
3763     }
3764   }
3765 
3766   if (!isSGPR(SAddr))
3767     return None;
3768 
3769   return {{
3770       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3771       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3772   }};
3773 }
3774 
3775 InstructionSelector::ComplexRendererFns
3776 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
3777   MachineInstr *MI = Root.getParent();
3778   MachineBasicBlock *MBB = MI->getParent();
3779   MachineFunction *MF = MBB->getParent();
3780   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3781 
3782   int64_t Offset = 0;
3783   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
3784       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
3785     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3786 
3787     // TODO: Should this be inside the render function? The iterator seems to
3788     // move.
3789     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3790             HighBits)
3791       .addImm(Offset & ~4095);
3792 
3793     return {{[=](MachineInstrBuilder &MIB) { // rsrc
3794                MIB.addReg(Info->getScratchRSrcReg());
3795              },
3796              [=](MachineInstrBuilder &MIB) { // vaddr
3797                MIB.addReg(HighBits);
3798              },
3799              [=](MachineInstrBuilder &MIB) { // soffset
3800                // Use constant zero for soffset and rely on eliminateFrameIndex
3801                // to choose the appropriate frame register if need be.
3802                MIB.addImm(0);
3803              },
3804              [=](MachineInstrBuilder &MIB) { // offset
3805                MIB.addImm(Offset & 4095);
3806              }}};
3807   }
3808 
3809   assert(Offset == 0 || Offset == -1);
3810 
3811   // Try to fold a frame index directly into the MUBUF vaddr field, and any
3812   // offsets.
3813   Optional<int> FI;
3814   Register VAddr = Root.getReg();
3815   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3816     Register PtrBase;
3817     int64_t ConstOffset;
3818     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
3819     if (ConstOffset != 0) {
3820       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
3821           (!STI.privateMemoryResourceIsRangeChecked() ||
3822            KnownBits->signBitIsZero(PtrBase))) {
3823         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
3824         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3825           FI = PtrBaseDef->getOperand(1).getIndex();
3826         else
3827           VAddr = PtrBase;
3828         Offset = ConstOffset;
3829       }
3830     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3831       FI = RootDef->getOperand(1).getIndex();
3832     }
3833   }
3834 
3835   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3836              MIB.addReg(Info->getScratchRSrcReg());
3837            },
3838            [=](MachineInstrBuilder &MIB) { // vaddr
3839              if (FI.hasValue())
3840                MIB.addFrameIndex(FI.getValue());
3841              else
3842                MIB.addReg(VAddr);
3843            },
3844            [=](MachineInstrBuilder &MIB) { // soffset
3845              // Use constant zero for soffset and rely on eliminateFrameIndex
3846              // to choose the appropriate frame register if need be.
3847              MIB.addImm(0);
3848            },
3849            [=](MachineInstrBuilder &MIB) { // offset
3850              MIB.addImm(Offset);
3851            }}};
3852 }
3853 
3854 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3855                                                 int64_t Offset) const {
3856   if (!isUInt<16>(Offset))
3857     return false;
3858 
3859   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3860     return true;
3861 
3862   // On Southern Islands, instructions with a negative base value and an
3863   // offset don't seem to work.
3864   return KnownBits->signBitIsZero(Base);
3865 }
3866 
3867 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3868                                                  int64_t Offset1,
3869                                                  unsigned Size) const {
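       // Both offsets are encoded in 8 bits in units of Size bytes, so they
       // must be Size-aligned and their scaled values must fit in 8 bits.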
3870   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3871     return false;
3872   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3873     return false;
3874 
3875   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3876     return true;
3877 
3878   // On Southern Islands, instructions with a negative base value and an
3879   // offset don't seem to work.
3880   return KnownBits->signBitIsZero(Base);
3881 }
3882 
3883 InstructionSelector::ComplexRendererFns
3884 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3885     MachineOperand &Root) const {
3886   MachineInstr *MI = Root.getParent();
3887   MachineBasicBlock *MBB = MI->getParent();
3888 
3889   int64_t Offset = 0;
3890   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3891       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3892     return {};
3893 
3894   const MachineFunction *MF = MBB->getParent();
3895   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3896 
3897   return {{
3898       [=](MachineInstrBuilder &MIB) { // rsrc
3899         MIB.addReg(Info->getScratchRSrcReg());
3900       },
3901       [=](MachineInstrBuilder &MIB) { // soffset
3902         MIB.addImm(0);
3903       },
3904       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3905   }};
3906 }
3907 
3908 std::pair<Register, unsigned>
3909 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3910   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3911   if (!RootDef)
3912     return std::make_pair(Root.getReg(), 0);
3913 
3914   int64_t ConstAddr = 0;
3915 
3916   Register PtrBase;
3917   int64_t Offset;
3918   std::tie(PtrBase, Offset) =
3919     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3920 
3921   if (Offset) {
3922     if (isDSOffsetLegal(PtrBase, Offset)) {
3923       // (add n0, c0)
3924       return std::make_pair(PtrBase, Offset);
3925     }
3926   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3927     // TODO
3928 
3929 
3930   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3931     // TODO
3932 
3933   }
3934 
3935   return std::make_pair(Root.getReg(), 0);
3936 }
3937 
3938 InstructionSelector::ComplexRendererFns
3939 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3940   Register Reg;
3941   unsigned Offset;
3942   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3943   return {{
3944       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3945       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3946     }};
3947 }
3948 
3949 InstructionSelector::ComplexRendererFns
3950 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3951   return selectDSReadWrite2(Root, 4);
3952 }
3953 
3954 InstructionSelector::ComplexRendererFns
3955 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
3956   return selectDSReadWrite2(Root, 8);
3957 }
3958 
3959 InstructionSelector::ComplexRendererFns
3960 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
3961                                               unsigned Size) const {
3962   Register Reg;
3963   unsigned Offset;
3964   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
3965   return {{
3966       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3967       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3968       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3969     }};
3970 }
3971 
3972 std::pair<Register, unsigned>
3973 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
3974                                                   unsigned Size) const {
3975   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3976   if (!RootDef)
3977     return std::make_pair(Root.getReg(), 0);
3978 
3979   int64_t ConstAddr = 0;
3980 
3981   Register PtrBase;
3982   int64_t Offset;
3983   std::tie(PtrBase, Offset) =
3984     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3985 
3986   if (Offset) {
3987     int64_t OffsetValue0 = Offset;
3988     int64_t OffsetValue1 = Offset + Size;
3989     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
3990       // (add n0, c0)
3991       return std::make_pair(PtrBase, OffsetValue0 / Size);
3992     }
3993   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3994     // TODO
3995 
3996   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3997     // TODO
3998 
3999   }
4000 
4001   return std::make_pair(Root.getReg(), 0);
4002 }
4003 
4004 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4005 /// the base value with the constant offset. There may be intervening copies
4006 /// between \p Root and the identified constant. Returns {\p Root, 0} if this
4007 /// does not match the pattern.
4008 std::pair<Register, int64_t>
4009 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4010   Register Root, const MachineRegisterInfo &MRI) const {
4011   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4012   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4013     return {Root, 0};
4014 
4015   MachineOperand &RHS = RootI->getOperand(2);
4016   Optional<ValueAndVReg> MaybeOffset =
4017       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4018   if (!MaybeOffset)
4019     return {Root, 0};
4020   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4021 }
4022 
4023 static void addZeroImm(MachineInstrBuilder &MIB) {
4024   MIB.addImm(0);
4025 }
4026 
4027 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4028 /// BasePtr is not valid, a null base pointer will be used.
4029 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4030                           uint32_t FormatLo, uint32_t FormatHi,
4031                           Register BasePtr) {
4032   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4033   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4034   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4035   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4036 
4037   B.buildInstr(AMDGPU::S_MOV_B32)
4038     .addDef(RSrc2)
4039     .addImm(FormatLo);
4040   B.buildInstr(AMDGPU::S_MOV_B32)
4041     .addDef(RSrc3)
4042     .addImm(FormatHi);
4043 
4044   // Build the register half holding the constants before building the full
4045   // 128-bit register. If we are building multiple resource descriptors,
4046   // this will allow CSEing of the 2-component register.
4047   B.buildInstr(AMDGPU::REG_SEQUENCE)
4048     .addDef(RSrcHi)
4049     .addReg(RSrc2)
4050     .addImm(AMDGPU::sub0)
4051     .addReg(RSrc3)
4052     .addImm(AMDGPU::sub1);
4053 
4054   Register RSrcLo = BasePtr;
4055   if (!BasePtr) {
4056     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4057     B.buildInstr(AMDGPU::S_MOV_B64)
4058       .addDef(RSrcLo)
4059       .addImm(0);
4060   }
4061 
4062   B.buildInstr(AMDGPU::REG_SEQUENCE)
4063     .addDef(RSrc)
4064     .addReg(RSrcLo)
4065     .addImm(AMDGPU::sub0_sub1)
4066     .addReg(RSrcHi)
4067     .addImm(AMDGPU::sub2_sub3);
4068 
4069   return RSrc;
4070 }
4071 
4072 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4073                                 const SIInstrInfo &TII, Register BasePtr) {
4074   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4075 
4076   // FIXME: Why are half the "default" bits ignored based on the addressing
4077   // mode?
4078   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4079 }
4080 
4081 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4082                                const SIInstrInfo &TII, Register BasePtr) {
4083   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4084 
4085   // FIXME: Why are half the "default" bits ignored based on the addressing
4086   // mode?
4087   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4088 }
4089 
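     /// Decompose \p Src into MUBUF addressing components: a 32-bit constant
     /// offset and, when the base is itself a ptr_add, its two addends N2 and N3.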
4090 AMDGPUInstructionSelector::MUBUFAddressData
4091 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4092   MUBUFAddressData Data;
4093   Data.N0 = Src;
4094 
4095   Register PtrBase;
4096   int64_t Offset;
4097 
4098   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4099   if (isUInt<32>(Offset)) {
4100     Data.N0 = PtrBase;
4101     Data.Offset = Offset;
4102   }
4103 
4104   if (MachineInstr *InputAdd
4105       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4106     Data.N2 = InputAdd->getOperand(1).getReg();
4107     Data.N3 = InputAdd->getOperand(2).getReg();
4108 
4109     // FIXME: Need to fix the extra SGPR->VGPR copies inserted
4110     // FIXME: We don't know that this was defined by operand 0
4111     //
4112     // TODO: Remove this when we have copy folding optimizations after
4113     // RegBankSelect.
4114     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4115     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4116   }
4117 
4118   return Data;
4119 }
4120 
4121 /// Return whether the addr64 MUBUF mode should be used for the given address.
4122 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4123   // (ptr_add N2, N3) -> addr64, or
4124   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4125   if (Addr.N2)
4126     return true;
4127 
4128   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4129   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4130 }
4131 
4132 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4133 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4134 /// component.
4135 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4136   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4137   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4138     return;
4139 
4140   // Illegal offset, store it in soffset.
4141   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4142   B.buildInstr(AMDGPU::S_MOV_B32)
4143     .addDef(SOffset)
4144     .addImm(ImmOffset);
4145   ImmOffset = 0;
4146 }
4147 
4148 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4149   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4150   Register &SOffset, int64_t &Offset) const {
4151   // FIXME: Predicates should stop this from reaching here.
4152   // addr64 bit was removed for volcanic islands.
4153   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4154     return false;
4155 
4156   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4157   if (!shouldUseAddr64(AddrData))
4158     return false;
4159 
4160   Register N0 = AddrData.N0;
4161   Register N2 = AddrData.N2;
4162   Register N3 = AddrData.N3;
4163   Offset = AddrData.Offset;
4164 
4165   // Base pointer for the SRD.
4166   Register SRDPtr;
4167 
4168   if (N2) {
4169     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4170       assert(N3);
4171       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4172         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4173         // addr64, and construct the default resource from a 0 address.
4174         VAddr = N0;
4175       } else {
4176         SRDPtr = N3;
4177         VAddr = N2;
4178       }
4179     } else {
4180       // N2 is not divergent.
4181       SRDPtr = N2;
4182       VAddr = N3;
4183     }
4184   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4185     // Use the default null pointer in the resource
4186     VAddr = N0;
4187   } else {
4188     // N0 -> offset, or
4189     // (N0 + C1) -> offset
4190     SRDPtr = N0;
4191   }
4192 
4193   MachineIRBuilder B(*Root.getParent());
4194   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4195   splitIllegalMUBUFOffset(B, SOffset, Offset);
4196   return true;
4197 }
4198 
4199 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4200   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4201   int64_t &Offset) const {
4202 
4203   // FIXME: Pattern should not reach here.
4204   if (STI.useFlatForGlobal())
4205     return false;
4206 
4207   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4208   if (shouldUseAddr64(AddrData))
4209     return false;
4210 
4211   // N0 -> offset, or
4212   // (N0 + C1) -> offset
4213   Register SRDPtr = AddrData.N0;
4214   Offset = AddrData.Offset;
4215 
4216   // TODO: Look through extensions for 32-bit soffset.
4217   MachineIRBuilder B(*Root.getParent());
4218 
4219   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4220   splitIllegalMUBUFOffset(B, SOffset, Offset);
4221   return true;
4222 }
4223 
4224 InstructionSelector::ComplexRendererFns
4225 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4226   Register VAddr;
4227   Register RSrcReg;
4228   Register SOffset;
4229   int64_t Offset = 0;
4230 
4231   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4232     return {};
4233 
4234   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4235   // pattern.
4236   return {{
4237       [=](MachineInstrBuilder &MIB) {  // rsrc
4238         MIB.addReg(RSrcReg);
4239       },
4240       [=](MachineInstrBuilder &MIB) { // vaddr
4241         MIB.addReg(VAddr);
4242       },
4243       [=](MachineInstrBuilder &MIB) { // soffset
4244         if (SOffset)
4245           MIB.addReg(SOffset);
4246         else
4247           MIB.addImm(0);
4248       },
4249       [=](MachineInstrBuilder &MIB) { // offset
4250         MIB.addImm(Offset);
4251       },
4252       addZeroImm, //  cpol
4253       addZeroImm, //  tfe
4254       addZeroImm  //  swz
4255     }};
4256 }
4257 
4258 InstructionSelector::ComplexRendererFns
4259 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4260   Register RSrcReg;
4261   Register SOffset;
4262   int64_t Offset = 0;
4263 
4264   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4265     return {};
4266 
4267   return {{
4268       [=](MachineInstrBuilder &MIB) {  // rsrc
4269         MIB.addReg(RSrcReg);
4270       },
4271       [=](MachineInstrBuilder &MIB) { // soffset
4272         if (SOffset)
4273           MIB.addReg(SOffset);
4274         else
4275           MIB.addImm(0);
4276       },
4277       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4278       addZeroImm, //  cpol
4279       addZeroImm, //  tfe
4280       addZeroImm, //  swz
4281     }};
4282 }
4283 
4284 InstructionSelector::ComplexRendererFns
4285 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4286   Register VAddr;
4287   Register RSrcReg;
4288   Register SOffset;
4289   int64_t Offset = 0;
4290 
4291   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4292     return {};
4293 
4294   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4295   // pattern.
4296   return {{
4297       [=](MachineInstrBuilder &MIB) {  // rsrc
4298         MIB.addReg(RSrcReg);
4299       },
4300       [=](MachineInstrBuilder &MIB) { // vaddr
4301         MIB.addReg(VAddr);
4302       },
4303       [=](MachineInstrBuilder &MIB) { // soffset
4304         if (SOffset)
4305           MIB.addReg(SOffset);
4306         else
4307           MIB.addImm(0);
4308       },
4309       [=](MachineInstrBuilder &MIB) { // offset
4310         MIB.addImm(Offset);
4311       },
4312       [=](MachineInstrBuilder &MIB) {
4313         MIB.addImm(AMDGPU::CPol::GLC); // cpol
4314       }
4315     }};
4316 }
4317 
4318 InstructionSelector::ComplexRendererFns
4319 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4320   Register RSrcReg;
4321   Register SOffset;
4322   int64_t Offset = 0;
4323 
4324   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4325     return {};
4326 
4327   return {{
4328       [=](MachineInstrBuilder &MIB) {  // rsrc
4329         MIB.addReg(RSrcReg);
4330       },
4331       [=](MachineInstrBuilder &MIB) { // soffset
4332         if (SOffset)
4333           MIB.addReg(SOffset);
4334         else
4335           MIB.addImm(0);
4336       },
4337       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4338       [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4339     }};
4340 }
4341 
4342 /// Get an immediate that must be 32 bits, and is treated as zero-extended.
4343 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4344                                                const MachineRegisterInfo &MRI) {
4345   // getIConstantVRegSExtVal sign-extends, so verify the value fits in 32 bits.
4346   Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4347   if (!OffsetVal || !isInt<32>(*OffsetVal))
4348     return None;
4349   return Lo_32(*OffsetVal);
4350 }
4351 
4352 InstructionSelector::ComplexRendererFns
4353 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4354   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4355   if (!OffsetVal)
4356     return {};
4357 
4358   Optional<int64_t> EncodedImm =
4359       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4360   if (!EncodedImm)
4361     return {};
4362 
4363   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4364 }
4365 
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }}};
}

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}
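
// For instance, an f32 G_FCONSTANT of 1.0 is rendered as the immediate
// 0x3f800000, i.e. its raw IEEE-754 bit pattern.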

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}
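
// e.g. a G_CONSTANT of 0xF0F0 is rendered as the immediate 8 (its popcount).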

/// This only really exists to satisfy the DAG type-checking machinery; here it
/// simply re-emits the immediate unchanged.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}
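
// The mask keeps only the cache-policy bits of the intrinsic's auxiliary
// operand; other bits, such as the swizzle flag, are extracted separately.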

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}
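
// Bit 3 of the buffer intrinsic's auxiliary operand carries the swizzle flag,
// matching the (Imm >> 3) & 1 extraction above.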

void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}
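
// Used by patterns that must have GLC set unconditionally, e.g. atomic forms
// whose result is used.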

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex(MI.getOperand(1).getIndex());
}

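// The following predicates back the generated matcher's inline-immediate
// checks: an inlinable constant is encoded directly in the operand field and
// does not consume an extra 32-bit literal dword.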
bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}
