//===-- AMDGPUISelDAGToDAG.cpp - A dag to dag inst selector for AMDGPU ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//==-----------------------------------------------------------------------===//
//
/// \file
/// Defines an instruction selector for the AMDGPU target.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/InitializePasses.h"

#ifdef EXPENSIVE_CHECKS
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#endif

#define DEBUG_TYPE "isel"

using namespace llvm;

namespace llvm {

class R600InstrInfo;

} // end namespace llvm

//===----------------------------------------------------------------------===//
// Instruction Selector Implementation
//===----------------------------------------------------------------------===//

namespace {

static bool isNullConstantOrUndef(SDValue V) {
  if (V.isUndef())
    return true;

  ConstantSDNode *Const = dyn_cast<ConstantSDNode>(V);
  return Const != nullptr && Const->isNullValue();
}

static bool getConstantValue(SDValue N, uint32_t &Out) {
  // This is only used for packed vectors, where using 0 for undef should
  // always be good.
  if (N.isUndef()) {
    Out = 0;
    return true;
  }

  if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N)) {
    Out = C->getAPIntValue().getSExtValue();
    return true;
  }

  if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N)) {
    Out = C->getValueAPF().bitcastToAPInt().getSExtValue();
    return true;
  }

  return false;
}

// TODO: Handle undef as zero
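// Pack the two 16-bit elements of a constant BUILD_VECTOR into one 32-bit
// S_MOV_B32 immediate. For example, (build_vector (i16 1), (i16 0xfffe)) with
// Negate = false packs to K = (1 & 0xffff) | (0xfffe << 16) = 0xfffe0001.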
static SDNode *packConstantV2I16(const SDNode *N, SelectionDAG &DAG,
                                 bool Negate = false) {
  assert(N->getOpcode() == ISD::BUILD_VECTOR && N->getNumOperands() == 2);
  uint32_t LHSVal, RHSVal;
  if (getConstantValue(N->getOperand(0), LHSVal) &&
      getConstantValue(N->getOperand(1), RHSVal)) {
    SDLoc SL(N);
    uint32_t K = Negate ?
      (-LHSVal & 0xffff) | (-RHSVal << 16) :
      (LHSVal & 0xffff) | (RHSVal << 16);
    return DAG.getMachineNode(AMDGPU::S_MOV_B32, SL, N->getValueType(0),
                              DAG.getTargetConstant(K, SL, MVT::i32));
  }

  return nullptr;
}

static SDNode *packNegConstantV2I16(const SDNode *N, SelectionDAG &DAG) {
  return packConstantV2I16(N, DAG, true);
}

/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const GCNSubtarget *Subtarget;

  // Default FP mode for the current function.
  AMDGPU::SIModeRegisterDefaults Mode;

  bool EnableLateStructurizeCFG;

public:
  explicit AMDGPUDAGToDAGISel(TargetMachine *TM = nullptr,
                              CodeGenOpt::Level OptLevel = CodeGenOpt::Default)
    : SelectionDAGISel(*TM, OptLevel) {
    EnableLateStructurizeCFG = AMDGPUTargetMachine::EnableLateStructurizeCFG;
  }
  ~AMDGPUDAGToDAGISel() override = default;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AMDGPUArgumentUsageInfo>();
    AU.addRequired<LegacyDivergenceAnalysis>();
#ifdef EXPENSIVE_CHECKS
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
#endif
    SelectionDAGISel::getAnalysisUsage(AU);
  }

  bool matchLoadD16FromBuildVector(SDNode *N) const;

  bool runOnMachineFunction(MachineFunction &MF) override;
  void PreprocessISelDAG() override;
  void Select(SDNode *N) override;
  StringRef getPassName() const override;
  void PostprocessISelDAG() override;

protected:
  void SelectBuildVector(SDNode *N, unsigned RegClassID);

private:
  std::pair<SDValue, SDValue> foldFrameIndex(SDValue N) const;
  bool isNoNanSrc(SDValue N) const;
  bool isInlineImmediate(const SDNode *N, bool Negated = false) const;
  bool isNegInlineImmediate(const SDNode *N) const {
    return isInlineImmediate(N, true);
  }

  bool isInlineImmediate16(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral16(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate32(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral32(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate64(int64_t Imm) const {
    return AMDGPU::isInlinableLiteral64(Imm, Subtarget->hasInv2PiInlineImm());
  }

  bool isInlineImmediate(const APFloat &Imm) const {
    return Subtarget->getInstrInfo()->isInlineConstant(Imm);
  }

  bool isVGPRImm(const SDNode *N) const;
  bool isUniformLoad(const SDNode *N) const;
  bool isUniformBr(const SDNode *N) const;

  bool isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
                                  SDValue &RHS) const;

  MachineSDNode *buildSMovImm64(SDLoc &DL, uint64_t Val, EVT VT) const;

  SDNode *glueCopyToOp(SDNode *N, SDValue NewChain, SDValue Glue) const;
  SDNode *glueCopyToM0(SDNode *N, SDValue Val) const;
  SDNode *glueCopyToM0LDSInit(SDNode *N) const;

  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  virtual bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  virtual bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool isDSOffsetLegal(SDValue Base, unsigned Offset) const;
  bool isDSOffset2Legal(SDValue Base, unsigned Offset0, unsigned Offset1,
                        unsigned Size) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  bool SelectDS128Bit8ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                  SDValue &Offset1) const;
  bool SelectDSReadWrite2(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                          SDValue &Offset1, unsigned Size) const;
  bool SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE, SDValue &DLC, SDValue &SWZ) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &SOffset, SDValue &Offset, SDValue &GLC,
                         SDValue &SLC, SDValue &TFE, SDValue &DLC,
                         SDValue &SWZ) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &SOffset, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratchOffen(SDNode *Parent,
                               SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                               SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFScratchOffset(SDNode *Parent,
                                SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                                SDValue &Offset) const;

  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE, SDValue &DLC, SDValue &SWZ) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &SLC) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset) const;

  template <bool IsSigned>
  bool SelectFlatOffset(SDNode *N, SDValue Addr, SDValue &VAddr,
                        SDValue &Offset) const;
  bool SelectGlobalSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
                         SDValue &VOffset, SDValue &Offset) const;
  bool SelectScratchSAddr(SDNode *N, SDValue Addr, SDValue &SAddr,
                          SDValue &Offset) const;

  bool SelectSMRDOffset(SDValue ByteOffsetNode, SDValue &Offset,
                        bool &Imm) const;
  SDValue Expand32BitAddress(SDValue Addr) const;
  bool SelectSMRD(SDValue Addr, SDValue &SBase, SDValue &Offset,
                  bool &Imm) const;
  bool SelectSMRDImm(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDImm32(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDSgpr(SDValue Addr, SDValue &SBase, SDValue &Offset) const;
  bool SelectSMRDBufferImm(SDValue Addr, SDValue &Offset) const;
  bool SelectSMRDBufferImm32(SDValue Addr, SDValue &Offset) const;
  bool SelectMOVRELOffset(SDValue Index, SDValue &Base, SDValue &Offset) const;

  bool SelectVOP3Mods_NNaN(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3ModsImpl(SDValue In, SDValue &Src, unsigned &SrcMods,
                          bool AllowAbs = true) const;
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3BMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3NoMods(SDValue In, SDValue &Src) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3BMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                        SDValue &Clamp, SDValue &Omod) const;
  bool SelectVOP3NoMods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                         SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3OMods(SDValue In, SDValue &Src,
                       SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3PMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSel(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  bool SelectVOP3OpSelMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src, unsigned &Mods) const;
  bool SelectVOP3PMadMixMods(SDValue In, SDValue &Src, SDValue &SrcMods) const;

  SDValue getHi16Elt(SDValue In) const;

  SDValue getMaterializedScalarImm32(int64_t Val, const SDLoc &DL) const;

  void SelectADD_SUB_I64(SDNode *N);
  void SelectAddcSubb(SDNode *N);
  void SelectUADDO_USUBO(SDNode *N);
  void SelectDIV_SCALE(SDNode *N);
  void SelectMAD_64_32(SDNode *N);
  void SelectFMA_W_CHAIN(SDNode *N);
  void SelectFMUL_W_CHAIN(SDNode *N);

  SDNode *getS_BFE(unsigned Opcode, const SDLoc &DL, SDValue Val,
                   uint32_t Offset, uint32_t Width);
  void SelectS_BFEFromShifts(SDNode *N);
  void SelectS_BFE(SDNode *N);
  bool isCBranchSCC(const SDNode *N) const;
  void SelectBRCOND(SDNode *N);
  void SelectFMAD_FMA(SDNode *N);
  void SelectATOMIC_CMP_SWAP(SDNode *N);
  void SelectDSAppendConsume(SDNode *N, unsigned IntrID);
  void SelectDS_GWS(SDNode *N, unsigned IntrID);
  void SelectInterpP1F16(SDNode *N);
  void SelectINTRINSIC_W_CHAIN(SDNode *N);
  void SelectINTRINSIC_WO_CHAIN(SDNode *N);
  void SelectINTRINSIC_VOID(SDNode *N);

protected:
  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};

class R600DAGToDAGISel : public AMDGPUDAGToDAGISel {
  const R600Subtarget *Subtarget;

  bool isConstantLoad(const MemSDNode *N, int cbID) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue& Offset);
public:
  explicit R600DAGToDAGISel(TargetMachine *TM, CodeGenOpt::Level OptLevel) :
      AMDGPUDAGToDAGISel(TM, OptLevel) {}

  void Select(SDNode *N) override;

  bool SelectADDRIndirect(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                          SDValue &Offset) override;

  bool runOnMachineFunction(MachineFunction &MF) override;

  void PreprocessISelDAG() override {}

protected:
  // Include the pieces autogenerated from the target description.
#include "R600GenDAGISel.inc"
};

static SDValue stripBitcast(SDValue Val) {
  return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
}

// Figure out if this is really an extract of the high 16-bits of a dword.
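// That is, match (trunc (srl X, 16)), looking through bitcasts; on success,
// Out is set to the 32-bit source X.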
static bool isExtractHiElt(SDValue In, SDValue &Out) {
  In = stripBitcast(In);
  if (In.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue Srl = In.getOperand(0);
  if (Srl.getOpcode() == ISD::SRL) {
    if (ConstantSDNode *ShiftAmt = dyn_cast<ConstantSDNode>(Srl.getOperand(1))) {
      if (ShiftAmt->getZExtValue() == 16) {
        Out = stripBitcast(Srl.getOperand(0));
        return true;
      }
    }
  }

  return false;
}

// Look through operations that obscure just looking at the low 16-bits of the
// same register.
static SDValue stripExtractLoElt(SDValue In) {
  if (In.getOpcode() == ISD::TRUNCATE) {
    SDValue Src = In.getOperand(0);
    if (Src.getValueType().getSizeInBits() == 32)
      return stripBitcast(Src);
  }

  return In;
}

}  // end anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUDAGToDAGISel, "amdgpu-isel",
                      "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)
INITIALIZE_PASS_DEPENDENCY(AMDGPUArgumentUsageInfo)
INITIALIZE_PASS_DEPENDENCY(AMDGPUPerfHintAnalysis)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
#ifdef EXPENSIVE_CHECKS
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
#endif
INITIALIZE_PASS_END(AMDGPUDAGToDAGISel, "amdgpu-isel",
                    "AMDGPU DAG->DAG Pattern Instruction Selection", false, false)

/// This pass converts a legalized DAG into an AMDGPU-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createAMDGPUISelDag(TargetMachine *TM,
                                        CodeGenOpt::Level OptLevel) {
  return new AMDGPUDAGToDAGISel(TM, OptLevel);
}

/// This pass converts a legalized DAG into an R600-specific
/// DAG, ready for instruction scheduling.
FunctionPass *llvm::createR600ISelDag(TargetMachine *TM,
                                      CodeGenOpt::Level OptLevel) {
  return new R600DAGToDAGISel(TM, OptLevel);
}

bool AMDGPUDAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
#ifdef EXPENSIVE_CHECKS
  DominatorTree & DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  LoopInfo * LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  for (auto &L : LI->getLoopsInPreorder()) {
    assert(L->isLCSSAForm(DT));
  }
#endif
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  Mode = AMDGPU::SIModeRegisterDefaults(MF.getFunction());
  return SelectionDAGISel::runOnMachineFunction(MF);
}

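// Fold a 16-bit load feeding one half of a v2i16/v2f16 build_vector into a
// d16 hi/lo load that writes only half of the destination register, when the
// subtarget preserves the unused bits of the other half.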
bool AMDGPUDAGToDAGISel::matchLoadD16FromBuildVector(SDNode *N) const {
  assert(Subtarget->d16PreservesUnusedBits());
  MVT VT = N->getValueType(0).getSimpleVT();
  if (VT != MVT::v2i16 && VT != MVT::v2f16)
    return false;

  SDValue Lo = N->getOperand(0);
  SDValue Hi = N->getOperand(1);

  LoadSDNode *LdHi = dyn_cast<LoadSDNode>(stripBitcast(Hi));

  // build_vector lo, (load ptr) -> load_d16_hi ptr, lo
  // build_vector lo, (zextload ptr from i8) -> load_d16_hi_u8 ptr, lo
  // build_vector lo, (sextload ptr from i8) -> load_d16_hi_i8 ptr, lo

  // Need to check for possible indirect dependencies on the other half of the
  // vector to avoid introducing a cycle.
  if (LdHi && Hi.hasOneUse() && !LdHi->isPredecessorOf(Lo.getNode())) {
    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);

    SDValue TiedIn = CurDAG->getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Lo);
    SDValue Ops[] = {
      LdHi->getChain(), LdHi->getBasePtr(), TiedIn
    };

    unsigned LoadOp = AMDGPUISD::LOAD_D16_HI;
    if (LdHi->getMemoryVT() == MVT::i8) {
      LoadOp = LdHi->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_HI_I8 : AMDGPUISD::LOAD_D16_HI_U8;
    } else {
      assert(LdHi->getMemoryVT() == MVT::i16);
    }

    SDValue NewLoadHi =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdHi), VTList,
                                  Ops, LdHi->getMemoryVT(),
                                  LdHi->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadHi);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdHi, 1), NewLoadHi.getValue(1));
    return true;
  }

  // build_vector (load ptr), hi -> load_d16_lo ptr, hi
  // build_vector (zextload ptr from i8), hi -> load_d16_lo_u8 ptr, hi
  // build_vector (sextload ptr from i8), hi -> load_d16_lo_i8 ptr, hi
  LoadSDNode *LdLo = dyn_cast<LoadSDNode>(stripBitcast(Lo));
  if (LdLo && Lo.hasOneUse()) {
    SDValue TiedIn = getHi16Elt(Hi);
    if (!TiedIn || LdLo->isPredecessorOf(TiedIn.getNode()))
      return false;

    SDVTList VTList = CurDAG->getVTList(VT, MVT::Other);
    unsigned LoadOp = AMDGPUISD::LOAD_D16_LO;
    if (LdLo->getMemoryVT() == MVT::i8) {
      LoadOp = LdLo->getExtensionType() == ISD::SEXTLOAD ?
        AMDGPUISD::LOAD_D16_LO_I8 : AMDGPUISD::LOAD_D16_LO_U8;
    } else {
      assert(LdLo->getMemoryVT() == MVT::i16);
    }

    TiedIn = CurDAG->getNode(ISD::BITCAST, SDLoc(N), VT, TiedIn);

    SDValue Ops[] = {
      LdLo->getChain(), LdLo->getBasePtr(), TiedIn
    };

    SDValue NewLoadLo =
      CurDAG->getMemIntrinsicNode(LoadOp, SDLoc(LdLo), VTList,
                                  Ops, LdLo->getMemoryVT(),
                                  LdLo->getMemOperand());

    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewLoadLo);
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(LdLo, 1), NewLoadLo.getValue(1));
    return true;
  }

  return false;
}

void AMDGPUDAGToDAGISel::PreprocessISelDAG() {
  if (!Subtarget->d16PreservesUnusedBits())
    return;

  SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_end();

  bool MadeChange = false;
  while (Position != CurDAG->allnodes_begin()) {
    SDNode *N = &*--Position;
    if (N->use_empty())
      continue;

    switch (N->getOpcode()) {
    case ISD::BUILD_VECTOR:
      MadeChange |= matchLoadD16FromBuildVector(N);
      break;
    default:
      break;
    }
  }

  if (MadeChange) {
    CurDAG->RemoveDeadNodes();
    LLVM_DEBUG(dbgs() << "After PreProcess:\n";
               CurDAG->dump(););
  }
}

bool AMDGPUDAGToDAGISel::isNoNanSrc(SDValue N) const {
  if (TM.Options.NoNaNsFPMath)
    return true;

  // TODO: Move into isKnownNeverNaN
  if (N->getFlags().hasNoNaNs())
    return true;

  return CurDAG->isKnownNeverNaN(N);
}

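// Check whether \p N is encodable as an inline constant operand, optionally
// after negation (see isNegInlineImmediate). Undef is treated as encodable.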
bool AMDGPUDAGToDAGISel::isInlineImmediate(const SDNode *N,
                                           bool Negated) const {
  if (N->isUndef())
    return true;

  const SIInstrInfo *TII = Subtarget->getInstrInfo();
  if (Negated) {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(-C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(-C->getValueAPF().bitcastToAPInt());

  } else {
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(N))
      return TII->isInlineConstant(C->getAPIntValue());

    if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N))
      return TII->isInlineConstant(C->getValueAPF().bitcastToAPInt());
  }

  return false;
}

/// Determine the register class for \p OpNo
/// \returns The register class of the virtual register that will be used for
/// the given operand number \p OpNo or NULL if the register class cannot be
/// determined.
const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N,
                                                          unsigned OpNo) const {
  if (!N->isMachineOpcode()) {
    if (N->getOpcode() == ISD::CopyToReg) {
      Register Reg = cast<RegisterSDNode>(N->getOperand(1))->getReg();
      if (Reg.isVirtual()) {
        MachineRegisterInfo &MRI = CurDAG->getMachineFunction().getRegInfo();
        return MRI.getRegClass(Reg);
      }

      const SIRegisterInfo *TRI
        = static_cast<const GCNSubtarget *>(Subtarget)->getRegisterInfo();
      return TRI->getPhysRegClass(Reg);
    }

    return nullptr;
  }

  switch (N->getMachineOpcode()) {
  default: {
    const MCInstrDesc &Desc =
        Subtarget->getInstrInfo()->get(N->getMachineOpcode());
    unsigned OpIdx = Desc.getNumDefs() + OpNo;
    if (OpIdx >= Desc.getNumOperands())
      return nullptr;
    int RegClass = Desc.OpInfo[OpIdx].RegClass;
    if (RegClass == -1)
      return nullptr;

    return Subtarget->getRegisterInfo()->getRegClass(RegClass);
  }
  case AMDGPU::REG_SEQUENCE: {
    unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    const TargetRegisterClass *SuperRC =
        Subtarget->getRegisterInfo()->getRegClass(RCID);

    SDValue SubRegOp = N->getOperand(OpNo + 1);
    unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue();
    return Subtarget->getRegisterInfo()->getSubClassWithSubReg(SuperRC,
                                                              SubRegIdx);
  }
  }
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToOp(SDNode *N, SDValue NewChain,
                                         SDValue Glue) const {
  SmallVector <SDValue, 8> Ops;
  Ops.push_back(NewChain); // Replace the chain.
  for (unsigned i = 1, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));

  Ops.push_back(Glue);
  return CurDAG->MorphNodeTo(N, N->getOpcode(), N->getVTList(), Ops);
}

SDNode *AMDGPUDAGToDAGISel::glueCopyToM0(SDNode *N, SDValue Val) const {
  const SITargetLowering& Lowering =
    *static_cast<const SITargetLowering*>(getTargetLowering());

  assert(N->getOperand(0).getValueType() == MVT::Other && "Expected chain");

  SDValue M0 = Lowering.copyToM0(*CurDAG, N->getOperand(0), SDLoc(N), Val);
  return glueCopyToOp(N, M0, M0.getValue(1));
}

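// For LDS and GDS memory operations, initialize M0 before selection: LDS uses
// -1 (all bits set) when the subtarget requires an M0 init, and GDS (region
// address space) uses the function's GDS size.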
SDNode *AMDGPUDAGToDAGISel::glueCopyToM0LDSInit(SDNode *N) const {
  unsigned AS = cast<MemSDNode>(N)->getAddressSpace();
  if (AS == AMDGPUAS::LOCAL_ADDRESS) {
    if (Subtarget->ldsRequiresM0Init())
      return glueCopyToM0(N, CurDAG->getTargetConstant(-1, SDLoc(N), MVT::i32));
  } else if (AS == AMDGPUAS::REGION_ADDRESS) {
    MachineFunction &MF = CurDAG->getMachineFunction();
    unsigned Value = MF.getInfo<SIMachineFunctionInfo>()->getGDSSize();
    return
        glueCopyToM0(N, CurDAG->getTargetConstant(Value, SDLoc(N), MVT::i32));
  }
  return N;
}

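// Materialize a 64-bit immediate as two S_MOV_B32s (low and high dwords)
// combined into a 64-bit SGPR pair with REG_SEQUENCE (sub0 = low, sub1 = high).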
MachineSDNode *AMDGPUDAGToDAGISel::buildSMovImm64(SDLoc &DL, uint64_t Imm,
                                                  EVT VT) const {
  SDNode *Lo = CurDAG->getMachineNode(
      AMDGPU::S_MOV_B32, DL, MVT::i32,
      CurDAG->getTargetConstant(Imm & 0xFFFFFFFF, DL, MVT::i32));
  SDNode *Hi =
      CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32,
                             CurDAG->getTargetConstant(Imm >> 32, DL, MVT::i32));
  const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32)};

  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL, VT, Ops);
}

void AMDGPUDAGToDAGISel::SelectBuildVector(SDNode *N, unsigned RegClassID) {
  EVT VT = N->getValueType(0);
  unsigned NumVectorElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  SDLoc DL(N);
  SDValue RegClass = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);

  if (NumVectorElts == 1) {
    CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT, N->getOperand(0),
                         RegClass);
    return;
  }

  assert(NumVectorElts <= 32 && "Vectors with more than 32 elements not "
                                  "supported yet");
  // 32 = Max Num Vector Elements
  // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
  // 1 = Vector Register Class
  SmallVector<SDValue, 32 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

  bool IsGCN = CurDAG->getSubtarget().getTargetTriple().getArch() ==
               Triple::amdgcn;
  RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, DL, MVT::i32);
  bool IsRegSeq = true;
  unsigned NOps = N->getNumOperands();
  for (unsigned i = 0; i < NOps; i++) {
    // XXX: Why is this here?
    if (isa<RegisterSDNode>(N->getOperand(i))) {
      IsRegSeq = false;
      break;
    }
    unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
                         : R600RegisterInfo::getSubRegFromChannel(i);
    RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
    RegSeqArgs[1 + (2 * i) + 1] = CurDAG->getTargetConstant(Sub, DL, MVT::i32);
  }
  if (NOps != NumVectorElts) {
    // Fill in the missing undef elements if this was a scalar_to_vector.
    assert(N->getOpcode() == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);
    MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                   DL, EltVT);
    for (unsigned i = NOps; i < NumVectorElts; ++i) {
      unsigned Sub = IsGCN ? SIRegisterInfo::getSubRegFromChannel(i)
                           : R600RegisterInfo::getSubRegFromChannel(i);
      RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(Sub, DL, MVT::i32);
    }
  }

  if (!IsRegSeq)
    SelectCode(N);
  CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(), RegSeqArgs);
}

void AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return;   // Already selected.
  }

  // isa<MemSDNode> almost works but is slightly too permissive for some DS
  // intrinsics.
  if (Opc == ISD::LOAD || Opc == ISD::STORE || isa<AtomicSDNode>(N) ||
      (Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
       Opc == ISD::ATOMIC_LOAD_FADD ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
       Opc == AMDGPUISD::ATOMIC_LOAD_FMAX)) {
    N = glueCopyToM0LDSInit(N);
    SelectCode(N);
    return;
  }

  switch (Opc) {
  default:
    break;
  // We are selecting i64 ADD here instead of custom lower it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: {
    if (N->getValueType(0) != MVT::i64)
      break;

    SelectADD_SUB_I64(N);
    return;
  }
  case ISD::ADDCARRY:
  case ISD::SUBCARRY:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectAddcSubb(N);
    return;
  case ISD::UADDO:
  case ISD::USUBO: {
    SelectUADDO_USUBO(N);
    return;
  }
  case AMDGPUISD::FMUL_W_CHAIN: {
    SelectFMUL_W_CHAIN(N);
    return;
  }
  case AMDGPUISD::FMA_W_CHAIN: {
    SelectFMA_W_CHAIN(N);
    return;
  }

  case ISD::SCALAR_TO_VECTOR:
  case ISD::BUILD_VECTOR: {
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    if (VT.getScalarSizeInBits() == 16) {
      if (Opc == ISD::BUILD_VECTOR && NumVectorElts == 2) {
        if (SDNode *Packed = packConstantV2I16(N, *CurDAG)) {
          ReplaceNode(N, Packed);
          return;
        }
      }

      break;
    }

    assert(VT.getVectorElementType().bitsEq(MVT::i32));
    unsigned RegClassID =
        SIRegisterInfo::getSGPRClassForBitWidth(NumVectorElts * 32)->getID();
    SelectBuildVector(N, RegClassID);
    return;
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    SDLoc DL(N);
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SGPR_128RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, DL, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    ReplaceNode(N, CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, DL,
                                          N->getValueType(0), Ops));
    return;
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    if (N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    SDLoc DL(N);
    ReplaceNode(N, buildSMovImm64(DL, Imm, N->getValueType(0)));
    return;
  }
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    ReplaceNode(N, getS_BFE(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                            SDLoc(N), N->getOperand(0), OffsetVal, WidthVal));
    return;
  }
  case AMDGPUISD::DIV_SCALE: {
    SelectDIV_SCALE(N);
    return;
  }
  case AMDGPUISD::MAD_I64_I32:
  case AMDGPUISD::MAD_U64_U32: {
    SelectMAD_64_32(N);
    return;
  }
  case ISD::CopyToReg: {
    const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());
    N = Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::AND:
  case ISD::SRL:
  case ISD::SRA:
  case ISD::SIGN_EXTEND_INREG:
    if (N->getValueType(0) != MVT::i32)
      break;

    SelectS_BFE(N);
    return;
  case ISD::BRCOND:
    SelectBRCOND(N);
    return;
  case ISD::FMAD:
  case ISD::FMA:
    SelectFMAD_FMA(N);
    return;
  case AMDGPUISD::ATOMIC_CMP_SWAP:
    SelectATOMIC_CMP_SWAP(N);
    return;
  case AMDGPUISD::CVT_PKRTZ_F16_F32:
  case AMDGPUISD::CVT_PKNORM_I16_F32:
  case AMDGPUISD::CVT_PKNORM_U16_F32:
  case AMDGPUISD::CVT_PK_U16_U32:
  case AMDGPUISD::CVT_PK_I16_I32: {
    // Hack around using a legal type if f16 is illegal.
    if (N->getValueType(0) == MVT::i32) {
      MVT NewVT = Opc == AMDGPUISD::CVT_PKRTZ_F16_F32 ? MVT::v2f16 : MVT::v2i16;
      N = CurDAG->MorphNodeTo(N, N->getOpcode(), CurDAG->getVTList(NewVT),
                              { N->getOperand(0), N->getOperand(1) });
      SelectCode(N);
      return;
    }

    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    SelectINTRINSIC_W_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    SelectINTRINSIC_WO_CHAIN(N);
    return;
  }
  case ISD::INTRINSIC_VOID: {
    SelectINTRINSIC_VOID(N);
    return;
  }
  }

  SelectCode(N);
}

bool AMDGPUDAGToDAGISel::isUniformBr(const SDNode *N) const {
  const BasicBlock *BB = FuncInfo->MBB->getBasicBlock();
  const Instruction *Term = BB->getTerminator();
  return Term->getMetadata("amdgpu.uniform") ||
         Term->getMetadata("structurizecfg.uniform");
}

static bool getBaseWithOffsetUsingSplitOR(SelectionDAG &DAG, SDValue Addr,
                                          SDValue &N0, SDValue &N1) {
  if (Addr.getValueType() == MVT::i64 && Addr.getOpcode() == ISD::BITCAST &&
      Addr.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    // As we split 64-bit `or` earlier, it's a complicated pattern to match,
    // i.e.
    // (i64 (bitcast (v2i32 (build_vector
    //                        (or (extract_vector_elt V, 0), OFFSET),
    //                        (extract_vector_elt V, 1)))))
    SDValue Lo = Addr.getOperand(0).getOperand(0);
    if (Lo.getOpcode() == ISD::OR && DAG.isBaseWithConstantOffset(Lo)) {
      SDValue BaseLo = Lo.getOperand(0);
      SDValue BaseHi = Addr.getOperand(0).getOperand(1);
      // Check that split base (Lo and Hi) are extracted from the same one.
      if (BaseLo.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseHi.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          BaseLo.getOperand(0) == BaseHi.getOperand(0) &&
          // Lo is statically extracted from index 0.
          isa<ConstantSDNode>(BaseLo.getOperand(1)) &&
          BaseLo.getConstantOperandVal(1) == 0 &&
          // Hi is statically extracted from index 1.
          isa<ConstantSDNode>(BaseHi.getOperand(1)) &&
          BaseHi.getConstantOperandVal(1) == 1) {
        N0 = BaseLo.getOperand(0).getOperand(0);
        N1 = Lo.getOperand(1);
        return true;
      }
    }
  }
  return false;
}

bool AMDGPUDAGToDAGISel::isBaseWithConstantOffset64(SDValue Addr, SDValue &LHS,
                                                    SDValue &RHS) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    LHS = Addr.getOperand(0);
    RHS = Addr.getOperand(1);
    return true;
  }

  if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, LHS, RHS)) {
    assert(LHS && RHS && isa<ConstantSDNode>(RHS));
    return true;
  }

  return false;
}

StringRef AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}

//===----------------------------------------------------------------------===//
// Complex Patterns
//===----------------------------------------------------------------------===//

bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  return false;
}

bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
                                            SDValue &Offset) {
  ConstantSDNode *C;
  SDLoc DL(Addr);

  if ((C = dyn_cast<ConstantSDNode>(Addr))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
    Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
            (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
  } else {
    Base = Addr;
    Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
  }

  return true;
}

SDValue AMDGPUDAGToDAGISel::getMaterializedScalarImm32(int64_t Val,
                                                       const SDLoc &DL) const {
  SDNode *Mov = CurDAG->getMachineNode(
    AMDGPU::S_MOV_B32, DL, MVT::i32,
    CurDAG->getTargetConstant(Val, DL, MVT::i32));
  return SDValue(Mov, 0);
}

// FIXME: Should only handle addcarry/subcarry
void AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  unsigned Opcode = N->getOpcode();
  bool ConsumeCarry = (Opcode == ISD::ADDE || Opcode == ISD::SUBE);
  bool ProduceCarry =
      ConsumeCarry || Opcode == ISD::ADDC || Opcode == ISD::SUBC;
  bool IsAdd = Opcode == ISD::ADD || Opcode == ISD::ADDC || Opcode == ISD::ADDE;

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);

  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);

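  // Opcode table indexed as OpcMap[HasCarryIn][IsDivergent][IsAdd]: scalar
  // (SALU) forms for uniform values, vector (VALU) forms for divergent ones.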
  static const unsigned OpcMap[2][2][2] = {
      {{AMDGPU::S_SUB_U32, AMDGPU::S_ADD_U32},
       {AMDGPU::V_SUB_CO_U32_e32, AMDGPU::V_ADD_CO_U32_e32}},
      {{AMDGPU::S_SUBB_U32, AMDGPU::S_ADDC_U32},
       {AMDGPU::V_SUBB_U32_e32, AMDGPU::V_ADDC_U32_e32}}};

  unsigned Opc = OpcMap[0][N->isDivergent()][IsAdd];
  unsigned CarryOpc = OpcMap[1][N->isDivergent()][IsAdd];

  SDNode *AddLo;
  if (!ConsumeCarry) {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };
    AddLo = CurDAG->getMachineNode(Opc, DL, VTList, Args);
  } else {
    SDValue Args[] = { SDValue(Lo0, 0), SDValue(Lo1, 0), N->getOperand(2) };
    AddLo = CurDAG->getMachineNode(CarryOpc, DL, VTList, Args);
  }
  SDValue AddHiArgs[] = {
    SDValue(Hi0, 0),
    SDValue(Hi1, 0),
    SDValue(AddLo, 1)
  };
  SDNode *AddHi = CurDAG->getMachineNode(CarryOpc, DL, VTList, AddHiArgs);

  SDValue RegSequenceArgs[] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, DL, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  SDNode *RegSequence = CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                               MVT::i64, RegSequenceArgs);

  if (ProduceCarry) {
    // Replace the carry-use
    ReplaceUses(SDValue(N, 1), SDValue(AddHi, 1));
  }

  // Replace the remaining uses.
  ReplaceNode(N, RegSequence);
}

void AMDGPUDAGToDAGISel::SelectAddcSubb(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  SDValue CI = N->getOperand(2);

  if (N->isDivergent()) {
    unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::V_ADDC_U32_e64
                                                   : AMDGPU::V_SUBB_U32_e64;
    CurDAG->SelectNodeTo(
        N, Opc, N->getVTList(),
        {LHS, RHS, CI,
         CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
  } else {
    unsigned Opc = N->getOpcode() == ISD::ADDCARRY ? AMDGPU::S_ADD_CO_PSEUDO
                                                   : AMDGPU::S_SUB_CO_PSEUDO;
    CurDAG->SelectNodeTo(N, Opc, N->getVTList(), {LHS, RHS, CI});
  }
}

void AMDGPUDAGToDAGISel::SelectUADDO_USUBO(SDNode *N) {
  // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have an
  // unsigned carry out despite the _i32 name. These were renamed in VI to _U32.
  // FIXME: We should probably rename the opcodes here.
  bool IsAdd = N->getOpcode() == ISD::UADDO;
  bool IsVALU = N->isDivergent();

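  // Force the VALU form if the carry-out has any user other than an
  // ADDCARRY/SUBCARRY node, since only the carry-chain pseudos consume the
  // carry produced by the scalar pseudo.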
  for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end(); UI != E;
       ++UI)
    if (UI.getUse().getResNo() == 1) {
      if ((IsAdd && (UI->getOpcode() != ISD::ADDCARRY)) ||
          (!IsAdd && (UI->getOpcode() != ISD::SUBCARRY))) {
        IsVALU = true;
        break;
      }
    }

  if (IsVALU) {
    unsigned Opc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;

    CurDAG->SelectNodeTo(
        N, Opc, N->getVTList(),
        {N->getOperand(0), N->getOperand(1),
         CurDAG->getTargetConstant(0, {}, MVT::i1) /*clamp bit*/});
  } else {
    unsigned Opc = N->getOpcode() == ISD::UADDO ? AMDGPU::S_UADDO_PSEUDO
                                                : AMDGPU::S_USUBO_PSEUDO;

    CurDAG->SelectNodeTo(N, Opc, N->getVTList(),
                         {N->getOperand(0), N->getOperand(1)});
  }
}

void AMDGPUDAGToDAGISel::SelectFMA_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  //  src0_modifiers, src0,  src1_modifiers, src1, src2_modifiers, src2, clamp, omod
  SDValue Ops[10];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  SelectVOP3Mods(N->getOperand(3), Ops[5], Ops[4]);
  Ops[8] = N->getOperand(0);
  Ops[9] = N->getOperand(4);

  CurDAG->SelectNodeTo(N, AMDGPU::V_FMA_F32_e64, N->getVTList(), Ops);
}

void AMDGPUDAGToDAGISel::SelectFMUL_W_CHAIN(SDNode *N) {
  SDLoc SL(N);
  //    src0_modifiers, src0,  src1_modifiers, src1, clamp, omod
  SDValue Ops[8];

  SelectVOP3Mods0(N->getOperand(1), Ops[1], Ops[0], Ops[4], Ops[5]);
  SelectVOP3Mods(N->getOperand(2), Ops[3], Ops[2]);
  Ops[6] = N->getOperand(0);
  Ops[7] = N->getOperand(3);

  CurDAG->SelectNodeTo(N, AMDGPU::V_MUL_F32_e64, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) {
  SDLoc SL(N);
  EVT VT = N->getValueType(0);

  assert(VT == MVT::f32 || VT == MVT::f64);

  unsigned Opc
    = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64_e64 : AMDGPU::V_DIV_SCALE_F32_e64;

  // src0_modifiers, src0, src1_modifiers, src1, src2_modifiers, src2, clamp,
  // omod
  SDValue Ops[8];
  SelectVOP3BMods0(N->getOperand(0), Ops[1], Ops[0], Ops[6], Ops[7]);
  SelectVOP3BMods(N->getOperand(1), Ops[3], Ops[2]);
  SelectVOP3BMods(N->getOperand(2), Ops[5], Ops[4]);
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
void AMDGPUDAGToDAGISel::SelectMAD_64_32(SDNode *N) {
  SDLoc SL(N);
  bool Signed = N->getOpcode() == AMDGPUISD::MAD_I64_I32;
  unsigned Opc = Signed ? AMDGPU::V_MAD_I64_I32_e64 : AMDGPU::V_MAD_U64_U32_e64;

  SDValue Clamp = CurDAG->getTargetConstant(0, SL, MVT::i1);
  SDValue Ops[] = { N->getOperand(0), N->getOperand(1), N->getOperand(2),
                    Clamp };
  CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
}

bool AMDGPUDAGToDAGISel::isDSOffsetLegal(SDValue Base, unsigned Offset) const {
  if (!isUInt<16>(Offset))
    return false;

  if (!Base || Subtarget->hasUsableDSOffset() ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}

bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  SDLoc DL(Addr);
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue())) {
      // (add n0, c0)
      Base = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      int64_t ByteOffset = C->getSExtValue();
      if (isDSOffsetLegal(SDValue(), ByteOffset)) {
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub = CurDAG->getNode(ISD::SUB, DL, MVT::i32,
                                      Zero, Addr.getOperand(1));

        if (isDSOffsetLegal(Sub, ByteOffset)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));

          // FIXME: Select to VOP3 version for with-carry.
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          MachineSDNode *MachineSub =
              CurDAG->getMachineNode(SubOp, DL, MVT::i32, Opnds);

          Base = SDValue(MachineSub, 0);
          Offset = CurDAG->getTargetConstant(ByteOffset, DL, MVT::i16);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    // If we have a constant address, prefer to put the constant into the
    // offset. This can save moves to load the constant address since multiple
    // operations can share the zero base address register, and enables merging
    // into read2 / write2 instructions.

    SDLoc DL(Addr);

    if (isDSOffsetLegal(SDValue(), CAddr->getZExtValue())) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i16);
  return true;
}

bool AMDGPUDAGToDAGISel::isDSOffset2Legal(SDValue Base, unsigned Offset0,
                                          unsigned Offset1,
                                          unsigned Size) const {
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
    return false;
  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
    return false;

  if (!Base || Subtarget->hasUsableDSOffset() ||
      Subtarget->unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return CurDAG->SignBitIsZero(Base);
}

// TODO: If offset is too big, put low 16-bit into offset.
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 4);
}

bool AMDGPUDAGToDAGISel::SelectDS128Bit8ByteAligned(SDValue Addr, SDValue &Base,
                                                    SDValue &Offset0,
                                                    SDValue &Offset1) const {
  return SelectDSReadWrite2(Addr, Base, Offset0, Offset1, 8);
}

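// Match a base plus two 8-bit offsets for DS read2/write2-style instructions.
// The encoded offsets are in units of Size bytes (4 for the b32 forms, 8 for
// the b64 forms), so byte offsets must be Size-aligned and fit in 8 bits
// after scaling; see isDSOffset2Legal.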
bool AMDGPUDAGToDAGISel::SelectDSReadWrite2(SDValue Addr, SDValue &Base,
                                            SDValue &Offset0, SDValue &Offset1,
                                            unsigned Size) const {
  SDLoc DL(Addr);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    unsigned OffsetValue0 = C1->getZExtValue();
    unsigned OffsetValue1 = OffsetValue0 + Size;

    // (add n0, c0)
    if (isDSOffset2Legal(N0, OffsetValue0, OffsetValue1, Size)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
      return true;
    }
  } else if (Addr.getOpcode() == ISD::SUB) {
    // sub C, x -> add (sub 0, x), C
    if (const ConstantSDNode *C =
            dyn_cast<ConstantSDNode>(Addr.getOperand(0))) {
      unsigned OffsetValue0 = C->getZExtValue();
      unsigned OffsetValue1 = OffsetValue0 + Size;

      if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
        SDLoc DL(Addr);
        SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);

        // XXX - This is kind of hacky. Create a dummy sub node so we can check
        // the known bits in isDSOffsetLegal. We need to emit the selected node
        // here, so this is thrown away.
        SDValue Sub =
            CurDAG->getNode(ISD::SUB, DL, MVT::i32, Zero, Addr.getOperand(1));

        if (isDSOffset2Legal(Sub, OffsetValue0, OffsetValue1, Size)) {
          SmallVector<SDValue, 3> Opnds;
          Opnds.push_back(Zero);
          Opnds.push_back(Addr.getOperand(1));
          unsigned SubOp = AMDGPU::V_SUB_CO_U32_e32;
          if (Subtarget->hasAddNoCarry()) {
            SubOp = AMDGPU::V_SUB_U32_e64;
            Opnds.push_back(
                CurDAG->getTargetConstant(0, {}, MVT::i1)); // clamp bit
          }

          MachineSDNode *MachineSub = CurDAG->getMachineNode(
              SubOp, DL, MVT::getIntegerVT(Size * 8), Opnds);

          Base = SDValue(MachineSub, 0);
          Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
          Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
          return true;
        }
      }
    }
  } else if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned OffsetValue0 = CAddr->getZExtValue();
    unsigned OffsetValue1 = OffsetValue0 + Size;

    if (isDSOffset2Legal(SDValue(), OffsetValue0, OffsetValue1, Size)) {
      SDValue Zero = CurDAG->getTargetConstant(0, DL, MVT::i32);
      MachineSDNode *MovZero =
          CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32, DL, MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(OffsetValue0 / Size, DL, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(OffsetValue1 / Size, DL, MVT::i8);
      return true;
    }
  }

  // default case

  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
  return true;
}

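// Decompose Addr into the MUBUF operand fields: a 64-bit resource pointer
// (Ptr), an optional VGPR address (VAddr), a scalar offset register (SOffset),
// and an immediate offset checked with SIInstrInfo::isLegalMUBUFImmOffset;
// Offen/Idxen/Addr64 select the addressing mode.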
1377 bool AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
1378                                      SDValue &VAddr, SDValue &SOffset,
1379                                      SDValue &Offset, SDValue &Offen,
1380                                      SDValue &Idxen, SDValue &Addr64,
1381                                      SDValue &GLC, SDValue &SLC,
1382                                      SDValue &TFE, SDValue &DLC,
1383                                      SDValue &SWZ) const {
1384   // Subtarget prefers to use flat instruction
1385   // FIXME: This should be a pattern predicate and not reach here
1386   if (Subtarget->useFlatForGlobal())
1387     return false;
1388 
1389   SDLoc DL(Addr);
1390 
1391   if (!GLC.getNode())
1392     GLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1393   if (!SLC.getNode())
1394     SLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1395   TFE = CurDAG->getTargetConstant(0, DL, MVT::i1);
1396   DLC = CurDAG->getTargetConstant(0, DL, MVT::i1);
1397   SWZ = CurDAG->getTargetConstant(0, DL, MVT::i1);
1398 
1399   Idxen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1400   Offen = CurDAG->getTargetConstant(0, DL, MVT::i1);
1401   Addr64 = CurDAG->getTargetConstant(0, DL, MVT::i1);
1402   SOffset = CurDAG->getTargetConstant(0, DL, MVT::i32);
1403 
1404   ConstantSDNode *C1 = nullptr;
1405   SDValue N0 = Addr;
1406   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1407     C1 = cast<ConstantSDNode>(Addr.getOperand(1));
1408     if (isUInt<32>(C1->getZExtValue()))
1409       N0 = Addr.getOperand(0);
1410     else
1411       C1 = nullptr;
1412   }
1413 
1414   if (N0.getOpcode() == ISD::ADD) {
1415     // (add N2, N3) -> addr64, or
1416     // (add (add N2, N3), C1) -> addr64
1417     SDValue N2 = N0.getOperand(0);
1418     SDValue N3 = N0.getOperand(1);
1419     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1420 
1421     if (N2->isDivergent()) {
1422       if (N3->isDivergent()) {
1423         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
1424         // addr64, and construct the resource from a 0 address.
1425         Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1426         VAddr = N0;
1427       } else {
1428         // N2 is divergent, N3 is not.
1429         Ptr = N3;
1430         VAddr = N2;
1431       }
1432     } else {
1433       // N2 is not divergent.
1434       Ptr = N2;
1435       VAddr = N3;
1436     }
1437     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1438   } else if (N0->isDivergent()) {
1439     // N0 is divergent. Use it as the addr64, and construct the resource from a
1440     // 0 address.
1441     Ptr = SDValue(buildSMovImm64(DL, 0, MVT::v2i32), 0);
1442     VAddr = N0;
1443     Addr64 = CurDAG->getTargetConstant(1, DL, MVT::i1);
1444   } else {
1445     // N0 -> offset, or
1446     // (N0 + C1) -> offset
1447     VAddr = CurDAG->getTargetConstant(0, DL, MVT::i32);
1448     Ptr = N0;
1449   }
1450 
1451   if (!C1) {
1452     // No offset.
1453     Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1454     return true;
1455   }
1456 
1457   if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue())) {
1458     // Legal offset for instruction.
1459     Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1460     return true;
1461   }
1462 
1463   // Illegal offset, store it in soffset.
1464   Offset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1465   SOffset =
1466       SDValue(CurDAG->getMachineNode(
1467                   AMDGPU::S_MOV_B32, DL, MVT::i32,
1468                   CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32)),
1469               0);
1470   return true;
1471 }
1472 
1473 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1474                                            SDValue &VAddr, SDValue &SOffset,
1475                                            SDValue &Offset, SDValue &GLC,
1476                                            SDValue &SLC, SDValue &TFE,
1477                                            SDValue &DLC, SDValue &SWZ) const {
1478   SDValue Ptr, Offen, Idxen, Addr64;
1479 
1480   // The addr64 bit was removed for Volcanic Islands.
1481   // FIXME: This should be a pattern predicate and not reach here
1482   if (!Subtarget->hasAddr64())
1483     return false;
1484 
1485   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
1486                    GLC, SLC, TFE, DLC, SWZ))
1487     return false;
1488 
1489   ConstantSDNode *C = cast<ConstantSDNode>(Addr64);
1490   if (C->getSExtValue()) {
1491     SDLoc DL(Addr);
1492 
1493     const SITargetLowering& Lowering =
1494       *static_cast<const SITargetLowering*>(getTargetLowering());
1495 
1496     SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0);
1497     return true;
1498   }
1499 
1500   return false;
1501 }
1502 
1503 bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
1504                                            SDValue &VAddr, SDValue &SOffset,
1505                                            SDValue &Offset,
1506                                            SDValue &SLC) const {
1507   SLC = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i1);
1508   SDValue GLC, TFE, DLC, SWZ;
1509 
1510   return SelectMUBUFAddr64(Addr, SRsrc, VAddr, SOffset, Offset, GLC, SLC,
                                TFE, DLC, SWZ);
1511 }
1512 
1513 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
1514   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
1515   return PSV && PSV->isStack();
1516 }
1517 
1518 std::pair<SDValue, SDValue> AMDGPUDAGToDAGISel::foldFrameIndex(SDValue N) const {
1519   SDLoc DL(N);
1520 
1521   auto *FI = dyn_cast<FrameIndexSDNode>(N);
1522   SDValue TFI =
1523       FI ? CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0)) : N;
1524 
1525   // We rebase the base address into an absolute stack address and hence
1526   // use constant 0 for soffset. This value must be retained until frame
1527   // elimination, at which point eliminateFrameIndex will choose the
1528   // appropriate frame register if need be.
1529   return std::make_pair(TFI, CurDAG->getTargetConstant(0, DL, MVT::i32));
1530 }
1531 
1532 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffen(SDNode *Parent,
1533                                                  SDValue Addr, SDValue &Rsrc,
1534                                                  SDValue &VAddr, SDValue &SOffset,
1535                                                  SDValue &ImmOffset) const {
1536 
1537   SDLoc DL(Addr);
1538   MachineFunction &MF = CurDAG->getMachineFunction();
1539   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1540 
1541   Rsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1542 
1543   if (ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
1544     int64_t Imm = CAddr->getSExtValue();
1545     const int64_t NullPtr =
1546         AMDGPUTargetMachine::getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS);
1547     // Don't fold null pointer.
1548     if (Imm != NullPtr) {
1549       SDValue HighBits = CurDAG->getTargetConstant(Imm & ~4095, DL, MVT::i32);
1550       MachineSDNode *MovHighBits = CurDAG->getMachineNode(
1551         AMDGPU::V_MOV_B32_e32, DL, MVT::i32, HighBits);
1552       VAddr = SDValue(MovHighBits, 0);
1553 
1554       // In a call sequence, stores to the argument stack area are relative
1555       // to the stack pointer.
1556       const MachinePointerInfo &PtrInfo
1557         = cast<MemSDNode>(Parent)->getPointerInfo();
1558       SOffset = isStackPtrRelative(PtrInfo)
1559         ? CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32)
1560         : CurDAG->getTargetConstant(0, DL, MVT::i32);
1561       ImmOffset = CurDAG->getTargetConstant(Imm & 4095, DL, MVT::i16);
1562       return true;
1563     }
1564   }
1565 
1566   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1567     // (add n0, c1)
1568 
1569     SDValue N0 = Addr.getOperand(0);
1570     SDValue N1 = Addr.getOperand(1);
1571 
1572     // Offsets in vaddr must be positive if range checking is enabled.
1573     //
1574     // The total computation of vaddr + soffset + offset must not overflow.  If
1575     // vaddr is negative, even if offset is 0 the sgpr offset add will end up
1576     // overflowing.
1577     //
1578     // Prior to gfx9, MUBUF instructions with the vaddr offset enabled would
1579     // always perform a range check. If a negative vaddr base index was
1580     // used, it would fail that check even though the overall address
1581     // computation still produced a valid address. Out-of-bounds MUBUF loads
1582     // return 0.
1583     //
1584     // Therefore it should be safe to fold any VGPR offset on gfx9 into the
1585     // MUBUF vaddr, but not on older subtargets which can only do this if the
1586     // sign bit is known 0.
1587     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
1588     if (SIInstrInfo::isLegalMUBUFImmOffset(C1->getZExtValue()) &&
1589         (!Subtarget->privateMemoryResourceIsRangeChecked() ||
1590          CurDAG->SignBitIsZero(N0))) {
1591       std::tie(VAddr, SOffset) = foldFrameIndex(N0);
1592       ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i16);
1593       return true;
1594     }
1595   }
1596 
1597   // (node)
1598   std::tie(VAddr, SOffset) = foldFrameIndex(Addr);
1599   ImmOffset = CurDAG->getTargetConstant(0, DL, MVT::i16);
1600   return true;
1601 }
1602 
1603 bool AMDGPUDAGToDAGISel::SelectMUBUFScratchOffset(SDNode *Parent,
1604                                                   SDValue Addr,
1605                                                   SDValue &SRsrc,
1606                                                   SDValue &SOffset,
1607                                                   SDValue &Offset) const {
1608   ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr);
1609   if (!CAddr || !SIInstrInfo::isLegalMUBUFImmOffset(CAddr->getZExtValue()))
1610     return false;
1611 
1612   SDLoc DL(Addr);
1613   MachineFunction &MF = CurDAG->getMachineFunction();
1614   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1615 
1616   SRsrc = CurDAG->getRegister(Info->getScratchRSrcReg(), MVT::v4i32);
1617 
1618   const MachinePointerInfo &PtrInfo = cast<MemSDNode>(Parent)->getPointerInfo();
1619 
1620   // FIXME: Get from MachinePointerInfo? We should only be using the frame
1621   // offset if we know this is in a call sequence.
1622   SOffset = isStackPtrRelative(PtrInfo)
1623                 ? CurDAG->getRegister(Info->getStackPtrOffsetReg(), MVT::i32)
1624                 : CurDAG->getTargetConstant(0, DL, MVT::i32);
1625 
1626   Offset = CurDAG->getTargetConstant(CAddr->getZExtValue(), DL, MVT::i16);
1627   return true;
1628 }
1629 
1630 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1631                                            SDValue &SOffset, SDValue &Offset,
1632                                            SDValue &GLC, SDValue &SLC,
1633                                            SDValue &TFE, SDValue &DLC,
1634                                            SDValue &SWZ) const {
1635   SDValue Ptr, VAddr, Offen, Idxen, Addr64;
1636   const SIInstrInfo *TII =
1637     static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
1638 
1639   if (!SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
1640                    GLC, SLC, TFE, DLC, SWZ))
1641     return false;
1642 
1643   if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
1644       !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
1645       !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
1646     uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
1647                     APInt::getAllOnesValue(32).getZExtValue(); // Size
1648     SDLoc DL(Addr);
1649 
1650     const SITargetLowering& Lowering =
1651       *static_cast<const SITargetLowering*>(getTargetLowering());
1652 
1653     SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
1654     return true;
1655   }
1656   return false;
1657 }
1658 
1659 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1660                                            SDValue &Soffset,
1661                                            SDValue &Offset) const {
1662   SDValue GLC, SLC, TFE, DLC, SWZ;
1663 
1664   return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE,
                                DLC, SWZ);
1665 }

1666 bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
1667                                            SDValue &Soffset, SDValue &Offset,
1668                                            SDValue &SLC) const {
1669   SDValue GLC, TFE, DLC, SWZ;
1670 
1671   return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE,
                                DLC, SWZ);
1672 }
1673 
1674 // Find a load or store from the corresponding pattern root.
1675 // Roots may be build_vector, bitconvert, or combinations of the two.
1676 static MemSDNode* findMemSDNode(SDNode *N) {
1677   N = AMDGPUTargetLowering::stripBitcast(SDValue(N,0)).getNode();
1678   if (MemSDNode *MN = dyn_cast<MemSDNode>(N))
1679     return MN;
1680   assert(isa<BuildVectorSDNode>(N));
1681   for (SDValue V : N->op_values())
1682     if (MemSDNode *MN =
1683           dyn_cast<MemSDNode>(AMDGPUTargetLowering::stripBitcast(V)))
1684       return MN;
1685   llvm_unreachable("cannot find MemSDNode in the pattern!");
1686 }
1687 
1688 template <bool IsSigned>
1689 bool AMDGPUDAGToDAGISel::SelectFlatOffset(SDNode *N,
1690                                           SDValue Addr,
1691                                           SDValue &VAddr,
1692                                           SDValue &Offset) const {
1693   int64_t OffsetVal = 0;
1694 
1695   unsigned AS = findMemSDNode(N)->getAddressSpace();
1696 
1697   if (Subtarget->hasFlatInstOffsets() &&
1698       (!Subtarget->hasFlatSegmentOffsetBug() ||
1699        AS != AMDGPUAS::FLAT_ADDRESS)) {
1700     SDValue N0, N1;
1701     if (isBaseWithConstantOffset64(Addr, N0, N1)) {
1702       uint64_t COffsetVal = cast<ConstantSDNode>(N1)->getSExtValue();
1703 
1704       const SIInstrInfo *TII = Subtarget->getInstrInfo();
1705       if (TII->isLegalFLATOffset(COffsetVal, AS, IsSigned)) {
1706         Addr = N0;
1707         OffsetVal = COffsetVal;
1708       } else {
1709         // If the offset doesn't fit, put the low bits into the offset field and
1710         // add the rest.
1711         //
1712         // For a FLAT instruction the hardware decides whether to access
1713         // global/scratch/shared memory based on the high bits of vaddr,
1714         // ignoring the offset field, so we have to ensure that when we add
1715         // remainder to vaddr it still points into the same underlying object.
1716         // The easiest way to do that is to make sure that we split the offset
1717         // into two pieces that are both >= 0 or both <= 0.
1718 
1719         SDLoc DL(N);
1720         uint64_t RemainderOffset;
1721 
1722         std::tie(OffsetVal, RemainderOffset)
1723           = TII->splitFlatOffset(COffsetVal, AS, IsSigned);
1724 
1725         SDValue AddOffsetLo =
1726             getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
1727         SDValue Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
1728 
1729         if (Addr.getValueType().getSizeInBits() == 32) {
1730           SmallVector<SDValue, 3> Opnds;
1731           Opnds.push_back(N0);
1732           Opnds.push_back(AddOffsetLo);
1733           unsigned AddOp = AMDGPU::V_ADD_CO_U32_e32;
1734           if (Subtarget->hasAddNoCarry()) {
1735             AddOp = AMDGPU::V_ADD_U32_e64;
1736             Opnds.push_back(Clamp);
1737           }
1738           Addr = SDValue(CurDAG->getMachineNode(AddOp, DL, MVT::i32, Opnds), 0);
1739         } else {
1740           // TODO: Should this try to use a scalar add pseudo if the base address
1741           // is uniform and saddr is usable?
1742           SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, DL, MVT::i32);
1743           SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, DL, MVT::i32);
1744 
1745           SDNode *N0Lo = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
1746                                                 DL, MVT::i32, N0, Sub0);
1747           SDNode *N0Hi = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
1748                                                 DL, MVT::i32, N0, Sub1);
1749 
1750           SDValue AddOffsetHi =
1751               getMaterializedScalarImm32(Hi_32(RemainderOffset), DL);
1752 
1753           SDVTList VTs = CurDAG->getVTList(MVT::i32, MVT::i1);
1754 
1755           SDNode *Add =
1756               CurDAG->getMachineNode(AMDGPU::V_ADD_CO_U32_e64, DL, VTs,
1757                                      {AddOffsetLo, SDValue(N0Lo, 0), Clamp});
1758 
1759           SDNode *Addc = CurDAG->getMachineNode(
1760               AMDGPU::V_ADDC_U32_e64, DL, VTs,
1761               {AddOffsetHi, SDValue(N0Hi, 0), SDValue(Add, 1), Clamp});
1762 
1763           SDValue RegSequenceArgs[] = {
1764               CurDAG->getTargetConstant(AMDGPU::VReg_64RegClassID, DL, MVT::i32),
1765               SDValue(Add, 0), Sub0, SDValue(Addc, 0), Sub1};
1766 
1767           Addr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
1768                                                 MVT::i64, RegSequenceArgs),
1769                          0);
1770         }
1771       }
1772     }
1773   }
1774 
1775   VAddr = Addr;
1776   Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i16);
1777   return true;
1778 }
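
     // Editor's sketch (hedged; the field width is an assumption): a concrete
     // split performed by the code above. Assuming a 13-bit signed FLAT offset
     // field, as on gfx9 global accesses, an addend of 5000 does not fit, so
     // TII->splitFlatOffset(5000, AS, IsSigned) could return (904, 4096):
     //   vaddr  <- base + 4096  (via the V_ADD sequence above)
     //   offset <- 904          (fits the immediate field)
     // Both pieces are >= 0, so base + 4096 still points into the same
     // underlying object, as the in-function comment requires.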
1779 
1780 // If this matches zero_extend i32:x, return x
1781 static SDValue matchZExtFromI32(SDValue Op) {
1782   if (Op.getOpcode() != ISD::ZERO_EXTEND)
1783     return SDValue();
1784 
1785   SDValue ExtSrc = Op.getOperand(0);
1786   return (ExtSrc.getValueType() == MVT::i32) ? ExtSrc : SDValue();
1787 }
1788 
1789 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
1790 bool AMDGPUDAGToDAGISel::SelectGlobalSAddr(SDNode *N,
1791                                            SDValue Addr,
1792                                            SDValue &SAddr,
1793                                            SDValue &VOffset,
1794                                            SDValue &Offset) const {
1795   int64_t ImmOffset = 0;
1796 
1797   // Match the immediate offset first, which canonically is moved as low as
1798   // possible.
1799 
1800   SDValue LHS, RHS;
1801   if (isBaseWithConstantOffset64(Addr, LHS, RHS)) {
1802     int64_t COffsetVal = cast<ConstantSDNode>(RHS)->getSExtValue();
1803     const SIInstrInfo *TII = Subtarget->getInstrInfo();
1804 
1805     if (TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::GLOBAL_ADDRESS, true)) {
1806       Addr = LHS;
1807       ImmOffset = COffsetVal;
1808     } else if (!LHS->isDivergent() && COffsetVal > 0) {
1809       SDLoc SL(N);
1810       // saddr + large_offset -> saddr + (voffset = large_offset & ~MaxOffset) +
1811       //                         (large_offset & MaxOffset);
1812       int64_t SplitImmOffset, RemainderOffset;
1813       std::tie(SplitImmOffset, RemainderOffset)
1814         = TII->splitFlatOffset(COffsetVal, AMDGPUAS::GLOBAL_ADDRESS, true);
1815 
1816       if (isUInt<32>(RemainderOffset)) {
1817         SDNode *VMov = CurDAG->getMachineNode(
1818           AMDGPU::V_MOV_B32_e32, SL, MVT::i32,
1819           CurDAG->getTargetConstant(RemainderOffset, SDLoc(), MVT::i32));
1820         VOffset = SDValue(VMov, 0);
1821         SAddr = LHS;
1822         Offset = CurDAG->getTargetConstant(SplitImmOffset, SDLoc(), MVT::i16);
1823         return true;
1824       }
1825     }
1826   }
1827 
1828   // Match the variable offset.
1829   if (Addr.getOpcode() != ISD::ADD) {
1830     if (Addr->isDivergent() || Addr.getOpcode() == ISD::UNDEF ||
1831         isa<ConstantSDNode>(Addr))
1832       return false;
1833 
1834     // It's cheaper to materialize a single 32-bit zero for vaddr than the two
1835     // moves required to copy a 64-bit SGPR to VGPR.
1836     SAddr = Addr;
1837     SDNode *VMov = CurDAG->getMachineNode(
1838       AMDGPU::V_MOV_B32_e32, SDLoc(Addr), MVT::i32,
1839       CurDAG->getTargetConstant(0, SDLoc(), MVT::i32));
1840     VOffset = SDValue(VMov, 0);
1841     Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
1842     return true;
1843   }
1844 
1845   LHS = Addr.getOperand(0);
1846   RHS = Addr.getOperand(1);
1847 
1848   if (!LHS->isDivergent()) {
1849     // add (i64 sgpr), (zero_extend (i32 vgpr))
1850     if (SDValue ZextRHS = matchZExtFromI32(RHS)) {
1851       SAddr = LHS;
1852       VOffset = ZextRHS;
1853     }
1854   }
1855 
1856   if (!SAddr && !RHS->isDivergent()) {
1857     // add (zero_extend (i32 vgpr)), (i64 sgpr)
1858     if (SDValue ZextLHS = matchZExtFromI32(LHS)) {
1859       SAddr = RHS;
1860       VOffset = ZextLHS;
1861     }
1862   }
1863 
1864   if (!SAddr)
1865     return false;
1866 
1867   Offset = CurDAG->getTargetConstant(ImmOffset, SDLoc(), MVT::i16);
1868   return true;
1869 }
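
     // Editor's note (hedged example, same 13-bit assumption as elsewhere in
     // these notes): for "saddr + 5000" with splitFlatOffset(5000) == (904,
     // 4096), the code above selects
     //   VOffset = V_MOV_B32 4096, SAddr = saddr, Offset = 904,
     // provided the 4096 remainder passes the isUInt<32> guard, which it does.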
1870 
1871 // Match (32-bit SGPR base) + sext(imm offset)
1872 bool AMDGPUDAGToDAGISel::SelectScratchSAddr(SDNode *N,
1873                                             SDValue Addr,
1874                                             SDValue &SAddr,
1875                                             SDValue &Offset) const {
1876   if (Addr->isDivergent())
1877     return false;
1878 
1879   SAddr = Addr;
1880   int64_t COffsetVal = 0;
1881 
1882   if (CurDAG->isBaseWithConstantOffset(Addr)) {
1883     COffsetVal = cast<ConstantSDNode>(Addr.getOperand(1))->getSExtValue();
1884     SAddr = Addr.getOperand(0);
1885   }
1886 
1887   if (auto FI = dyn_cast<FrameIndexSDNode>(SAddr)) {
1888     SAddr = CurDAG->getTargetFrameIndex(FI->getIndex(), FI->getValueType(0));
1889   } else if (SAddr.getOpcode() == ISD::ADD &&
1890              isa<FrameIndexSDNode>(SAddr.getOperand(0))) {
1891     // Materialize this into a scalar move for the scalar address to avoid
1892     // a readfirstlane.
1893     auto FI = cast<FrameIndexSDNode>(SAddr.getOperand(0));
1894     SDValue TFI = CurDAG->getTargetFrameIndex(FI->getIndex(),
1895                                               FI->getValueType(0));
1896     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_U32, SDLoc(SAddr),
1897                                            MVT::i32, TFI, SAddr.getOperand(1)),
1898                     0);
1899   }
1900 
1901   const SIInstrInfo *TII = Subtarget->getInstrInfo();
1902 
1903   if (!TII->isLegalFLATOffset(COffsetVal, AMDGPUAS::PRIVATE_ADDRESS, true)) {
1904     int64_t RemainderOffset = COffsetVal;
1905     int64_t ImmField = 0;
1906     const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(*Subtarget, true);
1907     // Use signed division by a power of two to truncate towards 0.
1908     int64_t D = 1LL << (NumBits - 1);
1909     RemainderOffset = (COffsetVal / D) * D;
1910     ImmField = COffsetVal - RemainderOffset;
1911 
1912     assert(TII->isLegalFLATOffset(ImmField, AMDGPUAS::PRIVATE_ADDRESS, true));
1913     assert(RemainderOffset + ImmField == COffsetVal);
1914 
1915     COffsetVal = ImmField;
1916 
1917     SDLoc DL(N);
1918     SDValue AddOffset =
1919         getMaterializedScalarImm32(Lo_32(RemainderOffset), DL);
1920     SAddr = SDValue(CurDAG->getMachineNode(AMDGPU::S_ADD_U32, DL, MVT::i32,
1921                                            SAddr, AddOffset), 0);
1922   }
1923 
1924   Offset = CurDAG->getTargetConstant(COffsetVal, SDLoc(), MVT::i16);
1925 
1926   return true;
1927 }
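
     // Editor's worked example (a sketch; NumBits == 13 is an assumption about
     // the private FLAT offset width): D == 1LL << 12 == 4096, and for
     // COffsetVal == -5000:
     //   RemainderOffset = (-5000 / 4096) * 4096 = -4096  (C++ '/' truncates
     //                                                     toward zero)
     //   ImmField        = -5000 - (-4096)       = -904
     // ImmField fits the signed immediate field, the -4096 remainder is folded
     // into SAddr with S_ADD_U32, and RemainderOffset + ImmField == COffsetVal
     // holds as asserted.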
1928 
1929 bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
1930                                           SDValue &Offset, bool &Imm) const {
1931   ConstantSDNode *C = dyn_cast<ConstantSDNode>(ByteOffsetNode);
1932   if (!C) {
1933     if (ByteOffsetNode.getValueType().isScalarInteger() &&
1934         ByteOffsetNode.getValueType().getSizeInBits() == 32) {
1935       Offset = ByteOffsetNode;
1936       Imm = false;
1937       return true;
1938     }
1939     if (ByteOffsetNode.getOpcode() == ISD::ZERO_EXTEND) {
1940       if (ByteOffsetNode.getOperand(0).getValueType().getSizeInBits() == 32) {
1941         Offset = ByteOffsetNode.getOperand(0);
1942         Imm = false;
1943         return true;
1944       }
1945     }
1946     return false;
1947   }
1948 
1949   SDLoc SL(ByteOffsetNode);
1950   // GFX9 and GFX10 have signed byte immediate offsets.
1951   int64_t ByteOffset = C->getSExtValue();
1952   Optional<int64_t> EncodedOffset =
1953       AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, false);
1954   if (EncodedOffset) {
1955     Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
1956     Imm = true;
1957     return true;
1958   }
1959 
1960   // SGPR and literal offsets are unsigned.
1961   if (ByteOffset < 0)
1962     return false;
1963 
1964   EncodedOffset = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget, ByteOffset);
1965   if (EncodedOffset) {
1966     Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
1967     return true;
1968   }
1969 
1970   if (!isUInt<32>(ByteOffset) && !isInt<32>(ByteOffset))
1971     return false;
1972 
1973   SDValue C32Bit = CurDAG->getTargetConstant(ByteOffset, SL, MVT::i32);
1974   Offset = SDValue(
1975       CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, C32Bit), 0);
1976 
1977   return true;
1978 }
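
     // Editor's note (hedged): the helpers above hide per-subtarget encoding
     // differences. For instance, on subtargets whose SMRD immediate is
     // dword-scaled, a byte offset of 16 would encode as 4. When no encoding
     // exists, getSMRDEncodedOffset returns None and the function falls back
     // to the 32-bit literal or S_MOV_B32-materialized SGPR paths above.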
1979 
1980 SDValue AMDGPUDAGToDAGISel::Expand32BitAddress(SDValue Addr) const {
1981   if (Addr.getValueType() != MVT::i32)
1982     return Addr;
1983 
1984   // Zero-extend a 32-bit address.
1985   SDLoc SL(Addr);
1986 
1987   const MachineFunction &MF = CurDAG->getMachineFunction();
1988   const SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
1989   unsigned AddrHiVal = Info->get32BitAddressHighBits();
1990   SDValue AddrHi = CurDAG->getTargetConstant(AddrHiVal, SL, MVT::i32);
1991 
1992   const SDValue Ops[] = {
1993     CurDAG->getTargetConstant(AMDGPU::SReg_64_XEXECRegClassID, SL, MVT::i32),
1994     Addr,
1995     CurDAG->getTargetConstant(AMDGPU::sub0, SL, MVT::i32),
1996     SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SL, MVT::i32, AddrHi),
1997             0),
1998     CurDAG->getTargetConstant(AMDGPU::sub1, SL, MVT::i32),
1999   };
2000 
2001   return SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, SL, MVT::i64,
2002                                         Ops), 0);
2003 }
2004 
2005 bool AMDGPUDAGToDAGISel::SelectSMRD(SDValue Addr, SDValue &SBase,
2006                                      SDValue &Offset, bool &Imm) const {
2007   SDLoc SL(Addr);
2008 
2009   // A 32-bit (address + offset) should not cause unsigned 32-bit integer
2010   // wraparound, because s_load instructions perform the addition in 64 bits.
2011   if (Addr.getValueType() != MVT::i32 ||
2012       Addr->getFlags().hasNoUnsignedWrap()) {
2013     SDValue N0, N1;
2014     // Extract the base and offset if possible.
2015     if (CurDAG->isBaseWithConstantOffset(Addr) ||
2016         Addr.getOpcode() == ISD::ADD) {
2017       N0 = Addr.getOperand(0);
2018       N1 = Addr.getOperand(1);
2019     } else if (getBaseWithOffsetUsingSplitOR(*CurDAG, Addr, N0, N1)) {
2020       assert(N0 && N1 && isa<ConstantSDNode>(N1));
2021     }
2022     if (N0 && N1) {
2023       if (SelectSMRDOffset(N1, Offset, Imm)) {
2024         SBase = Expand32BitAddress(N0);
2025         return true;
2026       }
2027     }
2028   }
2029   SBase = Expand32BitAddress(Addr);
2030   Offset = CurDAG->getTargetConstant(0, SL, MVT::i32);
2031   Imm = true;
2032   return true;
2033 }
2034 
2035 bool AMDGPUDAGToDAGISel::SelectSMRDImm(SDValue Addr, SDValue &SBase,
2036                                        SDValue &Offset) const {
2037   bool Imm = false;
2038   return SelectSMRD(Addr, SBase, Offset, Imm) && Imm;
2039 }
2040 
2041 bool AMDGPUDAGToDAGISel::SelectSMRDImm32(SDValue Addr, SDValue &SBase,
2042                                          SDValue &Offset) const {
2043 
2044   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2045 
2046   bool Imm = false;
2047   if (!SelectSMRD(Addr, SBase, Offset, Imm))
2048     return false;
2049 
2050   return !Imm && isa<ConstantSDNode>(Offset);
2051 }
2052 
2053 bool AMDGPUDAGToDAGISel::SelectSMRDSgpr(SDValue Addr, SDValue &SBase,
2054                                         SDValue &Offset) const {
2055   bool Imm = false;
2056   return SelectSMRD(Addr, SBase, Offset, Imm) && !Imm &&
2057          !isa<ConstantSDNode>(Offset);
2058 }
2059 
2060 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm(SDValue Addr,
2061                                              SDValue &Offset) const {
2062   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2063     // The immediate offset for S_BUFFER instructions is unsigned.
2064     if (auto Imm =
2065             AMDGPU::getSMRDEncodedOffset(*Subtarget, C->getZExtValue(), true)) {
2066       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2067       return true;
2068     }
2069   }
2070 
2071   return false;
2072 }
2073 
2074 bool AMDGPUDAGToDAGISel::SelectSMRDBufferImm32(SDValue Addr,
2075                                                SDValue &Offset) const {
2076   assert(Subtarget->getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
2077 
2078   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Addr)) {
2079     if (auto Imm = AMDGPU::getSMRDEncodedLiteralOffset32(*Subtarget,
2080                                                          C->getZExtValue())) {
2081       Offset = CurDAG->getTargetConstant(*Imm, SDLoc(Addr), MVT::i32);
2082       return true;
2083     }
2084   }
2085 
2086   return false;
2087 }
2088 
2089 bool AMDGPUDAGToDAGISel::SelectMOVRELOffset(SDValue Index,
2090                                             SDValue &Base,
2091                                             SDValue &Offset) const {
2092   SDLoc DL(Index);
2093 
2094   if (CurDAG->isBaseWithConstantOffset(Index)) {
2095     SDValue N0 = Index.getOperand(0);
2096     SDValue N1 = Index.getOperand(1);
2097     ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
2098 
2099     // (add n0, c0)
2100     // Don't peel off the offset (c0) if doing so could possibly lead
2101     // Don't peel off the offset (c0) if doing so could possibly cause the
2102     // base (n0) to become negative.
2103     // (or n0, |c0|) can never change the sign given isBaseWithConstantOffset.
2104         (Index->getOpcode() == ISD::OR && C1->getSExtValue() >= 0)) {
2105       Base = N0;
2106       Offset = CurDAG->getTargetConstant(C1->getZExtValue(), DL, MVT::i32);
2107       return true;
2108     }
2109   }
2110 
2111   if (isa<ConstantSDNode>(Index))
2112     return false;
2113 
2114   Base = Index;
2115   Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
2116   return true;
2117 }
2118 
2119 SDNode *AMDGPUDAGToDAGISel::getS_BFE(unsigned Opcode, const SDLoc &DL,
2120                                      SDValue Val, uint32_t Offset,
2121                                      uint32_t Width) {
2122   // Transformation function: pack the offset and width of a BFE into the
2123   // format expected by S_BFE_I32 / S_BFE_U32. In the second source operand,
2124   // bits [5:0] contain the offset and bits [22:16] the width.
2125   uint32_t PackedVal = Offset | (Width << 16);
2126   SDValue PackedConst = CurDAG->getTargetConstant(PackedVal, DL, MVT::i32);
2127 
2128   return CurDAG->getMachineNode(Opcode, DL, MVT::i32, Val, PackedConst);
2129 }
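
     // Editor's example (direct consequence of the layout described above):
     // extracting 8 bits starting at bit 16 packs as
     //   PackedVal = 16 | (8 << 16) = 0x00080010
     // so S_BFE_U32 with that constant computes (Val >> 16) & 0xff.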
2130 
2131 void AMDGPUDAGToDAGISel::SelectS_BFEFromShifts(SDNode *N) {
2132   // "((a << b) srl c)" ---> "BFE_U32 a, (c-b), (32-c)"
2133   // "((a << b) sra c)" ---> "BFE_I32 a, (c-b), (32-c)"
2134   // Predicate: 0 < b <= c < 32
2135 
2136   const SDValue &Shl = N->getOperand(0);
2137   ConstantSDNode *B = dyn_cast<ConstantSDNode>(Shl->getOperand(1));
2138   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
2139 
2140   if (B && C) {
2141     uint32_t BVal = B->getZExtValue();
2142     uint32_t CVal = C->getZExtValue();
2143 
2144     if (0 < BVal && BVal <= CVal && CVal < 32) {
2145       bool Signed = N->getOpcode() == ISD::SRA;
2146       unsigned Opcode = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2147 
2148       ReplaceNode(N, getS_BFE(Opcode, SDLoc(N), Shl.getOperand(0), CVal - BVal,
2149                               32 - CVal));
2150       return;
2151     }
2152   }
2153   SelectCode(N);
2154 }
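
     // Editor's note: a concrete instance of the rule above. For
     // "(a << 8) srl 24", b == 8 and c == 24 satisfy 0 < b <= c < 32, so
     // BFE_U32 a, 16, 8 is selected: it extracts bits [23:16] of a, exactly
     // what shifting left by 8 and then logically right by 24 leaves.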
2155 
2156 void AMDGPUDAGToDAGISel::SelectS_BFE(SDNode *N) {
2157   switch (N->getOpcode()) {
2158   case ISD::AND:
2159     if (N->getOperand(0).getOpcode() == ISD::SRL) {
2160       // "(a srl b) & mask" ---> "BFE_U32 a, b, popcount(mask)"
2161       // Predicate: isMask(mask)
2162       const SDValue &Srl = N->getOperand(0);
2163       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(Srl.getOperand(1));
2164       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(N->getOperand(1));
2165 
2166       if (Shift && Mask) {
2167         uint32_t ShiftVal = Shift->getZExtValue();
2168         uint32_t MaskVal = Mask->getZExtValue();
2169 
2170         if (isMask_32(MaskVal)) {
2171           uint32_t WidthVal = countPopulation(MaskVal);
2172 
2173           ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
2174                                   Srl.getOperand(0), ShiftVal, WidthVal));
2175           return;
2176         }
2177       }
2178     }
2179     break;
2180   case ISD::SRL:
2181     if (N->getOperand(0).getOpcode() == ISD::AND) {
2182       // "((a & mask) srl b)" ---> "BFE_U32 a, b, popcount(mask >> b)"
2183       // Predicate: isMask(mask >> b)
2184       const SDValue &And = N->getOperand(0);
2185       ConstantSDNode *Shift = dyn_cast<ConstantSDNode>(N->getOperand(1));
2186       ConstantSDNode *Mask = dyn_cast<ConstantSDNode>(And->getOperand(1));
2187 
2188       if (Shift && Mask) {
2189         uint32_t ShiftVal = Shift->getZExtValue();
2190         uint32_t MaskVal = Mask->getZExtValue() >> ShiftVal;
2191 
2192         if (isMask_32(MaskVal)) {
2193           uint32_t WidthVal = countPopulation(MaskVal);
2194 
2195           ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_U32, SDLoc(N),
2196                                   And.getOperand(0), ShiftVal, WidthVal));
2197           return;
2198         }
2199       }
2200     } else if (N->getOperand(0).getOpcode() == ISD::SHL) {
2201       SelectS_BFEFromShifts(N);
2202       return;
2203     }
2204     break;
2205   case ISD::SRA:
2206     if (N->getOperand(0).getOpcode() == ISD::SHL) {
2207       SelectS_BFEFromShifts(N);
2208       return;
2209     }
2210     break;
2211 
2212   case ISD::SIGN_EXTEND_INREG: {
2213     // sext_inreg (srl x, 16), i8 -> bfe_i32 x, 16, 8
2214     SDValue Src = N->getOperand(0);
2215     if (Src.getOpcode() != ISD::SRL)
2216       break;
2217 
2218     const ConstantSDNode *Amt = dyn_cast<ConstantSDNode>(Src.getOperand(1));
2219     if (!Amt)
2220       break;
2221 
2222     unsigned Width = cast<VTSDNode>(N->getOperand(1))->getVT().getSizeInBits();
2223     ReplaceNode(N, getS_BFE(AMDGPU::S_BFE_I32, SDLoc(N), Src.getOperand(0),
2224                             Amt->getZExtValue(), Width));
2225     return;
2226   }
2227   }
2228 
2229   SelectCode(N);
2230 }
2231 
2232 bool AMDGPUDAGToDAGISel::isCBranchSCC(const SDNode *N) const {
2233   assert(N->getOpcode() == ISD::BRCOND);
2234   if (!N->hasOneUse())
2235     return false;
2236 
2237   SDValue Cond = N->getOperand(1);
2238   if (Cond.getOpcode() == ISD::CopyToReg)
2239     Cond = Cond.getOperand(2);
2240 
2241   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
2242     return false;
2243 
2244   MVT VT = Cond.getOperand(0).getSimpleValueType();
2245   if (VT == MVT::i32)
2246     return true;
2247 
2248   if (VT == MVT::i64) {
2249     auto ST = static_cast<const GCNSubtarget *>(Subtarget);
2250 
2251     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
2252     return (CC == ISD::SETEQ || CC == ISD::SETNE) && ST->hasScalarCompareEq64();
2253   }
2254 
2255   return false;
2256 }
2257 
2258 void AMDGPUDAGToDAGISel::SelectBRCOND(SDNode *N) {
2259   SDValue Cond = N->getOperand(1);
2260 
2261   if (Cond.isUndef()) {
2262     CurDAG->SelectNodeTo(N, AMDGPU::SI_BR_UNDEF, MVT::Other,
2263                          N->getOperand(2), N->getOperand(0));
2264     return;
2265   }
2266 
2267   const GCNSubtarget *ST = static_cast<const GCNSubtarget *>(Subtarget);
2268   const SIRegisterInfo *TRI = ST->getRegisterInfo();
2269 
2270   bool UseSCCBr = isCBranchSCC(N) && isUniformBr(N);
2271   unsigned BrOp = UseSCCBr ? AMDGPU::S_CBRANCH_SCC1 : AMDGPU::S_CBRANCH_VCCNZ;
2272   Register CondReg = UseSCCBr ? AMDGPU::SCC : TRI->getVCC();
2273   SDLoc SL(N);
2274 
2275   if (!UseSCCBr) {
2276     // This is the case that we are selecting to S_CBRANCH_VCCNZ.  We have not
2277     // analyzed what generates the vcc value, so we do not know whether vcc
2278     // bits for disabled lanes are 0.  Thus we need to mask out bits for
2279     // disabled lanes.
2280     //
2281     // For the case that we select S_CBRANCH_SCC1 and it gets
2282     // changed to S_CBRANCH_VCCNZ in SIFixSGPRCopies, SIFixSGPRCopies calls
2283     // SIInstrInfo::moveToVALU which inserts the S_AND.
2284     //
2285     // We could add an analysis of what generates the vcc value here and omit
2286     // the S_AND when it is unnecessary. But it would be better to add a
2287     // separate pass after SIFixSGPRCopies to do the unnecessary S_AND
2288     // removal, so it catches both cases.
2289     Cond = SDValue(CurDAG->getMachineNode(ST->isWave32() ? AMDGPU::S_AND_B32
2290                                                          : AMDGPU::S_AND_B64,
2291                      SL, MVT::i1,
2292                      CurDAG->getRegister(ST->isWave32() ? AMDGPU::EXEC_LO
2293                                                         : AMDGPU::EXEC,
2294                                          MVT::i1),
2295                     Cond),
2296                    0);
2297   }
2298 
2299   SDValue VCC = CurDAG->getCopyToReg(N->getOperand(0), SL, CondReg, Cond);
2300   CurDAG->SelectNodeTo(N, BrOp, MVT::Other,
2301                        N->getOperand(2), // Basic Block
2302                        VCC.getValue(0));
2303 }
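
     // Editor's sketch (hedged) of the divergent-branch case above on wave64:
     //   s_and_b64        sN, exec, cond   ; clear bits of disabled lanes
     //   ; sN is then copied into vcc
     //   s_cbranch_vccnz  BB
     // On wave32 the same shape uses s_and_b32 and exec_lo.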
2304 
2305 void AMDGPUDAGToDAGISel::SelectFMAD_FMA(SDNode *N) {
2306   MVT VT = N->getSimpleValueType(0);
2307   bool IsFMA = N->getOpcode() == ISD::FMA;
2308   if (VT != MVT::f32 || (!Subtarget->hasMadMixInsts() &&
2309                          !Subtarget->hasFmaMixInsts()) ||
2310       ((IsFMA && Subtarget->hasMadMixInsts()) ||
2311        (!IsFMA && Subtarget->hasFmaMixInsts()))) {
2312     SelectCode(N);
2313     return;
2314   }
2315 
2316   SDValue Src0 = N->getOperand(0);
2317   SDValue Src1 = N->getOperand(1);
2318   SDValue Src2 = N->getOperand(2);
2319   unsigned Src0Mods, Src1Mods, Src2Mods;
2320 
2321   // Avoid using v_mad_mix_f32/v_fma_mix_f32 unless there is actually an operand
2322   // using the conversion from f16.
2323   bool Sel0 = SelectVOP3PMadMixModsImpl(Src0, Src0, Src0Mods);
2324   bool Sel1 = SelectVOP3PMadMixModsImpl(Src1, Src1, Src1Mods);
2325   bool Sel2 = SelectVOP3PMadMixModsImpl(Src2, Src2, Src2Mods);
2326 
2327   assert((IsFMA || !Mode.allFP32Denormals()) &&
2328          "fmad selected with denormals enabled");
2329   // TODO: We can select this with f32 denormals enabled if all the sources are
2330   // converted from f16 (in which case fmad isn't legal).
2331 
2332   if (Sel0 || Sel1 || Sel2) {
2333     // For dummy operands.
2334     SDValue Zero = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2335     SDValue Ops[] = {
2336       CurDAG->getTargetConstant(Src0Mods, SDLoc(), MVT::i32), Src0,
2337       CurDAG->getTargetConstant(Src1Mods, SDLoc(), MVT::i32), Src1,
2338       CurDAG->getTargetConstant(Src2Mods, SDLoc(), MVT::i32), Src2,
2339       CurDAG->getTargetConstant(0, SDLoc(), MVT::i1),
2340       Zero, Zero
2341     };
2342 
2343     CurDAG->SelectNodeTo(N,
2344                          IsFMA ? AMDGPU::V_FMA_MIX_F32 : AMDGPU::V_MAD_MIX_F32,
2345                          MVT::f32, Ops);
2346   } else {
2347     SelectCode(N);
2348   }
2349 }
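
     // Editor's note (hedged example): on a subtarget with v_fma_mix
     // instructions, fma(fpext(a : f16), b : f32, c : f32) makes Sel0 true, so
     // V_FMA_MIX_F32 is selected with src0's op_sel_hi marking it as an f16
     // source; an fma whose operands are all plain f32 leaves Sel0/Sel1/Sel2
     // false and falls through to SelectCode.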
2350 
2351 // This is here because there isn't a way to use the generated sub0_sub1 as the
2352 // subreg index to EXTRACT_SUBREG in tablegen.
2353 void AMDGPUDAGToDAGISel::SelectATOMIC_CMP_SWAP(SDNode *N) {
2354   MemSDNode *Mem = cast<MemSDNode>(N);
2355   unsigned AS = Mem->getAddressSpace();
2356   if (AS == AMDGPUAS::FLAT_ADDRESS) {
2357     SelectCode(N);
2358     return;
2359   }
2360 
2361   MVT VT = N->getSimpleValueType(0);
2362   bool Is32 = (VT == MVT::i32);
2363   SDLoc SL(N);
2364 
2365   MachineSDNode *CmpSwap = nullptr;
2366   if (Subtarget->hasAddr64()) {
2367     SDValue SRsrc, VAddr, SOffset, Offset, SLC;
2368 
2369     if (SelectMUBUFAddr64(Mem->getBasePtr(), SRsrc, VAddr, SOffset, Offset, SLC)) {
2370       unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN :
2371         AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN;
2372       SDValue CmpVal = Mem->getOperand(2);
2373       SDValue GLC = CurDAG->getTargetConstant(1, SL, MVT::i1);
2374 
2375       // XXX - Do we care about glue operands?
2376 
2377       SDValue Ops[] = {
2378         CmpVal, VAddr, SRsrc, SOffset, Offset, GLC, SLC, Mem->getChain()
2379       };
2380 
2381       CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2382     }
2383   }
2384 
2385   if (!CmpSwap) {
2386     SDValue SRsrc, SOffset, Offset, SLC;
2387     if (SelectMUBUFOffset(Mem->getBasePtr(), SRsrc, SOffset, Offset, SLC)) {
2388       unsigned Opcode = Is32 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN :
2389         AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN;
2390 
2391       SDValue CmpVal = Mem->getOperand(2);
2392       SDValue GLC = CurDAG->getTargetConstant(1, SL, MVT::i1);
2393       SDValue Ops[] = {
2394         CmpVal, SRsrc, SOffset, Offset, GLC, SLC, Mem->getChain()
2395       };
2396 
2397       CmpSwap = CurDAG->getMachineNode(Opcode, SL, Mem->getVTList(), Ops);
2398     }
2399   }
2400 
2401   if (!CmpSwap) {
2402     SelectCode(N);
2403     return;
2404   }
2405 
2406   MachineMemOperand *MMO = Mem->getMemOperand();
2407   CurDAG->setNodeMemRefs(CmpSwap, {MMO});
2408 
2409   unsigned SubReg = Is32 ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
2410   SDValue Extract
2411     = CurDAG->getTargetExtractSubreg(SubReg, SL, VT, SDValue(CmpSwap, 0));
2412 
2413   ReplaceUses(SDValue(N, 0), Extract);
2414   ReplaceUses(SDValue(N, 1), SDValue(CmpSwap, 1));
2415   CurDAG->RemoveDeadNode(N);
2416 }
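
     // Editor's note (hedged): the _RTN cmpswap variants return the whole
     // src/cmp data-pair register, with the original memory value in its low
     // half. That is why the code above extracts sub0 (i32) or sub0_sub1
     // (i64) from the result rather than matching this in a tablegen pattern.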
2417 
2418 void AMDGPUDAGToDAGISel::SelectDSAppendConsume(SDNode *N, unsigned IntrID) {
2419   // The address is assumed to be uniform, so if it ends up in a VGPR, it will
2420   // be copied to an SGPR with readfirstlane.
2421   unsigned Opc = IntrID == Intrinsic::amdgcn_ds_append ?
2422     AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
2423 
2424   SDValue Chain = N->getOperand(0);
2425   SDValue Ptr = N->getOperand(2);
2426   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2427   MachineMemOperand *MMO = M->getMemOperand();
2428   bool IsGDS = M->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
2429 
2430   SDValue Offset;
2431   if (CurDAG->isBaseWithConstantOffset(Ptr)) {
2432     SDValue PtrBase = Ptr.getOperand(0);
2433     SDValue PtrOffset = Ptr.getOperand(1);
2434 
2435     const APInt &OffsetVal = cast<ConstantSDNode>(PtrOffset)->getAPIntValue();
2436     if (isDSOffsetLegal(PtrBase, OffsetVal.getZExtValue())) {
2437       N = glueCopyToM0(N, PtrBase);
2438       Offset = CurDAG->getTargetConstant(OffsetVal, SDLoc(), MVT::i32);
2439     }
2440   }
2441 
2442   if (!Offset) {
2443     N = glueCopyToM0(N, Ptr);
2444     Offset = CurDAG->getTargetConstant(0, SDLoc(), MVT::i32);
2445   }
2446 
2447   SDValue Ops[] = {
2448     Offset,
2449     CurDAG->getTargetConstant(IsGDS, SDLoc(), MVT::i32),
2450     Chain,
2451     N->getOperand(N->getNumOperands() - 1) // New glue
2452   };
2453 
2454   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2455   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2456 }
2457 
2458 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
2459   switch (IntrID) {
2460   case Intrinsic::amdgcn_ds_gws_init:
2461     return AMDGPU::DS_GWS_INIT;
2462   case Intrinsic::amdgcn_ds_gws_barrier:
2463     return AMDGPU::DS_GWS_BARRIER;
2464   case Intrinsic::amdgcn_ds_gws_sema_v:
2465     return AMDGPU::DS_GWS_SEMA_V;
2466   case Intrinsic::amdgcn_ds_gws_sema_br:
2467     return AMDGPU::DS_GWS_SEMA_BR;
2468   case Intrinsic::amdgcn_ds_gws_sema_p:
2469     return AMDGPU::DS_GWS_SEMA_P;
2470   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2471     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
2472   default:
2473     llvm_unreachable("not a gws intrinsic");
2474   }
2475 }
2476 
2477 void AMDGPUDAGToDAGISel::SelectDS_GWS(SDNode *N, unsigned IntrID) {
2478   if (IntrID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
2479       !Subtarget->hasGWSSemaReleaseAll()) {
2480     // Let this error.
2481     SelectCode(N);
2482     return;
2483   }
2484 
2485   // Chain, intrinsic ID, vsrc, offset
2486   const bool HasVSrc = N->getNumOperands() == 4;
2487   assert(HasVSrc || N->getNumOperands() == 3);
2488 
2489   SDLoc SL(N);
2490   SDValue BaseOffset = N->getOperand(HasVSrc ? 3 : 2);
2491   int ImmOffset = 0;
2492   MemIntrinsicSDNode *M = cast<MemIntrinsicSDNode>(N);
2493   MachineMemOperand *MMO = M->getMemOperand();
2494 
2495   // Don't worry if the offset ends up in a VGPR. Only one lane will have an
2496   // effect, so SIFixSGPRCopies will validly insert a readfirstlane.
2497 
2498   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
2499   // offset field) % 64. Some versions of the programming guide omit the m0
2500   // part, or claim it's from offset 0.
2501   if (ConstantSDNode *ConstOffset = dyn_cast<ConstantSDNode>(BaseOffset)) {
2502     // If we have a constant offset, try to use the 0 in m0 as the base.
2503     // TODO: Look into changing the default m0 initialization value. If the
2504     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
2505     // the immediate offset.
2506     glueCopyToM0(N, CurDAG->getTargetConstant(0, SL, MVT::i32));
2507     ImmOffset = ConstOffset->getZExtValue();
2508   } else {
2509     if (CurDAG->isBaseWithConstantOffset(BaseOffset)) {
2510       ImmOffset = BaseOffset.getConstantOperandVal(1);
2511       BaseOffset = BaseOffset.getOperand(0);
2512     }
2513 
2514     // Prefer to do the shift in an SGPR since it should be possible to use m0
2515     // as the result directly. If it's already an SGPR, it will be eliminated
2516     // later.
2517     SDNode *SGPROffset
2518       = CurDAG->getMachineNode(AMDGPU::V_READFIRSTLANE_B32, SL, MVT::i32,
2519                                BaseOffset);
2520     // Shift to offset in m0
2521     SDNode *M0Base
2522       = CurDAG->getMachineNode(AMDGPU::S_LSHL_B32, SL, MVT::i32,
2523                                SDValue(SGPROffset, 0),
2524                                CurDAG->getTargetConstant(16, SL, MVT::i32));
2525     glueCopyToM0(N, SDValue(M0Base, 0));
2526   }
2527 
2528   SDValue Chain = N->getOperand(0);
2529   SDValue OffsetField = CurDAG->getTargetConstant(ImmOffset, SL, MVT::i32);
2530 
2531   const unsigned Opc = gwsIntrinToOpcode(IntrID);
2532   SmallVector<SDValue, 5> Ops;
2533   if (HasVSrc)
2534     Ops.push_back(N->getOperand(2));
2535   Ops.push_back(OffsetField);
2536   Ops.push_back(Chain);
2537 
2538   SDNode *Selected = CurDAG->SelectNodeTo(N, Opc, N->getVTList(), Ops);
2539   CurDAG->setNodeMemRefs(cast<MachineSDNode>(Selected), {MMO});
2540 }
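
     // Editor's sketch of the variable-offset path above (no new semantics):
     //   sN = v_readfirstlane_b32  base_offset
     //   m0 = s_lshl_b32           sN, 16     ; place the base in M0[21:16]
     // so the hardware resource id is (<isa opaque base> + M0[21:16] +
     // instruction offset field) % 64, matching the formula quoted in the
     // function.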
2541 
2542 void AMDGPUDAGToDAGISel::SelectInterpP1F16(SDNode *N) {
2543   if (Subtarget->getLDSBankCount() != 16) {
2544     // This is a single instruction with a pattern.
2545     SelectCode(N);
2546     return;
2547   }
2548 
2549   SDLoc DL(N);
2550 
2551   // This requires 2 instructions. It is possible to write a pattern to support
2552   // this, but the generated isel emitter doesn't correctly deal with multiple
2553   // output instructions using the same physical register input. The copy to m0
2554   // is incorrectly placed before the second instruction.
2555   //
2556   // TODO: Match source modifiers.
2557   //
2558   // def : Pat <
2559   //   (int_amdgcn_interp_p1_f16
2560   //    (VOP3Mods f32:$src0, i32:$src0_modifiers),
2561   //                             (i32 timm:$attrchan), (i32 timm:$attr),
2562   //                             (i1 timm:$high), M0),
2563   //   (V_INTERP_P1LV_F16 $src0_modifiers, VGPR_32:$src0, timm:$attr,
2564   //       timm:$attrchan, 0,
2565   //       (V_INTERP_MOV_F32 2, timm:$attr, timm:$attrchan), timm:$high)> {
2566   //   let Predicates = [has16BankLDS];
2567   // }
2568 
2569   // 16 bank LDS
2570   SDValue ToM0 = CurDAG->getCopyToReg(CurDAG->getEntryNode(), DL, AMDGPU::M0,
2571                                       N->getOperand(5), SDValue());
2572 
2573   SDVTList VTs = CurDAG->getVTList(MVT::f32, MVT::Other);
2574 
2575   SDNode *InterpMov =
2576     CurDAG->getMachineNode(AMDGPU::V_INTERP_MOV_F32, DL, VTs, {
2577         CurDAG->getTargetConstant(2, DL, MVT::i32), // P0
2578         N->getOperand(3),  // Attr
2579         N->getOperand(2),  // Attrchan
2580         ToM0.getValue(1) // In glue
2581   });
2582 
2583   SDNode *InterpP1LV =
2584     CurDAG->getMachineNode(AMDGPU::V_INTERP_P1LV_F16, DL, MVT::f32, {
2585         CurDAG->getTargetConstant(0, DL, MVT::i32), // $src0_modifiers
2586         N->getOperand(1), // Src0
2587         N->getOperand(3), // Attr
2588         N->getOperand(2), // Attrchan
2589         CurDAG->getTargetConstant(0, DL, MVT::i32), // $src2_modifiers
2590         SDValue(InterpMov, 0), // Src2 - holds two f16 values selected by high
2591         N->getOperand(4), // high
2592         CurDAG->getTargetConstant(0, DL, MVT::i1), // $clamp
2593         CurDAG->getTargetConstant(0, DL, MVT::i32), // $omod
2594         SDValue(InterpMov, 1)
2595   });
2596 
2597   CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), SDValue(InterpP1LV, 0));
2598 }
2599 
2600 void AMDGPUDAGToDAGISel::SelectINTRINSIC_W_CHAIN(SDNode *N) {
2601   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2602   switch (IntrID) {
2603   case Intrinsic::amdgcn_ds_append:
2604   case Intrinsic::amdgcn_ds_consume: {
2605     if (N->getValueType(0) != MVT::i32)
2606       break;
2607     SelectDSAppendConsume(N, IntrID);
2608     return;
2609   }
2610   }
2611 
2612   SelectCode(N);
2613 }
2614 
2615 void AMDGPUDAGToDAGISel::SelectINTRINSIC_WO_CHAIN(SDNode *N) {
2616   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
2617   unsigned Opcode;
2618   switch (IntrID) {
2619   case Intrinsic::amdgcn_wqm:
2620     Opcode = AMDGPU::WQM;
2621     break;
2622   case Intrinsic::amdgcn_softwqm:
2623     Opcode = AMDGPU::SOFT_WQM;
2624     break;
2625   case Intrinsic::amdgcn_wwm:
2626     Opcode = AMDGPU::WWM;
2627     break;
2628   case Intrinsic::amdgcn_interp_p1_f16:
2629     SelectInterpP1F16(N);
2630     return;
2631   default:
2632     SelectCode(N);
2633     return;
2634   }
2635 
2636   SDValue Src = N->getOperand(1);
2637   CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), {Src});
2638 }
2639 
2640 void AMDGPUDAGToDAGISel::SelectINTRINSIC_VOID(SDNode *N) {
2641   unsigned IntrID = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
2642   switch (IntrID) {
2643   case Intrinsic::amdgcn_ds_gws_init:
2644   case Intrinsic::amdgcn_ds_gws_barrier:
2645   case Intrinsic::amdgcn_ds_gws_sema_v:
2646   case Intrinsic::amdgcn_ds_gws_sema_br:
2647   case Intrinsic::amdgcn_ds_gws_sema_p:
2648   case Intrinsic::amdgcn_ds_gws_sema_release_all:
2649     SelectDS_GWS(N, IntrID);
2650     return;
2651   default:
2652     break;
2653   }
2654 
2655   SelectCode(N);
2656 }
2657 
2658 bool AMDGPUDAGToDAGISel::SelectVOP3ModsImpl(SDValue In, SDValue &Src,
2659                                             unsigned &Mods,
2660                                             bool AllowAbs) const {
2661   Mods = 0;
2662   Src = In;
2663 
2664   if (Src.getOpcode() == ISD::FNEG) {
2665     Mods |= SISrcMods::NEG;
2666     Src = Src.getOperand(0);
2667   }
2668 
2669   if (AllowAbs && Src.getOpcode() == ISD::FABS) {
2670     Mods |= SISrcMods::ABS;
2671     Src = Src.getOperand(0);
2672   }
2673 
2674   return true;
2675 }
2676 
2677 bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src,
2678                                         SDValue &SrcMods) const {
2679   unsigned Mods;
2680   if (SelectVOP3ModsImpl(In, Src, Mods)) {
2681     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2682     return true;
2683   }
2684 
2685   return false;
2686 }
2687 
2688 bool AMDGPUDAGToDAGISel::SelectVOP3BMods(SDValue In, SDValue &Src,
2689                                          SDValue &SrcMods) const {
2690   unsigned Mods;
2691   if (SelectVOP3ModsImpl(In, Src, Mods, /* AllowAbs */ false)) {
2692     SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2693     return true;
2694   }
2695 
2696   return false;
2697 }
2698 
2699 bool AMDGPUDAGToDAGISel::SelectVOP3Mods_NNaN(SDValue In, SDValue &Src,
2700                                              SDValue &SrcMods) const {
2701   SelectVOP3Mods(In, Src, SrcMods);
2702   return isNoNanSrc(Src);
2703 }
2704 
2705 bool AMDGPUDAGToDAGISel::SelectVOP3NoMods(SDValue In, SDValue &Src) const {
2706   if (In.getOpcode() == ISD::FABS || In.getOpcode() == ISD::FNEG)
2707     return false;
2708 
2709   Src = In;
2710   return true;
2711 }
2712 
2713 bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src,
2714                                          SDValue &SrcMods, SDValue &Clamp,
2715                                          SDValue &Omod) const {
2716   SDLoc DL(In);
2717   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2718   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2719 
2720   return SelectVOP3Mods(In, Src, SrcMods);
2721 }
2722 
2723 bool AMDGPUDAGToDAGISel::SelectVOP3BMods0(SDValue In, SDValue &Src,
2724                                           SDValue &SrcMods, SDValue &Clamp,
2725                                           SDValue &Omod) const {
2726   SDLoc DL(In);
2727   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2728   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2729 
2730   return SelectVOP3BMods(In, Src, SrcMods);
2731 }
2732 
2733 bool AMDGPUDAGToDAGISel::SelectVOP3OMods(SDValue In, SDValue &Src,
2734                                          SDValue &Clamp, SDValue &Omod) const {
2735   Src = In;
2736 
2737   SDLoc DL(In);
2738   Clamp = CurDAG->getTargetConstant(0, DL, MVT::i1);
2739   Omod = CurDAG->getTargetConstant(0, DL, MVT::i1);
2740 
2741   return true;
2742 }
2743 
2744 bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src,
2745                                          SDValue &SrcMods) const {
2746   unsigned Mods = 0;
2747   Src = In;
2748 
2749   if (Src.getOpcode() == ISD::FNEG) {
2750     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
2751     Src = Src.getOperand(0);
2752   }
2753 
2754   if (Src.getOpcode() == ISD::BUILD_VECTOR) {
2755     unsigned VecMods = Mods;
2756 
2757     SDValue Lo = stripBitcast(Src.getOperand(0));
2758     SDValue Hi = stripBitcast(Src.getOperand(1));
2759 
2760     if (Lo.getOpcode() == ISD::FNEG) {
2761       Lo = stripBitcast(Lo.getOperand(0));
2762       Mods ^= SISrcMods::NEG;
2763     }
2764 
2765     if (Hi.getOpcode() == ISD::FNEG) {
2766       Hi = stripBitcast(Hi.getOperand(0));
2767       Mods ^= SISrcMods::NEG_HI;
2768     }
2769 
2770     if (isExtractHiElt(Lo, Lo))
2771       Mods |= SISrcMods::OP_SEL_0;
2772 
2773     if (isExtractHiElt(Hi, Hi))
2774       Mods |= SISrcMods::OP_SEL_1;
2775 
2776     Lo = stripExtractLoElt(Lo);
2777     Hi = stripExtractLoElt(Hi);
2778 
2779     if (Lo == Hi && !isInlineImmediate(Lo.getNode())) {
2780       // Really a scalar input. Just select from the low half of the register to
2781       // avoid packing.
2782 
2783       Src = Lo;
2784       SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2785       return true;
2786     }
2787 
2788     Mods = VecMods;
2789   }
2790 
2791   // Packed instructions do not have abs modifiers.
2792   Mods |= SISrcMods::OP_SEL_1;
2793 
2794   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2795   return true;
2796 }
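
     // Editor's examples (hedged) for the packed-modifier logic above:
     //  * A source "fneg (v2f16 x)" toggles NEG and NEG_HI, negating both
     //    halves.
     //  * "build_vector y, y", where y already lives in a low 16-bit half,
     //    takes the early return with no OP_SEL bits set, so both lanes read
     //    the low half of one register and no pack is emitted.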
2797 
2798 bool AMDGPUDAGToDAGISel::SelectVOP3OpSel(SDValue In, SDValue &Src,
2799                                          SDValue &SrcMods) const {
2800   Src = In;
2801   // FIXME: Handle op_sel
2802   SrcMods = CurDAG->getTargetConstant(0, SDLoc(In), MVT::i32);
2803   return true;
2804 }
2805 
2806 bool AMDGPUDAGToDAGISel::SelectVOP3OpSelMods(SDValue In, SDValue &Src,
2807                                              SDValue &SrcMods) const {
2808   // FIXME: Handle op_sel
2809   return SelectVOP3Mods(In, Src, SrcMods);
2810 }
2811 
2812 // The return value is not whether the match is possible (which it always is),
2813 // but whether or not a conversion is really used.
2814 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixModsImpl(SDValue In, SDValue &Src,
2815                                                    unsigned &Mods) const {
2816   Mods = 0;
2817   SelectVOP3ModsImpl(In, Src, Mods);
2818 
2819   if (Src.getOpcode() == ISD::FP_EXTEND) {
2820     Src = Src.getOperand(0);
2821     assert(Src.getValueType() == MVT::f16);
2822     Src = stripBitcast(Src);
2823 
2824     // Be careful about folding modifiers if we already have an abs. fneg is
2825     // applied last, so we don't want to apply an earlier fneg.
2826     if ((Mods & SISrcMods::ABS) == 0) {
2827       unsigned ModsTmp;
2828       SelectVOP3ModsImpl(Src, Src, ModsTmp);
2829 
2830       if ((ModsTmp & SISrcMods::NEG) != 0)
2831         Mods ^= SISrcMods::NEG;
2832 
2833       if ((ModsTmp & SISrcMods::ABS) != 0)
2834         Mods |= SISrcMods::ABS;
2835     }
2836 
    // op_sel/op_sel_hi decide the source type and which half of the source
    // register is used. If the source's op_sel_hi is set, it indicates to do
    // a conversion from fp16. If the source's op_sel is set, it picks the
    // high half of the source register.
2841 
2842     Mods |= SISrcMods::OP_SEL_1;
2843     if (isExtractHiElt(Src, Src)) {
2844       Mods |= SISrcMods::OP_SEL_0;
2845 
2846       // TODO: Should we try to look for neg/abs here?
2847     }
2848 
2849     return true;
2850   }
2851 
2852   return false;
2853 }
2854 
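// Wrapper that always succeeds: encode whatever mad-mix modifiers were found
// (possibly none) as the immediate source-modifier operand.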
2855 bool AMDGPUDAGToDAGISel::SelectVOP3PMadMixMods(SDValue In, SDValue &Src,
2856                                                SDValue &SrcMods) const {
2857   unsigned Mods = 0;
2858   SelectVOP3PMadMixModsImpl(In, Src, Mods);
2859   SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32);
2860   return true;
2861 }
2862 
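// Try to form an i32 whose high 16 bits hold In: constants are shifted into
// place, undef stays undef, and an existing high-half extract reuses its
// source register. Returns an empty SDValue if no cheap form exists.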
2863 SDValue AMDGPUDAGToDAGISel::getHi16Elt(SDValue In) const {
2864   if (In.isUndef())
2865     return CurDAG->getUNDEF(MVT::i32);
2866 
2867   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(In)) {
2868     SDLoc SL(In);
2869     return CurDAG->getConstant(C->getZExtValue() << 16, SL, MVT::i32);
2870   }
2871 
2872   if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(In)) {
2873     SDLoc SL(In);
2874     return CurDAG->getConstant(
2875       C->getValueAPF().bitcastToAPInt().getZExtValue() << 16, SL, MVT::i32);
2876   }
2877 
2878   SDValue Src;
2879   if (isExtractHiElt(In, Src))
2880     return Src;
2881 
2882   return SDValue();
2883 }
2884 
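// Decide whether an immediate operand should be materialized in a VGPR:
// true only if at least one scanned use strictly requires a VGPR even after
// trying to commute commutable users to an SGPR-accepting operand.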
bool AMDGPUDAGToDAGISel::isVGPRImm(const SDNode *N) const {
  assert(CurDAG->getTarget().getTargetTriple().getArch() == Triple::amdgcn);

  const SIRegisterInfo *SIRI =
    static_cast<const SIRegisterInfo *>(Subtarget->getRegisterInfo());
  const SIInstrInfo *SII =
    static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
2892 
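  // Bound the number of uses scanned to keep compile time in check; if the
  // limit is hit, conservatively return false below.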
2893   unsigned Limit = 0;
2894   bool AllUsesAcceptSReg = true;
  for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
       Limit < 10 && U != E; ++U, ++Limit) {
2897     const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
2898 
    // If the register class is unknown, it could be a class that needs to be
    // an SGPR, e.g. an inline asm constraint, so be conservative.
2902     if (!RC || SIRI->isSGPRClass(RC))
2903       return false;
2904 
2905     if (RC != &AMDGPU::VS_32RegClass) {
2906       AllUsesAcceptSReg = false;
      SDNode *User = *U;
      if (User->isMachineOpcode()) {
        unsigned Opc = User->getMachineOpcode();
        const MCInstrDesc &Desc = SII->get(Opc);
2911         if (Desc.isCommutable()) {
2912           unsigned OpIdx = Desc.getNumDefs() + U.getOperandNo();
2913           unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
2914           if (SII->findCommutedOpIndices(Desc, OpIdx, CommuteIdx1)) {
2915             unsigned CommutedOpNo = CommuteIdx1 - Desc.getNumDefs();
            const TargetRegisterClass *CommutedRC =
                getOperandRegClass(*U, CommutedOpNo);
2917             if (CommutedRC == &AMDGPU::VS_32RegClass)
2918               AllUsesAcceptSReg = true;
2919           }
2920         }
2921       }
      // If AllUsesAcceptSReg is still false, we have not succeeded in
      // commuting the current user. This means at least one use strictly
      // requires a VGPR, so do not attempt to commute any other user
      // instructions.
2926       if (!AllUsesAcceptSReg)
2927         break;
2928     }
2929   }
2930   return !AllUsesAcceptSReg && (Limit < 10);
2931 }
2932 
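// A load is uniform if it is aligned to at least 4 bytes, is non-divergent,
// and either comes from a constant address space, or is a simple global load
// whose memory is known not to be clobbered, on subtargets that scalarize
// global loads.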
bool AMDGPUDAGToDAGISel::isUniformLoad(const SDNode *N) const {
  auto *Ld = cast<LoadSDNode>(N);

  return Ld->getAlignment() >= 4 &&
         (((Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
            Ld->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) &&
           !N->isDivergent()) ||
          (Subtarget->getScalarizeGlobalBehavior() &&
           Ld->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS &&
           Ld->isSimple() && !N->isDivergent() &&
           static_cast<const SITargetLowering *>(getTargetLowering())
               ->isMemOpHasNoClobberedMemOperand(N)));
2956 }
2957 
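// Repeatedly run target-specific post-selection folding (PostISelFolding)
// over all selected machine nodes until the DAG reaches a fixed point.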
2958 void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
2959   const AMDGPUTargetLowering& Lowering =
2960     *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
2961   bool IsModified = false;
2962   do {
2963     IsModified = false;
2964 
2965     // Go over all selected nodes and try to fold them a bit more
2966     SelectionDAG::allnodes_iterator Position = CurDAG->allnodes_begin();
2967     while (Position != CurDAG->allnodes_end()) {
2968       SDNode *Node = &*Position++;
2969       MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(Node);
2970       if (!MachineNode)
2971         continue;
2972 
2973       SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
2974       if (ResNode != Node) {
2975         if (ResNode)
2976           ReplaceUses(Node, ResNode);
2977         IsModified = true;
2978       }
2979     }
2980     CurDAG->RemoveDeadNodes();
2981   } while (IsModified);
2982 }
2983 
2984 bool R600DAGToDAGISel::runOnMachineFunction(MachineFunction &MF) {
2985   Subtarget = &MF.getSubtarget<R600Subtarget>();
2986   return SelectionDAGISel::runOnMachineFunction(MF);
2987 }
2988 
2989 bool R600DAGToDAGISel::isConstantLoad(const MemSDNode *N, int CbId) const {
2990   if (!N->readMem())
2991     return false;
2992   if (CbId == -1)
2993     return N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
2994            N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
2995 
2996   return N->getAddressSpace() == AMDGPUAS::CONSTANT_BUFFER_0 + CbId;
2997 }
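// Fold a constant address into an int-pointer constant, scaled from a byte
// address to a dword index (hence the division by 4).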
2998 
bool R600DAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr,
                                                       SDValue &IntPtr) {
3001   if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {
3002     IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr),
3003                                        true);
3004     return true;
3005   }
3006   return false;
3007 }
3008 
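// Any non-constant address is selected as a base register with a zero
// offset; constant addresses are handled by the constant-offset path above.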
bool R600DAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr,
                                                       SDValue &BaseReg,
                                                       SDValue &Offset) {
3011   if (!isa<ConstantSDNode>(Addr)) {
3012     BaseReg = Addr;
3013     Offset = CurDAG->getIntPtrConstant(0, SDLoc(Addr), true);
3014     return true;
3015   }
3016   return false;
3017 }
3018 
3019 void R600DAGToDAGISel::Select(SDNode *N) {
  unsigned Opc = N->getOpcode();
3021   if (N->isMachineOpcode()) {
3022     N->setNodeId(-1);
3023     return;   // Already selected.
3024   }
3025 
3026   switch (Opc) {
3027   default: break;
3028   case AMDGPUISD::BUILD_VERTICAL_VECTOR:
3029   case ISD::SCALAR_TO_VECTOR:
3030   case ISD::BUILD_VECTOR: {
3031     EVT VT = N->getValueType(0);
3032     unsigned NumVectorElts = VT.getVectorNumElements();
3033     unsigned RegClassID;
    // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
    // sequence, which adds a 128-bit register copy when going through the
    // TwoAddressInstructions pass. We want to avoid 128-bit copies as much
    // as possible because they can't be bundled by our scheduler.
3038     switch(NumVectorElts) {
3039     case 2: RegClassID = R600::R600_Reg64RegClassID; break;
3040     case 4:
3041       if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
3042         RegClassID = R600::R600_Reg128VerticalRegClassID;
3043       else
3044         RegClassID = R600::R600_Reg128RegClassID;
3045       break;
3046     default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
3047     }
3048     SelectBuildVector(N, RegClassID);
3049     return;
3050   }
3051   }
3052 
3053   SelectCode(N);
3054 }
3055 
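// Match indirect addressing: a plain constant or (DWORDADDR C) selects
// INDIRECT_BASE_ADDR with C as the immediate offset, (add/or base, C)
// selects base plus the immediate offset, and anything else falls back to
// (Addr + 0). This always succeeds.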
3056 bool R600DAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base,
3057                                           SDValue &Offset) {
3058   ConstantSDNode *C;
3059   SDLoc DL(Addr);
3060 
3061   if ((C = dyn_cast<ConstantSDNode>(Addr))) {
3062     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
3063     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
3064   } else if ((Addr.getOpcode() == AMDGPUISD::DWORDADDR) &&
3065              (C = dyn_cast<ConstantSDNode>(Addr.getOperand(0)))) {
3066     Base = CurDAG->getRegister(R600::INDIRECT_BASE_ADDR, MVT::i32);
3067     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
3068   } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) &&
3069             (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
3070     Base = Addr.getOperand(0);
3071     Offset = CurDAG->getTargetConstant(C->getZExtValue(), DL, MVT::i32);
3072   } else {
3073     Base = Addr;
3074     Offset = CurDAG->getTargetConstant(0, DL, MVT::i32);
3075   }
3076 
3077   return true;
3078 }
3079 
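// Match VTX_READ addressing: fold a 16-bit immediate into the offset field
// where possible, using R600::ZERO as the base when the whole address is a
// constant. This always succeeds.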
3080 bool R600DAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base,
3081                                           SDValue &Offset) {
3082   ConstantSDNode *IMMOffset;
3083 
  if (Addr.getOpcode() == ISD::ADD &&
      (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) &&
      isInt<16>(IMMOffset->getZExtValue())) {
    Base = Addr.getOperand(0);
    Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
                                       MVT::i32);
    return true;
  // If the pointer address is constant, we can move it to the offset field.
  } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr)) &&
             isInt<16>(IMMOffset->getZExtValue())) {
3095     Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
3096                                   SDLoc(CurDAG->getEntryNode()),
3097                                   R600::ZERO, MVT::i32);
3098     Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), SDLoc(Addr),
3099                                        MVT::i32);
3100     return true;
3101   }
3102 
3103   // Default case, no offset
3104   Base = Addr;
3105   Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
3106   return true;
3107 }
3108