//===-- R600ISelLowering.cpp - R600 DAG Lowering Implementation -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for R600
//
//===----------------------------------------------------------------------===//

#include "R600ISelLowering.h"
#include "AMDGPU.h"
#include "MCTargetDesc/R600MCTargetDesc.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600Subtarget.h"
#include "R600TargetMachine.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"

using namespace llvm;

#include "R600GenCallingConv.inc"

R600TargetLowering::R600TargetLowering(const TargetMachine &TM,
                                       const R600Subtarget &STI)
    : AMDGPUTargetLowering(TM, STI), Subtarget(&STI), Gen(STI.getGeneration()) {
  addRegisterClass(MVT::f32, &R600::R600_Reg32RegClass);
  addRegisterClass(MVT::i32, &R600::R600_Reg32RegClass);
  addRegisterClass(MVT::v2f32, &R600::R600_Reg64RegClass);
  addRegisterClass(MVT::v2i32, &R600::R600_Reg64RegClass);
  addRegisterClass(MVT::v4f32, &R600::R600_Reg128RegClass);
  addRegisterClass(MVT::v4i32, &R600::R600_Reg128RegClass);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Legalize loads and stores to the private address space.
  setOperationAction(ISD::LOAD, {MVT::i32, MVT::v2i32, MVT::v4i32}, Custom);

  // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
  // spaces, so it is custom lowered to handle those where it isn't.
  for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD})
    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(Op, VT, MVT::i1, Promote);
      setLoadExtAction(Op, VT, MVT::i8, Custom);
      setLoadExtAction(Op, VT, MVT::i16, Custom);
    }

  // Workaround for LegalizeDAG asserting on expansion of i1 vector loads.
  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v2i32,
                   MVT::v2i1, Expand);

  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v4i32,
                   MVT::v4i1, Expand);

  setOperationAction(ISD::STORE, {MVT::i8, MVT::i32, MVT::v2i32, MVT::v4i32},
                     Custom);

  setTruncStoreAction(MVT::i32, MVT::i8, Custom);
  setTruncStoreAction(MVT::i32, MVT::i16, Custom);
  // We need to include these since trunc STORES to PRIVATE need
  // special handling to accommodate RMW
  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Custom);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Custom);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Custom);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Custom);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Custom);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Custom);

  // Workaround for LegalizeDAG asserting on expansion of i1 vector stores.
  setTruncStoreAction(MVT::v2i32, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i1, Expand);

  // Set condition code actions
  setCondCodeAction({ISD::SETO, ISD::SETUO, ISD::SETLT, ISD::SETLE, ISD::SETOLT,
                     ISD::SETOLE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGE,
                     ISD::SETUGT, ISD::SETULT, ISD::SETULE},
                    MVT::f32, Expand);

  setCondCodeAction({ISD::SETLE, ISD::SETLT, ISD::SETULE, ISD::SETULT},
                    MVT::i32, Expand);

  setOperationAction({ISD::FCOS, ISD::FSIN}, MVT::f32, Custom);

  setOperationAction(ISD::SETCC, {MVT::v4i32, MVT::v2i32}, Expand);

  setOperationAction(ISD::BR_CC, {MVT::i32, MVT::f32}, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::FSUB, MVT::f32, Expand);

  setOperationAction(ISD::IS_FPCLASS,
                     {MVT::f32, MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32,
                      MVT::v6f32, MVT::v7f32, MVT::v8f32, MVT::v16f32},
                     Expand);

  setOperationAction({ISD::FCEIL, ISD::FTRUNC, ISD::FROUNDEVEN, ISD::FFLOOR},
                     MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, {MVT::f32, MVT::i32}, Custom);

  setOperationAction(ISD::SETCC, {MVT::i32, MVT::f32}, Expand);
  setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT}, {MVT::i1, MVT::i64},
                     Custom);

  setOperationAction(ISD::SELECT, {MVT::i32, MVT::f32, MVT::v2i32, MVT::v4i32},
                     Expand);

  // ADD, SUB overflow.
  // TODO: turn these into Legal?
  if (Subtarget->hasCARRY())
    setOperationAction(ISD::UADDO, MVT::i32, Custom);

  if (Subtarget->hasBORROW())
    setOperationAction(ISD::USUBO, MVT::i32, Custom);

  // Expand sign extension of vectors
  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i1, MVT::v4i1}, Expand);

  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i8, MVT::v4i8}, Expand);

  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v4i16}, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i32, MVT::v4i32}, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT,
                     {MVT::v2i32, MVT::v2f32, MVT::v4i32, MVT::v4f32}, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT,
                     {MVT::v2i32, MVT::v2f32, MVT::v4i32, MVT::v4f32}, Custom);

  // We don't have 64-bit shifts. Thus we need either SHX i64 or SHX_PARTS i32
  //  to be Legal/Custom in order to avoid library calls.
  setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, MVT::i32,
                     Custom);

  if (!Subtarget->hasFMA())
    setOperationAction(ISD::FMA, {MVT::f32, MVT::f64}, Expand);

  // FIXME: May need no denormals check
  setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI())
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, {MVT::f32, MVT::f64}, Expand);

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // FIXME: This was moved from AMDGPUTargetLowering, I'm not sure if we
  // need it for R600.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs)
    setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT,
                       Expand);

  // LLVM will expand these to atomic_cmp_swap(0)
  // and atomic_swap, respectively.
  setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i32, Expand);

  // We need to custom lower some of the intrinsics
  setOperationAction({ISD::INTRINSIC_VOID, ISD::INTRINSIC_WO_CHAIN}, MVT::Other,
                     Custom);

  setSchedulingPreference(Sched::Source);

  setTargetDAGCombine({ISD::FP_ROUND, ISD::FP_TO_SINT, ISD::EXTRACT_VECTOR_ELT,
                       ISD::SELECT_CC, ISD::INSERT_VECTOR_ELT, ISD::LOAD});
}

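// Return true if the instruction that follows \p I is the RETURN that ends
// the program, i.e. if the instruction at \p I should set the End-Of-Program
// bit.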
static inline bool isEOP(MachineBasicBlock::iterator I) {
  if (std::next(I) == I->getParent()->end())
    return false;
  return std::next(I)->getOpcode() == R600::RETURN;
}

MachineBasicBlock *
R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock::iterator I = MI;
  const R600InstrInfo *TII = Subtarget->getInstrInfo();

  switch (MI.getOpcode()) {
  default:
    // Replace LDS_*_RET instructions that don't have any uses with the
    // equivalent LDS_*_NORET instruction.
    if (TII->isLDSRetInstr(MI.getOpcode())) {
      int DstIdx = TII->getOperandIdx(MI.getOpcode(), R600::OpName::dst);
      assert(DstIdx != -1);
      MachineInstrBuilder NewMI;
      // FIXME: getLDSNoRetOp method only handles LDS_1A1D LDS ops. Add
      //        LDS_1A2D support and remove this special case.
      if (!MRI.use_empty(MI.getOperand(DstIdx).getReg()) ||
          MI.getOpcode() == R600::LDS_CMPST_RET)
        return BB;

      NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
                      TII->get(R600::getLDSNoRetOp(MI.getOpcode())));
      for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
        NewMI.add(MO);
    } else {
      return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
    }
    break;

  case R600::FABS_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, I, R600::MOV, MI.getOperand(0).getReg(),
        MI.getOperand(1).getReg());
    TII->addFlag(*NewMI, 0, MO_FLAG_ABS);
    break;
  }

  case R600::FNEG_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, I, R600::MOV, MI.getOperand(0).getReg(),
        MI.getOperand(1).getReg());
    TII->addFlag(*NewMI, 0, MO_FLAG_NEG);
    break;
  }

  case R600::MASK_WRITE: {
    Register maskedRegister = MI.getOperand(0).getReg();
    assert(maskedRegister.isVirtual());
    MachineInstr *defInstr = MRI.getVRegDef(maskedRegister);
    TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
    break;
  }

  case R600::MOV_IMM_F32:
    TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(), MI.getOperand(1)
                                                            .getFPImm()
                                                            ->getValueAPF()
                                                            .bitcastToAPInt()
                                                            .getZExtValue());
    break;

  case R600::MOV_IMM_I32:
    TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(),
                     MI.getOperand(1).getImm());
    break;

  case R600::MOV_IMM_GLOBAL_ADDR: {
    //TODO: Perhaps combine this instruction with the next if possible
    auto MIB = TII->buildDefaultInstruction(
        *BB, MI, R600::MOV, MI.getOperand(0).getReg(), R600::ALU_LITERAL_X);
    int Idx = TII->getOperandIdx(*MIB, R600::OpName::literal);
    //TODO: Ugh this is rather ugly
    const MachineOperand &MO = MI.getOperand(1);
    MIB->getOperand(Idx).ChangeToGA(MO.getGlobal(), MO.getOffset(),
                                    MO.getTargetFlags());
    break;
  }

  case R600::CONST_COPY: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, MI, R600::MOV, MI.getOperand(0).getReg(), R600::ALU_CONST);
    TII->setImmOperand(*NewMI, R600::OpName::src0_sel,
                       MI.getOperand(1).getImm());
    break;
  }

  case R600::RAT_WRITE_CACHELESS_32_eg:
  case R600::RAT_WRITE_CACHELESS_64_eg:
  case R600::RAT_WRITE_CACHELESS_128_eg:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .addImm(isEOP(I)); // Set End of program bit
    break;

  case R600::RAT_STORE_TYPED_eg:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .addImm(isEOP(I)); // Set End of program bit
    break;

  case R600::BRANCH:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::JUMP))
        .add(MI.getOperand(0));
    break;

  case R600::BRANCH_COND_f32: {
    MachineInstr *NewMI =
        BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::PRED_X),
                R600::PREDICATE_BIT)
            .add(MI.getOperand(1))
            .addImm(R600::PRED_SETNE)
            .addImm(0); // Flags
    TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::JUMP_COND))
        .add(MI.getOperand(0))
        .addReg(R600::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case R600::BRANCH_COND_i32: {
    MachineInstr *NewMI =
        BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::PRED_X),
                R600::PREDICATE_BIT)
            .add(MI.getOperand(1))
            .addImm(R600::PRED_SETNE_INT)
            .addImm(0); // Flags
    TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::JUMP_COND))
        .add(MI.getOperand(0))
        .addReg(R600::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case R600::EG_ExportSwz:
  case R600::R600_ExportSwz: {
    // The instruction is left unmodified if it's not the last one of its type.
    bool isLastInstructionOfItsType = true;
    unsigned InstExportType = MI.getOperand(1).getImm();
    for (MachineBasicBlock::iterator NextExportInst = std::next(I),
         EndBlock = BB->end(); NextExportInst != EndBlock;
         NextExportInst = std::next(NextExportInst)) {
      if (NextExportInst->getOpcode() == R600::EG_ExportSwz ||
          NextExportInst->getOpcode() == R600::R600_ExportSwz) {
        unsigned CurrentInstExportType = NextExportInst->getOperand(1)
            .getImm();
        if (CurrentInstExportType == InstExportType) {
          isLastInstructionOfItsType = false;
          break;
        }
      }
    }
    bool EOP = isEOP(I);
    if (!EOP && !isLastInstructionOfItsType)
      return BB;
    unsigned CfInst = (MI.getOpcode() == R600::EG_ExportSwz) ? 84 : 40;
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .add(MI.getOperand(3))
        .add(MI.getOperand(4))
        .add(MI.getOperand(5))
        .add(MI.getOperand(6))
        .addImm(CfInst)
        .addImm(EOP);
    break;
  }
  case R600::RETURN: {
    return BB;
  }
  }

  MI.eraseFromParent();
  return BB;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
  case ISD::UADDO: return LowerUADDSUBO(Op, DAG, ISD::ADD, AMDGPUISD::CARRY);
  case ISD::USUBO: return LowerUADDSUBO(Op, DAG, ISD::SUB, AMDGPUISD::BORROW);
  case ISD::FCOS:
  case ISD::FSIN: return LowerTrig(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
  case ISD::FrameIndex: return lowerFrameIndex(Op, DAG);
  case ISD::ADDRSPACECAST:
    return lowerADDRSPACECAST(Op, DAG);
  case ISD::INTRINSIC_VOID: {
    SDValue Chain = Op.getOperand(0);
    unsigned IntrinsicID = Op.getConstantOperandVal(1);
    switch (IntrinsicID) {
    case Intrinsic::r600_store_swizzle: {
      SDLoc DL(Op);
      const SDValue Args[8] = {
        Chain,
        Op.getOperand(2), // Export Value
        Op.getOperand(3), // ArrayBase
        Op.getOperand(4), // Type
        DAG.getConstant(0, DL, MVT::i32), // SWZ_X
        DAG.getConstant(1, DL, MVT::i32), // SWZ_Y
        DAG.getConstant(2, DL, MVT::i32), // SWZ_Z
        DAG.getConstant(3, DL, MVT::i32) // SWZ_W
      };
      return DAG.getNode(AMDGPUISD::R600_EXPORT, DL, Op.getValueType(), Args);
    }

    // default for switch(IntrinsicID)
    default: break;
    }
    // break out of case ISD::INTRINSIC_VOID in switch(Op.getOpcode())
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID = Op.getConstantOperandVal(0);
    EVT VT = Op.getValueType();
    SDLoc DL(Op);
    switch (IntrinsicID) {
    case Intrinsic::r600_tex:
    case Intrinsic::r600_texc: {
      unsigned TextureOp;
      switch (IntrinsicID) {
      case Intrinsic::r600_tex:
        TextureOp = 0;
        break;
      case Intrinsic::r600_texc:
        TextureOp = 1;
        break;
      default:
        llvm_unreachable("unhandled texture operation");
      }

      SDValue TexArgs[19] = {
        DAG.getConstant(TextureOp, DL, MVT::i32),
        Op.getOperand(1),
        DAG.getConstant(0, DL, MVT::i32),
        DAG.getConstant(1, DL, MVT::i32),
        DAG.getConstant(2, DL, MVT::i32),
        DAG.getConstant(3, DL, MVT::i32),
        Op.getOperand(2),
        Op.getOperand(3),
        Op.getOperand(4),
        DAG.getConstant(0, DL, MVT::i32),
        DAG.getConstant(1, DL, MVT::i32),
        DAG.getConstant(2, DL, MVT::i32),
        DAG.getConstant(3, DL, MVT::i32),
        Op.getOperand(5),
        Op.getOperand(6),
        Op.getOperand(7),
        Op.getOperand(8),
        Op.getOperand(9),
        Op.getOperand(10)
      };
      return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs);
    }
    case Intrinsic::r600_dot4: {
      SDValue Args[8] = {
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
          DAG.getConstant(0, DL, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
          DAG.getConstant(0, DL, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
          DAG.getConstant(1, DL, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
          DAG.getConstant(1, DL, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
          DAG.getConstant(2, DL, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
          DAG.getConstant(2, DL, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
          DAG.getConstant(3, DL, MVT::i32)),
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
          DAG.getConstant(3, DL, MVT::i32))
      };
      return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args);
    }

    case Intrinsic::r600_implicitarg_ptr: {
      MVT PtrVT = getPointerTy(DAG.getDataLayout(), AMDGPUAS::PARAM_I_ADDRESS);
      uint32_t ByteOffset = getImplicitParameterOffset(MF, FIRST_IMPLICIT);
      return DAG.getConstant(ByteOffset, DL, PtrVT);
    }
    case Intrinsic::r600_read_ngroups_x:
      return LowerImplicitParameter(DAG, VT, DL, 0);
    case Intrinsic::r600_read_ngroups_y:
      return LowerImplicitParameter(DAG, VT, DL, 1);
    case Intrinsic::r600_read_ngroups_z:
      return LowerImplicitParameter(DAG, VT, DL, 2);
    case Intrinsic::r600_read_global_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 3);
    case Intrinsic::r600_read_global_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 4);
    case Intrinsic::r600_read_global_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 5);
    case Intrinsic::r600_read_local_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 6);
    case Intrinsic::r600_read_local_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 7);
    case Intrinsic::r600_read_local_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 8);

    case Intrinsic::r600_read_tgid_x:
    case Intrinsic::amdgcn_workgroup_id_x:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T1_X, VT);
    case Intrinsic::r600_read_tgid_y:
    case Intrinsic::amdgcn_workgroup_id_y:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T1_Y, VT);
    case Intrinsic::r600_read_tgid_z:
    case Intrinsic::amdgcn_workgroup_id_z:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T1_Z, VT);
    case Intrinsic::r600_read_tidig_x:
    case Intrinsic::amdgcn_workitem_id_x:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T0_X, VT);
    case Intrinsic::r600_read_tidig_y:
    case Intrinsic::amdgcn_workitem_id_y:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T0_Y, VT);
    case Intrinsic::r600_read_tidig_z:
    case Intrinsic::amdgcn_workitem_id_z:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T0_Z, VT);

    case Intrinsic::r600_recipsqrt_ieee:
      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

    case Intrinsic::r600_recipsqrt_clamped:
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
    default:
      return Op;
    }

    // break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
    break;
  }
  } // end switch(Op.getOpcode())
  return SDValue();
}

void R600TargetLowering::ReplaceNodeResults(SDNode *N,
                                            SmallVectorImpl<SDValue> &Results,
                                            SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    AMDGPUTargetLowering::ReplaceNodeResults(N, Results, DAG);
    return;
  case ISD::FP_TO_UINT:
    if (N->getValueType(0) == MVT::i1) {
      Results.push_back(lowerFP_TO_UINT(N->getOperand(0), DAG));
      return;
    }
    // Since we don't care about out of bounds values we can use FP_TO_SINT for
    // uints too. The DAGLegalizer code for uint considers some extra cases
    // which are not necessary here.
    [[fallthrough]];
  case ISD::FP_TO_SINT: {
    if (N->getValueType(0) == MVT::i1) {
      Results.push_back(lowerFP_TO_SINT(N->getOperand(0), DAG));
      return;
    }

    SDValue Result;
    if (expandFP_TO_SINT(N, Result, DAG))
      Results.push_back(Result);
    return;
  }
  case ISD::SDIVREM: {
    SDValue Op = SDValue(N, 1);
    SDValue RES = LowerSDIVREM(Op, DAG);
    Results.push_back(RES);
    Results.push_back(RES.getValue(1));
    break;
  }
  case ISD::UDIVREM: {
    SDValue Op = SDValue(N, 0);
    LowerUDIVREM64(Op, DAG, Results);
    break;
  }
  }
}

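// Rebuild \p Vector as an AMDGPUISD::BUILD_VERTICAL_VECTOR by extracting all
// of its elements, so that the dynamically indexed extract/insert lowerings
// below can be matched with indirect addressing.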
SDValue R600TargetLowering::vectorToVerticalVector(SelectionDAG &DAG,
                                                   SDValue Vector) const {
  SDLoc DL(Vector);
  EVT VecVT = Vector.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  SmallVector<SDValue, 8> Args;

  for (unsigned i = 0, e = VecVT.getVectorNumElements(); i != e; ++i) {
    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vector,
                               DAG.getVectorIdxConstant(i, DL)));
  }

  return DAG.getNode(AMDGPUISD::BUILD_VERTICAL_VECTOR, DL, VecVT, Args);
}

SDValue R600TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Vector = Op.getOperand(0);
  SDValue Index = Op.getOperand(1);

  if (isa<ConstantSDNode>(Index) ||
      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
    return Op;

  Vector = vectorToVerticalVector(DAG, Vector);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(),
                     Vector, Index);
}

SDValue R600TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Vector = Op.getOperand(0);
  SDValue Value = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);

  if (isa<ConstantSDNode>(Index) ||
      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
    return Op;

  Vector = vectorToVerticalVector(DAG, Vector);
  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, Op.getValueType(),
                               Vector, Value, Index);
  return vectorToVerticalVector(DAG, Insert);
}

SDValue R600TargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                               SDValue Op,
                                               SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
  if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  const DataLayout &DL = DAG.getDataLayout();
  const GlobalValue *GV = GSD->getGlobal();
  MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);

  SDValue GA = DAG.getTargetGlobalAddress(GV, SDLoc(GSD), ConstPtrVT);
  return DAG.getNode(AMDGPUISD::CONST_DATA_PTR, SDLoc(GSD), ConstPtrVT, GA);
}

SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  // On hw >= R700, COS/SIN input must be between -1.0 and 1.0.
  // Thus we lower them to TRIG(FRACT(x / 2Pi + 0.5) - 0.5).
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Should this propagate fast-math-flags?
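  // FractPart = FRACT(x * 1/(2*Pi) + 0.5); the magic constant below is an
  // approximation of 1/(2*Pi) ~= 0.15915494309.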
  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
      DAG.getNode(ISD::FADD, DL, VT,
        DAG.getNode(ISD::FMUL, DL, VT, Arg,
          DAG.getConstantFP(0.15915494309, DL, MVT::f32)),
        DAG.getConstantFP(0.5, DL, MVT::f32)));
  unsigned TrigNode;
  switch (Op.getOpcode()) {
  case ISD::FCOS:
    TrigNode = AMDGPUISD::COS_HW;
    break;
  case ISD::FSIN:
    TrigNode = AMDGPUISD::SIN_HW;
    break;
  default:
    llvm_unreachable("Wrong trig opcode");
  }
  SDValue TrigVal = DAG.getNode(TrigNode, DL, VT,
      DAG.getNode(ISD::FADD, DL, VT, FractPart,
        DAG.getConstantFP(-0.5, DL, MVT::f32)));
  if (Gen >= AMDGPUSubtarget::R700)
    return TrigVal;
  // On R600 hw, COS/SIN input must be between -Pi and Pi.
  return DAG.getNode(ISD::FMUL, DL, VT, TrigVal,
      DAG.getConstantFP(numbers::pif, DL, MVT::f32));
}

SDValue R600TargetLowering::LowerShiftParts(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Lo, Hi;
  expandShiftParts(Op.getNode(), Lo, Hi, DAG);
  return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
}

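// Lower UADDO/USUBO to the plain ADD/SUB result paired with a CARRY/BORROW
// node whose single-bit result is sign-extended to match the target's
// ZeroOrNegativeOneBooleanContent.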
SDValue R600TargetLowering::LowerUADDSUBO(SDValue Op, SelectionDAG &DAG,
                                          unsigned mainop, unsigned ovf) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);

  SDValue OVF = DAG.getNode(ovf, DL, VT, Lo, Hi);
  // Extend sign.
  OVF = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, OVF,
                    DAG.getValueType(MVT::i1));

  SDValue Res = DAG.getNode(mainop, DL, VT, Lo, Hi);

  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT, VT), Res, OVF);
}

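// Conversions of an f32 boolean to i1 only need to distinguish two values:
// the unsigned case compares against the hardware true value 1.0f, the
// signed variant below against -1.0f.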
SDValue R600TargetLowering::lowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(
      ISD::SETCC,
      DL,
      MVT::i1,
      Op, DAG.getConstantFP(1.0f, DL, MVT::f32),
      DAG.getCondCode(ISD::SETEQ));
}

SDValue R600TargetLowering::lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(
      ISD::SETCC,
      DL,
      MVT::i1,
      Op, DAG.getConstantFP(-1.0f, DL, MVT::f32),
      DAG.getCondCode(ISD::SETEQ));
}

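// Implicit kernel parameters (ngroups, global size, local size) live at fixed
// dword offsets in the PARAM_I constant address space; the intrinsic lowering
// above passes DwordOffset values 0 through 8 in the order listed there.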
SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
                                                   const SDLoc &DL,
                                                   unsigned DwordOffset) const {
  unsigned ByteOffset = DwordOffset * 4;
  PointerType *PtrType =
      PointerType::get(*DAG.getContext(), AMDGPUAS::PARAM_I_ADDRESS);

  // We shouldn't be using an offset wider than 16-bits for implicit parameters.
  assert(isInt<16>(ByteOffset));

  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                     DAG.getConstant(ByteOffset, DL, MVT::i32), // PTR
                     MachinePointerInfo(ConstantPointerNull::get(PtrType)));
}

bool R600TargetLowering::isZero(SDValue Op) const {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op))
    return Cst->isZero();
  if (ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op))
    return CstFP->isZero();
  return false;
}

bool R600TargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  return isAllOnesConstant(Op);
}

bool R600TargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  return isNullConstant(Op);
}

SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  SDValue Temp;

  if (VT == MVT::f32) {
    DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
    SDValue MinMax = combineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
    if (MinMax)
      return MinMax;
  }

  // LHS and RHS are guaranteed to be the same value type
  EVT CompareVT = LHS.getValueType();

  // Check if we can lower this to a native operation.

  // Try to lower to a SET* instruction:
  //
  // SET* can match the following patterns:
  //
  // select_cc f32, f32, -1,  0, cc_supported
  // select_cc f32, f32, 1.0f, 0.0f, cc_supported
  // select_cc i32, i32, -1,  0, cc_supported
  //

  // Move hardware True/False values to the correct operand.
  if (isHWTrueValue(False) && isHWFalseValue(True)) {
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    ISD::CondCode InverseCC = ISD::getSetCCInverse(CCOpcode, CompareVT);
    if (isCondCodeLegal(InverseCC, CompareVT.getSimpleVT())) {
      std::swap(False, True);
      CC = DAG.getCondCode(InverseCC);
    } else {
      ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InverseCC);
      if (isCondCodeLegal(SwapInvCC, CompareVT.getSimpleVT())) {
        std::swap(False, True);
        std::swap(LHS, RHS);
        CC = DAG.getCondCode(SwapInvCC);
      }
    }
  }

  if (isHWTrueValue(True) && isHWFalseValue(False) &&
      (CompareVT == VT || VT == MVT::i32)) {
    // This can be matched by a SET* instruction.
    return DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, True, False, CC);
  }

  // Try to lower to a CND* instruction:
  //
  // CND* can match the following patterns:
  //
  // select_cc f32, 0.0, f32, f32, cc_supported
  // select_cc f32, 0.0, i32, i32, cc_supported
  // select_cc i32, 0,   f32, f32, cc_supported
  // select_cc i32, 0,   i32, i32, cc_supported
  //

  // Try to move the zero value to the RHS
  if (isZero(LHS)) {
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    // Try swapping the operands
    ISD::CondCode CCSwapped = ISD::getSetCCSwappedOperands(CCOpcode);
    if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
      std::swap(LHS, RHS);
      CC = DAG.getCondCode(CCSwapped);
    } else {
      // Try inverting the condition and then swapping the operands
      ISD::CondCode CCInv = ISD::getSetCCInverse(CCOpcode, CompareVT);
      CCSwapped = ISD::getSetCCSwappedOperands(CCInv);
      if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
        std::swap(True, False);
        std::swap(LHS, RHS);
        CC = DAG.getCondCode(CCSwapped);
      }
    }
  }
  if (isZero(RHS)) {
    SDValue Cond = LHS;
    SDValue Zero = RHS;
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    if (CompareVT != VT) {
      // Bitcast True / False to the correct types.  This will end up being
      // a nop, but it allows us to define only a single pattern in the
      // .TD files for each CND* instruction rather than having to have
      // one pattern for integer True/False and one for fp True/False
      True = DAG.getNode(ISD::BITCAST, DL, CompareVT, True);
      False = DAG.getNode(ISD::BITCAST, DL, CompareVT, False);
    }

    switch (CCOpcode) {
    case ISD::SETONE:
    case ISD::SETUNE:
    case ISD::SETNE:
      CCOpcode = ISD::getSetCCInverse(CCOpcode, CompareVT);
      Temp = True;
      True = False;
      False = Temp;
      break;
    default:
      break;
    }
    SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, CompareVT,
        Cond, Zero,
        True, False,
        DAG.getCondCode(CCOpcode));
    return DAG.getNode(ISD::BITCAST, DL, VT, SelectNode);
  }

  // If we make it this far it means we have no native instructions to handle
  // this SELECT_CC, so we must lower it.
  SDValue HWTrue, HWFalse;

  if (CompareVT == MVT::f32) {
    HWTrue = DAG.getConstantFP(1.0f, DL, CompareVT);
    HWFalse = DAG.getConstantFP(0.0f, DL, CompareVT);
  } else if (CompareVT == MVT::i32) {
    HWTrue = DAG.getAllOnesConstant(DL, CompareVT);
    HWFalse = DAG.getConstant(0, DL, CompareVT);
  } else {
    llvm_unreachable("Unhandled value type in LowerSELECT_CC");
  }

  // Lower this unsupported SELECT_CC into a combination of two supported
  // SELECT_CC operations.
  SDValue Cond = DAG.getNode(ISD::SELECT_CC, DL, CompareVT, LHS, RHS, HWTrue, HWFalse, CC);

  return DAG.getNode(ISD::SELECT_CC, DL, VT,
      Cond, HWFalse,
      True, False,
      DAG.getCondCode(ISD::SETNE));
}

SDValue R600TargetLowering::lowerADDRSPACECAST(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc SL(Op);
  EVT VT = Op.getValueType();

  const R600TargetMachine &TM =
      static_cast<const R600TargetMachine &>(getTargetMachine());

  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
  unsigned SrcAS = ASC->getSrcAddressSpace();
  unsigned DestAS = ASC->getDestAddressSpace();

  if (isNullConstant(Op.getOperand(0)) && SrcAS == AMDGPUAS::FLAT_ADDRESS)
    return DAG.getSignedConstant(TM.getNullPointerValue(DestAS), SL, VT);

  return Op;
}

/// LLVM generates byte-addressed pointers.  For indirect addressing, we need to
/// convert these pointers to a register index.  Each register holds
/// 16 bytes, (4 x 32bit sub-register), but we need to take into account the
/// \p StackWidth, which tells us how many of the 4 sub-registers will be used
/// for indirect addressing.
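/// For example, with \p StackWidth == 1 only one of the four sub-registers
/// holds a stack slot, so a byte address maps to a register index with a
/// shift right by 2; with \p StackWidth == 4 all four sub-registers are
/// packed and the shift is 4 (see the switch below).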
SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
                                               unsigned StackWidth,
                                               SelectionDAG &DAG) const {
  unsigned SRLPad;
  switch (StackWidth) {
  case 1:
    SRLPad = 2;
    break;
  case 2:
    SRLPad = 3;
    break;
  case 4:
    SRLPad = 4;
    break;
  default: llvm_unreachable("Invalid stack width");
  }

  SDLoc DL(Ptr);
  return DAG.getNode(ISD::SRL, DL, Ptr.getValueType(), Ptr,
                     DAG.getConstant(SRLPad, DL, MVT::i32));
}

void R600TargetLowering::getStackAddress(unsigned StackWidth,
                                         unsigned ElemIdx,
                                         unsigned &Channel,
                                         unsigned &PtrIncr) const {
  switch (StackWidth) {
  default:
  case 1:
    Channel = 0;
    if (ElemIdx > 0) {
      PtrIncr = 1;
    } else {
      PtrIncr = 0;
    }
    break;
  case 2:
    Channel = ElemIdx % 2;
    if (ElemIdx == 2) {
      PtrIncr = 1;
    } else {
      PtrIncr = 0;
    }
    break;
  case 4:
    Channel = ElemIdx;
    PtrIncr = 0;
    break;
  }
}

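// Private truncating stores are emulated with a read-modify-write sequence:
// load the dword that contains the destination, clear the target byte/word
// with a shifted mask, OR in the shifted value, and store the dword back.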
SDValue R600TargetLowering::lowerPrivateTruncStore(StoreSDNode *Store,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Store);
  //TODO: Who creates the i8 stores?
  assert(Store->isTruncatingStore()
         || Store->getValue().getValueType() == MVT::i8);
  assert(Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS);

  SDValue Mask;
  if (Store->getMemoryVT() == MVT::i8) {
    assert(Store->getAlign() >= 1);
    Mask = DAG.getConstant(0xff, DL, MVT::i32);
  } else if (Store->getMemoryVT() == MVT::i16) {
    assert(Store->getAlign() >= 2);
    Mask = DAG.getConstant(0xffff, DL, MVT::i32);
  } else {
    llvm_unreachable("Unsupported private trunc store");
  }

  SDValue OldChain = Store->getChain();
  bool VectorTrunc = (OldChain.getOpcode() == AMDGPUISD::DUMMY_CHAIN);
  // Skip dummy
  SDValue Chain = VectorTrunc ? OldChain->getOperand(0) : OldChain;
  SDValue BasePtr = Store->getBasePtr();
  SDValue Offset = Store->getOffset();
  EVT MemVT = Store->getMemoryVT();

  SDValue LoadPtr = BasePtr;
  if (!Offset.isUndef()) {
    LoadPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, Offset);
  }

  // Get dword location
  // TODO: this should be eliminated by the future SHR ptr, 2
  SDValue Ptr = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                            DAG.getConstant(0xfffffffc, DL, MVT::i32));

  // Load dword
  // TODO: can we be smarter about machine pointer info?
  MachinePointerInfo PtrInfo(AMDGPUAS::PRIVATE_ADDRESS);
  SDValue Dst = DAG.getLoad(MVT::i32, DL, Chain, Ptr, PtrInfo);

  Chain = Dst.getValue(1);

  // Get offset in dword
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                                DAG.getConstant(0x3, DL, MVT::i32));

  // Convert byte offset to bit shift
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // TODO: Contrary to the name of the function,
  // it also handles sub i32 non-truncating stores (like i1)
  SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                  Store->getValue());

  // Mask the value to the right type
  SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

  // Shift the value in place
  SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                     MaskedValue, ShiftAmt);

  // Shift the mask in place
  SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, Mask, ShiftAmt);

  // Invert the mask. NOTE: if we had native ROL instructions we could
  // use inverted mask
  DstMask = DAG.getNOT(DL, DstMask, MVT::i32);

  // Cleanup the target bits
  Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

  // Add the new bits
  SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);

  // Store dword
  // TODO: Can we be smarter about MachinePointerInfo?
  SDValue NewStore = DAG.getStore(Chain, DL, Value, Ptr, PtrInfo);

  // If we are part of an expanded vector, make our neighbors depend on this
  // store.
  if (VectorTrunc) {
    // Make all other vector elements depend on this store
    Chain = DAG.getNode(AMDGPUISD::DUMMY_CHAIN, DL, MVT::Other, NewStore);
    DAG.ReplaceAllUsesOfValueWith(OldChain, Chain);
  }
  return NewStore;
}

SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
  unsigned AS = StoreNode->getAddressSpace();

  SDValue Chain = StoreNode->getChain();
  SDValue Ptr = StoreNode->getBasePtr();
  SDValue Value = StoreNode->getValue();

  EVT VT = Value.getValueType();
  EVT MemVT = StoreNode->getMemoryVT();
  EVT PtrVT = Ptr.getValueType();

  SDLoc DL(Op);

  const bool TruncatingStore = StoreNode->isTruncatingStore();

  // Neither LOCAL nor PRIVATE can do vectors at the moment
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS ||
       TruncatingStore) &&
      VT.isVector()) {
    if ((AS == AMDGPUAS::PRIVATE_ADDRESS) && TruncatingStore) {
      // Add an extra level of chain to isolate this vector
      SDValue NewChain = DAG.getNode(AMDGPUISD::DUMMY_CHAIN, DL, MVT::Other, Chain);
      // TODO: can the chain be replaced without creating a new store?
      SDValue NewStore = DAG.getTruncStore(
          NewChain, DL, Value, Ptr, StoreNode->getPointerInfo(), MemVT,
          StoreNode->getAlign(), StoreNode->getMemOperand()->getFlags(),
          StoreNode->getAAInfo());
      StoreNode = cast<StoreSDNode>(NewStore);
    }

    return scalarizeVectorStore(StoreNode, DAG);
  }

  Align Alignment = StoreNode->getAlign();
  if (Alignment < MemVT.getStoreSize() &&
      !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
                                      StoreNode->getMemOperand()->getFlags(),
                                      nullptr)) {
    return expandUnalignedStore(StoreNode, DAG);
  }

  SDValue DWordAddr = DAG.getNode(ISD::SRL, DL, PtrVT, Ptr,
                                  DAG.getConstant(2, DL, PtrVT));

  if (AS == AMDGPUAS::GLOBAL_ADDRESS) {
    // It is beneficial to create MSKOR here instead of combiner to avoid
    // artificial dependencies introduced by RMW
    if (TruncatingStore) {
      assert(VT.bitsLE(MVT::i32));
      SDValue MaskConstant;
      if (MemVT == MVT::i8) {
        MaskConstant = DAG.getConstant(0xFF, DL, MVT::i32);
      } else {
        assert(MemVT == MVT::i16);
        assert(StoreNode->getAlign() >= 2);
        MaskConstant = DAG.getConstant(0xFFFF, DL, MVT::i32);
      }

      SDValue ByteIndex = DAG.getNode(ISD::AND, DL, PtrVT, Ptr,
                                      DAG.getConstant(0x00000003, DL, PtrVT));
      SDValue BitShift = DAG.getNode(ISD::SHL, DL, VT, ByteIndex,
                                     DAG.getConstant(3, DL, VT));

      // Put the mask in correct place
      SDValue Mask = DAG.getNode(ISD::SHL, DL, VT, MaskConstant, BitShift);

      // Put the value bits in correct place
      SDValue TruncValue = DAG.getNode(ISD::AND, DL, VT, Value, MaskConstant);
      SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, VT, TruncValue, BitShift);

      // XXX: If we add a 64-bit ZW register class, then we could use a 2 x i32
      // vector instead.
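      // Pack the shifted value (x channel) and the mask (w channel) into one
      // v4i32 for the STORE_MSKOR node, which performs the read-modify-write
      // on the memory side: roughly Dst = (Dst & ~Mask) | ShiftedValue.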
      SDValue Src[4] = {
        ShiftedValue,
        DAG.getConstant(0, DL, MVT::i32),
        DAG.getConstant(0, DL, MVT::i32),
        Mask
      };
      SDValue Input = DAG.getBuildVector(MVT::v4i32, DL, Src);
      SDValue Args[3] = { Chain, Input, DWordAddr };
      return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
                                     Op->getVTList(), Args, MemVT,
                                     StoreNode->getMemOperand());
    }
    if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR && VT.bitsGE(MVT::i32)) {
      // Convert pointer from byte address to dword address.
      Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, PtrVT, DWordAddr);

      if (StoreNode->isIndexed()) {
        llvm_unreachable("Indexed stores not supported yet");
      } else {
        Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
      }
      return Chain;
    }
  }

  // GLOBAL_ADDRESS has been handled above, LOCAL_ADDRESS allows all sizes
  if (AS != AMDGPUAS::PRIVATE_ADDRESS)
    return SDValue();

  if (MemVT.bitsLT(MVT::i32))
    return lowerPrivateTruncStore(StoreNode, DAG);

  // Standard i32+ store, tag it with DWORDADDR to note that the address
  // has been shifted
  if (Ptr.getOpcode() != AMDGPUISD::DWORDADDR) {
    Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, PtrVT, DWordAddr);
    return DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
  }

  // Tagged i32+ stores will be matched by patterns
  return SDValue();
}

// Return 512 + (kc_bank << 12).
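// For example, CONSTANT_BUFFER_2 maps to 512 + 4096 * 2 = 8704.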
static int
ConstantAddressBlock(unsigned AddressSpace) {
  switch (AddressSpace) {
  case AMDGPUAS::CONSTANT_BUFFER_0:
    return 512;
  case AMDGPUAS::CONSTANT_BUFFER_1:
    return 512 + 4096;
  case AMDGPUAS::CONSTANT_BUFFER_2:
    return 512 + 4096 * 2;
  case AMDGPUAS::CONSTANT_BUFFER_3:
    return 512 + 4096 * 3;
  case AMDGPUAS::CONSTANT_BUFFER_4:
    return 512 + 4096 * 4;
  case AMDGPUAS::CONSTANT_BUFFER_5:
    return 512 + 4096 * 5;
  case AMDGPUAS::CONSTANT_BUFFER_6:
    return 512 + 4096 * 6;
  case AMDGPUAS::CONSTANT_BUFFER_7:
    return 512 + 4096 * 7;
  case AMDGPUAS::CONSTANT_BUFFER_8:
    return 512 + 4096 * 8;
  case AMDGPUAS::CONSTANT_BUFFER_9:
    return 512 + 4096 * 9;
  case AMDGPUAS::CONSTANT_BUFFER_10:
    return 512 + 4096 * 10;
  case AMDGPUAS::CONSTANT_BUFFER_11:
    return 512 + 4096 * 11;
  case AMDGPUAS::CONSTANT_BUFFER_12:
    return 512 + 4096 * 12;
  case AMDGPUAS::CONSTANT_BUFFER_13:
    return 512 + 4096 * 13;
  case AMDGPUAS::CONSTANT_BUFFER_14:
    return 512 + 4096 * 14;
  case AMDGPUAS::CONSTANT_BUFFER_15:
    return 512 + 4096 * 15;
  default:
    return -1;
  }
}

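// The read-side counterpart of lowerPrivateTruncStore: load the containing
// dword, shift the addressed byte/word down to bit 0, then sign- or
// zero-extend it according to the extension type.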
SDValue R600TargetLowering::lowerPrivateExtLoad(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();
  assert(Load->getAlign() >= MemVT.getStoreSize());

  SDValue BasePtr = Load->getBasePtr();
  SDValue Chain = Load->getChain();
  SDValue Offset = Load->getOffset();

  SDValue LoadPtr = BasePtr;
  if (!Offset.isUndef()) {
    LoadPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, Offset);
  }

  // Get dword location
  // NOTE: this should be eliminated by the future SHR ptr, 2
  SDValue Ptr = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                            DAG.getConstant(0xfffffffc, DL, MVT::i32));

  // Load dword
  // TODO: can we be smarter about machine pointer info?
  MachinePointerInfo PtrInfo(AMDGPUAS::PRIVATE_ADDRESS);
  SDValue Read = DAG.getLoad(MVT::i32, DL, Chain, Ptr, PtrInfo);

  // Get offset within the register.
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                LoadPtr, DAG.getConstant(0x3, DL, MVT::i32));

  // Bit offset of target byte (byteIdx * 8).
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // Shift to the right.
  SDValue Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Read, ShiftAmt);

  // Eliminate the upper bits by setting them to ...
  EVT MemEltVT = MemVT.getScalarType();

  if (ExtType == ISD::SEXTLOAD) { // ... ones.
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    Ret = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  } else { // ... or zeros.
    Ret = DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
  }

  SDValue Ops[] = {
    Ret,
    Read.getValue(1) // This should be our output chain
  };

  return DAG.getMergeValues(Ops, DL);
}

SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
  unsigned AS = LoadNode->getAddressSpace();
  EVT MemVT = LoadNode->getMemoryVT();
  ISD::LoadExtType ExtType = LoadNode->getExtensionType();

  if (AS == AMDGPUAS::PRIVATE_ADDRESS &&
      ExtType != ISD::NON_EXTLOAD && MemVT.bitsLT(MVT::i32)) {
    return lowerPrivateExtLoad(Op, DAG);
  }

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Chain = LoadNode->getChain();
  SDValue Ptr = LoadNode->getBasePtr();

  if ((LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
      LoadNode->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      VT.isVector()) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LoadNode, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  // This is still used for explicit load from addrspace(8)
  int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
  if (ConstantBlock > -1 &&
      ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
       (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
    SDValue Result;
    if (isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
        isa<ConstantSDNode>(Ptr)) {
      return constBufferLoad(LoadNode, LoadNode->getAddressSpace(), DAG);
    }
    // TODO: Does this even work?
    // A non-constant ptr can't be folded; keep it as a v4f32 load.
    Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
                         DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                                     DAG.getConstant(4, DL, MVT::i32)),
                         DAG.getConstant(LoadNode->getAddressSpace() -
                                             AMDGPUAS::CONSTANT_BUFFER_0,
                                         DL, MVT::i32));

    if (!VT.isVector()) {
      Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
                           DAG.getConstant(0, DL, MVT::i32));
    }

    SDValue MergedValues[2] = {
      Result,
      Chain
    };
    return DAG.getMergeValues(MergedValues, DL);
  }

  // For most operations returning SDValue() will result in the node being
  // expanded by the DAG Legalizer. This is not the case for ISD::LOAD, so we
  // need to manually expand loads that may be legal in some address spaces and
  // illegal in others. SEXT loads from CONSTANT_BUFFER_0 are supported for
  // compute shaders, since the data is sign extended when it is uploaded to the
  // buffer. However SEXT loads from other address spaces are not supported, so
  // we need to expand them here.
  if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
    assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
    SDValue NewLoad = DAG.getExtLoad(
        ISD::EXTLOAD, DL, VT, Chain, Ptr, LoadNode->getPointerInfo(), MemVT,
        LoadNode->getAlign(), LoadNode->getMemOperand()->getFlags());
    SDValue Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, NewLoad,
                              DAG.getValueType(MemVT));

    SDValue MergedValues[2] = { Res, Chain };
    return DAG.getMergeValues(MergedValues, DL);
  }

  if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
    return SDValue();
  }

  // The DWORDADDR ISD node marks an already-shifted address.
1400   if (Ptr.getOpcode() != AMDGPUISD::DWORDADDR) {
1401     assert(VT == MVT::i32);
1402     Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr, DAG.getConstant(2, DL, MVT::i32));
1403     Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, MVT::i32, Ptr);
1404     return DAG.getLoad(MVT::i32, DL, Chain, Ptr, LoadNode->getMemOperand());
1405   }
1406   return SDValue();
1407 }
1408 
1409 SDValue R600TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
1410   SDValue Chain = Op.getOperand(0);
1411   SDValue Cond  = Op.getOperand(1);
1412   SDValue Jump  = Op.getOperand(2);
1413 
1414   return DAG.getNode(AMDGPUISD::BRANCH_COND, SDLoc(Op), Op.getValueType(),
1415                      Chain, Jump, Cond);
1416 }
1417 
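// Frame indexes are lowered to constant byte offsets. Illustrative
// arithmetic, assuming getStackWidth(MF) returns 1 for a scalar stack: a
// fixed offset of 2 yields 2 * 4 * 1 = 8 bytes, i.e. one 4-byte dword per
// slot per stack-width channel.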
1418 SDValue R600TargetLowering::lowerFrameIndex(SDValue Op,
1419                                             SelectionDAG &DAG) const {
1420   MachineFunction &MF = DAG.getMachineFunction();
1421   const R600FrameLowering *TFL = Subtarget->getFrameLowering();
1422 
1423   FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);
1424 
1425   unsigned FrameIndex = FIN->getIndex();
1426   Register IgnoredFrameReg;
1427   StackOffset Offset =
1428       TFL->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
1429   return DAG.getConstant(Offset.getFixed() * 4 * TFL->getStackWidth(MF),
1430                          SDLoc(Op), Op.getValueType());
1431 }
1432 
1433 CCAssignFn *R600TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1434                                                   bool IsVarArg) const {
1435   switch (CC) {
1436   case CallingConv::AMDGPU_KERNEL:
1437   case CallingConv::SPIR_KERNEL:
1438   case CallingConv::C:
1439   case CallingConv::Fast:
1440   case CallingConv::Cold:
1441     llvm_unreachable("kernels should not be handled here");
1442   case CallingConv::AMDGPU_VS:
1443   case CallingConv::AMDGPU_GS:
1444   case CallingConv::AMDGPU_PS:
1445   case CallingConv::AMDGPU_CS:
1446   case CallingConv::AMDGPU_HS:
1447   case CallingConv::AMDGPU_ES:
1448   case CallingConv::AMDGPU_LS:
1449     return CC_R600;
1450   default:
1451     reportFatalUsageError("unsupported calling convention");
1452   }
1453 }
1454 
1455 /// XXX Only kernel functions are supported, so we can assume for now that
1456 /// every function is a kernel function, but in the future we should use
1457 /// separate calling conventions for kernel and non-kernel functions.
1458 SDValue R600TargetLowering::LowerFormalArguments(
1459     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
1460     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1461     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1462   SmallVector<CCValAssign, 16> ArgLocs;
1463   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1464                  *DAG.getContext());
1465   MachineFunction &MF = DAG.getMachineFunction();
1466 
1467   if (AMDGPU::isShader(CallConv)) {
1468     CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
1469   } else {
1470     analyzeFormalArgumentsCompute(CCInfo, Ins);
1471   }
1472 
1473   for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
1474     CCValAssign &VA = ArgLocs[i];
1475     const ISD::InputArg &In = Ins[i];
1476     EVT VT = In.VT;
1477     EVT MemVT = VA.getLocVT();
1478     if (!VT.isVector() && MemVT.isVector()) {
1479       // Get load source type if scalarized.
1480       MemVT = MemVT.getVectorElementType();
1481     }
1482 
1483     if (AMDGPU::isShader(CallConv)) {
1484       Register Reg = MF.addLiveIn(VA.getLocReg(), &R600::R600_Reg128RegClass);
1485       SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
1486       InVals.push_back(Register);
1487       continue;
1488     }
1489 
    // i64 isn't a legal type, so the register type used ends up as i32,
    // which isn't expected here. Argument lowering attempts to create a
    // sextload that ends up being invalid. Somehow this seems to work for
    // i64 arguments, but breaks for <1 x i64>.
1494 
    // The first 36 bytes of the input buffer contain information about the
    // thread group and global sizes.
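    // (Illustratively, the first explicit kernel argument therefore normally
    // lands at PartOffset 36, subject to its own alignment requirements.)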
1497     ISD::LoadExtType Ext = ISD::NON_EXTLOAD;
1498     if (MemVT.getScalarSizeInBits() != VT.getScalarSizeInBits()) {
1499       // FIXME: This should really check the extload type, but the handling of
1500       // extload vector parameters seems to be broken.
1501 
1502       // Ext = In.Flags.isSExt() ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
1503       Ext = ISD::SEXTLOAD;
1504     }
1505 
1506     // Compute the offset from the value.
    // XXX - I think PartOffset should give you this, but it seems to give
    // the size of the register, which isn't useful.
1509 
1510     unsigned PartOffset = VA.getLocMemOffset();
1511     Align Alignment = commonAlignment(Align(VT.getStoreSize()), PartOffset);
1512 
1513     MachinePointerInfo PtrInfo(AMDGPUAS::PARAM_I_ADDRESS);
1514     SDValue Arg = DAG.getLoad(
1515         ISD::UNINDEXED, Ext, VT, DL, Chain,
1516         DAG.getConstant(PartOffset, DL, MVT::i32), DAG.getUNDEF(MVT::i32),
1517         PtrInfo,
        MemVT, Alignment,
        MachineMemOperand::MONonTemporal |
            MachineMemOperand::MODereferenceable |
            MachineMemOperand::MOInvariant);
1521 
1522     InVals.push_back(Arg);
1523   }
1524   return Chain;
1525 }
1526 
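// For example, a setcc on v4f32 operands yields a v4i32 result, matching the
// ZeroOrNegativeOneBooleanContent convention of all-ones lanes for true.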
1527 EVT R600TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1528                                            EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
1532 }
1533 
1534 bool R600TargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
1535                                           const MachineFunction &MF) const {
  // Local and private address spaces do not handle vectors; limit merging to
  // i32 or smaller.
  if (AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS)
    return MemVT.getSizeInBits() <= 32;
1540   return true;
1541 }
1542 
1543 bool R600TargetLowering::allowsMisalignedMemoryAccesses(
1544     EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
1545     unsigned *IsFast) const {
1546   if (IsFast)
1547     *IsFast = 0;
1548 
1549   if (!VT.isSimple() || VT == MVT::Other)
1550     return false;
1551 
1552   if (VT.bitsLT(MVT::i32))
1553     return false;
1554 
1555   // TODO: This is a rough estimate.
1556   if (IsFast)
1557     *IsFast = 1;
1558 
1559   return VT.bitsGT(MVT::i32) && Alignment >= Align(4);
1560 }
1561 
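// Illustrative example of the compaction below: for
//   build_vector x, 0.0, 1.0, x
// the remap records element 1 -> SEL_0 (4), element 2 -> SEL_1 (5), and
// element 3 -> 0 (a duplicate of element 0), so only element 0 remains a
// live operand in the rebuilt vector.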
static SDValue
CompactSwizzlableVector(SelectionDAG &DAG, SDValue VectorEntry,
                        DenseMap<unsigned, unsigned> &RemapSwizzle) {
1565   assert(RemapSwizzle.empty());
1566 
1567   SDLoc DL(VectorEntry);
1568   EVT EltTy = VectorEntry.getValueType().getVectorElementType();
1569 
1570   SDValue NewBldVec[4];
1571   for (unsigned i = 0; i < 4; i++)
1572     NewBldVec[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltTy, VectorEntry,
1573                                DAG.getIntPtrConstant(i, DL));
1574 
1575   for (unsigned i = 0; i < 4; i++) {
1576     if (NewBldVec[i].isUndef())
      // We mask the write here to teach later passes that the ith element of
      // this vector is undef. Later passes can then use this to reduce
      // 128-bit register usage, break false dependencies, and make the
      // assembly easier to read.
1580       RemapSwizzle[i] = 7; // SEL_MASK_WRITE
1581     if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(NewBldVec[i])) {
1582       if (C->isZero()) {
1583         RemapSwizzle[i] = 4; // SEL_0
1584         NewBldVec[i] = DAG.getUNDEF(MVT::f32);
1585       } else if (C->isExactlyValue(1.0)) {
1586         RemapSwizzle[i] = 5; // SEL_1
1587         NewBldVec[i] = DAG.getUNDEF(MVT::f32);
1588       }
1589     }
1590 
1591     if (NewBldVec[i].isUndef())
1592       continue;
1593 
1594     for (unsigned j = 0; j < i; j++) {
1595       if (NewBldVec[i] == NewBldVec[j]) {
1596         NewBldVec[i] = DAG.getUNDEF(NewBldVec[i].getValueType());
1597         RemapSwizzle[i] = j;
1598         break;
1599       }
1600     }
1601   }
1602 
1603   return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
1604                             NewBldVec);
1605 }
1606 
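// Illustrative example of the reordering below: for
//   build_vector (extract_elt v, 1), (extract_elt v, 0)
// neither element sits in its source lane, so the first movable pair is
// swapped and the remap records 0 <-> 1, letting the swizzle perform the
// reordering instead of an actual data move.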
1607 static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
1608                                 DenseMap<unsigned, unsigned> &RemapSwizzle) {
1609   assert(RemapSwizzle.empty());
1610 
1611   SDLoc DL(VectorEntry);
1612   EVT EltTy = VectorEntry.getValueType().getVectorElementType();
1613 
1614   SDValue NewBldVec[4];
1615   bool isUnmovable[4] = {false, false, false, false};
1616   for (unsigned i = 0; i < 4; i++)
1617     NewBldVec[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltTy, VectorEntry,
1618                                DAG.getIntPtrConstant(i, DL));
1619 
1620   for (unsigned i = 0; i < 4; i++) {
1621     RemapSwizzle[i] = i;
1622     if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
1623       unsigned Idx = NewBldVec[i].getConstantOperandVal(1);
1624       if (i == Idx)
1625         isUnmovable[Idx] = true;
1626     }
1627   }
1628 
1629   for (unsigned i = 0; i < 4; i++) {
1630     if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
1631       unsigned Idx = NewBldVec[i].getConstantOperandVal(1);
1632       if (isUnmovable[Idx])
1633         continue;
1634       // Swap i and Idx
1635       std::swap(NewBldVec[Idx], NewBldVec[i]);
1636       std::swap(RemapSwizzle[i], RemapSwizzle[Idx]);
1637       break;
1638     }
1639   }
1640 
1641   return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
1642                             NewBldVec);
1643 }
1644 
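// Two remap passes run below and are folded back into the swizzle operands:
// constants and duplicates are compacted away first, then elements are moved
// toward their source lanes. For example, a Swz entry that
// CompactSwizzlableVector remapped to SEL_0 is rewritten to the constant 4.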
1645 SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
1646                                             SelectionDAG &DAG,
1647                                             const SDLoc &DL) const {
1648   // Old -> New swizzle values
1649   DenseMap<unsigned, unsigned> SwizzleRemap;
1650 
1651   BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
1652   for (unsigned i = 0; i < 4; i++) {
1653     unsigned Idx = Swz[i]->getAsZExtVal();
1654     auto It = SwizzleRemap.find(Idx);
1655     if (It != SwizzleRemap.end())
1656       Swz[i] = DAG.getConstant(It->second, DL, MVT::i32);
1657   }
1658 
1659   SwizzleRemap.clear();
1660   BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
1661   for (unsigned i = 0; i < 4; i++) {
1662     unsigned Idx = Swz[i]->getAsZExtVal();
1663     auto It = SwizzleRemap.find(Idx);
1664     if (It != SwizzleRemap.end())
1665       Swz[i] = DAG.getConstant(It->second, DL, MVT::i32);
1666   }
1667 
1668   return BuildVector;
1669 }
1670 
1671 SDValue R600TargetLowering::constBufferLoad(LoadSDNode *LoadNode, int Block,
1672                                             SelectionDAG &DAG) const {
1673   SDLoc DL(LoadNode);
1674   EVT VT = LoadNode->getValueType(0);
1675   SDValue Chain = LoadNode->getChain();
1676   SDValue Ptr = LoadNode->getBasePtr();
  assert(isa<ConstantSDNode>(Ptr));
1678 
  // TODO: Support smaller loads.
  if (LoadNode->getMemoryVT().getScalarType() != MVT::i32 ||
      !ISD::isNON_EXTLoad(LoadNode))
    return SDValue();
1682 
1683   if (LoadNode->getAlign() < Align(4))
1684     return SDValue();
1685 
1686   int ConstantBlock = ConstantAddressBlock(Block);
1687 
1688   SDValue Slots[4];
1689   for (unsigned i = 0; i < 4; i++) {
    // We want the Const position encoded with the following formula:
    //   (((512 + (kc_bank << 12) + const_index) << 2) + chan)
    // const_index is the Ptr value computed by LLVM using an alignment of 16.
    // Thus we add ((512 + (kc_bank << 12)) + chan) * 4 here and then
    // divide by 4 at the ISel step.
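    // Worked example (illustrative): with kc_bank 0, const_index 2, chan 1,
    // the encoded position is ((512 + 2) << 2) + 1 = 2057. The ADD below
    // builds the byte-scaled form (Ptr + 4 * chan + block * 16), which ISel
    // then divides by 4 as described above.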
1695     SDValue NewPtr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
1696         DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
1697     Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
1698   }
1699   EVT NewVT = MVT::v4i32;
1700   unsigned NumElements = 4;
1701   if (VT.isVector()) {
1702     NewVT = VT;
1703     NumElements = VT.getVectorNumElements();
1704   }
1705   SDValue Result = DAG.getBuildVector(NewVT, DL, ArrayRef(Slots, NumElements));
1706   if (!VT.isVector()) {
1707     Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
1708                          DAG.getConstant(0, DL, MVT::i32));
1709   }
  SDValue MergedValues[2] = {Result, Chain};
1714   return DAG.getMergeValues(MergedValues, DL);
1715 }
1716 
1717 //===----------------------------------------------------------------------===//
1718 // Custom DAG Optimizations
1719 //===----------------------------------------------------------------------===//
1720 
1721 SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
1722                                               DAGCombinerInfo &DCI) const {
1723   SelectionDAG &DAG = DCI.DAG;
1724   SDLoc DL(N);
1725 
1726   switch (N->getOpcode()) {
1727   // (f32 fp_round (f64 uint_to_fp a)) -> (f32 uint_to_fp a)
1728   case ISD::FP_ROUND: {
1729       SDValue Arg = N->getOperand(0);
1730       if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) {
1731         return DAG.getNode(ISD::UINT_TO_FP, DL, N->getValueType(0),
1732                            Arg.getOperand(0));
1733       }
1734       break;
1735     }
1736 
1737   // (i32 fp_to_sint (fneg (select_cc f32, f32, 1.0, 0.0 cc))) ->
1738   // (i32 select_cc f32, f32, -1, 0 cc)
1739   //
1740   // Mesa's GLSL frontend generates the above pattern a lot and we can lower
1741   // this to one of the SET*_DX10 instructions.
1742   case ISD::FP_TO_SINT: {
1743     SDValue FNeg = N->getOperand(0);
1744     if (FNeg.getOpcode() != ISD::FNEG) {
1745       return SDValue();
1746     }
1747     SDValue SelectCC = FNeg.getOperand(0);
1748     if (SelectCC.getOpcode() != ISD::SELECT_CC ||
1749         SelectCC.getOperand(0).getValueType() != MVT::f32 || // LHS
1750         SelectCC.getOperand(2).getValueType() != MVT::f32 || // True
1751         !isHWTrueValue(SelectCC.getOperand(2)) ||
1752         !isHWFalseValue(SelectCC.getOperand(3))) {
1753       return SDValue();
1754     }
1755 
1756     return DAG.getNode(ISD::SELECT_CC, DL, N->getValueType(0),
1757                        SelectCC.getOperand(0),               // LHS
1758                        SelectCC.getOperand(1),               // RHS
1759                        DAG.getAllOnesConstant(DL, MVT::i32), // True
1760                        DAG.getConstant(0, DL, MVT::i32),     // False
1761                        SelectCC.getOperand(4));              // CC
1762   }
1763 
  // insert_vector_elt (build_vector elt0, ..., eltN), InVal, idx
  //   => build_vector elt0, ..., InVal, ..., eltN
1766   case ISD::INSERT_VECTOR_ELT: {
1767     SDValue InVec = N->getOperand(0);
1768     SDValue InVal = N->getOperand(1);
1769     SDValue EltNo = N->getOperand(2);
1770 
1771     // If the inserted element is an UNDEF, just use the input vector.
1772     if (InVal.isUndef())
1773       return InVec;
1774 
1775     EVT VT = InVec.getValueType();
1776 
1777     // If we can't generate a legal BUILD_VECTOR, exit
1778     if (!isOperationLegal(ISD::BUILD_VECTOR, VT))
1779       return SDValue();
1780 
1781     // Check that we know which element is being inserted
1782     if (!isa<ConstantSDNode>(EltNo))
1783       return SDValue();
1784     unsigned Elt = EltNo->getAsZExtVal();
1785 
1786     // Check that the operand is a BUILD_VECTOR (or UNDEF, which can essentially
1787     // be converted to a BUILD_VECTOR).  Fill in the Ops vector with the
1788     // vector elements.
1789     SmallVector<SDValue, 8> Ops;
1790     if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
1791       Ops.append(InVec.getNode()->op_begin(),
1792                  InVec.getNode()->op_end());
1793     } else if (InVec.isUndef()) {
1794       unsigned NElts = VT.getVectorNumElements();
1795       Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
1796     } else {
1797       return SDValue();
1798     }
1799 
1800     // Insert the element
1801     if (Elt < Ops.size()) {
1802       // All the operands of BUILD_VECTOR must have the same type;
1803       // we enforce that here.
1804       EVT OpVT = Ops[0].getValueType();
1805       if (InVal.getValueType() != OpVT)
1806         InVal = OpVT.bitsGT(InVal.getValueType()) ?
1807           DAG.getNode(ISD::ANY_EXTEND, DL, OpVT, InVal) :
1808           DAG.getNode(ISD::TRUNCATE, DL, OpVT, InVal);
1809       Ops[Elt] = InVal;
1810     }
1811 
1812     // Return the new vector
1813     return DAG.getBuildVector(VT, DL, Ops);
1814   }
1815 
  // An extract_vector_elt of a build_vector generated by custom lowering
  // also needs a custom combine.
1818   case ISD::EXTRACT_VECTOR_ELT: {
1819     SDValue Arg = N->getOperand(0);
1820     if (Arg.getOpcode() == ISD::BUILD_VECTOR) {
1821       if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
1822         unsigned Element = Const->getZExtValue();
1823         return Arg->getOperand(Element);
1824       }
1825     }
1826     if (Arg.getOpcode() == ISD::BITCAST &&
1827         Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
1828         (Arg.getOperand(0).getValueType().getVectorNumElements() ==
1829          Arg.getValueType().getVectorNumElements())) {
1830       if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
1831         unsigned Element = Const->getZExtValue();
1832         return DAG.getNode(ISD::BITCAST, DL, N->getVTList(),
1833                            Arg->getOperand(0).getOperand(Element));
1834       }
1835     }
1836     break;
1837   }
1838 
1839   case ISD::SELECT_CC: {
1840     // Try common optimizations
1841     if (SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI))
1842       return Ret;
1843 
1844     // fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
1845     //      selectcc x, y, a, b, inv(cc)
1846     //
1847     // fold selectcc (selectcc x, y, a, b, cc), b, a, b, setne ->
1848     //      selectcc x, y, a, b, cc
1849     SDValue LHS = N->getOperand(0);
1850     if (LHS.getOpcode() != ISD::SELECT_CC) {
1851       return SDValue();
1852     }
1853 
1854     SDValue RHS = N->getOperand(1);
1855     SDValue True = N->getOperand(2);
1856     SDValue False = N->getOperand(3);
1857     ISD::CondCode NCC = cast<CondCodeSDNode>(N->getOperand(4))->get();
1858 
1859     if (LHS.getOperand(2).getNode() != True.getNode() ||
1860         LHS.getOperand(3).getNode() != False.getNode() ||
1861         RHS.getNode() != False.getNode()) {
1862       return SDValue();
1863     }
1864 
1865     switch (NCC) {
1866     default: return SDValue();
1867     case ISD::SETNE: return LHS;
1868     case ISD::SETEQ: {
1869       ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get();
1870       LHSCC = ISD::getSetCCInverse(LHSCC, LHS.getOperand(0).getValueType());
1871       if (DCI.isBeforeLegalizeOps() ||
1872           isCondCodeLegal(LHSCC, LHS.getOperand(0).getSimpleValueType()))
1873         return DAG.getSelectCC(DL,
1874                                LHS.getOperand(0),
1875                                LHS.getOperand(1),
1876                                LHS.getOperand(2),
1877                                LHS.getOperand(3),
1878                                LHSCC);
1879       break;
1880     }
1881     }
1882     return SDValue();
1883   }
1884 
1885   case AMDGPUISD::R600_EXPORT: {
1886     SDValue Arg = N->getOperand(1);
1887     if (Arg.getOpcode() != ISD::BUILD_VECTOR)
1888       break;
1889 
1890     SDValue NewArgs[8] = {
1891       N->getOperand(0), // Chain
1892       SDValue(),
1893       N->getOperand(2), // ArrayBase
1894       N->getOperand(3), // Type
1895       N->getOperand(4), // SWZ_X
1896       N->getOperand(5), // SWZ_Y
1897       N->getOperand(6), // SWZ_Z
1898       N->getOperand(7) // SWZ_W
1899     };
1900     NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG, DL);
1901     return DAG.getNode(AMDGPUISD::R600_EXPORT, DL, N->getVTList(), NewArgs);
1902   }
1903   case AMDGPUISD::TEXTURE_FETCH: {
1904     SDValue Arg = N->getOperand(1);
1905     if (Arg.getOpcode() != ISD::BUILD_VECTOR)
1906       break;
1907 
1908     SDValue NewArgs[19] = {
1909       N->getOperand(0),
1910       N->getOperand(1),
1911       N->getOperand(2),
1912       N->getOperand(3),
1913       N->getOperand(4),
1914       N->getOperand(5),
1915       N->getOperand(6),
1916       N->getOperand(7),
1917       N->getOperand(8),
1918       N->getOperand(9),
1919       N->getOperand(10),
1920       N->getOperand(11),
1921       N->getOperand(12),
1922       N->getOperand(13),
1923       N->getOperand(14),
1924       N->getOperand(15),
1925       N->getOperand(16),
1926       N->getOperand(17),
1927       N->getOperand(18),
1928     };
1929     NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG, DL);
1930     return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, N->getVTList(), NewArgs);
1931   }
1932 
1933   case ISD::LOAD: {
1934     LoadSDNode *LoadNode = cast<LoadSDNode>(N);
1935     SDValue Ptr = LoadNode->getBasePtr();
1936     if (LoadNode->getAddressSpace() == AMDGPUAS::PARAM_I_ADDRESS &&
1937          isa<ConstantSDNode>(Ptr))
1938       return constBufferLoad(LoadNode, AMDGPUAS::CONSTANT_BUFFER_0, DAG);
1939     break;
1940   }
1941 
1942   default: break;
1943   }
1944 
1945   return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
1946 }
1947 
1948 bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx,
1949                                      SDValue &Src, SDValue &Neg, SDValue &Abs,
1950                                      SDValue &Sel, SDValue &Imm,
1951                                      SelectionDAG &DAG) const {
1952   const R600InstrInfo *TII = Subtarget->getInstrInfo();
1953   if (!Src.isMachineOpcode())
1954     return false;
1955 
1956   switch (Src.getMachineOpcode()) {
1957   case R600::FNEG_R600:
1958     if (!Neg.getNode())
1959       return false;
1960     Src = Src.getOperand(0);
1961     Neg = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
1962     return true;
1963   case R600::FABS_R600:
1964     if (!Abs.getNode())
1965       return false;
1966     Src = Src.getOperand(0);
1967     Abs = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
1968     return true;
1969   case R600::CONST_COPY: {
1970     unsigned Opcode = ParentNode->getMachineOpcode();
1971     bool HasDst = TII->getOperandIdx(Opcode, R600::OpName::dst) > -1;
1972 
1973     if (!Sel.getNode())
1974       return false;
1975 
1976     SDValue CstOffset = Src.getOperand(0);
1977     if (ParentNode->getValueType(0).isVector())
1978       return false;
1979 
    // Gather constant values.
1981     int SrcIndices[] = {
1982       TII->getOperandIdx(Opcode, R600::OpName::src0),
1983       TII->getOperandIdx(Opcode, R600::OpName::src1),
1984       TII->getOperandIdx(Opcode, R600::OpName::src2),
1985       TII->getOperandIdx(Opcode, R600::OpName::src0_X),
1986       TII->getOperandIdx(Opcode, R600::OpName::src0_Y),
1987       TII->getOperandIdx(Opcode, R600::OpName::src0_Z),
1988       TII->getOperandIdx(Opcode, R600::OpName::src0_W),
1989       TII->getOperandIdx(Opcode, R600::OpName::src1_X),
1990       TII->getOperandIdx(Opcode, R600::OpName::src1_Y),
1991       TII->getOperandIdx(Opcode, R600::OpName::src1_Z),
1992       TII->getOperandIdx(Opcode, R600::OpName::src1_W)
1993     };
1994     std::vector<unsigned> Consts;
1995     for (int OtherSrcIdx : SrcIndices) {
1996       int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
1997       if (OtherSrcIdx < 0 || OtherSelIdx < 0)
1998         continue;
1999       if (HasDst) {
2000         OtherSrcIdx--;
2001         OtherSelIdx--;
2002       }
2003       if (RegisterSDNode *Reg =
2004           dyn_cast<RegisterSDNode>(ParentNode->getOperand(OtherSrcIdx))) {
2005         if (Reg->getReg() == R600::ALU_CONST) {
2006           Consts.push_back(ParentNode->getConstantOperandVal(OtherSelIdx));
2007         }
2008       }
2009     }
2010 
2011     ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
2012     Consts.push_back(Cst->getZExtValue());
2013     if (!TII->fitsConstReadLimitations(Consts)) {
2014       return false;
2015     }
2016 
2017     Sel = CstOffset;
2018     Src = DAG.getRegister(R600::ALU_CONST, MVT::f32);
2019     return true;
2020   }
2021   case R600::MOV_IMM_GLOBAL_ADDR:
    // Check whether the Imm slot is already in use; this mirrors the
    // MOV_IMM_* handling below.
2023     if (Imm->getAsZExtVal())
2024       return false;
2025     Imm = Src.getOperand(0);
2026     Src = DAG.getRegister(R600::ALU_LITERAL_X, MVT::i32);
2027     return true;
2028   case R600::MOV_IMM_I32:
2029   case R600::MOV_IMM_F32: {
2030     unsigned ImmReg = R600::ALU_LITERAL_X;
2031     uint64_t ImmValue = 0;
2032 
2033     if (Src.getMachineOpcode() == R600::MOV_IMM_F32) {
2034       ConstantFPSDNode *FPC = cast<ConstantFPSDNode>(Src.getOperand(0));
2035       float FloatValue = FPC->getValueAPF().convertToFloat();
2036       if (FloatValue == 0.0) {
2037         ImmReg = R600::ZERO;
2038       } else if (FloatValue == 0.5) {
2039         ImmReg = R600::HALF;
2040       } else if (FloatValue == 1.0) {
2041         ImmReg = R600::ONE;
2042       } else {
2043         ImmValue = FPC->getValueAPF().bitcastToAPInt().getZExtValue();
2044       }
2045     } else {
2046       uint64_t Value = Src.getConstantOperandVal(0);
2047       if (Value == 0) {
2048         ImmReg = R600::ZERO;
2049       } else if (Value == 1) {
2050         ImmReg = R600::ONE_INT;
2051       } else {
2052         ImmValue = Value;
2053       }
2054     }
2055 
2056     // Check that we aren't already using an immediate.
2057     // XXX: It's possible for an instruction to have more than one
2058     // immediate operand, but this is not supported yet.
2059     if (ImmReg == R600::ALU_LITERAL_X) {
2060       if (!Imm.getNode())
2061         return false;
2062       ConstantSDNode *C = cast<ConstantSDNode>(Imm);
2063       if (C->getZExtValue())
2064         return false;
2065       Imm = DAG.getTargetConstant(ImmValue, SDLoc(ParentNode), MVT::i32);
2066     }
2067     Src = DAG.getRegister(ImmReg, MVT::i32);
2068     return true;
2069   }
2070   default:
2071     return false;
2072   }
2073 }
2074 
2075 /// Fold the instructions after selecting them
2076 SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
2077                                             SelectionDAG &DAG) const {
2078   const R600InstrInfo *TII = Subtarget->getInstrInfo();
2079   if (!Node->isMachineOpcode())
2080     return Node;
2081 
2082   unsigned Opcode = Node->getMachineOpcode();
2083   SDValue FakeOp;
2084 
2085   std::vector<SDValue> Ops(Node->op_begin(), Node->op_end());
2086 
2087   if (Opcode == R600::DOT_4) {
2088     int OperandIdx[] = {
2089       TII->getOperandIdx(Opcode, R600::OpName::src0_X),
2090       TII->getOperandIdx(Opcode, R600::OpName::src0_Y),
2091       TII->getOperandIdx(Opcode, R600::OpName::src0_Z),
2092       TII->getOperandIdx(Opcode, R600::OpName::src0_W),
2093       TII->getOperandIdx(Opcode, R600::OpName::src1_X),
2094       TII->getOperandIdx(Opcode, R600::OpName::src1_Y),
2095       TII->getOperandIdx(Opcode, R600::OpName::src1_Z),
2096       TII->getOperandIdx(Opcode, R600::OpName::src1_W)
    };
2098     int NegIdx[] = {
2099       TII->getOperandIdx(Opcode, R600::OpName::src0_neg_X),
2100       TII->getOperandIdx(Opcode, R600::OpName::src0_neg_Y),
2101       TII->getOperandIdx(Opcode, R600::OpName::src0_neg_Z),
2102       TII->getOperandIdx(Opcode, R600::OpName::src0_neg_W),
2103       TII->getOperandIdx(Opcode, R600::OpName::src1_neg_X),
2104       TII->getOperandIdx(Opcode, R600::OpName::src1_neg_Y),
2105       TII->getOperandIdx(Opcode, R600::OpName::src1_neg_Z),
2106       TII->getOperandIdx(Opcode, R600::OpName::src1_neg_W)
2107     };
2108     int AbsIdx[] = {
2109       TII->getOperandIdx(Opcode, R600::OpName::src0_abs_X),
2110       TII->getOperandIdx(Opcode, R600::OpName::src0_abs_Y),
2111       TII->getOperandIdx(Opcode, R600::OpName::src0_abs_Z),
2112       TII->getOperandIdx(Opcode, R600::OpName::src0_abs_W),
2113       TII->getOperandIdx(Opcode, R600::OpName::src1_abs_X),
2114       TII->getOperandIdx(Opcode, R600::OpName::src1_abs_Y),
2115       TII->getOperandIdx(Opcode, R600::OpName::src1_abs_Z),
2116       TII->getOperandIdx(Opcode, R600::OpName::src1_abs_W)
2117     };
2118     for (unsigned i = 0; i < 8; i++) {
2119       if (OperandIdx[i] < 0)
2120         return Node;
2121       SDValue &Src = Ops[OperandIdx[i] - 1];
2122       SDValue &Neg = Ops[NegIdx[i] - 1];
2123       SDValue &Abs = Ops[AbsIdx[i] - 1];
2124       bool HasDst = TII->getOperandIdx(Opcode, R600::OpName::dst) > -1;
2125       int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
2126       if (HasDst)
2127         SelIdx--;
2128       SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
2129       if (FoldOperand(Node, i, Src, Neg, Abs, Sel, FakeOp, DAG))
2130         return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
2131     }
2132   } else if (Opcode == R600::REG_SEQUENCE) {
2133     for (unsigned i = 1, e = Node->getNumOperands(); i < e; i += 2) {
2134       SDValue &Src = Ops[i];
2135       if (FoldOperand(Node, i, Src, FakeOp, FakeOp, FakeOp, FakeOp, DAG))
2136         return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
2137     }
2138   } else {
2139     if (!TII->hasInstrModifiers(Opcode))
2140       return Node;
2141     int OperandIdx[] = {
2142       TII->getOperandIdx(Opcode, R600::OpName::src0),
2143       TII->getOperandIdx(Opcode, R600::OpName::src1),
2144       TII->getOperandIdx(Opcode, R600::OpName::src2)
2145     };
2146     int NegIdx[] = {
2147       TII->getOperandIdx(Opcode, R600::OpName::src0_neg),
2148       TII->getOperandIdx(Opcode, R600::OpName::src1_neg),
2149       TII->getOperandIdx(Opcode, R600::OpName::src2_neg)
2150     };
2151     int AbsIdx[] = {
2152       TII->getOperandIdx(Opcode, R600::OpName::src0_abs),
2153       TII->getOperandIdx(Opcode, R600::OpName::src1_abs),
2154       -1
2155     };
2156     for (unsigned i = 0; i < 3; i++) {
2157       if (OperandIdx[i] < 0)
2158         return Node;
2159       SDValue &Src = Ops[OperandIdx[i] - 1];
2160       SDValue &Neg = Ops[NegIdx[i] - 1];
2161       SDValue FakeAbs;
2162       SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
2163       bool HasDst = TII->getOperandIdx(Opcode, R600::OpName::dst) > -1;
2164       int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
2165       int ImmIdx = TII->getOperandIdx(Opcode, R600::OpName::literal);
2166       if (HasDst) {
2167         SelIdx--;
2168         ImmIdx--;
2169       }
2170       SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
2171       SDValue &Imm = Ops[ImmIdx];
2172       if (FoldOperand(Node, i, Src, Neg, Abs, Sel, Imm, DAG))
2173         return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
2174     }
2175   }
2176 
2177   return Node;
2178 }
2179 
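// For the CmpXChg cases below, the AtomicExpand pass rewrites the RMW into a
// compare-exchange loop; a rough IR sketch (illustrative only):
//   loop:
//     %new = <op> i32 %old, %val
//     %pair = cmpxchg ptr %p, i32 %old, i32 %new seq_cst seq_cst
//     %old.next = extractvalue { i32, i1 } %pair, 0
//     %ok = extractvalue { i32, i1 } %pair, 1
//     br i1 %ok, label %done, label %loop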
2180 TargetLowering::AtomicExpansionKind
2181 R600TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
2182   switch (RMW->getOperation()) {
2183   case AtomicRMWInst::Nand:
2184   case AtomicRMWInst::FAdd:
2185   case AtomicRMWInst::FSub:
2186   case AtomicRMWInst::FMax:
2187   case AtomicRMWInst::FMin:
2188     return AtomicExpansionKind::CmpXChg;
2189   case AtomicRMWInst::UIncWrap:
2190   case AtomicRMWInst::UDecWrap:
2191     // FIXME: Cayman at least appears to have instructions for this, but the
    // instruction definitions appear to be missing.
2193     return AtomicExpansionKind::CmpXChg;
2194   case AtomicRMWInst::Xchg: {
2195     const DataLayout &DL = RMW->getFunction()->getDataLayout();
2196     unsigned ValSize = DL.getTypeSizeInBits(RMW->getType());
2197     if (ValSize == 32 || ValSize == 64)
2198       return AtomicExpansionKind::None;
2199     return AtomicExpansionKind::CmpXChg;
2200   }
2201   default:
2202     if (auto *IntTy = dyn_cast<IntegerType>(RMW->getType())) {
2203       unsigned Size = IntTy->getBitWidth();
2204       if (Size == 32 || Size == 64)
2205         return AtomicExpansionKind::None;
2206     }
2207 
2208     return AtomicExpansionKind::CmpXChg;
2209   }
2210 
2211   llvm_unreachable("covered atomicrmw op switch");
2212 }
2213