//===-- R600ISelLowering.cpp - R600 DAG Lowering Implementation -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Custom DAG lowering for R600
//
//===----------------------------------------------------------------------===//

#include "R600ISelLowering.h"
#include "AMDGPU.h"
#include "MCTargetDesc/R600MCTargetDesc.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600Subtarget.h"
#include "R600TargetMachine.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"

using namespace llvm;

#include "R600GenCallingConv.inc"

R600TargetLowering::R600TargetLowering(const TargetMachine &TM,
                                       const R600Subtarget &STI)
    : AMDGPUTargetLowering(TM, STI), Subtarget(&STI), Gen(STI.getGeneration()) {
  addRegisterClass(MVT::f32, &R600::R600_Reg32RegClass);
  addRegisterClass(MVT::i32, &R600::R600_Reg32RegClass);
  addRegisterClass(MVT::v2f32, &R600::R600_Reg64RegClass);
  addRegisterClass(MVT::v2i32, &R600::R600_Reg64RegClass);
  addRegisterClass(MVT::v4f32, &R600::R600_Reg128RegClass);
  addRegisterClass(MVT::v4i32, &R600::R600_Reg128RegClass);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Legalize loads and stores to the private address space.
  setOperationAction(ISD::LOAD, {MVT::i32, MVT::v2i32, MVT::v4i32}, Custom);

  // EXTLOAD should be the same as ZEXTLOAD. It is legal for some address
  // spaces, so it is custom lowered to handle those where it isn't.
  for (auto Op : {ISD::SEXTLOAD, ISD::ZEXTLOAD, ISD::EXTLOAD})
    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(Op, VT, MVT::i1, Promote);
      setLoadExtAction(Op, VT, MVT::i8, Custom);
      setLoadExtAction(Op, VT, MVT::i16, Custom);
    }

  // Workaround for LegalizeDAG asserting on expansion of i1 vector loads.
  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v2i32,
                   MVT::v2i1, Expand);

  setLoadExtAction({ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD}, MVT::v4i32,
                   MVT::v4i1, Expand);

  setOperationAction(ISD::STORE, {MVT::i8, MVT::i32, MVT::v2i32, MVT::v4i32},
                     Custom);

  setTruncStoreAction(MVT::i32, MVT::i8, Custom);
  setTruncStoreAction(MVT::i32, MVT::i16, Custom);
  // We need to include these since trunc STORES to PRIVATE need
  // special handling to accommodate RMW.
  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Custom);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Custom);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Custom);
  setTruncStoreAction(MVT::v32i32, MVT::v32i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);
  setTruncStoreAction(MVT::v8i32, MVT::v8i8, Custom);
  setTruncStoreAction(MVT::v16i32, MVT::v16i8, Custom);
  setTruncStoreAction(MVT::v32i32, MVT::v32i8, Custom);
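  // For PRIVATE, the Custom sub-dword actions above are implemented as dword
  // read-modify-write sequences (see lowerPrivateExtLoad and
  // lowerPrivateTruncStore below), since private memory on R600 is only
  // dword-addressable; truncating GLOBAL stores instead become STORE_MSKOR
  // nodes in LowerSTORE.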
  // Workaround for LegalizeDAG asserting on expansion of i1 vector stores.
  setTruncStoreAction(MVT::v2i32, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i32, MVT::v4i1, Expand);

  // Set condition code actions
  setCondCodeAction({ISD::SETO, ISD::SETUO, ISD::SETLT, ISD::SETLE, ISD::SETOLT,
                     ISD::SETOLE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGE,
                     ISD::SETUGT, ISD::SETULT, ISD::SETULE},
                    MVT::f32, Expand);

  setCondCodeAction({ISD::SETLE, ISD::SETLT, ISD::SETULE, ISD::SETULT},
                    MVT::i32, Expand);

  setOperationAction({ISD::FCOS, ISD::FSIN}, MVT::f32, Custom);

  setOperationAction(ISD::SETCC, {MVT::v4i32, MVT::v2i32}, Expand);

  setOperationAction(ISD::BR_CC, {MVT::i32, MVT::f32}, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);

  setOperationAction(ISD::FSUB, MVT::f32, Expand);

  setOperationAction({ISD::FCEIL, ISD::FTRUNC, ISD::FROUNDEVEN, ISD::FFLOOR},
                     MVT::f64, Custom);

  setOperationAction(ISD::SELECT_CC, {MVT::f32, MVT::i32}, Custom);

  setOperationAction(ISD::SETCC, {MVT::i32, MVT::f32}, Expand);
  setOperationAction({ISD::FP_TO_UINT, ISD::FP_TO_SINT}, {MVT::i1, MVT::i64},
                     Custom);

  setOperationAction(ISD::SELECT, {MVT::i32, MVT::f32, MVT::v2i32, MVT::v4i32},
                     Expand);

  // ADD, SUB overflow.
  // TODO: turn these into Legal?
  if (Subtarget->hasCARRY())
    setOperationAction(ISD::UADDO, MVT::i32, Custom);

  if (Subtarget->hasBORROW())
    setOperationAction(ISD::USUBO, MVT::i32, Custom);

  // Expand sign extension of vectors.
  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i1, MVT::v4i1}, Expand);

  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i8, MVT::v4i8}, Expand);

  if (!Subtarget->hasBFE())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i16, MVT::v4i16}, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, {MVT::v2i32, MVT::v4i32}, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::Other, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);

  setOperationAction(ISD::EXTRACT_VECTOR_ELT,
                     {MVT::v2i32, MVT::v2f32, MVT::v4i32, MVT::v4f32}, Custom);

  setOperationAction(ISD::INSERT_VECTOR_ELT,
                     {MVT::v2i32, MVT::v2f32, MVT::v4i32, MVT::v4f32}, Custom);

  // We don't have 64-bit shifts. Thus we need either SHX i64 or SHX_PARTS i32
  // to be Legal/Custom in order to avoid library calls.
  setOperationAction({ISD::SHL_PARTS, ISD::SRL_PARTS, ISD::SRA_PARTS}, MVT::i32,
                     Custom);

  if (!Subtarget->hasFMA())
    setOperationAction(ISD::FMA, {MVT::f32, MVT::f64}, Expand);

  // FIXME: May need no denormals check
  setOperationAction(ISD::FMAD, MVT::f32, Legal);

  if (!Subtarget->hasBFI())
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, {MVT::f32, MVT::f64}, Expand);

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  // FIXME: This was moved from AMDGPUTargetLowering, I'm not sure if we
  // need it for R600.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);

  const MVT ScalarIntVTs[] = {MVT::i32, MVT::i64};
  for (MVT VT : ScalarIntVTs)
    setOperationAction({ISD::ADDC, ISD::SUBC, ISD::ADDE, ISD::SUBE}, VT,
                       Expand);

  // LLVM will expand these to atomic_cmp_swap(0)
  // and atomic_swap, respectively.
  setOperationAction({ISD::ATOMIC_LOAD, ISD::ATOMIC_STORE}, MVT::i32, Expand);

  // We need to custom lower some of the intrinsics.
  setOperationAction({ISD::INTRINSIC_VOID, ISD::INTRINSIC_WO_CHAIN}, MVT::Other,
                     Custom);

  setSchedulingPreference(Sched::Source);

  setTargetDAGCombine({ISD::FP_ROUND, ISD::FP_TO_SINT, ISD::EXTRACT_VECTOR_ELT,
                       ISD::SELECT_CC, ISD::INSERT_VECTOR_ELT, ISD::LOAD});
}
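// Exports and cacheless RAT writes carry an "end of program" (EOP) bit that
// must be set on the last such instruction before a RETURN; isEOP peeks at
// the instruction following I to decide whether that bit should be set.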
static inline bool isEOP(MachineBasicBlock::iterator I) {
  if (std::next(I) == I->getParent()->end())
    return false;
  return std::next(I)->getOpcode() == R600::RETURN;
}

MachineBasicBlock *
R600TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock::iterator I = MI;
  const R600InstrInfo *TII = Subtarget->getInstrInfo();

  switch (MI.getOpcode()) {
  default:
    // Replace LDS_*_RET instructions that don't have any uses with the
    // equivalent LDS_*_NORET instruction.
    if (TII->isLDSRetInstr(MI.getOpcode())) {
      int DstIdx = TII->getOperandIdx(MI.getOpcode(), R600::OpName::dst);
      assert(DstIdx != -1);
      MachineInstrBuilder NewMI;
      // FIXME: getLDSNoRetOp method only handles LDS_1A1D LDS ops. Add
      // LDS_1A2D support and remove this special case.
      if (!MRI.use_empty(MI.getOperand(DstIdx).getReg()) ||
          MI.getOpcode() == R600::LDS_CMPST_RET)
        return BB;

      NewMI = BuildMI(*BB, I, BB->findDebugLoc(I),
                      TII->get(R600::getLDSNoRetOp(MI.getOpcode())));
      for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
        NewMI.add(MO);
    } else {
      return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
    }
    break;

  case R600::FABS_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, I, R600::MOV, MI.getOperand(0).getReg(),
        MI.getOperand(1).getReg());
    TII->addFlag(*NewMI, 0, MO_FLAG_ABS);
    break;
  }

  case R600::FNEG_R600: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, I, R600::MOV, MI.getOperand(0).getReg(),
        MI.getOperand(1).getReg());
    TII->addFlag(*NewMI, 0, MO_FLAG_NEG);
    break;
  }

  case R600::MASK_WRITE: {
    Register maskedRegister = MI.getOperand(0).getReg();
    assert(maskedRegister.isVirtual());
    MachineInstr *defInstr = MRI.getVRegDef(maskedRegister);
    TII->addFlag(*defInstr, 0, MO_FLAG_MASK);
    break;
  }

  case R600::MOV_IMM_F32:
    TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(),
                     MI.getOperand(1)
                         .getFPImm()
                         ->getValueAPF()
                         .bitcastToAPInt()
                         .getZExtValue());
    break;

  case R600::MOV_IMM_I32:
    TII->buildMovImm(*BB, I, MI.getOperand(0).getReg(),
                     MI.getOperand(1).getImm());
    break;

  case R600::MOV_IMM_GLOBAL_ADDR: {
    // TODO: Perhaps combine this instruction with the next if possible.
    auto MIB = TII->buildDefaultInstruction(
        *BB, MI, R600::MOV, MI.getOperand(0).getReg(), R600::ALU_LITERAL_X);
    int Idx = TII->getOperandIdx(*MIB, R600::OpName::literal);
    // TODO: Ugh this is rather ugly
    const MachineOperand &MO = MI.getOperand(1);
    MIB->getOperand(Idx).ChangeToGA(MO.getGlobal(), MO.getOffset(),
                                    MO.getTargetFlags());
    break;
  }

  case R600::CONST_COPY: {
    MachineInstr *NewMI = TII->buildDefaultInstruction(
        *BB, MI, R600::MOV, MI.getOperand(0).getReg(), R600::ALU_CONST);
    TII->setImmOperand(*NewMI, R600::OpName::src0_sel,
                       MI.getOperand(1).getImm());
    break;
  }

  case R600::RAT_WRITE_CACHELESS_32_eg:
  case R600::RAT_WRITE_CACHELESS_64_eg:
  case R600::RAT_WRITE_CACHELESS_128_eg:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .addImm(isEOP(I)); // Set End of program bit
    break;

  case R600::RAT_STORE_TYPED_eg:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .addImm(isEOP(I)); // Set End of program bit
    break;

  case R600::BRANCH:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::JUMP))
        .add(MI.getOperand(0));
    break;

  case R600::BRANCH_COND_f32: {
    MachineInstr *NewMI =
        BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::PRED_X),
                R600::PREDICATE_BIT)
            .add(MI.getOperand(1))
            .addImm(R600::PRED_SETNE)
            .addImm(0); // Flags
    TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::JUMP_COND))
        .add(MI.getOperand(0))
        .addReg(R600::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case R600::BRANCH_COND_i32: {
    MachineInstr *NewMI =
        BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::PRED_X),
                R600::PREDICATE_BIT)
            .add(MI.getOperand(1))
            .addImm(R600::PRED_SETNE_INT)
            .addImm(0); // Flags
    TII->addFlag(*NewMI, 0, MO_FLAG_PUSH);
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(R600::JUMP_COND))
        .add(MI.getOperand(0))
        .addReg(R600::PREDICATE_BIT, RegState::Kill);
    break;
  }

  case R600::EG_ExportSwz:
  case R600::R600_ExportSwz: {
    // Instruction is left unmodified if it's not the last one of its type.
    bool isLastInstructionOfItsType = true;
    unsigned InstExportType = MI.getOperand(1).getImm();
    for (MachineBasicBlock::iterator NextExportInst = std::next(I),
                                     EndBlock = BB->end();
         NextExportInst != EndBlock;
         NextExportInst = std::next(NextExportInst)) {
      if (NextExportInst->getOpcode() == R600::EG_ExportSwz ||
          NextExportInst->getOpcode() == R600::R600_ExportSwz) {
        unsigned CurrentInstExportType = NextExportInst->getOperand(1).getImm();
        if (CurrentInstExportType == InstExportType) {
          isLastInstructionOfItsType = false;
          break;
        }
      }
    }
    bool EOP = isEOP(I);
    if (!EOP && !isLastInstructionOfItsType)
      return BB;
    unsigned CfInst = (MI.getOpcode() == R600::EG_ExportSwz) ? 84 : 40;
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(MI.getOpcode()))
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .add(MI.getOperand(3))
        .add(MI.getOperand(4))
        .add(MI.getOperand(5))
        .add(MI.getOperand(6))
        .addImm(CfInst)
        .addImm(EOP);
    break;
  }
  case R600::RETURN: {
    return BB;
  }
  }

  MI.eraseFromParent();
  return BB;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue R600TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
  case ISD::UADDO: return LowerUADDSUBO(Op, DAG, ISD::ADD, AMDGPUISD::CARRY);
  case ISD::USUBO: return LowerUADDSUBO(Op, DAG, ISD::SUB, AMDGPUISD::BORROW);
  case ISD::FCOS:
  case ISD::FSIN: return LowerTrig(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::STORE: return LowerSTORE(Op, DAG);
  case ISD::LOAD: {
    SDValue Result = LowerLOAD(Op, DAG);
    assert((!Result.getNode() ||
            Result.getNode()->getNumValues() == 2) &&
           "Load should return a value and a chain");
    return Result;
  }

  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
  case ISD::FrameIndex: return lowerFrameIndex(Op, DAG);
  case ISD::ADDRSPACECAST:
    return lowerADDRSPACECAST(Op, DAG);
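  // Note: r600.store.swizzle is lowered with the identity swizzle
  // (0,1,2,3 = X,Y,Z,W). OptimizeSwizzle, defined later in this file, can
  // compact such lanes (SEL_0/SEL_1 for constants, lane reuse for duplicates)
  // when a combine invokes it on the export.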
  case ISD::INTRINSIC_VOID: {
    SDValue Chain = Op.getOperand(0);
    unsigned IntrinsicID = Op.getConstantOperandVal(1);
    switch (IntrinsicID) {
    case Intrinsic::r600_store_swizzle: {
      SDLoc DL(Op);
      const SDValue Args[8] = {
          Chain,
          Op.getOperand(2), // Export Value
          Op.getOperand(3), // ArrayBase
          Op.getOperand(4), // Type
          DAG.getConstant(0, DL, MVT::i32), // SWZ_X
          DAG.getConstant(1, DL, MVT::i32), // SWZ_Y
          DAG.getConstant(2, DL, MVT::i32), // SWZ_Z
          DAG.getConstant(3, DL, MVT::i32)  // SWZ_W
      };
      return DAG.getNode(AMDGPUISD::R600_EXPORT, DL, Op.getValueType(), Args);
    }

    // default for switch(IntrinsicID)
    default: break;
    }
    // break out of case ISD::INTRINSIC_VOID in switch(Op.getOpcode())
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID = Op.getConstantOperandVal(0);
    EVT VT = Op.getValueType();
    SDLoc DL(Op);
    switch (IntrinsicID) {
    case Intrinsic::r600_tex:
    case Intrinsic::r600_texc: {
      unsigned TextureOp;
      switch (IntrinsicID) {
      case Intrinsic::r600_tex:
        TextureOp = 0;
        break;
      case Intrinsic::r600_texc:
        TextureOp = 1;
        break;
      default:
        llvm_unreachable("unhandled texture operation");
      }

      SDValue TexArgs[19] = {
          DAG.getConstant(TextureOp, DL, MVT::i32),
          Op.getOperand(1),
          DAG.getConstant(0, DL, MVT::i32),
          DAG.getConstant(1, DL, MVT::i32),
          DAG.getConstant(2, DL, MVT::i32),
          DAG.getConstant(3, DL, MVT::i32),
          Op.getOperand(2),
          Op.getOperand(3),
          Op.getOperand(4),
          DAG.getConstant(0, DL, MVT::i32),
          DAG.getConstant(1, DL, MVT::i32),
          DAG.getConstant(2, DL, MVT::i32),
          DAG.getConstant(3, DL, MVT::i32),
          Op.getOperand(5),
          Op.getOperand(6),
          Op.getOperand(7),
          Op.getOperand(8),
          Op.getOperand(9),
          Op.getOperand(10)
      };
      return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, MVT::v4f32, TexArgs);
    }
    case Intrinsic::r600_dot4: {
      SDValue Args[8] = {
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
                      DAG.getConstant(0, DL, MVT::i32)),
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
                      DAG.getConstant(0, DL, MVT::i32)),
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
                      DAG.getConstant(1, DL, MVT::i32)),
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
                      DAG.getConstant(1, DL, MVT::i32)),
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
                      DAG.getConstant(2, DL, MVT::i32)),
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
                      DAG.getConstant(2, DL, MVT::i32)),
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(1),
                      DAG.getConstant(3, DL, MVT::i32)),
          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Op.getOperand(2),
                      DAG.getConstant(3, DL, MVT::i32))
      };
      return DAG.getNode(AMDGPUISD::DOT4, DL, MVT::f32, Args);
    }
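    // The implicit kernel parameters live at the start of the PARAM_I buffer,
    // one dword each: dwords 0-2 hold ngroups.{x,y,z}, 3-5 hold
    // global_size.{x,y,z}, and 6-8 hold local_size.{x,y,z}.
    // LowerImplicitParameter simply loads the requested dword.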
    case Intrinsic::r600_implicitarg_ptr: {
      MVT PtrVT = getPointerTy(DAG.getDataLayout(), AMDGPUAS::PARAM_I_ADDRESS);
      uint32_t ByteOffset = getImplicitParameterOffset(MF, FIRST_IMPLICIT);
      return DAG.getConstant(ByteOffset, DL, PtrVT);
    }
    case Intrinsic::r600_read_ngroups_x:
      return LowerImplicitParameter(DAG, VT, DL, 0);
    case Intrinsic::r600_read_ngroups_y:
      return LowerImplicitParameter(DAG, VT, DL, 1);
    case Intrinsic::r600_read_ngroups_z:
      return LowerImplicitParameter(DAG, VT, DL, 2);
    case Intrinsic::r600_read_global_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 3);
    case Intrinsic::r600_read_global_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 4);
    case Intrinsic::r600_read_global_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 5);
    case Intrinsic::r600_read_local_size_x:
      return LowerImplicitParameter(DAG, VT, DL, 6);
    case Intrinsic::r600_read_local_size_y:
      return LowerImplicitParameter(DAG, VT, DL, 7);
    case Intrinsic::r600_read_local_size_z:
      return LowerImplicitParameter(DAG, VT, DL, 8);

    case Intrinsic::r600_read_tgid_x:
    case Intrinsic::amdgcn_workgroup_id_x:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T1_X, VT);
    case Intrinsic::r600_read_tgid_y:
    case Intrinsic::amdgcn_workgroup_id_y:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T1_Y, VT);
    case Intrinsic::r600_read_tgid_z:
    case Intrinsic::amdgcn_workgroup_id_z:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T1_Z, VT);
    case Intrinsic::r600_read_tidig_x:
    case Intrinsic::amdgcn_workitem_id_x:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T0_X, VT);
    case Intrinsic::r600_read_tidig_y:
    case Intrinsic::amdgcn_workitem_id_y:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T0_Y, VT);
    case Intrinsic::r600_read_tidig_z:
    case Intrinsic::amdgcn_workitem_id_z:
      return CreateLiveInRegisterRaw(DAG, &R600::R600_TReg32RegClass,
                                     R600::T0_Z, VT);

    case Intrinsic::r600_recipsqrt_ieee:
      return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

    case Intrinsic::r600_recipsqrt_clamped:
      return DAG.getNode(AMDGPUISD::RSQ_CLAMP, DL, VT, Op.getOperand(1));
    default:
      return Op;
    }

    // break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
    break;
  }
  } // end switch(Op.getOpcode())
  return SDValue();
}

void R600TargetLowering::ReplaceNodeResults(SDNode *N,
                                            SmallVectorImpl<SDValue> &Results,
                                            SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    AMDGPUTargetLowering::ReplaceNodeResults(N, Results, DAG);
    return;
  case ISD::FP_TO_UINT:
    if (N->getValueType(0) == MVT::i1) {
      Results.push_back(lowerFP_TO_UINT(N->getOperand(0), DAG));
      return;
    }
    // Since we don't care about out of bounds values we can use FP_TO_SINT for
    // uints too. The DAGLegalizer code for uint considers some extra cases
    // which are not necessary here.
    [[fallthrough]];
  case ISD::FP_TO_SINT: {
    if (N->getValueType(0) == MVT::i1) {
      Results.push_back(lowerFP_TO_SINT(N->getOperand(0), DAG));
      return;
    }

    SDValue Result;
    if (expandFP_TO_SINT(N, Result, DAG))
      Results.push_back(Result);
    return;
  }
  case ISD::SDIVREM: {
    SDValue Op = SDValue(N, 1);
    SDValue RES = LowerSDIVREM(Op, DAG);
    Results.push_back(RES);
    Results.push_back(RES.getValue(1));
    break;
  }
  case ISD::UDIVREM: {
    SDValue Op = SDValue(N, 0);
    LowerUDIVREM64(Op, DAG, Results);
    break;
  }
  }
}

SDValue R600TargetLowering::vectorToVerticalVector(SelectionDAG &DAG,
                                                   SDValue Vector) const {
  SDLoc DL(Vector);
  EVT VecVT = Vector.getValueType();
  EVT EltVT = VecVT.getVectorElementType();
  SmallVector<SDValue, 8> Args;

  for (unsigned i = 0, e = VecVT.getVectorNumElements(); i != e; ++i) {
    Args.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vector,
                               DAG.getVectorIdxConstant(i, DL)));
  }

  return DAG.getNode(AMDGPUISD::BUILD_VERTICAL_VECTOR, DL, VecVT, Args);
}

SDValue R600TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                                    SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Vector = Op.getOperand(0);
  SDValue Index = Op.getOperand(1);

  if (isa<ConstantSDNode>(Index) ||
      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
    return Op;

  Vector = vectorToVerticalVector(DAG, Vector);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getValueType(),
                     Vector, Index);
}

SDValue R600TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Vector = Op.getOperand(0);
  SDValue Value = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);

  if (isa<ConstantSDNode>(Index) ||
      Vector.getOpcode() == AMDGPUISD::BUILD_VERTICAL_VECTOR)
    return Op;

  Vector = vectorToVerticalVector(DAG, Vector);
  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, Op.getValueType(),
                               Vector, Value, Index);
  return vectorToVerticalVector(DAG, Insert);
}

SDValue R600TargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                               SDValue Op,
                                               SelectionDAG &DAG) const {
  GlobalAddressSDNode *GSD = cast<GlobalAddressSDNode>(Op);
  if (GSD->getAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS)
    return AMDGPUTargetLowering::LowerGlobalAddress(MFI, Op, DAG);

  const DataLayout &DL = DAG.getDataLayout();
  const GlobalValue *GV = GSD->getGlobal();
  MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);

  SDValue GA = DAG.getTargetGlobalAddress(GV, SDLoc(GSD), ConstPtrVT);
  return DAG.getNode(AMDGPUISD::CONST_DATA_PTR, SDLoc(GSD), ConstPtrVT, GA);
}

SDValue R600TargetLowering::LowerTrig(SDValue Op, SelectionDAG &DAG) const {
  // On hw >= R700, COS/SIN input must be between -1. and 1.
  // Thus we lower them to TRIG ( FRACT ( x / 2Pi + 0.5) - 0.5)
  EVT VT = Op.getValueType();
  SDValue Arg = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Should this propagate fast-math-flags?
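  // 0.15915494309 is 1/(2*pi): the argument is scaled from radians to
  // revolutions, FRACT(x + 0.5) wraps it into [0, 1), and subtracting 0.5
  // recenters it to [-0.5, 0.5), which is within the input range the
  // hardware accepts.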
  SDValue FractPart = DAG.getNode(AMDGPUISD::FRACT, DL, VT,
      DAG.getNode(ISD::FADD, DL, VT,
        DAG.getNode(ISD::FMUL, DL, VT, Arg,
          DAG.getConstantFP(0.15915494309, DL, MVT::f32)),
        DAG.getConstantFP(0.5, DL, MVT::f32)));
  unsigned TrigNode;
  switch (Op.getOpcode()) {
  case ISD::FCOS:
    TrigNode = AMDGPUISD::COS_HW;
    break;
  case ISD::FSIN:
    TrigNode = AMDGPUISD::SIN_HW;
    break;
  default:
    llvm_unreachable("Wrong trig opcode");
  }
  SDValue TrigVal = DAG.getNode(TrigNode, DL, VT,
      DAG.getNode(ISD::FADD, DL, VT, FractPart,
        DAG.getConstantFP(-0.5, DL, MVT::f32)));
  if (Gen >= AMDGPUSubtarget::R700)
    return TrigVal;
  // On R600 hw, COS/SIN input must be between -Pi and Pi.
  return DAG.getNode(ISD::FMUL, DL, VT, TrigVal,
                     DAG.getConstantFP(numbers::pif, DL, MVT::f32));
}

SDValue R600TargetLowering::LowerShiftParts(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Lo, Hi;
  expandShiftParts(Op.getNode(), Lo, Hi, DAG);
  return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
}

SDValue R600TargetLowering::LowerUADDSUBO(SDValue Op, SelectionDAG &DAG,
                                          unsigned mainop, unsigned ovf) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);

  SDValue OVF = DAG.getNode(ovf, DL, VT, Lo, Hi);
  // Extend sign.
  OVF = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, OVF,
                    DAG.getValueType(MVT::i1));

  SDValue Res = DAG.getNode(mainop, DL, VT, Lo, Hi);

  return DAG.getNode(ISD::MERGE_VALUES, DL, DAG.getVTList(VT, VT), Res, OVF);
}
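// For an i1 result the only inputs with defined fptoui/fptosi behaviour are
// 0.0 and 1.0 (respectively -1.0), so a single floating-point equality
// compare is enough to materialize the boolean.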
SDValue R600TargetLowering::lowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(ISD::SETCC, DL, MVT::i1, Op,
                     DAG.getConstantFP(1.0f, DL, MVT::f32),
                     DAG.getCondCode(ISD::SETEQ));
}

SDValue R600TargetLowering::lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(ISD::SETCC, DL, MVT::i1, Op,
                     DAG.getConstantFP(-1.0f, DL, MVT::f32),
                     DAG.getCondCode(ISD::SETEQ));
}

SDValue R600TargetLowering::LowerImplicitParameter(SelectionDAG &DAG, EVT VT,
                                                   const SDLoc &DL,
                                                   unsigned DwordOffset) const {
  unsigned ByteOffset = DwordOffset * 4;
  PointerType *PtrType = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                          AMDGPUAS::PARAM_I_ADDRESS);

  // We shouldn't be using an offset wider than 16 bits for implicit parameters.
  assert(isInt<16>(ByteOffset));

  return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                     DAG.getConstant(ByteOffset, DL, MVT::i32), // PTR
                     MachinePointerInfo(ConstantPointerNull::get(PtrType)));
}

bool R600TargetLowering::isZero(SDValue Op) const {
  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op))
    return Cst->isZero();
  if (ConstantFPSDNode *CstFP = dyn_cast<ConstantFPSDNode>(Op))
    return CstFP->isZero();
  return false;
}

bool R600TargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  return isAllOnesConstant(Op);
}

bool R600TargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  return isNullConstant(Op);
}

SDValue R600TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  SDValue Temp;

  if (VT == MVT::f32) {
    DAGCombinerInfo DCI(DAG, AfterLegalizeVectorOps, true, nullptr);
    SDValue MinMax = combineFMinMaxLegacy(DL, VT, LHS, RHS, True, False, CC, DCI);
    if (MinMax)
      return MinMax;
  }

  // LHS and RHS are guaranteed to be the same value type.
  EVT CompareVT = LHS.getValueType();

  // Check if we can lower this to a native operation.

  // Try to lower to a SET* instruction:
  //
  // SET* can match the following patterns:
  //
  //   select_cc f32, f32, -1,  0, cc_supported
  //   select_cc f32, f32, 1.0f, 0.0f, cc_supported
  //   select_cc i32, i32, -1,  0, cc_supported
  //

  // Move hardware True/False values to the correct operand.
  if (isHWTrueValue(False) && isHWFalseValue(True)) {
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    ISD::CondCode InverseCC = ISD::getSetCCInverse(CCOpcode, CompareVT);
    if (isCondCodeLegal(InverseCC, CompareVT.getSimpleVT())) {
      std::swap(False, True);
      CC = DAG.getCondCode(InverseCC);
    } else {
      ISD::CondCode SwapInvCC = ISD::getSetCCSwappedOperands(InverseCC);
      if (isCondCodeLegal(SwapInvCC, CompareVT.getSimpleVT())) {
        std::swap(False, True);
        std::swap(LHS, RHS);
        CC = DAG.getCondCode(SwapInvCC);
      }
    }
  }

  if (isHWTrueValue(True) && isHWFalseValue(False) &&
      (CompareVT == VT || VT == MVT::i32)) {
    // This can be matched by a SET* instruction.
    return DAG.getNode(ISD::SELECT_CC, DL, VT, LHS, RHS, True, False, CC);
  }

  // Try to lower to a CND* instruction:
  //
  // CND* can match the following patterns:
  //
  //   select_cc f32, 0.0, f32, f32, cc_supported
  //   select_cc f32, 0.0, i32, i32, cc_supported
  //   select_cc i32, 0,   f32, f32, cc_supported
  //   select_cc i32, 0,   i32, i32, cc_supported
  //

  // Try to move the zero value to the RHS
  if (isZero(LHS)) {
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    // Try swapping the operands
    ISD::CondCode CCSwapped = ISD::getSetCCSwappedOperands(CCOpcode);
    if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
      std::swap(LHS, RHS);
      CC = DAG.getCondCode(CCSwapped);
    } else {
      // Try inverting the condition and then swapping the operands
      ISD::CondCode CCInv = ISD::getSetCCInverse(CCOpcode, CompareVT);
      CCSwapped = ISD::getSetCCSwappedOperands(CCInv);
      if (isCondCodeLegal(CCSwapped, CompareVT.getSimpleVT())) {
        std::swap(True, False);
        std::swap(LHS, RHS);
        CC = DAG.getCondCode(CCSwapped);
      }
    }
  }
  if (isZero(RHS)) {
    SDValue Cond = LHS;
    SDValue Zero = RHS;
    ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
    if (CompareVT != VT) {
      // Bitcast True / False to the correct types. This will end up being
      // a nop, but it allows us to define only a single pattern in the
      // .TD files for each CND* instruction rather than having to have
      // one pattern for integer True/False and one for fp True/False.
      True = DAG.getNode(ISD::BITCAST, DL, CompareVT, True);
      False = DAG.getNode(ISD::BITCAST, DL, CompareVT, False);
    }

    switch (CCOpcode) {
    case ISD::SETONE:
    case ISD::SETUNE:
    case ISD::SETNE:
      CCOpcode = ISD::getSetCCInverse(CCOpcode, CompareVT);
      Temp = True;
      True = False;
      False = Temp;
      break;
    default:
      break;
    }
    SDValue SelectNode = DAG.getNode(ISD::SELECT_CC, DL, CompareVT,
                                     Cond, Zero,
                                     True, False,
                                     DAG.getCondCode(CCOpcode));
    return DAG.getNode(ISD::BITCAST, DL, VT, SelectNode);
  }

  // If we make it this far, it means we have no native instructions to handle
  // this SELECT_CC, so we must lower it.
  SDValue HWTrue, HWFalse;

  if (CompareVT == MVT::f32) {
    HWTrue = DAG.getConstantFP(1.0f, DL, CompareVT);
    HWFalse = DAG.getConstantFP(0.0f, DL, CompareVT);
  } else if (CompareVT == MVT::i32) {
    HWTrue = DAG.getConstant(-1, DL, CompareVT);
    HWFalse = DAG.getConstant(0, DL, CompareVT);
  } else {
    llvm_unreachable("Unhandled value type in LowerSELECT_CC");
  }

  // Lower this unsupported SELECT_CC into a combination of two supported
  // SELECT_CC operations.
  SDValue Cond = DAG.getNode(ISD::SELECT_CC, DL, CompareVT, LHS, RHS,
                             HWTrue, HWFalse, CC);

  return DAG.getNode(ISD::SELECT_CC, DL, VT,
                     Cond, HWFalse,
                     True, False,
                     DAG.getCondCode(ISD::SETNE));
}

SDValue R600TargetLowering::lowerADDRSPACECAST(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc SL(Op);
  EVT VT = Op.getValueType();

  const R600TargetMachine &TM =
      static_cast<const R600TargetMachine &>(getTargetMachine());

  const AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(Op);
  unsigned SrcAS = ASC->getSrcAddressSpace();
  unsigned DestAS = ASC->getDestAddressSpace();

  if (isNullConstant(Op.getOperand(0)) && SrcAS == AMDGPUAS::FLAT_ADDRESS)
    return DAG.getConstant(TM.getNullPointerValue(DestAS), SL, VT);

  return Op;
}

/// LLVM generates byte-addressed pointers. For indirect addressing, we need to
/// convert these pointers to a register index. Each register holds
/// 16 bytes (4 x 32-bit sub-registers), but we need to take into account the
/// \p StackWidth, which tells us how many of the 4 sub-registers will be used
/// for indirect addressing.
SDValue R600TargetLowering::stackPtrToRegIndex(SDValue Ptr,
                                               unsigned StackWidth,
                                               SelectionDAG &DAG) const {
  unsigned SRLPad;
  switch (StackWidth) {
  case 1:
    SRLPad = 2;
    break;
  case 2:
    SRLPad = 3;
    break;
  case 4:
    SRLPad = 4;
    break;
  default: llvm_unreachable("Invalid stack width");
  }

  SDLoc DL(Ptr);
  return DAG.getNode(ISD::SRL, DL, Ptr.getValueType(), Ptr,
                     DAG.getConstant(SRLPad, DL, MVT::i32));
}

void R600TargetLowering::getStackAddress(unsigned StackWidth,
                                         unsigned ElemIdx,
                                         unsigned &Channel,
                                         unsigned &PtrIncr) const {
  switch (StackWidth) {
  default:
  case 1:
    Channel = 0;
    if (ElemIdx > 0) {
      PtrIncr = 1;
    } else {
      PtrIncr = 0;
    }
    break;
  case 2:
    Channel = ElemIdx % 2;
    if (ElemIdx == 2) {
      PtrIncr = 1;
    } else {
      PtrIncr = 0;
    }
    break;
  case 4:
    Channel = ElemIdx;
    PtrIncr = 0;
    break;
  }
}
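// Private memory is dword-addressable only, so a sub-dword store becomes a
// read-modify-write of the containing dword:
//
//   dword = (dword & ~(mask << shift)) | ((value & mask) << shift)
//
// where shift = (addr & 3) * 8. When the store is one lane of a scalarized
// vector trunc-store, the DUMMY_CHAIN node created in LowerSTORE keeps the
// per-lane read-modify-writes ordered with respect to each other.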
SDValue R600TargetLowering::lowerPrivateTruncStore(StoreSDNode *Store,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Store);
  // TODO: Who creates the i8 stores?
  assert(Store->isTruncatingStore()
         || Store->getValue().getValueType() == MVT::i8);
  assert(Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS);

  SDValue Mask;
  if (Store->getMemoryVT() == MVT::i8) {
    assert(Store->getAlign() >= 1);
    Mask = DAG.getConstant(0xff, DL, MVT::i32);
  } else if (Store->getMemoryVT() == MVT::i16) {
    assert(Store->getAlign() >= 2);
    Mask = DAG.getConstant(0xffff, DL, MVT::i32);
  } else {
    llvm_unreachable("Unsupported private trunc store");
  }

  SDValue OldChain = Store->getChain();
  bool VectorTrunc = (OldChain.getOpcode() == AMDGPUISD::DUMMY_CHAIN);
  // Skip dummy
  SDValue Chain = VectorTrunc ? OldChain->getOperand(0) : OldChain;
  SDValue BasePtr = Store->getBasePtr();
  SDValue Offset = Store->getOffset();
  EVT MemVT = Store->getMemoryVT();

  SDValue LoadPtr = BasePtr;
  if (!Offset.isUndef()) {
    LoadPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, Offset);
  }

  // Get dword location
  // TODO: this should be eliminated by the future SHR ptr, 2
  SDValue Ptr = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                            DAG.getConstant(0xfffffffc, DL, MVT::i32));

  // Load dword
  // TODO: can we be smarter about machine pointer info?
  MachinePointerInfo PtrInfo(AMDGPUAS::PRIVATE_ADDRESS);
  SDValue Dst = DAG.getLoad(MVT::i32, DL, Chain, Ptr, PtrInfo);

  Chain = Dst.getValue(1);

  // Get offset in dword
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                                DAG.getConstant(0x3, DL, MVT::i32));

  // Convert byte offset to bit shift
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // TODO: Contrary to the name of the function,
  // it also handles sub i32 non-truncating stores (like i1)
  SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                  Store->getValue());

  // Mask the value to the right type
  SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

  // Shift the value in place
  SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                     MaskedValue, ShiftAmt);

  // Shift the mask in place
  SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32, Mask, ShiftAmt);

  // Invert the mask. NOTE: if we had native ROL instructions we could
  // use inverted mask
  DstMask = DAG.getNOT(DL, DstMask, MVT::i32);

  // Cleanup the target bits
  Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

  // Add the new bits
  SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);

  // Store dword
  // TODO: Can we be smarter about MachinePointerInfo?
  SDValue NewStore = DAG.getStore(Chain, DL, Value, Ptr, PtrInfo);

  // If we are part of expanded vector, make our neighbors depend on this store
  if (VectorTrunc) {
    // Make all other vector elements depend on this store
    Chain = DAG.getNode(AMDGPUISD::DUMMY_CHAIN, DL, MVT::Other, NewStore);
    DAG.ReplaceAllUsesOfValueWith(OldChain, Chain);
  }
  return NewStore;
}
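// LowerSTORE dispatches on address space and memory type: vector and
// truncating LOCAL/PRIVATE stores are scalarized, insufficiently aligned
// stores are expanded, truncating GLOBAL stores become STORE_MSKOR nodes,
// sub-dword PRIVATE stores take the read-modify-write path above, and full
// dword stores are tagged with DWORDADDR for the ISel patterns.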
SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
  unsigned AS = StoreNode->getAddressSpace();

  SDValue Chain = StoreNode->getChain();
  SDValue Ptr = StoreNode->getBasePtr();
  SDValue Value = StoreNode->getValue();

  EVT VT = Value.getValueType();
  EVT MemVT = StoreNode->getMemoryVT();
  EVT PtrVT = Ptr.getValueType();

  SDLoc DL(Op);

  const bool TruncatingStore = StoreNode->isTruncatingStore();

  // Neither LOCAL nor PRIVATE can do vectors at the moment
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS ||
       TruncatingStore) &&
      VT.isVector()) {
    if ((AS == AMDGPUAS::PRIVATE_ADDRESS) && TruncatingStore) {
      // Add an extra level of chain to isolate this vector
      SDValue NewChain =
          DAG.getNode(AMDGPUISD::DUMMY_CHAIN, DL, MVT::Other, Chain);
      // TODO: can the chain be replaced without creating a new store?
      SDValue NewStore = DAG.getTruncStore(
          NewChain, DL, Value, Ptr, StoreNode->getPointerInfo(), MemVT,
          StoreNode->getAlign(), StoreNode->getMemOperand()->getFlags(),
          StoreNode->getAAInfo());
      StoreNode = cast<StoreSDNode>(NewStore);
    }

    return scalarizeVectorStore(StoreNode, DAG);
  }

  Align Alignment = StoreNode->getAlign();
  if (Alignment < MemVT.getStoreSize() &&
      !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment,
                                      StoreNode->getMemOperand()->getFlags(),
                                      nullptr)) {
    return expandUnalignedStore(StoreNode, DAG);
  }

  SDValue DWordAddr = DAG.getNode(ISD::SRL, DL, PtrVT, Ptr,
                                  DAG.getConstant(2, DL, PtrVT));

  if (AS == AMDGPUAS::GLOBAL_ADDRESS) {
    // It is beneficial to create MSKOR here instead of combiner to avoid
    // artificial dependencies introduced by RMW
    if (TruncatingStore) {
      assert(VT.bitsLE(MVT::i32));
      SDValue MaskConstant;
      if (MemVT == MVT::i8) {
        MaskConstant = DAG.getConstant(0xFF, DL, MVT::i32);
      } else {
        assert(MemVT == MVT::i16);
        assert(StoreNode->getAlign() >= 2);
        MaskConstant = DAG.getConstant(0xFFFF, DL, MVT::i32);
      }

      SDValue ByteIndex = DAG.getNode(ISD::AND, DL, PtrVT, Ptr,
                                      DAG.getConstant(0x00000003, DL, PtrVT));
      SDValue BitShift = DAG.getNode(ISD::SHL, DL, VT, ByteIndex,
                                     DAG.getConstant(3, DL, VT));

      // Put the mask in correct place
      SDValue Mask = DAG.getNode(ISD::SHL, DL, VT, MaskConstant, BitShift);

      // Put the value bits in correct place
      SDValue TruncValue = DAG.getNode(ISD::AND, DL, VT, Value, MaskConstant);
      SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, VT, TruncValue, BitShift);

      // XXX: If we add a 64-bit ZW register class, then we could use a 2 x i32
      // vector instead.
      SDValue Src[4] = {
          ShiftedValue,
          DAG.getConstant(0, DL, MVT::i32),
          DAG.getConstant(0, DL, MVT::i32),
          Mask
      };
      SDValue Input = DAG.getBuildVector(MVT::v4i32, DL, Src);
      SDValue Args[3] = { Chain, Input, DWordAddr };
      return DAG.getMemIntrinsicNode(AMDGPUISD::STORE_MSKOR, DL,
                                     Op->getVTList(), Args, MemVT,
                                     StoreNode->getMemOperand());
    }
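    // AMDGPUISD::DWORDADDR marks a pointer that has already been converted
    // from a byte address to a dword index; the store/load patterns in the
    // .td files match on it, so tagged accesses are left for ISel.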
    if (Ptr->getOpcode() != AMDGPUISD::DWORDADDR && VT.bitsGE(MVT::i32)) {
      // Convert pointer from byte address to dword address.
      Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, PtrVT, DWordAddr);

      if (StoreNode->isIndexed()) {
        llvm_unreachable("Indexed stores not supported yet");
      } else {
        Chain = DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
      }
      return Chain;
    }
  }

  // GLOBAL_ADDRESS has been handled above, LOCAL_ADDRESS allows all sizes
  if (AS != AMDGPUAS::PRIVATE_ADDRESS)
    return SDValue();

  if (MemVT.bitsLT(MVT::i32))
    return lowerPrivateTruncStore(StoreNode, DAG);

  // Standard i32+ store, tag it with DWORDADDR to note that the address
  // has been shifted
  if (Ptr.getOpcode() != AMDGPUISD::DWORDADDR) {
    Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, PtrVT, DWordAddr);
    return DAG.getStore(Chain, DL, Value, Ptr, StoreNode->getMemOperand());
  }

  // Tagged i32+ stores will be matched by patterns
  return SDValue();
}

// Returns the base dword address of the constant cache block for the given
// constant-buffer address space: 512 + (kc_bank << 12).
static int ConstantAddressBlock(unsigned AddressSpace) {
  switch (AddressSpace) {
  case AMDGPUAS::CONSTANT_BUFFER_0:
    return 512;
  case AMDGPUAS::CONSTANT_BUFFER_1:
    return 512 + 4096;
  case AMDGPUAS::CONSTANT_BUFFER_2:
    return 512 + 4096 * 2;
  case AMDGPUAS::CONSTANT_BUFFER_3:
    return 512 + 4096 * 3;
  case AMDGPUAS::CONSTANT_BUFFER_4:
    return 512 + 4096 * 4;
  case AMDGPUAS::CONSTANT_BUFFER_5:
    return 512 + 4096 * 5;
  case AMDGPUAS::CONSTANT_BUFFER_6:
    return 512 + 4096 * 6;
  case AMDGPUAS::CONSTANT_BUFFER_7:
    return 512 + 4096 * 7;
  case AMDGPUAS::CONSTANT_BUFFER_8:
    return 512 + 4096 * 8;
  case AMDGPUAS::CONSTANT_BUFFER_9:
    return 512 + 4096 * 9;
  case AMDGPUAS::CONSTANT_BUFFER_10:
    return 512 + 4096 * 10;
  case AMDGPUAS::CONSTANT_BUFFER_11:
    return 512 + 4096 * 11;
  case AMDGPUAS::CONSTANT_BUFFER_12:
    return 512 + 4096 * 12;
  case AMDGPUAS::CONSTANT_BUFFER_13:
    return 512 + 4096 * 13;
  case AMDGPUAS::CONSTANT_BUFFER_14:
    return 512 + 4096 * 14;
  case AMDGPUAS::CONSTANT_BUFFER_15:
    return 512 + 4096 * 15;
  default:
    return -1;
  }
}
SDValue R600TargetLowering::lowerPrivateExtLoad(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT MemVT = Load->getMemoryVT();
  assert(Load->getAlign() >= MemVT.getStoreSize());

  SDValue BasePtr = Load->getBasePtr();
  SDValue Chain = Load->getChain();
  SDValue Offset = Load->getOffset();

  SDValue LoadPtr = BasePtr;
  if (!Offset.isUndef()) {
    LoadPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr, Offset);
  }

  // Get dword location
  // NOTE: this should be eliminated by the future SHR ptr, 2
  SDValue Ptr = DAG.getNode(ISD::AND, DL, MVT::i32, LoadPtr,
                            DAG.getConstant(0xfffffffc, DL, MVT::i32));

  // Load dword
  // TODO: can we be smarter about machine pointer info?
  MachinePointerInfo PtrInfo(AMDGPUAS::PRIVATE_ADDRESS);
  SDValue Read = DAG.getLoad(MVT::i32, DL, Chain, Ptr, PtrInfo);

  // Get offset within the register.
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                LoadPtr, DAG.getConstant(0x3, DL, MVT::i32));

  // Bit offset of target byte (byteIdx * 8).
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // Shift to the right.
  SDValue Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Read, ShiftAmt);

  // Eliminate the upper bits by setting them to ...
  EVT MemEltVT = MemVT.getScalarType();

  if (ExtType == ISD::SEXTLOAD) { // ... ones.
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);
    Ret = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode);
  } else { // ... or zeros.
    Ret = DAG.getZeroExtendInReg(Ret, DL, MemEltVT);
  }

  SDValue Ops[] = {
      Ret,
      Read.getValue(1) // This should be our output chain
  };

  return DAG.getMergeValues(Ops, DL);
}
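// LowerLOAD mirrors LowerSTORE: sub-dword PRIVATE extloads use the dword
// load above, LOCAL/PRIVATE vector loads are scalarized, constant-buffer
// loads become CONST_ADDRESS nodes, and SEXT loads from address spaces other
// than CONSTANT_BUFFER_0 are expanded to EXTLOAD + SIGN_EXTEND_INREG.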
SDValue R600TargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LoadNode = cast<LoadSDNode>(Op);
  unsigned AS = LoadNode->getAddressSpace();
  EVT MemVT = LoadNode->getMemoryVT();
  ISD::LoadExtType ExtType = LoadNode->getExtensionType();

  if (AS == AMDGPUAS::PRIVATE_ADDRESS &&
      ExtType != ISD::NON_EXTLOAD && MemVT.bitsLT(MVT::i32)) {
    return lowerPrivateExtLoad(Op, DAG);
  }

  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Chain = LoadNode->getChain();
  SDValue Ptr = LoadNode->getBasePtr();

  if ((LoadNode->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       LoadNode->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      VT.isVector()) {
    SDValue Ops[2];
    std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LoadNode, DAG);
    return DAG.getMergeValues(Ops, DL);
  }

  // This is still used for explicit load from addrspace(8)
  int ConstantBlock = ConstantAddressBlock(LoadNode->getAddressSpace());
  if (ConstantBlock > -1 &&
      ((LoadNode->getExtensionType() == ISD::NON_EXTLOAD) ||
       (LoadNode->getExtensionType() == ISD::ZEXTLOAD))) {
    SDValue Result;
    if (isa<Constant>(LoadNode->getMemOperand()->getValue()) ||
        isa<ConstantSDNode>(Ptr)) {
      return constBufferLoad(LoadNode, LoadNode->getAddressSpace(), DAG);
    }
    // TODO: Does this even work?
    // non-constant ptr can't be folded, keeps it as a v4f32 load
    Result = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::v4i32,
                         DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                                     DAG.getConstant(4, DL, MVT::i32)),
                         DAG.getConstant(LoadNode->getAddressSpace() -
                                             AMDGPUAS::CONSTANT_BUFFER_0,
                                         DL, MVT::i32));

    if (!VT.isVector()) {
      Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
                           DAG.getConstant(0, DL, MVT::i32));
    }

    SDValue MergedValues[2] = {
        Result,
        Chain
    };
    return DAG.getMergeValues(MergedValues, DL);
  }

  // For most operations returning SDValue() will result in the node being
  // expanded by the DAG Legalizer. This is not the case for ISD::LOAD, so we
  // need to manually expand loads that may be legal in some address spaces and
  // illegal in others. SEXT loads from CONSTANT_BUFFER_0 are supported for
  // compute shaders, since the data is sign extended when it is uploaded to
  // the buffer. However SEXT loads from other address spaces are not
  // supported, so we need to expand them here.
  if (LoadNode->getExtensionType() == ISD::SEXTLOAD) {
    assert(!MemVT.isVector() && (MemVT == MVT::i16 || MemVT == MVT::i8));
    SDValue NewLoad = DAG.getExtLoad(
        ISD::EXTLOAD, DL, VT, Chain, Ptr, LoadNode->getPointerInfo(), MemVT,
        LoadNode->getAlign(), LoadNode->getMemOperand()->getFlags());
    SDValue Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, NewLoad,
                              DAG.getValueType(MemVT));

    SDValue MergedValues[2] = { Res, Chain };
    return DAG.getMergeValues(MergedValues, DL);
  }

  if (LoadNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
    return SDValue();
  }

  // DWORDADDR ISD marks already shifted address
  if (Ptr.getOpcode() != AMDGPUISD::DWORDADDR) {
    assert(VT == MVT::i32);
    Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Ptr,
                      DAG.getConstant(2, DL, MVT::i32));
    Ptr = DAG.getNode(AMDGPUISD::DWORDADDR, DL, MVT::i32, Ptr);
    return DAG.getLoad(MVT::i32, DL, Chain, Ptr, LoadNode->getMemOperand());
  }
  return SDValue();
}

SDValue R600TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Cond = Op.getOperand(1);
  SDValue Jump = Op.getOperand(2);

  return DAG.getNode(AMDGPUISD::BRANCH_COND, SDLoc(Op), Op.getValueType(),
                     Chain, Jump, Cond);
}

SDValue R600TargetLowering::lowerFrameIndex(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  const R600FrameLowering *TFL = Subtarget->getFrameLowering();

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  Register IgnoredFrameReg;
  StackOffset Offset =
      TFL->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
  return DAG.getConstant(Offset.getFixed() * 4 * TFL->getStackWidth(MF),
                         SDLoc(Op), Op.getValueType());
}

CCAssignFn *R600TargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) const {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    llvm_unreachable("kernels should not be handled here");
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_R600;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

/// XXX Only kernel functions are supported, so we can assume for now that
/// every function is a kernel function, but in the future we should use
/// separate calling conventions for kernel and non-kernel functions.
SDValue R600TargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());
  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<ISD::InputArg, 8> LocalIns;

  if (AMDGPU::isShader(CallConv)) {
    CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
  } else {
    analyzeFormalArgumentsCompute(CCInfo, Ins);
  }

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const ISD::InputArg &In = Ins[i];
    EVT VT = In.VT;
    EVT MemVT = VA.getLocVT();
    if (!VT.isVector() && MemVT.isVector()) {
      // Get load source type if scalarized.
      MemVT = MemVT.getVectorElementType();
    }

    if (AMDGPU::isShader(CallConv)) {
      Register Reg = MF.addLiveIn(VA.getLocReg(), &R600::R600_Reg128RegClass);
      SDValue Register = DAG.getCopyFromReg(Chain, DL, Reg, VT);
      InVals.push_back(Register);
      continue;
    }

    // i64 isn't a legal type, so the register type used ends up as i32, which
    // isn't expected here. It attempts to create this sextload, but it ends up
    // being invalid. Somehow this seems to work with i64 arguments, but breaks
    // for <1 x i64>.

    // The first 36 bytes of the input buffer contain information about
    // thread group and global sizes.
    ISD::LoadExtType Ext = ISD::NON_EXTLOAD;
    if (MemVT.getScalarSizeInBits() != VT.getScalarSizeInBits()) {
      // FIXME: This should really check the extload type, but the handling of
      // extload vector parameters seems to be broken.

      // Ext = In.Flags.isSExt() ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
      Ext = ISD::SEXTLOAD;
    }

    // Compute the offset from the value.
    // XXX - I think PartOffset should give you this, but it seems to give the
    // size of the register which isn't useful.

    unsigned PartOffset = VA.getLocMemOffset();
    Align Alignment = commonAlignment(Align(VT.getStoreSize()), PartOffset);

    MachinePointerInfo PtrInfo(AMDGPUAS::PARAM_I_ADDRESS);
    SDValue Arg = DAG.getLoad(
        ISD::UNINDEXED, Ext, VT, DL, Chain,
        DAG.getConstant(PartOffset, DL, MVT::i32), DAG.getUNDEF(MVT::i32),
        PtrInfo,
        MemVT, Alignment, MachineMemOperand::MONonTemporal |
                              MachineMemOperand::MODereferenceable |
                              MachineMemOperand::MOInvariant);

    InVals.push_back(Arg);
  }
  return Chain;
}
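// SET* and CND* produce all-ones / all-zero results (see
// ZeroOrNegativeOneBooleanContent in the constructor), so a scalar condition
// is carried natively in an i32; vector conditions keep their shape with
// integer elements.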
EVT R600TargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                           EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

bool R600TargetLowering::canMergeStoresTo(unsigned AS, EVT MemVT,
                                          const MachineFunction &MF) const {
  // Local and Private addresses do not handle vectors. Limit to i32.
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::PRIVATE_ADDRESS)) {
    return (MemVT.getSizeInBits() <= 32);
  }
  return true;
}

bool R600TargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    unsigned *IsFast) const {
  if (IsFast)
    *IsFast = 0;

  if (!VT.isSimple() || VT == MVT::Other)
    return false;

  if (VT.bitsLT(MVT::i32))
    return false;

  // TODO: This is a rough estimate.
  if (IsFast)
    *IsFast = 1;

  return VT.bitsGT(MVT::i32) && Alignment >= Align(4);
}

static SDValue CompactSwizzlableVector(
    SelectionDAG &DAG, SDValue VectorEntry,
    DenseMap<unsigned, unsigned> &RemapSwizzle) {
  assert(RemapSwizzle.empty());

  SDLoc DL(VectorEntry);
  EVT EltTy = VectorEntry.getValueType().getVectorElementType();

  SDValue NewBldVec[4];
  for (unsigned i = 0; i < 4; i++)
    NewBldVec[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltTy, VectorEntry,
                               DAG.getIntPtrConstant(i, DL));

  for (unsigned i = 0; i < 4; i++) {
    if (NewBldVec[i].isUndef())
      // We mask write here to teach later passes that the ith element of this
      // vector is undef. Thus we can use it to reduce 128 bits reg usage,
      // break false dependencies and additionally make assembly easier to read.
      RemapSwizzle[i] = 7; // SEL_MASK_WRITE
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(NewBldVec[i])) {
      if (C->isZero()) {
        RemapSwizzle[i] = 4; // SEL_0
        NewBldVec[i] = DAG.getUNDEF(MVT::f32);
      } else if (C->isExactlyValue(1.0)) {
        RemapSwizzle[i] = 5; // SEL_1
        NewBldVec[i] = DAG.getUNDEF(MVT::f32);
      }
    }

    if (NewBldVec[i].isUndef())
      continue;

    for (unsigned j = 0; j < i; j++) {
      if (NewBldVec[i] == NewBldVec[j]) {
        NewBldVec[i] = DAG.getUNDEF(NewBldVec[i].getValueType());
        RemapSwizzle[i] = j;
        break;
      }
    }
  }

  return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
                            NewBldVec);
}

static SDValue ReorganizeVector(SelectionDAG &DAG, SDValue VectorEntry,
                                DenseMap<unsigned, unsigned> &RemapSwizzle) {
  assert(RemapSwizzle.empty());

  SDLoc DL(VectorEntry);
  EVT EltTy = VectorEntry.getValueType().getVectorElementType();

  SDValue NewBldVec[4];
  bool isUnmovable[4] = {false, false, false, false};
  for (unsigned i = 0; i < 4; i++)
    NewBldVec[i] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltTy, VectorEntry,
                               DAG.getIntPtrConstant(i, DL));

  for (unsigned i = 0; i < 4; i++) {
    RemapSwizzle[i] = i;
    if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
      unsigned Idx = NewBldVec[i].getConstantOperandVal(1);
      if (i == Idx)
        isUnmovable[Idx] = true;
    }
  }

  for (unsigned i = 0; i < 4; i++) {
    if (NewBldVec[i].getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
      unsigned Idx = NewBldVec[i].getConstantOperandVal(1);
      if (isUnmovable[Idx])
        continue;
      // Swap i and Idx
      std::swap(NewBldVec[Idx], NewBldVec[i]);
      std::swap(RemapSwizzle[i], RemapSwizzle[Idx]);
      break;
    }
  }

  return DAG.getBuildVector(VectorEntry.getValueType(), SDLoc(VectorEntry),
                            NewBldVec);
}
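// OptimizeSwizzle runs the two rewrites above in sequence:
// CompactSwizzlableVector folds constant 0.0/1.0 lanes to SEL_0/SEL_1 and
// duplicate lanes onto a single source; ReorganizeVector then moves each
// extract_vector_elt lane back to the channel it was extracted from. After
// each pass the swizzle operands Swz[] are remapped accordingly.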
SDValue R600TargetLowering::OptimizeSwizzle(SDValue BuildVector, SDValue Swz[],
                                            SelectionDAG &DAG,
                                            const SDLoc &DL) const {
  // Old -> New swizzle values.
  DenseMap<unsigned, unsigned> SwizzleRemap;

  BuildVector = CompactSwizzlableVector(DAG, BuildVector, SwizzleRemap);
  for (unsigned i = 0; i < 4; i++) {
    unsigned Idx = Swz[i]->getAsZExtVal();
    if (SwizzleRemap.contains(Idx))
      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
  }

  SwizzleRemap.clear();
  BuildVector = ReorganizeVector(DAG, BuildVector, SwizzleRemap);
  for (unsigned i = 0; i < 4; i++) {
    unsigned Idx = Swz[i]->getAsZExtVal();
    if (SwizzleRemap.contains(Idx))
      Swz[i] = DAG.getConstant(SwizzleRemap[Idx], DL, MVT::i32);
  }

  return BuildVector;
}

SDValue R600TargetLowering::constBufferLoad(LoadSDNode *LoadNode, int Block,
                                            SelectionDAG &DAG) const {
  SDLoc DL(LoadNode);
  EVT VT = LoadNode->getValueType(0);
  SDValue Chain = LoadNode->getChain();
  SDValue Ptr = LoadNode->getBasePtr();
  assert(isa<ConstantSDNode>(Ptr));

  // TODO: Support smaller loads.
  if (LoadNode->getMemoryVT().getScalarType() != MVT::i32 ||
      !ISD::isNON_EXTLoad(LoadNode))
    return SDValue();

  if (LoadNode->getAlign() < Align(4))
    return SDValue();

  int ConstantBlock = ConstantAddressBlock(Block);

  SDValue Slots[4];
  for (unsigned i = 0; i < 4; i++) {
    // We want the constant position encoded with the following formula:
    //   (((512 + (kc_bank << 12) + const_index) << 2) + chan)
    // const_index is Ptr, computed by LLVM using an alignment of 16.
    // Thus we add ((512 + (kc_bank << 12)) + chan) * 4 here and then
    // divide by 4 at the ISel step.
    SDValue NewPtr = DAG.getNode(
        ISD::ADD, DL, Ptr.getValueType(), Ptr,
        DAG.getConstant(4 * i + ConstantBlock * 16, DL, MVT::i32));
    Slots[i] = DAG.getNode(AMDGPUISD::CONST_ADDRESS, DL, MVT::i32, NewPtr);
  }
  EVT NewVT = MVT::v4i32;
  unsigned NumElements = 4;
  if (VT.isVector()) {
    NewVT = VT;
    NumElements = VT.getVectorNumElements();
  }
  SDValue Result = DAG.getBuildVector(NewVT, DL, ArrayRef(Slots, NumElements));
  if (!VT.isVector()) {
    Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32, Result,
                         DAG.getConstant(0, DL, MVT::i32));
  }
  SDValue MergedValues[2] = {
    Result,
    Chain
  };
  return DAG.getMergeValues(MergedValues, DL);
}

//===----------------------------------------------------------------------===//
// Custom DAG Optimizations
//===----------------------------------------------------------------------===//

SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
                                              DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch (N->getOpcode()) {
  // (f32 fp_round (f64 uint_to_fp a)) -> (f32 uint_to_fp a)
  case ISD::FP_ROUND: {
    SDValue Arg = N->getOperand(0);
    if (Arg.getOpcode() == ISD::UINT_TO_FP && Arg.getValueType() == MVT::f64) {
      return DAG.getNode(ISD::UINT_TO_FP, DL, N->getValueType(0),
                         Arg.getOperand(0));
    }
    break;
  }

  // (i32 fp_to_sint (fneg (select_cc f32, f32, 1.0, 0.0 cc))) ->
  // (i32 select_cc f32, f32, -1, 0 cc)
  //
  // Mesa's GLSL frontend generates the above pattern a lot, and we can lower
  // it to one of the SET*_DX10 instructions.
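  // For example (illustrative only), GLSL along the lines of
  //   float b = float(x < y);
  //   int i = int(-b);
  // reaches us as (i32 fp_to_sint (fneg (select_cc x, y, 1.0, 0.0, setolt))),
  // and the combine below rewrites it as the 0/-1 producing select_cc.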
  case ISD::FP_TO_SINT: {
    SDValue FNeg = N->getOperand(0);
    if (FNeg.getOpcode() != ISD::FNEG) {
      return SDValue();
    }
    SDValue SelectCC = FNeg.getOperand(0);
    if (SelectCC.getOpcode() != ISD::SELECT_CC ||
        SelectCC.getOperand(0).getValueType() != MVT::f32 || // LHS
        SelectCC.getOperand(2).getValueType() != MVT::f32 || // True
        !isHWTrueValue(SelectCC.getOperand(2)) ||
        !isHWFalseValue(SelectCC.getOperand(3))) {
      return SDValue();
    }

    return DAG.getNode(ISD::SELECT_CC, DL, N->getValueType(0),
                       SelectCC.getOperand(0),            // LHS
                       SelectCC.getOperand(1),            // RHS
                       DAG.getConstant(-1, DL, MVT::i32), // True
                       DAG.getConstant(0, DL, MVT::i32),  // False
                       SelectCC.getOperand(4));           // CC
  }

  // insert_vector_elt (build_vector elt0, ..., eltN), NewEltIdx, idx
  //   => build_vector elt0, ..., NewEltIdx, ..., eltN
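  // A concrete instance of the fold (illustrative only): inserting into an
  // undef vector,
  //   insert_vector_elt undef:v4i32, x, 0 -> build_vector x, undef, undef, undef
  // is handled below by first materializing every missing operand as undef.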
  case ISD::INSERT_VECTOR_ELT: {
    SDValue InVec = N->getOperand(0);
    SDValue InVal = N->getOperand(1);
    SDValue EltNo = N->getOperand(2);

    // If the inserted element is an UNDEF, just use the input vector.
    if (InVal.isUndef())
      return InVec;

    EVT VT = InVec.getValueType();

    // If we can't generate a legal BUILD_VECTOR, exit.
    if (!isOperationLegal(ISD::BUILD_VECTOR, VT))
      return SDValue();

    // Check that we know which element is being inserted.
    if (!isa<ConstantSDNode>(EltNo))
      return SDValue();
    unsigned Elt = EltNo->getAsZExtVal();

    // Check that the operand is a BUILD_VECTOR (or UNDEF, which can
    // essentially be converted to a BUILD_VECTOR). Fill in the Ops vector
    // with the vector elements.
    SmallVector<SDValue, 8> Ops;
    if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
      Ops.append(InVec.getNode()->op_begin(), InVec.getNode()->op_end());
    } else if (InVec.isUndef()) {
      unsigned NElts = VT.getVectorNumElements();
      Ops.append(NElts, DAG.getUNDEF(InVal.getValueType()));
    } else {
      return SDValue();
    }

    // Insert the element.
    if (Elt < Ops.size()) {
      // All the operands of BUILD_VECTOR must have the same type;
      // we enforce that here.
      EVT OpVT = Ops[0].getValueType();
      if (InVal.getValueType() != OpVT)
        InVal = OpVT.bitsGT(InVal.getValueType())
                    ? DAG.getNode(ISD::ANY_EXTEND, DL, OpVT, InVal)
                    : DAG.getNode(ISD::TRUNCATE, DL, OpVT, InVal);
      Ops[Elt] = InVal;
    }

    // Return the new vector.
    return DAG.getBuildVector(VT, DL, Ops);
  }

  // extract_vector_elt of a build_vector generated by custom lowering
  // also needs to be custom combined here.
  case ISD::EXTRACT_VECTOR_ELT: {
    SDValue Arg = N->getOperand(0);
    if (Arg.getOpcode() == ISD::BUILD_VECTOR) {
      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
        unsigned Element = Const->getZExtValue();
        return Arg->getOperand(Element);
      }
    }
    if (Arg.getOpcode() == ISD::BITCAST &&
        Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR &&
        (Arg.getOperand(0).getValueType().getVectorNumElements() ==
         Arg.getValueType().getVectorNumElements())) {
      if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
        unsigned Element = Const->getZExtValue();
        return DAG.getNode(ISD::BITCAST, DL, N->getVTList(),
                           Arg->getOperand(0).getOperand(Element));
      }
    }
    break;
  }

  case ISD::SELECT_CC: {
    // Try common optimizations first.
    if (SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI))
      return Ret;

    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->
    //   selectcc x, y, a, b, inv(cc)
    //
    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, setne ->
    //   selectcc x, y, a, b, cc
    SDValue LHS = N->getOperand(0);
    if (LHS.getOpcode() != ISD::SELECT_CC) {
      return SDValue();
    }

    SDValue RHS = N->getOperand(1);
    SDValue True = N->getOperand(2);
    SDValue False = N->getOperand(3);
    ISD::CondCode NCC = cast<CondCodeSDNode>(N->getOperand(4))->get();

    if (LHS.getOperand(2).getNode() != True.getNode() ||
        LHS.getOperand(3).getNode() != False.getNode() ||
        RHS.getNode() != False.getNode()) {
      return SDValue();
    }

    switch (NCC) {
    default: return SDValue();
    case ISD::SETNE: return LHS;
    case ISD::SETEQ: {
      ISD::CondCode LHSCC = cast<CondCodeSDNode>(LHS.getOperand(4))->get();
      LHSCC = ISD::getSetCCInverse(LHSCC, LHS.getOperand(0).getValueType());
      if (DCI.isBeforeLegalizeOps() ||
          isCondCodeLegal(LHSCC, LHS.getOperand(0).getSimpleValueType()))
        return DAG.getSelectCC(DL, LHS.getOperand(0), LHS.getOperand(1),
                               LHS.getOperand(2), LHS.getOperand(3), LHSCC);
      break;
    }
    }
    return SDValue();
  }

  case AMDGPUISD::R600_EXPORT: {
    SDValue Arg = N->getOperand(1);
    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
      break;

    SDValue NewArgs[8] = {
      N->getOperand(0), // Chain
      SDValue(),
      N->getOperand(2), // ArrayBase
      N->getOperand(3), // Type
      N->getOperand(4), // SWZ_X
      N->getOperand(5), // SWZ_Y
      N->getOperand(6), // SWZ_Z
      N->getOperand(7)  // SWZ_W
    };
    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[4], DAG, DL);
    return DAG.getNode(AMDGPUISD::R600_EXPORT, DL, N->getVTList(), NewArgs);
  }
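  // TEXTURE_FETCH is handled like R600_EXPORT above: operand 1 is the
  // coordinate BUILD_VECTOR, and (as the &NewArgs[2] below implies) the four
  // swizzle operands immediately follow it, so OptimizeSwizzle can rewrite
  // them in place.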
  case AMDGPUISD::TEXTURE_FETCH: {
    SDValue Arg = N->getOperand(1);
    if (Arg.getOpcode() != ISD::BUILD_VECTOR)
      break;

    SDValue NewArgs[19] = {
      N->getOperand(0),
      N->getOperand(1),
      N->getOperand(2),
      N->getOperand(3),
      N->getOperand(4),
      N->getOperand(5),
      N->getOperand(6),
      N->getOperand(7),
      N->getOperand(8),
      N->getOperand(9),
      N->getOperand(10),
      N->getOperand(11),
      N->getOperand(12),
      N->getOperand(13),
      N->getOperand(14),
      N->getOperand(15),
      N->getOperand(16),
      N->getOperand(17),
      N->getOperand(18),
    };
    NewArgs[1] = OptimizeSwizzle(N->getOperand(1), &NewArgs[2], DAG, DL);
    return DAG.getNode(AMDGPUISD::TEXTURE_FETCH, DL, N->getVTList(), NewArgs);
  }

  case ISD::LOAD: {
    LoadSDNode *LoadNode = cast<LoadSDNode>(N);
    SDValue Ptr = LoadNode->getBasePtr();
    if (LoadNode->getAddressSpace() == AMDGPUAS::PARAM_I_ADDRESS &&
        isa<ConstantSDNode>(Ptr))
      return constBufferLoad(LoadNode, AMDGPUAS::CONSTANT_BUFFER_0, DAG);
    break;
  }

  default: break;
  }

  return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}

bool R600TargetLowering::FoldOperand(SDNode *ParentNode, unsigned SrcIdx,
                                     SDValue &Src, SDValue &Neg, SDValue &Abs,
                                     SDValue &Sel, SDValue &Imm,
                                     SelectionDAG &DAG) const {
  const R600InstrInfo *TII = Subtarget->getInstrInfo();
  if (!Src.isMachineOpcode())
    return false;

  switch (Src.getMachineOpcode()) {
  case R600::FNEG_R600:
    if (!Neg.getNode())
      return false;
    Src = Src.getOperand(0);
    Neg = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
    return true;
  case R600::FABS_R600:
    if (!Abs.getNode())
      return false;
    Src = Src.getOperand(0);
    Abs = DAG.getTargetConstant(1, SDLoc(ParentNode), MVT::i32);
    return true;
  case R600::CONST_COPY: {
    unsigned Opcode = ParentNode->getMachineOpcode();
    bool HasDst = TII->getOperandIdx(Opcode, R600::OpName::dst) > -1;

    if (!Sel.getNode())
      return false;

    SDValue CstOffset = Src.getOperand(0);
    if (ParentNode->getValueType(0).isVector())
      return false;

    // Gather constant values.
    int SrcIndices[] = {
      TII->getOperandIdx(Opcode, R600::OpName::src0),
      TII->getOperandIdx(Opcode, R600::OpName::src1),
      TII->getOperandIdx(Opcode, R600::OpName::src2),
      TII->getOperandIdx(Opcode, R600::OpName::src0_X),
      TII->getOperandIdx(Opcode, R600::OpName::src0_Y),
      TII->getOperandIdx(Opcode, R600::OpName::src0_Z),
      TII->getOperandIdx(Opcode, R600::OpName::src0_W),
      TII->getOperandIdx(Opcode, R600::OpName::src1_X),
      TII->getOperandIdx(Opcode, R600::OpName::src1_Y),
      TII->getOperandIdx(Opcode, R600::OpName::src1_Z),
      TII->getOperandIdx(Opcode, R600::OpName::src1_W)
    };
    std::vector<unsigned> Consts;
    for (int OtherSrcIdx : SrcIndices) {
      int OtherSelIdx = TII->getSelIdx(Opcode, OtherSrcIdx);
      if (OtherSrcIdx < 0 || OtherSelIdx < 0)
        continue;
      if (HasDst) {
        OtherSrcIdx--;
        OtherSelIdx--;
      }
      if (RegisterSDNode *Reg =
              dyn_cast<RegisterSDNode>(ParentNode->getOperand(OtherSrcIdx))) {
        if (Reg->getReg() == R600::ALU_CONST) {
          Consts.push_back(ParentNode->getConstantOperandVal(OtherSelIdx));
        }
      }
    }

    ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);
    Consts.push_back(Cst->getZExtValue());
    if (!TII->fitsConstReadLimitations(Consts)) {
      return false;
    }

    Sel = CstOffset;
    Src = DAG.getRegister(R600::ALU_CONST, MVT::f32);
    return true;
  }
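  // The remaining cases fold a materialized immediate into the parent
  // instruction: the inline constants 0.0, 0.5, and 1.0 (or integer 0 and 1)
  // become reads of the special ZERO/HALF/ONE/ONE_INT registers, while any
  // other value has to go through the single ALU_LITERAL_X slot.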
  case R600::MOV_IMM_GLOBAL_ADDR:
    // Check if the Imm slot is already used. Taken from the
    // MOV_IMM_I32/MOV_IMM_F32 handling below.
    if (Imm->getAsZExtVal())
      return false;
    Imm = Src.getOperand(0);
    Src = DAG.getRegister(R600::ALU_LITERAL_X, MVT::i32);
    return true;
  case R600::MOV_IMM_I32:
  case R600::MOV_IMM_F32: {
    unsigned ImmReg = R600::ALU_LITERAL_X;
    uint64_t ImmValue = 0;

    if (Src.getMachineOpcode() == R600::MOV_IMM_F32) {
      ConstantFPSDNode *FPC = cast<ConstantFPSDNode>(Src.getOperand(0));
      float FloatValue = FPC->getValueAPF().convertToFloat();
      if (FloatValue == 0.0) {
        ImmReg = R600::ZERO;
      } else if (FloatValue == 0.5) {
        ImmReg = R600::HALF;
      } else if (FloatValue == 1.0) {
        ImmReg = R600::ONE;
      } else {
        ImmValue = FPC->getValueAPF().bitcastToAPInt().getZExtValue();
      }
    } else {
      uint64_t Value = Src.getConstantOperandVal(0);
      if (Value == 0) {
        ImmReg = R600::ZERO;
      } else if (Value == 1) {
        ImmReg = R600::ONE_INT;
      } else {
        ImmValue = Value;
      }
    }

    // Check that we aren't already using an immediate.
    // XXX: It's possible for an instruction to have more than one
    // immediate operand, but this is not supported yet.
    if (ImmReg == R600::ALU_LITERAL_X) {
      if (!Imm.getNode())
        return false;
      ConstantSDNode *C = cast<ConstantSDNode>(Imm);
      if (C->getZExtValue())
        return false;
      Imm = DAG.getTargetConstant(ImmValue, SDLoc(ParentNode), MVT::i32);
    }
    Src = DAG.getRegister(ImmReg, MVT::i32);
    return true;
  }
  default:
    return false;
  }
}

/// Fold the instructions after selecting them.
SDNode *R600TargetLowering::PostISelFolding(MachineSDNode *Node,
                                            SelectionDAG &DAG) const {
  const R600InstrInfo *TII = Subtarget->getInstrInfo();
  if (!Node->isMachineOpcode())
    return Node;

  unsigned Opcode = Node->getMachineOpcode();
  SDValue FakeOp;

  std::vector<SDValue> Ops(Node->op_begin(), Node->op_end());

  if (Opcode == R600::DOT_4) {
    int OperandIdx[] = {
      TII->getOperandIdx(Opcode, R600::OpName::src0_X),
      TII->getOperandIdx(Opcode, R600::OpName::src0_Y),
      TII->getOperandIdx(Opcode, R600::OpName::src0_Z),
      TII->getOperandIdx(Opcode, R600::OpName::src0_W),
      TII->getOperandIdx(Opcode, R600::OpName::src1_X),
      TII->getOperandIdx(Opcode, R600::OpName::src1_Y),
      TII->getOperandIdx(Opcode, R600::OpName::src1_Z),
      TII->getOperandIdx(Opcode, R600::OpName::src1_W)
    };
    int NegIdx[] = {
      TII->getOperandIdx(Opcode, R600::OpName::src0_neg_X),
      TII->getOperandIdx(Opcode, R600::OpName::src0_neg_Y),
      TII->getOperandIdx(Opcode, R600::OpName::src0_neg_Z),
      TII->getOperandIdx(Opcode, R600::OpName::src0_neg_W),
      TII->getOperandIdx(Opcode, R600::OpName::src1_neg_X),
      TII->getOperandIdx(Opcode, R600::OpName::src1_neg_Y),
      TII->getOperandIdx(Opcode, R600::OpName::src1_neg_Z),
      TII->getOperandIdx(Opcode, R600::OpName::src1_neg_W)
    };
    int AbsIdx[] = {
      TII->getOperandIdx(Opcode, R600::OpName::src0_abs_X),
      TII->getOperandIdx(Opcode, R600::OpName::src0_abs_Y),
      TII->getOperandIdx(Opcode, R600::OpName::src0_abs_Z),
      TII->getOperandIdx(Opcode, R600::OpName::src0_abs_W),
      TII->getOperandIdx(Opcode, R600::OpName::src1_abs_X),
      TII->getOperandIdx(Opcode, R600::OpName::src1_abs_Y),
      TII->getOperandIdx(Opcode, R600::OpName::src1_abs_Z),
      TII->getOperandIdx(Opcode, R600::OpName::src1_abs_W)
    };
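    // Try to fold each of the eight sources in turn. The '- 1' when indexing
    // Ops accounts for the explicit dst operand, which getOperandIdx counts
    // but which is not part of the SDNode operand list (the same adjustment
    // the HasDst check applies to SelIdx).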
    for (unsigned i = 0; i < 8; i++) {
      if (OperandIdx[i] < 0)
        return Node;
      SDValue &Src = Ops[OperandIdx[i] - 1];
      SDValue &Neg = Ops[NegIdx[i] - 1];
      SDValue &Abs = Ops[AbsIdx[i] - 1];
      bool HasDst = TII->getOperandIdx(Opcode, R600::OpName::dst) > -1;
      int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
      if (HasDst)
        SelIdx--;
      SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
      if (FoldOperand(Node, i, Src, Neg, Abs, Sel, FakeOp, DAG))
        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
    }
  } else if (Opcode == R600::REG_SEQUENCE) {
    for (unsigned i = 1, e = Node->getNumOperands(); i < e; i += 2) {
      SDValue &Src = Ops[i];
      if (FoldOperand(Node, i, Src, FakeOp, FakeOp, FakeOp, FakeOp, DAG))
        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
    }
  } else {
    if (!TII->hasInstrModifiers(Opcode))
      return Node;
    int OperandIdx[] = {
      TII->getOperandIdx(Opcode, R600::OpName::src0),
      TII->getOperandIdx(Opcode, R600::OpName::src1),
      TII->getOperandIdx(Opcode, R600::OpName::src2)
    };
    int NegIdx[] = {
      TII->getOperandIdx(Opcode, R600::OpName::src0_neg),
      TII->getOperandIdx(Opcode, R600::OpName::src1_neg),
      TII->getOperandIdx(Opcode, R600::OpName::src2_neg)
    };
    int AbsIdx[] = {
      TII->getOperandIdx(Opcode, R600::OpName::src0_abs),
      TII->getOperandIdx(Opcode, R600::OpName::src1_abs),
      -1
    };
    for (unsigned i = 0; i < 3; i++) {
      if (OperandIdx[i] < 0)
        return Node;
      SDValue &Src = Ops[OperandIdx[i] - 1];
      SDValue &Neg = Ops[NegIdx[i] - 1];
      SDValue FakeAbs;
      SDValue &Abs = (AbsIdx[i] > -1) ? Ops[AbsIdx[i] - 1] : FakeAbs;
      bool HasDst = TII->getOperandIdx(Opcode, R600::OpName::dst) > -1;
      int SelIdx = TII->getSelIdx(Opcode, OperandIdx[i]);
      int ImmIdx = TII->getOperandIdx(Opcode, R600::OpName::literal);
      if (HasDst) {
        SelIdx--;
        ImmIdx--;
      }
      SDValue &Sel = (SelIdx > -1) ? Ops[SelIdx] : FakeOp;
      SDValue &Imm = Ops[ImmIdx];
      if (FoldOperand(Node, i, Src, Neg, Abs, Sel, Imm, DAG))
        return DAG.getMachineNode(Opcode, SDLoc(Node), Node->getVTList(), Ops);
    }
  }

  return Node;
}

TargetLowering::AtomicExpansionKind
R600TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  switch (RMW->getOperation()) {
  case AtomicRMWInst::UIncWrap:
  case AtomicRMWInst::UDecWrap:
    // FIXME: Cayman at least appears to have instructions for this, but the
    // instruction definitions appear to be missing.
    return AtomicExpansionKind::CmpXChg;
  default:
    break;
  }

  return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
}