//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  const AMDGPUSubtarget *Subtarget;

  /// \returns An AMDGPUISD::FFBH_U32 or AMDGPUISD::FFBL_B32 node (per \p Opc)
  /// if the incoming \p Op may have been legalized from a smaller type VT.
  /// Need to match the pre-legalized type because the generic legalization
  /// inserts the add/sub between the select and compare.
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;

public:
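  /// \returns The number of bits needed to represent the value of \p Op as an
  /// unsigned (numBitsUnsigned) or signed (numBitsSigned) integer, derived
  /// from the DAG's value tracking (known bits / sign bits).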
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);
  static bool hasDefinedInitializer(const GlobalValue *GV);

protected:
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  /// Split a vector store into multiple scalar stores.
  /// \returns The resulting chain.

  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
                    double Log2BaseInverted) const;
  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isConstantCostlierToNegate(SDValue N) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;

  /// Return 64-bit value Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector type into two parts. The first part is a power of two
  /// vector. The second part is whatever is left over, and is a scalar if it
  /// would otherwise be a 1-vector.
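  /// For example, v3i32 splits into v2i32 and i32, and v5f16 splits into
  /// v4f16 and f16.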
  std::pair<EVT, EVT> getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const;

  /// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
  /// scalar.
  std::pair<SDValue, SDValue> splitVector(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HiVT,
                                          SelectionDAG &DAG) const;

  /// Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Widen a suitably aligned v3 load. For all other cases, split the input
  /// vector load.
  SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector store into 2 stores of half the vector.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;

  void analyzeFormalArgumentsCompute(
    CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const;

public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

  bool mayIgnoreSignedZero(SDValue Op) const;

  static inline SDValue stripBitcast(SDValue Val) {
    return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
  }

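  /// Heuristic: \returns true if every use of \p N can fold a floating-point
  /// source modifier (fneg/fabs), so applying the modifier at the source is
  /// expected to be free; \p CostThreshold caps the number of uses considered
  /// before the check gives up.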
  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                               bool LegalOperations, bool ForCodeSize,
                               NegatibleCost &Cost,
                               unsigned Depth) const override;

  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

  EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                          ISD::NodeType ExtendKind) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load,
                             ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG,
                               const MachineMemOperand &MMO) const final;

  bool storeOfVectorConstantIsCheap(EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool isSDNodeAlwaysUniform(const SDNode *N) const override;
  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue addTokenForArgument(SDValue Chain,
                              SelectionDAG &DAG,
                              MachineFrameInfo &MFI,
                              int ClobberedFI) const;

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op,
                                  SelectionDAG &DAG) const;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char *getTargetNodeName(unsigned Opcode) const override;

  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
  // AMDGPU. Commit r319036,
  // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
  // turned on MergeConsecutiveStores() before Instruction Selection for all
  // targets. Enough AMDGPU compiles go into an infinite loop (
  // MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
  // MergeConsecutiveStores() re-merges, etc. ) to warrant turning it off for
  // now.
  bool mergeStoresAfterLegalization(EVT) const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;

  /// Determine which bits of \p Op are known to be either zero or one and
  /// return them in \p Known. The \p DemandedElts argument restricts the
  /// query to the requested vector elements.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op, const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

  unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
                                            Register R,
                                            const APInt &DemandedElts,
                                            const MachineRegisterInfo &MRI,
                                            unsigned Depth = 0) const override;

  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;

  /// Helper function that adds Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing Reg if \p RawReg is true, otherwise
  /// a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }
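  // Usage sketch (register class and type chosen for illustration only):
  //   SDValue V =
  //       CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass, Reg, MVT::i32);
  // This marks Reg as live-in to the MachineFunction and returns a copy from
  // the register that the DAG can use.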
  // Returns the raw live-in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }

  /// Similar to CreateLiveInRegister, except the value may be loaded from a
  /// stack slot rather than passed in a register.
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;

  enum ImplicitParameter {
    FIRST_IMPLICIT,
    GRID_DIM = FIRST_IMPLICIT,
    GRID_OFFSET,
  };

  /// Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const MachineFunction &MF,
                                      const ImplicitParameter Param) const;

  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
};

namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL, // 32-bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes

  // Function call.
  CALL,
  TC_RETURN,
  TRAP,

  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_FLAG,

  DWORDADDR,
  FRACT,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following clamp output
  /// modifier behavior with dx10_enable.
  CLAMP,

  // This is SETCC with the full mask result which is used for a compare with a
  // result bit per item in the wavefront.
  SETCC,
  SETREG,

  DENORM_MODE,

  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
  // Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX_LEGACY,
  FMIN_LEGACY,

  FMAX3,
  SMAX3,
  UMAX3,
  FMIN3,
  SMIN3,
  UMIN3,
  FMED3,
  SMED3,
  UMED3,
  FDOT2,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  // For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RCP_LEGACY,
  RCP_IFLAG,
  FMUL_LEGACY,
  RSQ_CLAMP,
  LDEXP,
  FP_CLASS,
  DOT4,
  CARRY,
  BORROW,
  BFE_U32, // Extract range of bits with zero extension to 32-bits.
  BFE_I32, // Extract range of bits with sign extension to 32-bits.
  BFI,     // (src0 & src1) | (~src0 & src2)
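           // i.e. a per-bit select: where src0 has a 1, the result takes the
           // bit from src1, otherwise from src2.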
  BFM, // Insert a range of bits into a 32-bit word.
  FFBH_U32, // ctlz with -1 if input is zero.
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
  MUL_U24,
  MUL_I24,
  MULHI_U24,
  MULHI_I24,
  MAD_U24,
  MAD_I24,
  MAD_U64_U32,
  MAD_I64_I32,
  PERM,
  TEXTURE_FETCH,
  R600_EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,

  // Convert two float 32 numbers into a single register holding two packed f16
  // with round to zero.
  CVT_PKRTZ_F16_F32,
  CVT_PKNORM_I16_F32,
  CVT_PKNORM_U16_F32,
  CVT_PK_I16_I32,
  CVT_PK_U16_U32,

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,

  // Wrapper around fp16 results that are known to zero the high bits.
  FP16_ZEXT,

  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,
  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,
  PC_ADD_REL_OFFSET,
  LDS,
  DUMMY_CHAIN,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LOAD_D16_HI,
  LOAD_D16_LO,
  LOAD_D16_HI_I8,
  LOAD_D16_HI_U8,
  LOAD_D16_LO_I8,
  LOAD_D16_LO_U8,

  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  DS_ORDERED_COUNT,
  ATOMIC_CMP_SWAP,
  ATOMIC_INC,
  ATOMIC_DEC,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  BUFFER_LOAD,
  BUFFER_LOAD_UBYTE,
  BUFFER_LOAD_USHORT,
  BUFFER_LOAD_BYTE,
  BUFFER_LOAD_SHORT,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_D16,
  SBUFFER_LOAD,
  BUFFER_STORE,
  BUFFER_STORE_BYTE,
  BUFFER_STORE_SHORT,
  BUFFER_STORE_FORMAT,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_SWAP,
  BUFFER_ATOMIC_ADD,
  BUFFER_ATOMIC_SUB,
  BUFFER_ATOMIC_SMIN,
  BUFFER_ATOMIC_UMIN,
  BUFFER_ATOMIC_SMAX,
  BUFFER_ATOMIC_UMAX,
  BUFFER_ATOMIC_AND,
  BUFFER_ATOMIC_OR,
  BUFFER_ATOMIC_XOR,
  BUFFER_ATOMIC_INC,
  BUFFER_ATOMIC_DEC,
  BUFFER_ATOMIC_CMPSWAP,
  BUFFER_ATOMIC_CSUB,
  BUFFER_ATOMIC_FADD,

  LAST_AMDGPU_ISD_NUMBER
};

} // End namespace AMDGPUISD

} // End namespace llvm

#endif