//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  const AMDGPUSubtarget *Subtarget;

  /// \returns an AMDGPUISD::FFBH_U32 or AMDGPUISD::FFBL_B32 node (selected by
  /// \p Opc) if the incoming \p Op may have been legalized from a smaller type
  /// VT. We need to match the pre-legalized type because the generic
  /// legalization inserts the add/sub between the select and compare.
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;
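  //
  // For illustration (a sketch of one matched shape, not an exhaustive list):
  // a 16-bit ctlz legalized to 32 bits leaves a DAG like
  //
  //   (select (setcc x, 0, eq), -1,
  //           (sub (ctlz_zero_undef (zext x to i32)), 16))
  //
  // and this helper lets the combine look through the inserted sub to form
  // FFBH_U32, which already returns -1 for a zero input.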

public:
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);
  static bool hasDefinedInitializer(const GlobalValue *GV);

protected:
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
                    double Log2BaseInverted) const;
  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  bool isConstantCostlierToNegate(SDValue N) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;

  /// Return 64-bit value Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;
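
  // Illustrative use (a sketch, not code from this file): many 64-bit
  // operations are lowered by transforming the two 32-bit halves separately
  // and recombining them through a v2i32 build_vector:
  //
  //   SDValue Lo, Hi;
  //   std::tie(Lo, Hi) = split64BitValue(Op, DAG); // i64 -> two i32 halves
  //   // ... operate on Lo and Hi independently ...
  //   SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi});
  //   SDValue Res = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);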

  /// Split a vector type into two parts. The first part is a power of two
  /// vector. The second part is whatever is left over, and is a scalar if it
  /// would otherwise be a 1-vector.
  std::pair<EVT, EVT> getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const;

  /// Split a vector value into two parts of types LoVT and HiVT. HiVT could be
  /// a scalar.
  std::pair<SDValue, SDValue> splitVector(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HiVT,
                                          SelectionDAG &DAG) const;

  /// Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Widen a suitably aligned v3 load. For all other cases, split the input
  /// vector load.
  SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector store into 2 stores of half the vector.
  /// \returns The resulting chain.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool Sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;

  void analyzeFormalArgumentsCompute(
      CCState &State, const SmallVectorImpl<ISD::InputArg> &Ins) const;

public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

  bool mayIgnoreSignedZero(SDValue Op) const;

  static inline SDValue stripBitcast(SDValue Val) {
    return Val.getOpcode() == ISD::BITCAST ? Val.getOperand(0) : Val;
  }

  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                               bool LegalOperations, bool ForCodeSize,
                               NegatibleCost &Cost,
                               unsigned Depth) const override;

  bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;

  EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                          ISD::NodeType ExtendKind) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG,
                               const MachineMemOperand &MMO) const final;

  bool storeOfVectorConstantIsCheap(EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool isSDNodeAlwaysUniform(const SDNode *N) const override;
  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;
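
  // A sketch of the intended use (hypothetical caller, for illustration):
  // outgoing stack arguments for a tail call can overwrite the caller's own
  // incoming argument slots, so the chain is tokened against the clobbered
  // frame index before the overwriting store:
  //
  //   Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
  //   Chain = DAG.getStore(Chain, DL, OutVal, DstAddr, DstInfo);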

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char *getTargetNodeName(unsigned Opcode) const override;

  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
  // AMDGPU. Commit r319036
  // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
  // turned on MergeConsecutiveStores() before Instruction Selection for all
  // targets. Enough AMDGPU compiles go into an infinite loop
  // (MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
  // MergeConsecutiveStores() re-merges, etc.) to warrant disabling it for now.
  bool mergeStoresAfterLegalization(EVT) const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;

  /// Determine which bits of \p Op are known to be either zero or one and
  /// return them in \p Known. \p DemandedElts restricts the query to the
  /// requested vector elements.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

  unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
                                            Register R,
                                            const APInt &DemandedElts,
                                            const MachineRegisterInfo &MRI,
                                            unsigned Depth = 0) const override;

  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;

  /// Helper function that adds \p Reg to the live-in list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing \p Reg if \p RawReg is true,
  /// otherwise a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }
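
  // Illustrative call (the register choice is hypothetical; real callers take
  // it from the calling convention): materialize an incoming SGPR pair as an
  // i64 value at function entry:
  //
  //   SDValue InputPtr = CreateLiveInRegister(
  //       DAG, &AMDGPU::SReg_64RegClass, AMDGPU::SGPR4_SGPR5, MVT::i64);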

  // Returns the raw live-in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }

  /// Similar to CreateLiveInRegister, except the value may be loaded from a
  /// stack slot rather than passed in a register.
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;

  enum ImplicitParameter {
    FIRST_IMPLICIT,
    GRID_DIM = FIRST_IMPLICIT,
    GRID_OFFSET,
  };

  /// Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const MachineFunction &MF,
                                      const ImplicitParameter Param) const;

  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;

  bool isConstantUnsignedBitfieldExtactLegal(unsigned Opc, LLT Ty1,
                                             LLT Ty2) const override;
};

namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL, // 32-bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes

  // Function call.
  CALL,
  TC_RETURN,
  TRAP,

  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_FLAG,

  DWORDADDR,
  FRACT,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following clamp output
  /// modifier behavior with dx10_enable.
  CLAMP,

  // This is SETCC with the full mask result which is used for a compare with a
  // result bit per item in the wavefront.
  SETCC,
  SETREG,

  DENORM_MODE,

  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to
  // 100 pi. Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX_LEGACY,
  FMIN_LEGACY,

  FMAX3,
  SMAX3,
  UMAX3,
  FMIN3,
  SMIN3,
  UMIN3,
  FMED3,
  SMED3,
  UMED3,
  FDOT2,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  // For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RCP_LEGACY,
  RCP_IFLAG,
  FMUL_LEGACY,
  RSQ_CLAMP,
  LDEXP,
  FP_CLASS,
  DOT4,
  CARRY,
  BORROW,
  BFE_U32,  // Extract a range of bits with zero extension to 32 bits.
  BFE_I32,  // Extract a range of bits with sign extension to 32 bits.
  BFI,      // (src0 & src1) | (~src0 & src2)
  BFM,      // Insert a range of bits into a 32-bit word.
  FFBH_U32, // ctlz with -1 if input is zero.
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
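
  // 24-bit multiply nodes. A sketch of the motivating combine (illustrative;
  // the real check uses numBitsUnsigned/numBitsSigned on the operands): when
  // both inputs are provably narrow, a 32-bit mul can instead select the
  // faster 24-bit multiplier, e.g.
  //
  //   (mul (and x, 0xffffff), (and y, 0xffffff)) --> (MUL_U24 x, y)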
  MUL_U24,
  MUL_I24,
  MULHI_U24,
  MULHI_I24,
  MAD_U24,
  MAD_I24,
  MAD_U64_U32,
  MAD_I64_I32,
  PERM,
  TEXTURE_FETCH,
  R600_EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,

  // Convert two f32 values into a single register holding two packed f16
  // values, with round toward zero.
  CVT_PKRTZ_F16_F32,
  CVT_PKNORM_I16_F32,
  CVT_PKNORM_U16_F32,
  CVT_PK_I16_I32,
  CVT_PK_U16_U32,

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,

  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,

  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,

  PC_ADD_REL_OFFSET,
  LDS,
  DUMMY_CHAIN,

  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LOAD_D16_HI,
  LOAD_D16_LO,
  LOAD_D16_HI_I8,
  LOAD_D16_HI_U8,
  LOAD_D16_LO_I8,
  LOAD_D16_LO_U8,

  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  DS_ORDERED_COUNT,
  ATOMIC_CMP_SWAP,
  ATOMIC_INC,
  ATOMIC_DEC,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  BUFFER_LOAD,
  BUFFER_LOAD_UBYTE,
  BUFFER_LOAD_USHORT,
  BUFFER_LOAD_BYTE,
  BUFFER_LOAD_SHORT,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_D16,
  SBUFFER_LOAD,
  BUFFER_STORE,
  BUFFER_STORE_BYTE,
  BUFFER_STORE_SHORT,
  BUFFER_STORE_FORMAT,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_SWAP,
  BUFFER_ATOMIC_ADD,
  BUFFER_ATOMIC_SUB,
  BUFFER_ATOMIC_SMIN,
  BUFFER_ATOMIC_UMIN,
  BUFFER_ATOMIC_SMAX,
  BUFFER_ATOMIC_UMAX,
  BUFFER_ATOMIC_AND,
  BUFFER_ATOMIC_OR,
  BUFFER_ATOMIC_XOR,
  BUFFER_ATOMIC_INC,
  BUFFER_ATOMIC_DEC,
  BUFFER_ATOMIC_CMPSWAP,
  BUFFER_ATOMIC_CSUB,
  BUFFER_ATOMIC_FADD,
  BUFFER_ATOMIC_FMIN,
  BUFFER_ATOMIC_FMAX,

  LAST_AMDGPU_ISD_NUMBER
};

} // End namespace AMDGPUISD

} // End namespace llvm

#endif