//===-- AMDGPUISelLowering.h - AMDGPU Lowering Interface --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Interface definition of the TargetLowering class that is common
/// to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H
#define LLVM_LIB_TARGET_AMDGPU_AMDGPUISELLOWERING_H

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {

class AMDGPUMachineFunction;
class AMDGPUSubtarget;
struct ArgDescriptor;

class AMDGPUTargetLowering : public TargetLowering {
private:
  const AMDGPUSubtarget *Subtarget;

  /// \returns AMDGPUISD::FFBH_U32 node if the incoming \p Op may have been
  /// legalized from a smaller type VT. Need to match the pre-legalized type
  /// because the generic legalization inserts the add/sub between the select
  /// and compare.
  SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL,
                      unsigned Opc) const;

public:
  /// \returns The minimum number of bits needed to store the value of \p Op
  /// as an unsigned integer. Truncating to this size and then zero-extending
  /// to the original size will not change the value.
  static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);

  /// \returns The minimum number of bits needed to store the value of \p Op
  /// as a signed integer. Truncating to this size and then sign-extending to
  /// the original size will not change the value.
  static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);

protected:
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  /// Split a vector store into multiple scalar stores.
  /// \returns The resulting chain.
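  /// For example (illustrative only), a store of a <4 x i32> value may be
  /// emitted as four i32 stores at byte offsets 0, 4, 8 and 12, with the
  /// individual store chains combined into the returned chain.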

  SDValue LowerFREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCEIL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRINT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFROUNDEVEN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;

  static bool allowApproxFunc(const SelectionDAG &DAG, SDNodeFlags Flags);
  static bool needsDenormHandlingF32(const SelectionDAG &DAG, SDValue Src,
                                     SDNodeFlags Flags);
  SDValue getIsLtSmallestNormal(SelectionDAG &DAG, SDValue Op,
                                SDNodeFlags Flags) const;
  SDValue getIsFinite(SelectionDAG &DAG, SDValue Op, SDNodeFlags Flags) const;
  std::pair<SDValue, SDValue> getScaledLogInput(SelectionDAG &DAG,
                                                const SDLoc SL, SDValue Op,
                                                SDNodeFlags Flags) const;

  SDValue LowerFLOG2(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOGCommon(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOG10(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLOGUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
                          bool IsLog10, SDNodeFlags Flags) const;
  SDValue lowerFEXP2(SDValue Op, SelectionDAG &DAG) const;

  SDValue lowerFEXPUnsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
                          SDNodeFlags Flags) const;
  SDValue lowerFEXP10Unsafe(SDValue Op, const SDLoc &SL, SelectionDAG &DAG,
                            SDNodeFlags Flags) const;
  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerFP_TO_INT64(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;

protected:
  bool shouldCombineMemoryType(EVT VT) const;
  SDValue performLoadCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performStoreCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performAssertSZExtCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performIntrinsicWOChainCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue splitBinaryBitConstantOpImpl(DAGCombinerInfo &DCI, const SDLoc &SL,
                                       unsigned Opc, SDValue LHS,
                                       uint32_t ValLo, uint32_t ValHi) const;
  SDValue performShlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSraCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performSrlCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performTruncateCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulLoHiCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performMulhuCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, SDValue LHS,
                                  SDValue RHS, DAGCombinerInfo &DCI) const;

  SDValue
  foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI, SDValue N) const;
  SDValue performSelectCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  TargetLowering::NegatibleCost
  getConstantNegateCost(const ConstantFPSDNode *C) const;

  bool isConstantCostlierToNegate(SDValue N) const;
  bool isConstantCheaperToNegate(SDValue N) const;
  SDValue performFNegCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performFAbsCombine(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue performRcpCombine(SDNode *N, DAGCombinerInfo &DCI) const;

  static EVT getEquivalentMemType(LLVMContext &Context, EVT VT);

  virtual SDValue LowerGlobalAddress(AMDGPUMachineFunction *MFI, SDValue Op,
                                     SelectionDAG &DAG) const;

  /// Return the 64-bit value \p Op as two 32-bit integers.
  std::pair<SDValue, SDValue> split64BitValue(SDValue Op,
                                              SelectionDAG &DAG) const;
  SDValue getLoHalf64(SDValue Op, SelectionDAG &DAG) const;
  SDValue getHiHalf64(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector type into two parts. The first part is a power-of-two
  /// vector. The second part is whatever is left over, and is a scalar if it
  /// would otherwise be a 1-vector.
  std::pair<EVT, EVT> getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const;

  /// Split a vector value into two parts of types LoVT and HighVT. HighVT
  /// could be scalar.
  std::pair<SDValue, SDValue> splitVector(const SDValue &N, const SDLoc &DL,
                                          const EVT &LoVT, const EVT &HighVT,
                                          SelectionDAG &DAG) const;

  /// Split a vector load into 2 loads of half the vector.
  SDValue SplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Widen a suitably aligned v3 load. For all other cases, split the input
  /// vector load.
  SDValue WidenOrSplitVectorLoad(SDValue Op, SelectionDAG &DAG) const;

  /// Split a vector store into 2 stores of half the vector.
  SDValue SplitVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUDIVREM(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const;
  void LowerUDIVREM64(SDValue Op, SelectionDAG &DAG,
                      SmallVectorImpl<SDValue> &Results) const;

  void analyzeFormalArgumentsCompute(
      CCState &State, const SmallVectorImpl<ISD::InputArg> &Ins) const;

public:
  AMDGPUTargetLowering(const TargetMachine &TM, const AMDGPUSubtarget &STI);

  bool mayIgnoreSignedZero(SDValue Op) const;

  static inline SDValue stripBitcast(SDValue Val) {
    return Val.getOpcode() == ISD::BITCAST ?
               Val.getOperand(0) : Val;
  }

  static bool shouldFoldFNegIntoSrc(SDNode *FNeg, SDValue FNegSrc);
  static bool allUsesHaveSourceMods(const SDNode *N,
                                    unsigned CostThreshold = 4);
  bool isFAbsFree(EVT VT) const override;
  bool isFNegFree(EVT VT) const override;
  bool isTruncateFree(EVT Src, EVT Dest) const override;
  bool isTruncateFree(Type *Src, Type *Dest) const override;

  bool isZExtFree(Type *Src, Type *Dest) const override;
  bool isZExtFree(EVT Src, EVT Dest) const override;

  SDValue getNegatedExpression(SDValue Op, SelectionDAG &DAG,
                               bool LegalOperations, bool ForCodeSize,
                               NegatibleCost &Cost,
                               unsigned Depth) const override;

  bool isNarrowingProfitable(EVT SrcVT, EVT DestVT) const override;

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  EVT getTypeForExtReturn(LLVMContext &Context, EVT VT,
                          ISD::NodeType ExtendKind) const override;

  MVT getVectorIdxTy(const DataLayout &) const override;
  bool isSelectSupported(SelectSupportKind) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;
  bool ShouldShrinkFPConstant(EVT VT) const override;
  bool shouldReduceLoadWidth(SDNode *Load,
                             ISD::LoadExtType ExtType,
                             EVT ExtVT) const override;

  bool isLoadBitCastBeneficial(EVT, EVT, const SelectionDAG &DAG,
                               const MachineMemOperand &MMO) const final;

  bool storeOfVectorConstantIsCheap(bool IsZero, EVT MemVT,
                                    unsigned NumElem,
                                    unsigned AS) const override;
  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
  bool isCheapToSpeculateCttz(Type *Ty) const override;
  bool isCheapToSpeculateCtlz(Type *Ty) const override;

  bool isSDNodeAlwaysUniform(const SDNode *N) const override;
  static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
  static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue addTokenForArgument(SDValue Chain,
                              SelectionDAG &DAG,
                              MachineFrameInfo &MFI,
                              int ClobberedFI) const;

  SDValue lowerUnhandledCall(CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals,
                             StringRef Reason) const;
  SDValue LowerCall(CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
  void ReplaceNodeResults(SDNode *N,
                          SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue combineFMinMaxLegacyImpl(const SDLoc &DL, EVT VT, SDValue LHS,
                                   SDValue RHS, SDValue True, SDValue False,
                                   SDValue CC, DAGCombinerInfo &DCI) const;

  SDValue combineFMinMaxLegacy(const SDLoc &DL, EVT VT, SDValue LHS,
                               SDValue RHS, SDValue True, SDValue False,
                               SDValue CC, DAGCombinerInfo &DCI) const;

  const char *getTargetNodeName(unsigned Opcode) const override;

  // FIXME: Turn off MergeConsecutiveStores() before Instruction Selection for
  // AMDGPU.
  // Commit r319036
  // (https://github.com/llvm/llvm-project/commit/db77e57ea86d941a4262ef60261692f4cb6893e6)
  // turned on MergeConsecutiveStores() before Instruction Selection for all
  // targets. Enough AMDGPU compiles go into an infinite loop
  // (MergeConsecutiveStores() merges two stores; LegalizeStoreOps() un-merges;
  // MergeConsecutiveStores() re-merges, etc.) to warrant turning it off for
  // now.
  bool mergeStoresAfterLegalization(EVT) const override { return false; }

  bool isFsqrtCheap(SDValue Operand, SelectionDAG &DAG) const override {
    return true;
  }
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &RefinementSteps, bool &UseOneConstNR,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &RefinementSteps) const override;

  virtual SDNode *PostISelFolding(MachineSDNode *N,
                                  SelectionDAG &DAG) const = 0;

  /// Determine which of the bits specified in \p Mask are known to be
  /// either zero or one and return them in the \p KnownZero and \p KnownOne
  /// bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth = 0) const override;

  unsigned computeNumSignBitsForTargetInstr(GISelKnownBits &Analysis,
                                            Register R,
                                            const APInt &DemandedElts,
                                            const MachineRegisterInfo &MRI,
                                            unsigned Depth = 0) const override;

  bool isKnownNeverNaNForTargetNode(SDValue Op,
                                    const SelectionDAG &DAG,
                                    bool SNaN = false,
                                    unsigned Depth = 0) const override;

  bool isReassocProfitable(MachineRegisterInfo &MRI, Register N0,
                           Register N1) const override;

  /// Helper function that adds \p Reg to the LiveIn list of the DAG's
  /// MachineFunction.
  ///
  /// \returns a RegisterSDNode representing \p Reg if \p RawReg is true,
  /// otherwise a copy from the register.
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT,
                               const SDLoc &SL,
                               bool RawReg = false) const;
  SDValue CreateLiveInRegister(SelectionDAG &DAG,
                               const TargetRegisterClass *RC,
                               Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()));
  }

  // Returns the raw live-in register rather than a copy from it.
  SDValue CreateLiveInRegisterRaw(SelectionDAG &DAG,
                                  const TargetRegisterClass *RC,
                                  Register Reg, EVT VT) const {
    return CreateLiveInRegister(DAG, RC, Reg, VT, SDLoc(DAG.getEntryNode()),
                                true);
  }

  /// Similar to CreateLiveInRegister, except the value may be loaded from a
  /// stack slot rather than passed in a register.
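  ///
  /// Illustrative usage sketch only; the offset and type below are made-up
  /// example values, not values mandated by this interface:
  /// \code
  ///   // Reload a 32-bit input that was placed at byte offset 16 of the
  ///   // incoming stack area instead of in a register.
  ///   SDValue Val = loadStackInputValue(DAG, MVT::i32, SL, /*Offset=*/16);
  /// \endcode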
  SDValue loadStackInputValue(SelectionDAG &DAG,
                              EVT VT,
                              const SDLoc &SL,
                              int64_t Offset) const;

  SDValue storeStackInputValue(SelectionDAG &DAG,
                               const SDLoc &SL,
                               SDValue Chain,
                               SDValue ArgVal,
                               int64_t Offset) const;

  SDValue loadInputValue(SelectionDAG &DAG,
                         const TargetRegisterClass *RC,
                         EVT VT, const SDLoc &SL,
                         const ArgDescriptor &Arg) const;

  enum ImplicitParameter {
    FIRST_IMPLICIT,
    PRIVATE_BASE,
    SHARED_BASE,
    QUEUE_PTR,
  };

  /// Helper function that returns the byte offset of the given
  /// type of implicit parameter.
  uint32_t getImplicitParameterOffset(const MachineFunction &MF,
                                      const ImplicitParameter Param) const;
  uint32_t getImplicitParameterOffset(const uint64_t ExplicitKernArgSize,
                                      const ImplicitParameter Param) const;

  MVT getFenceOperandTy(const DataLayout &DL) const override {
    return MVT::i32;
  }

  AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
};

namespace AMDGPUISD {

enum NodeType : unsigned {
  // AMDIL ISD Opcodes
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  UMUL, // 32-bit unsigned multiplication
  BRANCH_COND,
  // End AMDIL ISD Opcodes

  // Function call.
  CALL,
  TC_RETURN,
  TC_RETURN_GFX,
  TC_RETURN_CHAIN,
  TRAP,

  // Masked control flow nodes.
  IF,
  ELSE,
  LOOP,

  // A uniform kernel return that terminates the wavefront.
  ENDPGM,

  // s_endpgm, but we may want to insert it in the middle of the block.
  ENDPGM_TRAP,

  // Return to a shader part's epilog code.
  RETURN_TO_EPILOG,

  // Return with values from a non-entry function.
  RET_GLUE,

  // Convert an unswizzled wave uniform stack address to an address compatible
  // with a vector offset for use in stack access.
  WAVE_ADDRESS,

  DWORDADDR,
  FRACT,

  /// CLAMP value between 0.0 and 1.0. NaN clamped to 0, following clamp output
  /// modifier behavior with dx10_enable.
  CLAMP,

  // This is SETCC with the full mask result, which is used for a compare with
  // a result bit per item in the wavefront.
  SETCC,
  SETREG,

  DENORM_MODE,

  // FP ops with input and output chain.
  FMA_W_CHAIN,
  FMUL_W_CHAIN,

  // SIN_HW, COS_HW - f32 for SI, 1 ULP max error, valid from -100 pi to 100 pi.
  // Denormals handled on some parts.
  COS_HW,
  SIN_HW,
  FMAX_LEGACY,
  FMIN_LEGACY,

  FMAX3,
  SMAX3,
  UMAX3,
  FMIN3,
  SMIN3,
  UMIN3,
  FMED3,
  SMED3,
  UMED3,
  FMAXIMUM3,
  FMINIMUM3,
  FDOT2,
  URECIP,
  DIV_SCALE,
  DIV_FMAS,
  DIV_FIXUP,
  // For emitting ISD::FMAD when f32 denormals are enabled because mac/mad is
  // treated as an illegal operation.
  FMAD_FTZ,

  // RCP, RSQ - For f32, 1 ULP max error, no denormal handling.
  // For f64, max error 2^29 ULP, handles denormals.
  RCP,
  RSQ,
  RCP_LEGACY,
  RCP_IFLAG,

  // log2, no denormal handling for f32.
  LOG,

  // exp2, no denormal handling for f32.
  EXP,

  FMUL_LEGACY,
  RSQ_CLAMP,
  FP_CLASS,
  DOT4,
  CARRY,
  BORROW,
  BFE_U32, // Extract range of bits with zero extension to 32 bits.
  BFE_I32, // Extract range of bits with sign extension to 32 bits.
  BFI,     // (src0 & src1) | (~src0 & src2)
  BFM,     // Insert a range of bits into a 32-bit word.
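  // Worked example for the bitfield ops above (illustrative only; assumes the
  // (src, offset, width) operand order of the hardware v_bfe instructions):
  // BFE_U32 with src = 0xABCD1234, offset = 8, width = 8 extracts bits [15:8]
  // and zero-extends them, giving 0x00000012.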
  FFBH_U32, // ctlz with -1 if input is zero.
  FFBH_I32,
  FFBL_B32, // cttz with -1 if input is zero.
  MUL_U24,
  MUL_I24,
  MULHI_U24,
  MULHI_I24,
  MAD_U24,
  MAD_I24,
  MAD_U64_U32,
  MAD_I64_I32,
  PERM,
  TEXTURE_FETCH,
  R600_EXPORT,
  CONST_ADDRESS,
  REGISTER_LOAD,
  REGISTER_STORE,
  SAMPLE,
  SAMPLEB,
  SAMPLED,
  SAMPLEL,

  // These cvt_f32_ubyte* nodes need to remain consecutive and in order.
  CVT_F32_UBYTE0,
  CVT_F32_UBYTE1,
  CVT_F32_UBYTE2,
  CVT_F32_UBYTE3,

  // Convert two f32 values into a single register holding two packed f16
  // values, with round to zero.
  CVT_PKRTZ_F16_F32,
  CVT_PKNORM_I16_F32,
  CVT_PKNORM_U16_F32,
  CVT_PK_I16_I32,
  CVT_PK_U16_U32,

  // Same as the standard node, except the high bits of the resulting integer
  // are known 0.
  FP_TO_FP16,

  /// This node is for VLIW targets and it is used to represent a vector
  /// that is stored in consecutive registers with the same channel.
  /// For example:
  ///   |X  |Y|Z|W|
  /// T0|v.x| | | |
  /// T1|v.y| | | |
  /// T2|v.z| | | |
  /// T3|v.w| | | |
  BUILD_VERTICAL_VECTOR,
  /// Pointer to the start of the shader's constant data.
  CONST_DATA_PTR,
  PC_ADD_REL_OFFSET,
  LDS,
  FPTRUNC_ROUND_UPWARD,
  FPTRUNC_ROUND_DOWNWARD,

  DUMMY_CHAIN,
  FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LOAD_D16_HI,
  LOAD_D16_LO,
  LOAD_D16_HI_I8,
  LOAD_D16_HI_U8,
  LOAD_D16_LO_I8,
  LOAD_D16_LO_U8,

  STORE_MSKOR,
  LOAD_CONSTANT,
  TBUFFER_STORE_FORMAT,
  TBUFFER_STORE_FORMAT_D16,
  TBUFFER_LOAD_FORMAT,
  TBUFFER_LOAD_FORMAT_D16,
  DS_ORDERED_COUNT,
  ATOMIC_CMP_SWAP,
  ATOMIC_LOAD_FMIN,
  ATOMIC_LOAD_FMAX,
  BUFFER_LOAD,
  BUFFER_LOAD_UBYTE,
  BUFFER_LOAD_USHORT,
  BUFFER_LOAD_BYTE,
  BUFFER_LOAD_SHORT,
  BUFFER_LOAD_FORMAT,
  BUFFER_LOAD_FORMAT_TFE,
  BUFFER_LOAD_FORMAT_D16,
  SBUFFER_LOAD,
  BUFFER_STORE,
  BUFFER_STORE_BYTE,
  BUFFER_STORE_SHORT,
  BUFFER_STORE_FORMAT,
  BUFFER_STORE_FORMAT_D16,
  BUFFER_ATOMIC_SWAP,
  BUFFER_ATOMIC_ADD,
  BUFFER_ATOMIC_SUB,
  BUFFER_ATOMIC_SMIN,
  BUFFER_ATOMIC_UMIN,
  BUFFER_ATOMIC_SMAX,
  BUFFER_ATOMIC_UMAX,
  BUFFER_ATOMIC_AND,
  BUFFER_ATOMIC_OR,
  BUFFER_ATOMIC_XOR,
  BUFFER_ATOMIC_INC,
  BUFFER_ATOMIC_DEC,
  BUFFER_ATOMIC_CMPSWAP,
  BUFFER_ATOMIC_CSUB,
  BUFFER_ATOMIC_FADD,
  BUFFER_ATOMIC_FMIN,
  BUFFER_ATOMIC_FMAX,

  LAST_AMDGPU_ISD_NUMBER
};

} // End namespace AMDGPUISD

} // End namespace llvm

#endif