Lines Matching full:opcode
104 static BranchPredicate getBranchPredicate(unsigned Opcode);
133 unsigned Opcode) const;
136 unsigned Opcode) const;
139 unsigned Opcode, bool Swap = false) const;
142 unsigned Opcode,
158 unsigned Opcode,
295 // Splits a V_MOV_B64_DPP_PSEUDO opcode into a pair of v_mov_b32_dpp
303 // Returns an opcode that can be used to move a value to a \p DstRC
412 bool isSALU(uint16_t Opcode) const {
413 return get(Opcode).TSFlags & SIInstrFlags::SALU;
420 bool isVALU(uint16_t Opcode) const {
421 return get(Opcode).TSFlags & SIInstrFlags::VALU;
428 bool isImage(uint16_t Opcode) const {
429 return isMIMG(Opcode) || isVSAMPLE(Opcode) || isVIMAGE(Opcode);
436 bool isVMEM(uint16_t Opcode) const {
437 return isMUBUF(Opcode) || isMTBUF(Opcode) || isImage(Opcode);
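
Almost every predicate in this stretch of the header follows the same pattern: look up the opcode's instruction descriptor with get(Opcode) and test one bit of its target-specific TSFlags, or OR a few such predicates together (isImage, isVMEM). Below is a minimal self-contained sketch of that bitmask pattern; the flag constants and the opcode-to-flags table are invented for illustration and are not the real SIInstrFlags values or AMDGPU opcodes.

#include <cstdint>
#include <unordered_map>

// Toy stand-ins for SIInstrFlags bits; the real values live elsewhere in the backend.
constexpr uint64_t FLAG_SALU  = 1ull << 0;
constexpr uint64_t FLAG_VALU  = 1ull << 1;
constexpr uint64_t FLAG_MUBUF = 1ull << 2;
constexpr uint64_t FLAG_MTBUF = 1ull << 3;
constexpr uint64_t FLAG_MIMG  = 1ull << 4;

// Toy opcode -> TSFlags table standing in for get(Opcode).TSFlags.
static const std::unordered_map<unsigned, uint64_t> TSFlagsTable = {
    {0 /* a scalar ALU op */, FLAG_SALU},
    {1 /* a vector ALU op */, FLAG_VALU},
    {2 /* a buffer load   */, FLAG_MUBUF},
};

static uint64_t getTSFlags(unsigned Opcode) {
  auto It = TSFlagsTable.find(Opcode);
  return It == TSFlagsTable.end() ? 0 : It->second;
}

// Single-flag predicate, mirroring isSALU()/isVALU() above.
static bool isVALU(unsigned Opcode) { return getTSFlags(Opcode) & FLAG_VALU; }

// Composite predicate, mirroring isVMEM() = isMUBUF() || isMTBUF() || isImage().
static bool isVMEM(unsigned Opcode) {
  return getTSFlags(Opcode) & (FLAG_MUBUF | FLAG_MTBUF | FLAG_MIMG);
}

Keeping one bit per instruction class means each query is just a descriptor lookup plus a bitwise AND, which is why the header can afford to expose so many of these predicates.
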
444 bool isSOP1(uint16_t Opcode) const {
445 return get(Opcode).TSFlags & SIInstrFlags::SOP1;
452 bool isSOP2(uint16_t Opcode) const {
453 return get(Opcode).TSFlags & SIInstrFlags::SOP2;
460 bool isSOPC(uint16_t Opcode) const {
461 return get(Opcode).TSFlags & SIInstrFlags::SOPC;
468 bool isSOPK(uint16_t Opcode) const {
469 return get(Opcode).TSFlags & SIInstrFlags::SOPK;
476 bool isSOPP(uint16_t Opcode) const {
477 return get(Opcode).TSFlags & SIInstrFlags::SOPP;
484 bool isPacked(uint16_t Opcode) const {
485 return get(Opcode).TSFlags & SIInstrFlags::IsPacked;
492 bool isVOP1(uint16_t Opcode) const {
493 return get(Opcode).TSFlags & SIInstrFlags::VOP1;
500 bool isVOP2(uint16_t Opcode) const {
501 return get(Opcode).TSFlags & SIInstrFlags::VOP2;
508 bool isVOP3(uint16_t Opcode) const {
509 return get(Opcode).TSFlags & SIInstrFlags::VOP3;
516 bool isSDWA(uint16_t Opcode) const {
517 return get(Opcode).TSFlags & SIInstrFlags::SDWA;
524 bool isVOPC(uint16_t Opcode) const {
525 return get(Opcode).TSFlags & SIInstrFlags::VOPC;
532 bool isMUBUF(uint16_t Opcode) const {
533 return get(Opcode).TSFlags & SIInstrFlags::MUBUF;
540 bool isMTBUF(uint16_t Opcode) const {
541 return get(Opcode).TSFlags & SIInstrFlags::MTBUF;
548 bool isSMRD(uint16_t Opcode) const {
549 return get(Opcode).TSFlags & SIInstrFlags::SMRD;
558 bool isDS(uint16_t Opcode) const {
559 return get(Opcode).TSFlags & SIInstrFlags::DS;
566 bool isLDSDMA(uint16_t Opcode) {
567 return isVALU(Opcode) && (isMUBUF(Opcode) || isFLAT(Opcode));
574 bool isGWS(uint16_t Opcode) const {
575 return get(Opcode).TSFlags & SIInstrFlags::GWS;
578 bool isAlwaysGDS(uint16_t Opcode) const;
584 bool isMIMG(uint16_t Opcode) const {
585 return get(Opcode).TSFlags & SIInstrFlags::MIMG;
592 bool isVIMAGE(uint16_t Opcode) const {
593 return get(Opcode).TSFlags & SIInstrFlags::VIMAGE;
600 bool isVSAMPLE(uint16_t Opcode) const {
601 return get(Opcode).TSFlags & SIInstrFlags::VSAMPLE;
608 bool isGather4(uint16_t Opcode) const {
609 return get(Opcode).TSFlags & SIInstrFlags::Gather4;
623 bool isSegmentSpecificFLAT(uint16_t Opcode) const {
624 auto Flags = get(Opcode).TSFlags;
632 bool isFLATGlobal(uint16_t Opcode) const {
633 return get(Opcode).TSFlags & SIInstrFlags::FlatGlobal;
640 bool isFLATScratch(uint16_t Opcode) const {
641 return get(Opcode).TSFlags & SIInstrFlags::FlatScratch;
645 bool isFLAT(uint16_t Opcode) const {
646 return get(Opcode).TSFlags & SIInstrFlags::FLAT;
661 bool isEXP(uint16_t Opcode) const {
662 return get(Opcode).TSFlags & SIInstrFlags::EXP;
669 bool isAtomicNoRet(uint16_t Opcode) const {
670 return get(Opcode).TSFlags & SIInstrFlags::IsAtomicNoRet;
677 bool isAtomicRet(uint16_t Opcode) const {
678 return get(Opcode).TSFlags & SIInstrFlags::IsAtomicRet;
686 bool isAtomic(uint16_t Opcode) const {
687 return get(Opcode).TSFlags & (SIInstrFlags::IsAtomicRet |
699 bool isWQM(uint16_t Opcode) const {
700 return get(Opcode).TSFlags & SIInstrFlags::WQM;
707 bool isDisableWQM(uint16_t Opcode) const {
708 return get(Opcode).TSFlags & SIInstrFlags::DisableWQM;
722 bool isVGPRSpill(uint16_t Opcode) const {
723 return Opcode != AMDGPU::SI_SPILL_S32_TO_VGPR &&
724 Opcode != AMDGPU::SI_RESTORE_S32_FROM_VGPR &&
725 (isSpill(Opcode) && isVALU(Opcode));
734 bool isSGPRSpill(uint16_t Opcode) const {
735 return Opcode == AMDGPU::SI_SPILL_S32_TO_VGPR ||
736 Opcode == AMDGPU::SI_RESTORE_S32_FROM_VGPR ||
737 (isSpill(Opcode) && isSALU(Opcode));
740 bool isSpill(uint16_t Opcode) const {
741 return get(Opcode).TSFlags & SIInstrFlags::Spill;
748 static bool isWWMRegSpillOpcode(uint16_t Opcode) {
749 return Opcode == AMDGPU::SI_SPILL_WWM_V32_SAVE ||
750 Opcode == AMDGPU::SI_SPILL_WWM_AV32_SAVE ||
751 Opcode == AMDGPU::SI_SPILL_WWM_V32_RESTORE ||
752 Opcode == AMDGPU::SI_SPILL_WWM_AV32_RESTORE;
755 static bool isChainCallOpcode(uint64_t Opcode) {
756 return Opcode == AMDGPU::SI_CS_CHAIN_TC_W32 ||
757 Opcode == AMDGPU::SI_CS_CHAIN_TC_W64;
764 bool isDPP(uint16_t Opcode) const {
765 return get(Opcode).TSFlags & SIInstrFlags::DPP;
772 bool isTRANS(uint16_t Opcode) const {
773 return get(Opcode).TSFlags & SIInstrFlags::TRANS;
780 bool isVOP3P(uint16_t Opcode) const {
781 return get(Opcode).TSFlags & SIInstrFlags::VOP3P;
788 bool isVINTRP(uint16_t Opcode) const {
789 return get(Opcode).TSFlags & SIInstrFlags::VINTRP;
796 bool isMAI(uint16_t Opcode) const {
797 return get(Opcode).TSFlags & SIInstrFlags::IsMAI;
813 bool isWMMA(uint16_t Opcode) const {
814 return get(Opcode).TSFlags & SIInstrFlags::IsWMMA;
825 bool isSWMMAC(uint16_t Opcode) const {
826 return get(Opcode).TSFlags & SIInstrFlags::IsSWMMAC;
829 bool isDOT(uint16_t Opcode) const {
830 return get(Opcode).TSFlags & SIInstrFlags::IsDOT;
837 bool isLDSDIR(uint16_t Opcode) const {
838 return get(Opcode).TSFlags & SIInstrFlags::LDSDIR;
845 bool isVINTERP(uint16_t Opcode) const {
846 return get(Opcode).TSFlags & SIInstrFlags::VINTERP;
863 static bool sopkIsZext(unsigned Opcode) {
864 return Opcode == AMDGPU::S_CMPK_EQ_U32 || Opcode == AMDGPU::S_CMPK_LG_U32 ||
865 Opcode == AMDGPU::S_CMPK_GT_U32 || Opcode == AMDGPU::S_CMPK_GE_U32 ||
866 Opcode == AMDGPU::S_CMPK_LT_U32 || Opcode == AMDGPU::S_CMPK_LE_U32 ||
867 Opcode == AMDGPU::S_GETREG_B32;
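
sopkIsZext separates the SOPK opcodes by how the hardware extends their 16-bit immediate: the unsigned compares and S_GETREG_B32 zero-extend it, while the remaining SOPK forms sign-extend it. That distinction matters when deciding whether a 32-bit constant can be folded into the SIMM16 field. A small standalone illustration of the legality check follows (the helper name is made up for the example; it is not LLVM API).

#include <cstdint>

// Does a 32-bit immediate survive a round trip through a 16-bit SOPK field?
// Zero-extended fields accept 0..0xFFFF; sign-extended fields accept
// -32768..32767. This mirrors the split that sopkIsZext() draws.
bool fitsInSOPKImm(uint32_t Imm, bool IsZeroExtended) {
  if (IsZeroExtended)
    return Imm == static_cast<uint32_t>(static_cast<uint16_t>(Imm));
  return static_cast<int32_t>(Imm) ==
         static_cast<int32_t>(static_cast<int16_t>(Imm));
}
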
876 bool isScalarStore(uint16_t Opcode) const {
877 return get(Opcode).TSFlags & SIInstrFlags::SCALAR_STORE;
884 bool isFixedSize(uint16_t Opcode) const {
885 return get(Opcode).TSFlags & SIInstrFlags::FIXED_SIZE;
892 bool hasFPClamp(uint16_t Opcode) const {
893 return get(Opcode).TSFlags & SIInstrFlags::FPClamp;
912 bool usesFPDPRounding(uint16_t Opcode) const {
913 return get(Opcode).TSFlags & SIInstrFlags::FPDPRounding;
920 bool isFPAtomic(uint16_t Opcode) const {
921 return get(Opcode).TSFlags & SIInstrFlags::FPAtomic;
928 // Check to see if opcode is for a barrier start. Pre gfx12 this is just the
931 bool isBarrierStart(unsigned Opcode) const {
932 return Opcode == AMDGPU::S_BARRIER ||
933 Opcode == AMDGPU::S_BARRIER_SIGNAL_M0 ||
934 Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_M0 ||
935 Opcode == AMDGPU::S_BARRIER_SIGNAL_IMM ||
936 Opcode == AMDGPU::S_BARRIER_SIGNAL_ISFIRST_IMM;
939 bool isBarrier(unsigned Opcode) const {
940 return isBarrierStart(Opcode) || Opcode == AMDGPU::S_BARRIER_WAIT ||
941 Opcode == AMDGPU::S_BARRIER_INIT_M0 ||
942 Opcode == AMDGPU::S_BARRIER_INIT_IMM ||
943 Opcode == AMDGPU::S_BARRIER_JOIN_IMM ||
944 Opcode == AMDGPU::S_BARRIER_LEAVE ||
945 Opcode == AMDGPU::DS_GWS_INIT ||
946 Opcode == AMDGPU::DS_GWS_BARRIER;
949 static bool isF16PseudoScalarTrans(unsigned Opcode) {
950 return Opcode == AMDGPU::V_S_EXP_F16_e64 ||
951 Opcode == AMDGPU::V_S_LOG_F16_e64 ||
952 Opcode == AMDGPU::V_S_RCP_F16_e64 ||
953 Opcode == AMDGPU::V_S_RSQ_F16_e64 ||
954 Opcode == AMDGPU::V_S_SQRT_F16_e64;
961 bool doesNotReadTiedSource(uint16_t Opcode) const {
962 return get(Opcode).TSFlags & SIInstrFlags::TiedSourceNotRead;
965 static unsigned getNonSoftWaitcntOpcode(unsigned Opcode) {
966 switch (Opcode) {
984 return Opcode;
988 bool isWaitcnt(unsigned Opcode) const {
989 switch (getNonSoftWaitcntOpcode(Opcode)) {
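
getNonSoftWaitcntOpcode maps the "soft" wait-count pseudos back to their ordinary counterparts, and isWaitcnt goes through it so that both forms are recognized. A toy sketch of that canonicalize-then-compare pattern; the opcode names are placeholders, not the AMDGPU enum members.

// Placeholder opcode values; the real ones are AMDGPU enum members such as
// S_WAITCNT and its soft pseudo form.
enum ToyOpcode : unsigned { TOY_S_WAITCNT, TOY_S_WAITCNT_soft, TOY_S_NOP };

// Canonicalize a soft form to the non-soft opcode, leaving anything else alone.
unsigned toyGetNonSoftWaitcntOpcode(unsigned Opcode) {
  switch (Opcode) {
  case TOY_S_WAITCNT_soft:
    return TOY_S_WAITCNT;
  default:
    return Opcode; // already non-soft, or not a waitcnt at all
  }
}

// Two wait-count opcodes are the same kind iff they canonicalize identically.
bool toyIsSameWaitcntKind(unsigned A, unsigned B) {
  return toyGetNonSoftWaitcntOpcode(A) == toyGetNonSoftWaitcntOpcode(B);
}
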
1107 bool hasVALU32BitEncoding(unsigned Opcode) const;
1116 bool hasModifiers(unsigned Opcode) const;
1151 // instruction opcode.
1152 unsigned getOpSize(uint16_t Opcode, unsigned OpNo) const {
1153 const MCOperandInfo &OpInfo = get(Opcode).operands()[OpNo];
1239 /// Replace the instruction's opcode with the equivalent VALU
1240 /// opcode. This function will also move the users of MachineInstructions
1292 /// that corresponds to the specified pseudo or native opcode.
1293 const MCInstrDesc &getMCOpcodeFromPseudo(unsigned Opcode) const {
1294 return get(pseudoToMCOpcode(Opcode));
1375 static bool isKillTerminator(unsigned Opcode);
1376 const MCInstrDesc &getKillTerminatorFromPseudo(unsigned Opcode) const;
1400 /// \brief Return a target-specific opcode if Opcode is a pseudo instruction.
1401 /// Return -1 if the target-specific opcode for the pseudo instruction does
1402 /// not exist. If Opcode is not a pseudo instruction, this is identity.
1403 int pseudoToMCOpcode(int Opcode) const;
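
Per the comment above, pseudoToMCOpcode returns -1 when a pseudo has no target-specific encoding on the current subtarget, so callers must check the result before emitting anything (getMCOpcodeFromPseudo earlier in the header assumes the translation succeeds). A sketch of that caller-side pattern, assuming backend-internal context since SIInstrInfo.h is not a public header; emitReal() is a hypothetical placeholder.

#include "SIInstrInfo.h" // internal AMDGPU backend header (assumed build context)

// Returns false when the pseudo cannot be encoded on this subtarget.
bool lowerPseudoOpcode(const llvm::SIInstrInfo &TII, unsigned PseudoOpc) {
  int MCOpc = TII.pseudoToMCOpcode(PseudoOpc);
  if (MCOpc == -1)
    return false; // no target-specific opcode exists
  // emitReal(static_cast<unsigned>(MCOpc)); // hypothetical follow-up
  return true;
}
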
1406 /// Return true if this opcode should not be used by codegen.
1495 int getVOPe64(uint16_t Opcode);
1498 int getVOPe32(uint16_t Opcode);
1501 int getSDWAOp(uint16_t Opcode);
1504 int getDPPOp32(uint16_t Opcode);
1507 int getDPPOp64(uint16_t Opcode);
1510 int getBasicFromSDWAOp(uint16_t Opcode);
1513 int getCommuteRev(uint16_t Opcode);
1516 int getCommuteOrig(uint16_t Opcode);
1519 int getAddr64Inst(uint16_t Opcode);
1521 /// Check if \p Opcode is an Addr64 opcode.
1523 /// \returns \p Opcode if it is an Addr64 opcode, otherwise -1.
1525 int getIfAddr64Inst(uint16_t Opcode);
1528 int getSOPKOp(uint16_t Opcode);
1530 /// \returns SADDR form of a FLAT Global instruction given an \p Opcode
1533 int getGlobalSaddrOp(uint16_t Opcode);
1535 /// \returns VADDR form of a FLAT Global instruction given an \p Opcode
1538 int getGlobalVaddrOp(uint16_t Opcode);
1541 int getVCMPXNoSDstOp(uint16_t Opcode);
1544 /// given an \p Opcode of an SS (SADDR) form.
1546 int getFlatScratchInstSTfromSS(uint16_t Opcode);
1548 /// \returns SV (VADDR) form of a FLAT Scratch instruction given an \p Opcode
1551 int getFlatScratchInstSVfromSVS(uint16_t Opcode);
1553 /// \returns SS (SADDR) form of a FLAT Scratch instruction given an \p Opcode
1556 int getFlatScratchInstSSfromSV(uint16_t Opcode);
1558 /// \returns SV (VADDR) form of a FLAT Scratch instruction given an \p Opcode
1561 int getFlatScratchInstSVfromSS(uint16_t Opcode);
1565 int getMFMAEarlyClobberOp(uint16_t Opcode);
1569 int getVCMPXOpFromVCMP(uint16_t Opcode);
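
The getters from getVOPe64 down to getVCMPXOpFromVCMP are opcode-mapping helpers declared in the AMDGPU namespace; each returns the related opcode or -1 when no such form exists (the comment on getIfAddr64Inst spells this out), so the result always needs a check. A sketch of typical use, again assuming backend-internal context because SIInstrInfo.h is not a public header.

#include <cstdint>
#include "SIInstrInfo.h" // internal AMDGPU backend header (assumed build context)

// Prefer the 32-bit VOP encoding when one exists; otherwise keep the original.
unsigned preferVOP32(uint16_t Opcode) {
  int Op32 = llvm::AMDGPU::getVOPe32(Opcode);
  return Op32 == -1 ? Opcode : static_cast<unsigned>(Op32);
}
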