Lines Matching refs: AMDGPU (llvm/lib/Target/AMDGPU/GCNHazardRecognizer.cpp)

63   MaxLookAhead = MF.getRegInfo().isPhysRegUsed(AMDGPU::AGPR0) ? 19 : 5;  in GCNHazardRecognizer()
81 return Opcode == AMDGPU::V_DIV_FMAS_F32_e64 || Opcode == AMDGPU::V_DIV_FMAS_F64_e64; in isDivFMas()
85 return Opcode == AMDGPU::S_GETREG_B32; in isSGetReg()
90 case AMDGPU::S_SETREG_B32: in isSSetReg()
91 case AMDGPU::S_SETREG_B32_mode: in isSSetReg()
92 case AMDGPU::S_SETREG_IMM32_B32: in isSSetReg()
93 case AMDGPU::S_SETREG_IMM32_B32_mode: in isSSetReg()
100 return Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32; in isRWLane()
104 return Opcode == AMDGPU::S_RFE_B64; in isRFE()
109 case AMDGPU::S_MOVRELS_B32: in isSMovRel()
110 case AMDGPU::S_MOVRELS_B64: in isSMovRel()
111 case AMDGPU::S_MOVRELD_B32: in isSMovRel()
112 case AMDGPU::S_MOVRELD_B64: in isSMovRel()
120 return AMDGPU::getMAIIsDGEMM(Opcode); in isDGEMM()
128 Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 || in isXDL()
129 Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64) in isXDL()
135 return AMDGPU::getMAIIsGFX940XDL(Opcode); in isXDL()
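
Context for the two queries above: isDGEMM and isXDL both defer to TableGen-generated MAI tables once the plain accumulator moves are screened out. A sketch of isXDL assembled from these lines (the early-outs are paraphrased, not copied):

    static bool isXDL(const GCNSubtarget &ST, const MachineInstr &MI) {
      unsigned Opcode = MI.getOpcode();
      // ACCVGPR moves are MAI-encoded but are not XDL ops.
      if (!SIInstrInfo::isMAI(MI) || isDGEMM(Opcode) ||
          Opcode == AMDGPU::V_ACCVGPR_WRITE_B32_e64 ||
          Opcode == AMDGPU::V_ACCVGPR_READ_B32_e64)
        return false;
      if (!ST.hasGFX940Insts())
        return true;
      return AMDGPU::getMAIIsGFX940XDL(Opcode);
    }
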
144 case AMDGPU::S_SENDMSG: in isSendMsgTraceDataOrGDS()
145 case AMDGPU::S_SENDMSGHALT: in isSendMsgTraceDataOrGDS()
146 case AMDGPU::S_TTRACEDATA: in isSendMsgTraceDataOrGDS()
149 case AMDGPU::DS_NOP: in isSendMsgTraceDataOrGDS()
150 case AMDGPU::DS_PERMUTE_B32: in isSendMsgTraceDataOrGDS()
151 case AMDGPU::DS_BPERMUTE_B32: in isSendMsgTraceDataOrGDS()
155 int GDS = AMDGPU::getNamedOperandIdx(MI.getOpcode(), in isSendMsgTraceDataOrGDS()
156 AMDGPU::OpName::gds); in isSendMsgTraceDataOrGDS()
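
The two lines above are the default case of the switch: any DS instruction with its gds bit set also counts. A sketch of that tail (the operand-validity guard is an assumption, not copied from the file):

    if (TII.isDS(MI.getOpcode())) {
      int GDS = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::gds);
      if (GDS != -1 && MI.getOperand(GDS).getImm())
        return true;
    }
    return false;
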
166 return Opcode == AMDGPU::V_PERMLANE16_B32_e64 || in isPermlane()
167 Opcode == AMDGPU::V_PERMLANE64_B32 || in isPermlane()
168 Opcode == AMDGPU::V_PERMLANEX16_B32_e64 || in isPermlane()
169 Opcode == AMDGPU::V_PERMLANE16_VAR_B32_e64 || in isPermlane()
170 Opcode == AMDGPU::V_PERMLANEX16_VAR_B32_e64; in isPermlane()
180 AMDGPU::OpName::simm16); in getHWReg()
181 return std::get<0>(AMDGPU::Hwreg::HwregEncoding::decode(RegOp->getImm())); in getHWReg()
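
Assembled from the two lines above, the whole helper decodes the simm16 operand of an s_getreg/s_setreg; HwregEncoding::decode yields effectively an (id, offset, size) tuple and only the id is kept:

    static unsigned getHWReg(const SIInstrInfo *TII, const MachineInstr &RegInstr) {
      const MachineOperand *RegOp =
          TII->getNamedOperand(RegInstr, AMDGPU::OpName::simm16);
      return std::get<0>(AMDGPU::Hwreg::HwregEncoding::decode(RegOp->getImm()));
    }
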
240 MI->getOpcode() == AMDGPU::DS_WRITE_ADDTID_B32 || in getHazardType()
241 MI->getOpcode() == AMDGPU::DS_READ_ADDTID_B32)) || in getHazardType()
245 MI->readsRegister(AMDGPU::LDS_DIRECT, /*TRI=*/nullptr))) && in getHazardType()
268 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII.get(AMDGPU::S_NOP)) in insertNoopsInBundle()
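
s_nop encodes "this many extra wait states" in a 3-bit immediate, so larger counts are emitted in chunks. A sketch of the enclosing loop, assuming the file's unsigned Quantity parameter:

    while (Quantity > 0) {
      unsigned Arg = std::min(Quantity, 8u);
      BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII.get(AMDGPU::S_NOP))
          .addImm(Arg - 1);  // imm N waits N+1 states, hence the -1
      Quantity -= Arg;
    }
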
381 MI->getOpcode() == AMDGPU::DS_WRITE_ADDTID_B32 || in PreEmitNoopsCommon()
382 MI->getOpcode() == AMDGPU::DS_READ_ADDTID_B32)) || in PreEmitNoopsCommon()
386 MI->readsRegister(AMDGPU::LDS_DIRECT, /*TRI=*/nullptr))) in PreEmitNoopsCommon()
766 DppExecWaitStates - getWaitStatesSinceDef(AMDGPU::EXEC, IsHazardDefFn, in checkDPPHazards()
781 int WaitStatesNeeded = getWaitStatesSinceDef(AMDGPU::VCC, IsHazardDefFn, in checkDivFMasHazards()
820 int VDataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata); in createsVALUHazard()
833 TII->getNamedOperand(MI, AMDGPU::OpName::soffset); in createsVALUHazard()
836 if (AMDGPU::getRegBitWidth(VDataRCID) > 64 && in createsVALUHazard()
846 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); in createsVALUHazard()
848 AMDGPU::getRegBitWidth(Desc.operands()[SRsrcIdx].RegClass) == 256); in createsVALUHazard()
853 int DataIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdata); in createsVALUHazard()
854 if (AMDGPU::getRegBitWidth(Desc.operands()[DataIdx].RegClass) > 64) in createsVALUHazard()
897 Register Def = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)->getReg(); in checkVALUHazards()
921 if (auto *DstSel = TII->getNamedOperand(MI, AMDGPU::OpName::dst_sel)) in checkVALUHazards()
922 if (DstSel->getImm() == AMDGPU::SDWA::DWORD) in checkVALUHazards()
925 if (!AMDGPU::hasNamedOperand(MI.getOpcode(), AMDGPU::OpName::op_sel) || in checkVALUHazards()
926 !(TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers) in checkVALUHazards()
932 if (auto *Dst = TII->getNamedOperand(MI, AMDGPU::OpName::vdst)) { in checkVALUHazards()
978 if (VALU->readsRegister(AMDGPU::VCC, TRI)) { in checkVALUHazards()
979 UseReg = AMDGPU::VCC; in checkVALUHazards()
987 case AMDGPU::V_READLANE_B32: in checkVALUHazards()
988 case AMDGPU::V_READFIRSTLANE_B32: { in checkVALUHazards()
989 MachineOperand *Src = TII.getNamedOperand(*VALU, AMDGPU::OpName::src0); in checkVALUHazards()
997 case AMDGPU::V_WRITELANE_B32: { in checkVALUHazards()
998 UseReg = AMDGPU::EXEC; in checkVALUHazards()
1056 TII->getNamedOperand(*RWLane, AMDGPU::OpName::src1); in checkRWLaneHazards()
1079 return getHWReg(TII, MI) == AMDGPU::Hwreg::ID_TRAPSTS; in checkRFEHazards()
1090 getWaitStatesSinceDef(AMDGPU::M0, IsHazardFn, ReadM0WaitStates); in checkReadM0Hazards()
1120 MI.modifiesRegister(AMDGPU::EXEC, TRI); in fixVcmpxPermlaneHazards()
1125 return SIInstrInfo::isVALU(MI) && Opc != AMDGPU::V_NOP_e32 && in fixVcmpxPermlaneHazards()
1126 Opc != AMDGPU::V_NOP_e64 && Opc != AMDGPU::V_NOP_sdwa; in fixVcmpxPermlaneHazards()
1136 auto *Src0 = TII->getNamedOperand(*MI, AMDGPU::OpName::src0); in fixVcmpxPermlaneHazards()
1140 TII->get(AMDGPU::V_MOV_B32_e32)) in fixVcmpxPermlaneHazards()
1177 (MI.getOpcode() == AMDGPU::S_WAITCNT && in fixVMEMtoScalarWriteHazards()
1179 (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR && in fixVMEMtoScalarWriteHazards()
1180 AMDGPU::DepCtr::decodeFieldVmVsrc(MI.getOperand(0).getImm()) == 0); in fixVMEMtoScalarWriteHazards()
1189 TII->get(AMDGPU::S_WAITCNT_DEPCTR)) in fixVMEMtoScalarWriteHazards()
1190 .addImm(AMDGPU::DepCtr::encodeFieldVmVsrc(0)); in fixVMEMtoScalarWriteHazards()
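
The fix* routines here and below all share one shape: search backwards with a hazard predicate until either the hazard or an "already expired" instruction (an equivalent-or-stronger wait) is found, then insert the cheapest fence; the later VALU and exec-WAR fixups do the same with the va_vdst and sa_sdst depctr fields. A sketch for the vm_vsrc case, from the lines above:

    auto IsExpiredFn = [](const MachineInstr &MI, int) {
      // A VALU instruction or a sufficient wait already breaks the hazard.
      return SIInstrInfo::isVALU(MI) ||
             (MI.getOpcode() == AMDGPU::S_WAITCNT &&
              !MI.getOperand(0).getImm()) ||
             (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR &&
              AMDGPU::DepCtr::decodeFieldVmVsrc(MI.getOperand(0).getImm()) == 0);
    };
    // Mitigation: wait for outstanding VMEM reads of VGPRs (vm_vsrc == 0).
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
            TII->get(AMDGPU::S_WAITCNT_DEPCTR))
        .addImm(AMDGPU::DepCtr::encodeFieldVmVsrc(0));
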
1204 case AMDGPU::V_READLANE_B32: in fixSMEMtoVectorWriteHazards()
1205 case AMDGPU::V_READFIRSTLANE_B32: in fixSMEMtoVectorWriteHazards()
1206 SDSTName = AMDGPU::OpName::vdst; in fixSMEMtoVectorWriteHazards()
1209 SDSTName = AMDGPU::OpName::sdst; in fixSMEMtoVectorWriteHazards()
1215 const AMDGPU::IsaVersion IV = AMDGPU::getIsaVersion(ST.getCPU()); in fixSMEMtoVectorWriteHazards()
1237 case AMDGPU::S_SETVSKIP: in fixSMEMtoVectorWriteHazards()
1238 case AMDGPU::S_VERSION: in fixSMEMtoVectorWriteHazards()
1239 case AMDGPU::S_WAITCNT_VSCNT: in fixSMEMtoVectorWriteHazards()
1240 case AMDGPU::S_WAITCNT_VMCNT: in fixSMEMtoVectorWriteHazards()
1241 case AMDGPU::S_WAITCNT_EXPCNT: in fixSMEMtoVectorWriteHazards()
1244 case AMDGPU::S_WAITCNT_LGKMCNT: in fixSMEMtoVectorWriteHazards()
1247 (MI.getOperand(0).getReg() == AMDGPU::SGPR_NULL); in fixSMEMtoVectorWriteHazards()
1248 case AMDGPU::S_WAITCNT: { in fixSMEMtoVectorWriteHazards()
1250 AMDGPU::Waitcnt Decoded = AMDGPU::decodeWaitcnt(IV, Imm); in fixSMEMtoVectorWriteHazards()
1276 TII->get(AMDGPU::S_MOV_B32), AMDGPU::SGPR_NULL) in fixSMEMtoVectorWriteHazards()
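
Two details in this fixup are easy to miss: an existing s_waitcnt only discharges the hazard once its decoded lgkmcnt field is zero, and the fallback fix is an s_mov_b32 to the null SGPR, a no-op used purely to break the SMEM-to-vector-write dependency. The s_waitcnt case, from the lines above:

    case AMDGPU::S_WAITCNT: {
      const int64_t Imm = MI.getOperand(0).getImm();
      AMDGPU::Waitcnt Decoded = AMDGPU::decodeWaitcnt(IV, Imm);
      return (Decoded.LgkmCnt == 0);
    }
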
1290 if (!MI->modifiesRegister(AMDGPU::EXEC, TRI)) in fixVcmpxExecWARHazard()
1296 return I.readsRegister(AMDGPU::EXEC, TRI); in fixVcmpxExecWARHazard()
1302 if (TII->getNamedOperand(MI, AMDGPU::OpName::sdst)) in fixVcmpxExecWARHazard()
1308 if (MI.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR && in fixVcmpxExecWARHazard()
1309 AMDGPU::DepCtr::decodeFieldSaSdst(MI.getOperand(0).getImm()) == 0) in fixVcmpxExecWARHazard()
1319 TII->get(AMDGPU::S_WAITCNT_DEPCTR)) in fixVcmpxExecWARHazard()
1320 .addImm(AMDGPU::DepCtr::encodeFieldSaSdst(0)); in fixVcmpxExecWARHazard()
1346 return I.getOpcode() == AMDGPU::S_WAITCNT_VSCNT && in isStoreCountWaitZero()
1347 I.getOperand(0).getReg() == AMDGPU::SGPR_NULL && in isStoreCountWaitZero()
1401 TII->get(AMDGPU::S_WAITCNT_VSCNT)) in fixLdsBranchVmemWARHazard()
1402 .addReg(AMDGPU::SGPR_NULL, RegState::Undef) in fixLdsBranchVmemWARHazard()
1413 const MachineOperand *VDST = TII.getNamedOperand(*MI, AMDGPU::OpName::vdst); in fixLdsDirectVALUHazard()
1446 TII.getNamedOperand(*MI, AMDGPU::OpName::waitvdst); in fixLdsDirectVALUHazard()
1456 const MachineOperand *VDST = TII.getNamedOperand(*MI, AMDGPU::OpName::vdst); in fixLdsDirectVMEMHazard()
1470 (I.getOpcode() == AMDGPU::S_WAITCNT && !I.getOperand(0).getImm()) || in fixLdsDirectVMEMHazard()
1471 (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR && in fixLdsDirectVMEMHazard()
1472 AMDGPU::DepCtr::decodeFieldVmVsrc(I.getOperand(0).getImm()) == 0) || in fixLdsDirectVMEMHazard()
1474 !TII.getNamedOperand(I, AMDGPU::OpName::waitvsrc)->getImm()); in fixLdsDirectVMEMHazard()
1482 TII.getNamedOperand(*MI, AMDGPU::OpName::waitvsrc)->setImm(0); in fixLdsDirectVMEMHazard()
1485 TII.get(AMDGPU::S_WAITCNT_DEPCTR)) in fixLdsDirectVMEMHazard()
1486 .addImm(AMDGPU::DepCtr::encodeFieldVmVsrc(0)); in fixLdsDirectVMEMHazard()
1548 (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR && in fixVALUPartialForwardingHazard()
1549 AMDGPU::DepCtr::decodeFieldVaVdst(I.getOperand(0).getImm()) == 0)) in fixVALUPartialForwardingHazard()
1563 if (!State.DefPos.empty() && I.modifiesRegister(AMDGPU::EXEC, &TRI)) { in fixVALUPartialForwardingHazard()
1635 TII.get(AMDGPU::S_WAITCNT_DEPCTR)) in fixVALUPartialForwardingHazard()
1685 (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR && in fixVALUTransUseHazard()
1715 TII.get(AMDGPU::S_WAITCNT_DEPCTR)) in fixVALUTransUseHazard()
1716 .addImm(AMDGPU::DepCtr::encodeFieldVaVdst(0)); in fixVALUTransUseHazard()
1735 TII->getNamedOperand(*MI, AMDGPU::OpName::src0)->getReg(); in fixWMMAHazards()
1737 TII->getNamedOperand(*MI, AMDGPU::OpName::src1)->getReg(); in fixWMMAHazards()
1740 TII->getNamedOperand(I, AMDGPU::OpName::vdst)->getReg(); in fixWMMAHazards()
1749 if (AMDGPU::isGFX12Plus(ST)) { in fixWMMAHazards()
1752 TII->getNamedOperand(*MI, AMDGPU::OpName::src2)->getReg(); in fixWMMAHazards()
1770 BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(AMDGPU::V_NOP_e32)); in fixWMMAHazards()
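
The WMMA check compares the current instruction's sources against the destination of the preceding WMMA, and the mitigation is the single v_nop built on the line above. A sketch of the overlap test, from these lines (the regsOverlap calls are paraphrased from the surrounding code):

    const Register CurSrc0Reg =
        TII->getNamedOperand(*MI, AMDGPU::OpName::src0)->getReg();
    const Register CurSrc1Reg =
        TII->getNamedOperand(*MI, AMDGPU::OpName::src1)->getReg();
    const Register PrevDstReg =
        TII->getNamedOperand(I, AMDGPU::OpName::vdst)->getReg();
    if (TRI->regsOverlap(PrevDstReg, CurSrc0Reg) ||
        TRI->regsOverlap(PrevDstReg, CurSrc1Reg))
      return true;
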
1783 case AMDGPU::V_LSHLREV_B64_e64: in fixShift64HighRegBug()
1784 case AMDGPU::V_LSHRREV_B64_e64: in fixShift64HighRegBug()
1785 case AMDGPU::V_ASHRREV_I64_e64: in fixShift64HighRegBug()
1789 MachineOperand *Amt = TII.getNamedOperand(*MI, AMDGPU::OpName::src0); in fixShift64HighRegBug()
1796 if (!TRI.isVGPR(MRI, AmtReg) || ((AmtReg - AMDGPU::VGPR0) & 7) != 7) in fixShift64HighRegBug()
1799 if (AmtReg != AMDGPU::VGPR255 && MRI.isPhysRegUsed(AmtReg + 1)) in fixShift64HighRegBug()
1802 MachineOperand *Src1 = TII.getNamedOperand(*MI, AMDGPU::OpName::src1); in fixShift64HighRegBug()
1810 static_assert(AMDGPU::VGPR0 + 1 == AMDGPU::VGPR1); in fixShift64HighRegBug()
1813 for (MCRegister Reg : Overlapped ? AMDGPU::VReg_64_Align2RegClass in fixShift64HighRegBug()
1814 : AMDGPU::VGPR_32RegClass) { in fixShift64HighRegBug()
1821 Register NewAmt = Overlapped ? (Register)TRI.getSubReg(NewReg, AMDGPU::sub1) in fixShift64HighRegBug()
1826 NewAmtLo = TRI.getSubReg(NewReg, AMDGPU::sub0); in fixShift64HighRegBug()
1831 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_WAITCNT)) in fixShift64HighRegBug()
1837 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_SWAP_B32), NewAmtLo) in fixShift64HighRegBug()
1841 runOnInstruction(BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_SWAP_B32), NewAmt) in fixShift64HighRegBug()
1848 BuildMI(*MBB, std::next(MI->getIterator()), DL, TII.get(AMDGPU::V_SWAP_B32), in fixShift64HighRegBug()
1854 BuildMI(*MBB, std::next(MI->getIterator()), DL, TII.get(AMDGPU::V_SWAP_B32), in fixShift64HighRegBug()
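
What is being worked around here: on the affected subtarget, the 64-bit shifts appear to misbehave when the shift amount lives in a VGPR whose number is 7 mod 8, so the fix v_swaps the amount into a safe register (preferring a fully overlapping 64-bit pair when available), runs the shift, then swaps back afterwards. The trigger test, from the lines above:

    MachineOperand *Amt = TII.getNamedOperand(*MI, AMDGPU::OpName::src0);
    Register AmtReg = Amt->getReg();
    // Only VGPRs congruent to 7 mod 8 (v7, v15, v23, ...) are affected.
    if (!TRI.isVGPR(MRI, AmtReg) || ((AmtReg - AMDGPU::VGPR0) & 7) != 7)
      return false;
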
1888 const auto *Offset = TII->getNamedOperand(*MI, AMDGPU::OpName::offset); in checkNSAtoVMEMHazard()
1895 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(I.getOpcode()); in checkNSAtoVMEMHazard()
1896 return Info->MIMGEncoding == AMDGPU::MIMGEncGfx10NSA && in checkNSAtoVMEMHazard()
1910 if (MI->getOpcode() != AMDGPU::S_DENORM_MODE) in checkFPAtomicToDenormModeHazard()
1924 case AMDGPU::S_WAITCNT: in checkFPAtomicToDenormModeHazard()
1925 case AMDGPU::S_WAITCNT_VSCNT: in checkFPAtomicToDenormModeHazard()
1926 case AMDGPU::S_WAITCNT_VMCNT: in checkFPAtomicToDenormModeHazard()
1927 case AMDGPU::S_WAITCNT_EXPCNT: in checkFPAtomicToDenormModeHazard()
1928 case AMDGPU::S_WAITCNT_LGKMCNT: in checkFPAtomicToDenormModeHazard()
1929 case AMDGPU::S_WAIT_IDLE: in checkFPAtomicToDenormModeHazard()
1986 if (Opc != AMDGPU::V_ACCVGPR_READ_B32_e64) { // MFMA or v_accvgpr_write in checkMAIHazards908()
1992 getWaitStatesSinceDef(AMDGPU::EXEC, IsVALUFn, MaxWaitStates); in checkMAIHazards908()
2016 if (Op.isDef() && Opc != AMDGPU::V_ACCVGPR_WRITE_B32_e64) in checkMAIHazards908()
2046 int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); in checkMAIHazards908()
2050 } else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64) { in checkMAIHazards908()
2060 } else if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) { in checkMAIHazards908()
2079 if (MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64) in checkMAIHazards908()
2091 else if (Opc == AMDGPU::V_ACCVGPR_READ_B32_e64) in checkMAIHazards908()
2102 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64) { in checkMAIHazards908()
2114 Register Reg = TII.getNamedOperand(MI, AMDGPU::OpName::src2)->getReg(); in checkMAIHazards908()
2195 getWaitStatesSinceDef(AMDGPU::EXEC, IsLegacyVALUFn, in checkMAIHazards90A()
2199 int SrcCIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); in checkMAIHazards90A()
2253 if ((Opc == AMDGPU::V_MFMA_F64_4X4X4F64_e64 || in checkMAIHazards90A()
2254 Opc == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64) && in checkMAIHazards90A()
2255 (Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_e64 || in checkMAIHazards90A()
2256 Opc1 == AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64)) in checkMAIHazards90A()
2263 case AMDGPU::V_MFMA_F64_16X16X4F64_e64: in checkMAIHazards90A()
2264 case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64: in checkMAIHazards90A()
2265 case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64: in checkMAIHazards90A()
2266 case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64: in checkMAIHazards90A()
2270 case AMDGPU::V_MFMA_F64_4X4X4F64_e64: in checkMAIHazards90A()
2271 case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64: in checkMAIHazards90A()
2315 case AMDGPU::V_MFMA_F64_16X16X4F64_e64: in checkMAIHazards90A()
2316 case AMDGPU::V_MFMA_F64_16X16X4F64_vgprcd_e64: in checkMAIHazards90A()
2317 case AMDGPU::V_MFMA_F64_16X16X4F64_mac_e64: in checkMAIHazards90A()
2318 case AMDGPU::V_MFMA_F64_16X16X4F64_mac_vgprcd_e64: in checkMAIHazards90A()
2321 case AMDGPU::V_MFMA_F64_4X4X4F64_e64: in checkMAIHazards90A()
2322 case AMDGPU::V_MFMA_F64_4X4X4F64_vgprcd_e64: in checkMAIHazards90A()
2377 return MI.getOpcode() == AMDGPU::V_ACCVGPR_READ_B32_e64; in checkMAILdStHazards()
2398 if (MI.getOpcode() != AMDGPU::V_ACCVGPR_READ_B32_e64 && in checkMAILdStHazards()
2399 MI.getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64) in checkMAILdStHazards()
2503 int SrcCIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), in checkMAIVALUHazards()
2504 AMDGPU::OpName::src2); in checkMAIVALUHazards()
2613 if ((Opc == AMDGPU::V_FMA_F64_e64 || in checkMAIVALUHazards()
2614 Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64 || in checkMAIVALUHazards()
2615 Opc == AMDGPU::V_FMAC_F64_dpp) && in checkMAIVALUHazards()
2704 TII.getNamedOperand(MI, AMDGPU::OpName::src2); in checkMAIVALUHazards()
2780 const MachineOperand *SDSTOp = TII.getNamedOperand(*MI, AMDGPU::OpName::sdst); in fixVALUMaskWriteHazard()
2785 if (HazardReg == AMDGPU::EXEC || in fixVALUMaskWriteHazard()
2786 HazardReg == AMDGPU::EXEC_LO || in fixVALUMaskWriteHazard()
2787 HazardReg == AMDGPU::EXEC_HI || in fixVALUMaskWriteHazard()
2788 HazardReg == AMDGPU::M0) in fixVALUMaskWriteHazard()
2793 case AMDGPU::V_ADDC_U32_e32: in fixVALUMaskWriteHazard()
2794 case AMDGPU::V_ADDC_U32_dpp: in fixVALUMaskWriteHazard()
2795 case AMDGPU::V_CNDMASK_B16_e32: in fixVALUMaskWriteHazard()
2796 case AMDGPU::V_CNDMASK_B16_dpp: in fixVALUMaskWriteHazard()
2797 case AMDGPU::V_CNDMASK_B32_e32: in fixVALUMaskWriteHazard()
2798 case AMDGPU::V_CNDMASK_B32_dpp: in fixVALUMaskWriteHazard()
2799 case AMDGPU::V_DIV_FMAS_F32_e64: in fixVALUMaskWriteHazard()
2800 case AMDGPU::V_DIV_FMAS_F64_e64: in fixVALUMaskWriteHazard()
2801 case AMDGPU::V_SUBB_U32_e32: in fixVALUMaskWriteHazard()
2802 case AMDGPU::V_SUBB_U32_dpp: in fixVALUMaskWriteHazard()
2803 case AMDGPU::V_SUBBREV_U32_e32: in fixVALUMaskWriteHazard()
2804 case AMDGPU::V_SUBBREV_U32_dpp: in fixVALUMaskWriteHazard()
2806 return HazardReg == AMDGPU::VCC || in fixVALUMaskWriteHazard()
2807 HazardReg == AMDGPU::VCC_LO || in fixVALUMaskWriteHazard()
2808 HazardReg == AMDGPU::VCC_HI; in fixVALUMaskWriteHazard()
2809 case AMDGPU::V_ADDC_U32_e64: in fixVALUMaskWriteHazard()
2810 case AMDGPU::V_ADDC_U32_e64_dpp: in fixVALUMaskWriteHazard()
2811 case AMDGPU::V_CNDMASK_B16_e64: in fixVALUMaskWriteHazard()
2812 case AMDGPU::V_CNDMASK_B16_e64_dpp: in fixVALUMaskWriteHazard()
2813 case AMDGPU::V_CNDMASK_B32_e64: in fixVALUMaskWriteHazard()
2814 case AMDGPU::V_CNDMASK_B32_e64_dpp: in fixVALUMaskWriteHazard()
2815 case AMDGPU::V_SUBB_U32_e64: in fixVALUMaskWriteHazard()
2816 case AMDGPU::V_SUBB_U32_e64_dpp: in fixVALUMaskWriteHazard()
2817 case AMDGPU::V_SUBBREV_U32_e64: in fixVALUMaskWriteHazard()
2818 case AMDGPU::V_SUBBREV_U32_e64_dpp: { in fixVALUMaskWriteHazard()
2820 const MachineOperand *SSRCOp = TII.getNamedOperand(I, AMDGPU::OpName::src2); in fixVALUMaskWriteHazard()
2832 if (I.getOpcode() == AMDGPU::S_WAITCNT_DEPCTR && in fixVALUMaskWriteHazard()
2833 AMDGPU::DepCtr::decodeFieldSaSdst(I.getOperand(0).getImm()) == 0) in fixVALUMaskWriteHazard()
2849 if (OpReg == AMDGPU::EXEC || in fixVALUMaskWriteHazard()
2850 OpReg == AMDGPU::EXEC_LO || in fixVALUMaskWriteHazard()
2851 OpReg == AMDGPU::EXEC_HI) in fixVALUMaskWriteHazard()
2855 if (OpReg == AMDGPU::VCC || in fixVALUMaskWriteHazard()
2856 OpReg == AMDGPU::VCC_LO || in fixVALUMaskWriteHazard()
2857 OpReg == AMDGPU::VCC_HI) in fixVALUMaskWriteHazard()
2882 TII.get(AMDGPU::S_WAITCNT_DEPCTR)) in fixVALUMaskWriteHazard()
2883 .addImm(AMDGPU::DepCtr::encodeFieldSaSdst(0)); in fixVALUMaskWriteHazard()
2886 if (MI->getOpcode() == AMDGPU::S_GETPC_B64) { in fixVALUMaskWriteHazard()
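
The mitigation is an s_waitcnt_depctr with sa_sdst set to zero, placed directly after the offending SALU mask write; the S_GETPC_B64 special case exists because that write can sit inside a bundle whose later references then need their offsets adjusted (not visible in the matches above). A sketch of the insert:

    auto NextMI = std::next(MI->getIterator());
    BuildMI(*MI->getParent(), NextMI, MI->getDebugLoc(),
            TII.get(AMDGPU::S_WAITCNT_DEPCTR))
        .addImm(AMDGPU::DepCtr::encodeFieldSaSdst(0));
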
2906 if (EntryMI.getOpcode() == AMDGPU::S_SETPRIO && in ensureEntrySetPrio()
2911 BuildMI(EntryMBB, EntryMBB.begin(), DebugLoc(), TII.get(AMDGPU::S_SETPRIO)) in ensureEntrySetPrio()
2941 case AMDGPU::S_ENDPGM: in fixRequiredExportPriority()
2942 case AMDGPU::S_ENDPGM_SAVED: in fixRequiredExportPriority()
2943 case AMDGPU::S_ENDPGM_ORDERED_PS_DONE: in fixRequiredExportPriority()
2944 case AMDGPU::SI_RETURN_TO_EPILOG: in fixRequiredExportPriority()
2950 case AMDGPU::S_SETPRIO: { in fixRequiredExportPriority()
2980 if (NextMI->getOpcode() == AMDGPU::S_SETPRIO && in fixRequiredExportPriority()
2983 EndOfShader = NextMI->getOpcode() == AMDGPU::S_ENDPGM; in fixRequiredExportPriority()
2989 BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_SETPRIO)) in fixRequiredExportPriority()
2994 BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_WAITCNT_EXPCNT)) in fixRequiredExportPriority()
2995 .addReg(AMDGPU::SGPR_NULL) in fixRequiredExportPriority()
2999 BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_NOP)).addImm(0); in fixRequiredExportPriority()
3000 BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_NOP)).addImm(0); in fixRequiredExportPriority()
3004 BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_SETPRIO)) in fixRequiredExportPriority()
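
Pulling the tail of this fixup together from the matching lines: after an export, the pass raises priority again, waits for the exports with s_waitcnt_expcnt against the null SGPR, and pads with two s_nops since the priority change does not take effect immediately. A sketch, with PostExportPriority standing in for the file's value:

    BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_SETPRIO))
        .addImm(PostExportPriority);
    if (!EndOfShader) {
      // Wait for the exports to drain before running at raised priority.
      BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_WAITCNT_EXPCNT))
          .addReg(AMDGPU::SGPR_NULL)
          .addImm(0);
    }
    // s_setprio takes effect with a short delay; pad it out.
    BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_NOP)).addImm(0);
    BuildMI(*MBB, NextMI, DL, TII.get(AMDGPU::S_NOP)).addImm(0);
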