Lines matching refs: AMDGPU
160 case AMDGPU::V_MAC_F32_e64: in macToMad()
161 return AMDGPU::V_MAD_F32_e64; in macToMad()
162 case AMDGPU::V_MAC_F16_e64: in macToMad()
163 return AMDGPU::V_MAD_F16_e64; in macToMad()
164 case AMDGPU::V_FMAC_F32_e64: in macToMad()
165 return AMDGPU::V_FMA_F32_e64; in macToMad()
166 case AMDGPU::V_FMAC_F16_e64: in macToMad()
167 return AMDGPU::V_FMA_F16_gfx9_e64; in macToMad()
168 case AMDGPU::V_FMAC_F16_t16_e64: in macToMad()
169 return AMDGPU::V_FMA_F16_gfx9_e64; in macToMad()
170 case AMDGPU::V_FMAC_LEGACY_F32_e64: in macToMad()
171 return AMDGPU::V_FMA_LEGACY_F32_e64; in macToMad()
172 case AMDGPU::V_FMAC_F64_e64: in macToMad()
173 return AMDGPU::V_FMA_F64_e64; in macToMad()
175 return AMDGPU::INSTRUCTION_LIST_END; in macToMad()
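
The macToMad() lines above line up into a single opcode-remapping switch. A reconstruction from the matched lines follows; the function signature and the surrounding switch/default structure are assumptions, and it presumes the LLVM AMDGPU backend headers that define these opcodes:

    // Reconstructed from the matched lines: MAC/FMAC (two-address accumulate)
    // opcodes map to their three-address MAD/FMA equivalents.
    static unsigned macToMad(unsigned Opc) {
      switch (Opc) {
      case AMDGPU::V_MAC_F32_e64:
        return AMDGPU::V_MAD_F32_e64;
      case AMDGPU::V_MAC_F16_e64:
        return AMDGPU::V_MAD_F16_e64;
      case AMDGPU::V_FMAC_F32_e64:
        return AMDGPU::V_FMA_F32_e64;
      case AMDGPU::V_FMAC_F16_e64:
      case AMDGPU::V_FMAC_F16_t16_e64:
        return AMDGPU::V_FMA_F16_gfx9_e64;
      case AMDGPU::V_FMAC_LEGACY_F32_e64:
        return AMDGPU::V_FMA_LEGACY_F32_e64;
      case AMDGPU::V_FMAC_F64_e64:
        return AMDGPU::V_FMA_F64_e64;
      }
      return AMDGPU::INSTRUCTION_LIST_END;
    }
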
187 return OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr); in frameIndexMayFold()
191 int SIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr); in frameIndexMayFold()
195 int VIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr); in frameIndexMayFold()
221 case AMDGPU::OPERAND_REG_IMM_V2FP16: in canUseImmWithOpSel()
222 case AMDGPU::OPERAND_REG_IMM_V2BF16: in canUseImmWithOpSel()
223 case AMDGPU::OPERAND_REG_IMM_V2INT16: in canUseImmWithOpSel()
224 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: in canUseImmWithOpSel()
225 case AMDGPU::OPERAND_REG_INLINE_C_V2BF16: in canUseImmWithOpSel()
226 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: in canUseImmWithOpSel()
243 if (AMDGPU::isInlinableLiteralV216(Fold.ImmToFold, OpType)) { in tryFoldImmWithOpSel()
252 if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0)) { in tryFoldImmWithOpSel()
253 ModIdx = AMDGPU::OpName::src0_modifiers; in tryFoldImmWithOpSel()
255 } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1)) { in tryFoldImmWithOpSel()
256 ModIdx = AMDGPU::OpName::src1_modifiers; in tryFoldImmWithOpSel()
258 } else if (OpNo == AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2)) { in tryFoldImmWithOpSel()
259 ModIdx = AMDGPU::OpName::src2_modifiers; in tryFoldImmWithOpSel()
263 ModIdx = AMDGPU::getNamedOperandIdx(Opcode, ModIdx); in tryFoldImmWithOpSel()
277 if (AMDGPU::isInlinableLiteralV216(Imm, OpType)) { in tryFoldImmWithOpSel()
288 if (AMDGPU::isInlinableLiteralV216(Lo, OpType)) { in tryFoldImmWithOpSel()
296 if (AMDGPU::isInlinableLiteralV216(SExt, OpType)) { in tryFoldImmWithOpSel()
304 if (OpType == AMDGPU::OPERAND_REG_IMM_V2INT16 || in tryFoldImmWithOpSel()
305 OpType == AMDGPU::OPERAND_REG_INLINE_AC_V2INT16) { in tryFoldImmWithOpSel()
306 if (AMDGPU::isInlinableLiteralV216(Lo << 16, OpType)) { in tryFoldImmWithOpSel()
314 if (AMDGPU::isInlinableLiteralV216(Swapped, OpType)) { in tryFoldImmWithOpSel()
333 bool IsUAdd = Opcode == AMDGPU::V_PK_ADD_U16; in tryFoldImmWithOpSel()
334 bool IsUSub = Opcode == AMDGPU::V_PK_SUB_U16; in tryFoldImmWithOpSel()
337 AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::clamp); in tryFoldImmWithOpSel()
347 IsUAdd ? AMDGPU::V_PK_SUB_U16 : AMDGPU::V_PK_ADD_U16; in tryFoldImmWithOpSel()
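
The tryFoldImmWithOpSel() matches above probe a 32-bit literal against the packed 16-bit inline-constant encodings in several rewritten forms (whole value, low half, sign-extended low half, low half shifted high for the integer V2INT16 types, and the two halves swapped), and also show a V_PK_ADD_U16/V_PK_SUB_U16 swap when clamp is unset. A standalone sketch of the half-word candidates, roughly in the listed order; IsInlinable stands in for AMDGPU::isInlinableLiteralV216(), and the real pass also adjusts op_sel/op_sel_hi to match whichever form it picks:

    #include <cstdint>
    #include <functional>

    // Try progressively rewritten forms of Imm until one is inline-encodable.
    static bool pickFoldableLiteral(uint32_t Imm,
                                    const std::function<bool(uint32_t)> &IsInlinable,
                                    uint32_t &Out) {
      if (IsInlinable(Imm)) { Out = Imm; return true; }            // whole literal
      uint32_t Lo = Imm & 0xffffu;
      if (IsInlinable(Lo)) { Out = Lo; return true; }              // low half, high zeroed
      uint32_t SExt = static_cast<uint32_t>(int32_t(int16_t(Lo)));
      if (IsInlinable(SExt)) { Out = SExt; return true; }          // low half, sign-extended
      if (IsInlinable(Lo << 16)) { Out = Lo << 16; return true; }  // low half moved high (integer ops)
      uint32_t Swapped = (Imm << 16) | (Imm >> 16);
      if (IsInlinable(Swapped)) { Out = Swapped; return true; }    // halves swapped via op_sel
      return false;
    }
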
378 auto Liveness = MBB->computeRegisterLiveness(TRI, AMDGPU::VCC, MI, 16); in updateOperand()
397 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::COPY), in updateOperand()
399 .addReg(AMDGPU::VCC, RegState::Kill); in updateOperand()
411 MI->setDesc(TII->get(AMDGPU::IMPLICIT_DEF)); in updateOperand()
422 int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(MI->getOpcode()); in updateOperand()
477 const unsigned NewOpc = TryAK ? AMDGPU::S_FMAAK_F32 : AMDGPU::S_FMAMK_F32; in tryAddToFoldList()
515 if (NewOpc != AMDGPU::INSTRUCTION_LIST_END) { in tryAddToFoldList()
519 bool AddOpSel = !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::op_sel) && in tryAddToFoldList()
520 AMDGPU::hasNamedOperand(NewOpc, AMDGPU::OpName::op_sel); in tryAddToFoldList()
535 if (Opc == AMDGPU::S_FMAC_F32 && OpNo == 3) { in tryAddToFoldList()
543 if (Opc == AMDGPU::S_SETREG_B32) in tryAddToFoldList()
544 ImmOpc = AMDGPU::S_SETREG_IMM32_B32; in tryAddToFoldList()
545 else if (Opc == AMDGPU::S_SETREG_B32_mode) in tryAddToFoldList()
546 ImmOpc = AMDGPU::S_SETREG_IMM32_B32_mode; in tryAddToFoldList()
579 if ((Opc != AMDGPU::V_ADD_CO_U32_e64 && Opc != AMDGPU::V_SUB_CO_U32_e64 && in tryAddToFoldList()
580 Opc != AMDGPU::V_SUBREV_CO_U32_e64) || // FIXME in tryAddToFoldList()
597 Op32 = AMDGPU::getVOPe32(MaybeCommutedOpc); in tryAddToFoldList()
606 if ((Opc == AMDGPU::S_FMAAK_F32 || Opc == AMDGPU::S_FMAMK_F32) && in tryAddToFoldList()
608 unsigned ImmIdx = Opc == AMDGPU::S_FMAAK_F32 ? 3 : 2; in tryAddToFoldList()
620 if (Opc == AMDGPU::S_FMAC_F32 && in tryAddToFoldList()
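
The tryAddToFoldList() lines above pick between the scalar FMA-with-literal forms when an immediate is folded into S_FMAC_F32: operand 3 is the addend (per the OpNo == 3 check), and the literal then sits at operand 3 of S_FMAAK_F32 but operand 2 of S_FMAMK_F32. A minimal sketch of that choice; the helper name is illustrative:

    // fmaak: dst = src0 * src1 + K    (literal replaces the addend)
    // fmamk: dst = src0 * K    + src2 (literal replaces a multiplicand)
    static const char *scalarFmaWithLiteral(bool ImmIsAddend) {
      return ImmIsAddend ? "S_FMAAK_F32" : "S_FMAMK_F32";
    }
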
696 if (!AMDGPU::isSISrcInlinableOperand(Desc, UseOpIdx)) in tryToFoldACImm()
768 (UseOp->isImplicit() || UseOp->getSubReg() != AMDGPU::NoSubRegister)) in foldOperand()
806 if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() != in foldOperand()
813 *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset); in foldOperand()
824 AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::vaddr) && in foldOperand()
825 !AMDGPU::hasNamedOperand(Opc, AMDGPU::OpName::saddr)) { in foldOperand()
826 unsigned NewOpc = AMDGPU::getFlatScratchInstSSfromSV(Opc); in foldOperand()
851 if (DestRC == &AMDGPU::AGPR_32RegClass && in foldOperand()
852 TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) { in foldOperand()
853 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64)); in foldOperand()
864 if (MovOp == AMDGPU::COPY) in foldOperand()
876 if (MovOp == AMDGPU::V_MOV_B16_t16_e64) { in foldOperand()
910 getRegSeqInit(Defs, UseReg, AMDGPU::OPERAND_REG_INLINE_C_INT32)) { in foldOperand()
914 UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE)); in foldOperand()
925 TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) { in foldOperand()
928 auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass); in foldOperand()
930 TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addImm(Imm); in foldOperand()
955 auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass); in foldOperand()
956 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def); in foldOperand()
966 Vgpr = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); in foldOperand()
967 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def); in foldOperand()
970 auto Tmp = MRI->createVirtualRegister(&AMDGPU::AGPR_32RegClass); in foldOperand()
972 TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addReg(Vgpr); in foldOperand()
988 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64)); in foldOperand()
990 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64)); in foldOperand()
993 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32)); in foldOperand()
998 if (UseOpc == AMDGPU::V_READFIRSTLANE_B32 || in foldOperand()
999 (UseOpc == AMDGPU::V_READLANE_B32 && in foldOperand()
1001 AMDGPU::getNamedOperandIdx(UseOpc, AMDGPU::OpName::src0))) { in foldOperand()
1013 UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32)); in foldOperand()
1034 UseMI->setDesc(TII->get(AMDGPU::COPY)); in foldOperand()
1083 if (UseOp->getSubReg() && AMDGPU::getRegBitWidth(*FoldRC) == 64) { in foldOperand()
1086 if (AMDGPU::getRegBitWidth(*UseRC) != 64) in foldOperand()
1090 if (UseOp->getSubReg() == AMDGPU::sub0) { in foldOperand()
1093 assert(UseOp->getSubReg() == AMDGPU::sub1); in foldOperand()
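
Several foldOperand() matches above (V_ACCVGPR_WRITE/READ/MOV) reflect the mov-opcode choice for 32-bit copies once accumulator registers are involved. A standalone sketch, assuming a plain enum in place of the real register-class queries, with opcode names returned as strings purely for illustration:

    enum class RC { VGPR, AGPR };

    static const char *pickAccMov(RC Dst, RC Src) {
      if (Dst == RC::AGPR && Src == RC::AGPR)
        return "V_ACCVGPR_MOV_B32";        // AGPR-to-AGPR, on targets that have it
      if (Dst == RC::AGPR)
        return "V_ACCVGPR_WRITE_B32_e64";  // VGPR or inline constant into an AGPR
      if (Src == RC::AGPR)
        return "V_ACCVGPR_READ_B32_e64";   // AGPR back out to a VGPR
      return "V_MOV_B32_e32";              // ordinary VGPR move
    }
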
1108 case AMDGPU::V_AND_B32_e64: in evalBinaryInstruction()
1109 case AMDGPU::V_AND_B32_e32: in evalBinaryInstruction()
1110 case AMDGPU::S_AND_B32: in evalBinaryInstruction()
1113 case AMDGPU::V_OR_B32_e64: in evalBinaryInstruction()
1114 case AMDGPU::V_OR_B32_e32: in evalBinaryInstruction()
1115 case AMDGPU::S_OR_B32: in evalBinaryInstruction()
1118 case AMDGPU::V_XOR_B32_e64: in evalBinaryInstruction()
1119 case AMDGPU::V_XOR_B32_e32: in evalBinaryInstruction()
1120 case AMDGPU::S_XOR_B32: in evalBinaryInstruction()
1123 case AMDGPU::S_XNOR_B32: in evalBinaryInstruction()
1126 case AMDGPU::S_NAND_B32: in evalBinaryInstruction()
1129 case AMDGPU::S_NOR_B32: in evalBinaryInstruction()
1132 case AMDGPU::S_ANDN2_B32: in evalBinaryInstruction()
1135 case AMDGPU::S_ORN2_B32: in evalBinaryInstruction()
1138 case AMDGPU::V_LSHL_B32_e64: in evalBinaryInstruction()
1139 case AMDGPU::V_LSHL_B32_e32: in evalBinaryInstruction()
1140 case AMDGPU::S_LSHL_B32: in evalBinaryInstruction()
1144 case AMDGPU::V_LSHLREV_B32_e64: in evalBinaryInstruction()
1145 case AMDGPU::V_LSHLREV_B32_e32: in evalBinaryInstruction()
1148 case AMDGPU::V_LSHR_B32_e64: in evalBinaryInstruction()
1149 case AMDGPU::V_LSHR_B32_e32: in evalBinaryInstruction()
1150 case AMDGPU::S_LSHR_B32: in evalBinaryInstruction()
1153 case AMDGPU::V_LSHRREV_B32_e64: in evalBinaryInstruction()
1154 case AMDGPU::V_LSHRREV_B32_e32: in evalBinaryInstruction()
1157 case AMDGPU::V_ASHR_I32_e64: in evalBinaryInstruction()
1158 case AMDGPU::V_ASHR_I32_e32: in evalBinaryInstruction()
1159 case AMDGPU::S_ASHR_I32: in evalBinaryInstruction()
1162 case AMDGPU::V_ASHRREV_I32_e64: in evalBinaryInstruction()
1163 case AMDGPU::V_ASHRREV_I32_e32: in evalBinaryInstruction()
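
The evalBinaryInstruction() cases above constant-fold the 32-bit scalar and vector ALU opcodes. A standalone sketch of the arithmetic they imply; BinOp is an illustrative stand-in for the real opcode enum, the *REV variants swap their operands, and shift amounts are taken modulo 32:

    #include <cstdint>

    enum class BinOp { And, Or, Xor, Xnor, Nand, Nor, AndN2, OrN2,
                       Lshl, LshlRev, Lshr, LshrRev, Ashr, AshrRev };

    static bool evalBinary(BinOp Op, uint32_t L, uint32_t R, uint32_t &Res) {
      switch (Op) {
      case BinOp::And:     Res = L & R;         return true;
      case BinOp::Or:      Res = L | R;         return true;
      case BinOp::Xor:     Res = L ^ R;         return true;
      case BinOp::Xnor:    Res = ~(L ^ R);      return true;
      case BinOp::Nand:    Res = ~(L & R);      return true;
      case BinOp::Nor:     Res = ~(L | R);      return true;
      case BinOp::AndN2:   Res = L & ~R;        return true;
      case BinOp::OrN2:    Res = L | ~R;        return true;
      case BinOp::Lshl:    Res = L << (R & 31); return true;
      case BinOp::LshlRev: Res = R << (L & 31); return true;  // REV: shift amount comes first
      case BinOp::Lshr:    Res = L >> (R & 31); return true;
      case BinOp::LshrRev: Res = R >> (L & 31); return true;
      case BinOp::Ashr:
        Res = uint32_t(int32_t(L) >> (R & 31)); return true;  // arithmetic shift
      case BinOp::AshrRev:
        Res = uint32_t(int32_t(R) >> (L & 31)); return true;
      }
      return false;
    }
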
1172 return IsScalar ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; in getMovOpc()
1192 if (!Op.isReg() || Op.getSubReg() != AMDGPU::NoSubRegister || in getImmOrMaterializedImm()
1215 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); in tryConstantFoldOp()
1220 if ((Opc == AMDGPU::V_NOT_B32_e64 || Opc == AMDGPU::V_NOT_B32_e32 || in tryConstantFoldOp()
1221 Opc == AMDGPU::S_NOT_B32) && in tryConstantFoldOp()
1224 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32))); in tryConstantFoldOp()
1228 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); in tryConstantFoldOp()
1263 if (Opc == AMDGPU::V_OR_B32_e64 || in tryConstantFoldOp()
1264 Opc == AMDGPU::V_OR_B32_e32 || in tryConstantFoldOp()
1265 Opc == AMDGPU::S_OR_B32) { in tryConstantFoldOp()
1269 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
1273 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32))); in tryConstantFoldOp()
1280 if (Opc == AMDGPU::V_AND_B32_e64 || Opc == AMDGPU::V_AND_B32_e32 || in tryConstantFoldOp()
1281 Opc == AMDGPU::S_AND_B32) { in tryConstantFoldOp()
1285 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32))); in tryConstantFoldOp()
1289 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
1296 if (Opc == AMDGPU::V_XOR_B32_e64 || Opc == AMDGPU::V_XOR_B32_e32 || in tryConstantFoldOp()
1297 Opc == AMDGPU::S_XOR_B32) { in tryConstantFoldOp()
1301 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
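
The tryConstantFoldOp() matches above cover the single-constant identities for AND/OR/XOR: when the identity leaves the register operand unchanged the instruction degenerates to a COPY, and when it forces a known value it becomes a move. A standalone sketch of those identities (the both-operands-constant case, which the pass also folds, is omitted):

    #include <cstdint>

    enum class FoldResult { None, CopyOtherSource, MoveConstant };

    static FoldResult foldWithOneConstant(char Op, uint32_t C, uint32_t &MovImm) {
      switch (Op) {
      case '|':
        if (C == 0) return FoldResult::CopyOtherSource;                   // x | 0  == x
        if (C == ~0u) { MovImm = ~0u; return FoldResult::MoveConstant; }  // x | ~0 == ~0
        break;
      case '&':
        if (C == ~0u) return FoldResult::CopyOtherSource;                 // x & ~0 == x
        if (C == 0) { MovImm = 0; return FoldResult::MoveConstant; }      // x & 0  == 0
        break;
      case '^':
        if (C == 0) return FoldResult::CopyOtherSource;                   // x ^ 0  == x
        break;
      }
      return FoldResult::None;
    }
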
1312 if (Opc != AMDGPU::V_CNDMASK_B32_e32 && Opc != AMDGPU::V_CNDMASK_B32_e64 && in tryFoldCndMask()
1313 Opc != AMDGPU::V_CNDMASK_B64_PSEUDO) in tryFoldCndMask()
1316 MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in tryFoldCndMask()
1317 MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in tryFoldCndMask()
1326 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1_modifiers); in tryFoldCndMask()
1328 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers); in tryFoldCndMask()
1335 TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false)); in tryFoldCndMask()
1336 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); in tryFoldCndMask()
1339 MI.removeOperand(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1)); in tryFoldCndMask()
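
The tryFoldCndMask() lines above drop a V_CNDMASK whose two value inputs (and their source modifiers) are identical, since the condition can no longer change the result. A minimal sketch of the decision; the opcode strings mirror the COPY-versus-mov choice visible in the listing:

    // select(cond, x, x) == x: keep a COPY for a register source, a move for
    // an immediate source, or nothing if the inputs differ.
    static const char *foldCndMask(bool SrcsIdentical, bool ModsIdentical,
                                   bool Src0IsReg) {
      if (!SrcsIdentical || !ModsIdentical)
        return nullptr;
      return Src0IsReg ? "COPY" : "V_MOV_B32_e32";
    }
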
1350 if (MI.getOpcode() != AMDGPU::V_AND_B32_e64 && in tryFoldZeroHighBits()
1351 MI.getOpcode() != AMDGPU::V_AND_B32_e32) in tryFoldZeroHighBits()
1421 if (DefMI->readsRegister(AMDGPU::EXEC, TRI) && in foldInstOperand()
1449 if (MI.getOperand(0).getReg() == AMDGPU::M0) { in tryFoldFoldableCopy()
1517 case AMDGPU::V_MAX_F32_e64: in isClamp()
1518 case AMDGPU::V_MAX_F16_e64: in isClamp()
1519 case AMDGPU::V_MAX_F16_t16_e64: in isClamp()
1520 case AMDGPU::V_MAX_F16_fake16_e64: in isClamp()
1521 case AMDGPU::V_MAX_F64_e64: in isClamp()
1522 case AMDGPU::V_MAX_NUM_F64_e64: in isClamp()
1523 case AMDGPU::V_PK_MAX_F16: { in isClamp()
1527 if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm()) in isClamp()
1531 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isClamp()
1532 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isClamp()
1536 Src0->getSubReg() != AMDGPU::NoSubRegister) in isClamp()
1540 if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) in isClamp()
1544 = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm(); in isClamp()
1546 = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm(); in isClamp()
1550 unsigned UnsetMods = (Op == AMDGPU::V_PK_MAX_F16) ? SISrcMods::OP_SEL_1 in isClamp()
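
The isClamp() cases above recognize a clamp as V_MAX(x, x) with the clamp bit set and no other modifiers (for V_PK_MAX_F16 the default OP_SEL_1 bit is the only modifier allowed). A standalone sketch of that check, with illustrative field names rather than the MachineInstr API:

    struct MaxInst {
      int Src0Reg, Src1Reg;        // virtual register numbers
      unsigned Src0Mods, Src1Mods; // source-modifier bits
      bool ClampBit, HasOMod;
      bool IsPackedF16;            // V_PK_MAX_F16 keeps OP_SEL_1 set by default
    };

    static bool looksLikeClamp(const MaxInst &MI, unsigned OpSel1Bit) {
      unsigned Unset = MI.IsPackedF16 ? OpSel1Bit : 0u;
      return MI.ClampBit && !MI.HasOMod &&
             MI.Src0Reg == MI.Src1Reg &&                  // max(x, x)
             MI.Src0Mods == Unset && MI.Src1Mods == Unset;
    }
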
1576 MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp); in tryFoldClamp()
1590 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY), in tryFoldClamp()
1609 case AMDGPU::V_MUL_F64_e64: in getOModValue()
1610 case AMDGPU::V_MUL_F64_pseudo_e64: { in getOModValue()
1622 case AMDGPU::V_MUL_F32_e64: { in getOModValue()
1634 case AMDGPU::V_MUL_F16_e64: in getOModValue()
1635 case AMDGPU::V_MUL_F16_t16_e64: in getOModValue()
1636 case AMDGPU::V_MUL_F16_fake16_e64: { in getOModValue()
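
getOModValue() (and the isOMod() cases that follow) map a multiply by a specific constant onto the hardware output-modifier field: only 0.5, 2.0, and 4.0 qualify, and an add of a value to itself is treated as a multiply by 2.0. A standalone sketch for the F32 case; OutMod is an illustrative stand-in for the backend's SIOutMods encoding:

    #include <optional>

    enum class OutMod { None, Mul2, Mul4, Div2 };

    static std::optional<OutMod> omodForF32Multiplier(float K) {
      if (K == 0.5f) return OutMod::Div2;  // x * 0.5 -> "divide by 2" output modifier
      if (K == 2.0f) return OutMod::Mul2;  // x * 2.0 -> "multiply by 2"
      if (K == 4.0f) return OutMod::Mul4;  // x * 4.0 -> "multiply by 4"
      return std::nullopt;                 // any other multiplier cannot use omod
    }
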
1660 case AMDGPU::V_MUL_F64_e64: in isOMod()
1661 case AMDGPU::V_MUL_F64_pseudo_e64: in isOMod()
1662 case AMDGPU::V_MUL_F32_e64: in isOMod()
1663 case AMDGPU::V_MUL_F16_t16_e64: in isOMod()
1664 case AMDGPU::V_MUL_F16_fake16_e64: in isOMod()
1665 case AMDGPU::V_MUL_F16_e64: { in isOMod()
1667 if ((Op == AMDGPU::V_MUL_F32_e64 && in isOMod()
1669 ((Op == AMDGPU::V_MUL_F64_e64 || Op == AMDGPU::V_MUL_F64_pseudo_e64 || in isOMod()
1670 Op == AMDGPU::V_MUL_F16_e64 || Op == AMDGPU::V_MUL_F16_t16_e64 || in isOMod()
1671 Op == AMDGPU::V_MUL_F16_fake16_e64) && in isOMod()
1679 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isOMod()
1680 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isOMod()
1692 TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || in isOMod()
1693 TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || in isOMod()
1694 TII->hasModifiersSet(MI, AMDGPU::OpName::omod) || in isOMod()
1695 TII->hasModifiersSet(MI, AMDGPU::OpName::clamp)) in isOMod()
1700 case AMDGPU::V_ADD_F64_e64: in isOMod()
1701 case AMDGPU::V_ADD_F64_pseudo_e64: in isOMod()
1702 case AMDGPU::V_ADD_F32_e64: in isOMod()
1703 case AMDGPU::V_ADD_F16_e64: in isOMod()
1704 case AMDGPU::V_ADD_F16_t16_e64: in isOMod()
1705 case AMDGPU::V_ADD_F16_fake16_e64: { in isOMod()
1707 if ((Op == AMDGPU::V_ADD_F32_e64 && in isOMod()
1709 ((Op == AMDGPU::V_ADD_F64_e64 || Op == AMDGPU::V_ADD_F64_pseudo_e64 || in isOMod()
1710 Op == AMDGPU::V_ADD_F16_e64 || Op == AMDGPU::V_ADD_F16_t16_e64 || in isOMod()
1711 Op == AMDGPU::V_ADD_F16_fake16_e64) && in isOMod()
1716 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isOMod()
1717 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isOMod()
1721 !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) && in isOMod()
1722 !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) && in isOMod()
1723 !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) && in isOMod()
1724 !TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) in isOMod()
1740 RegOp->getSubReg() != AMDGPU::NoSubRegister || in tryFoldOMod()
1745 MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod); in tryFoldOMod()
1754 if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp)) in tryFoldOMod()
1822 TII->get(AMDGPU::REG_SEQUENCE), Dst); in tryFoldRegSequence()
1882 CopySrcDef->getOperand(0).getSubReg() != AMDGPU::NoSubRegister || in isAGPRCopy()
1883 OtherCopySrc.getSubReg() != AMDGPU::NoSubRegister || in isAGPRCopy()
1938 unsigned AGPRRegMask = AMDGPU::NoSubRegister; in tryFoldPhiAGPR()
1954 bool IsAGPR32 = (ARC == &AMDGPU::AGPR_32RegClass); in tryFoldPhiAGPR()
1966 unsigned CopyOpc = AMDGPU::COPY; in tryFoldPhiAGPR()
1973 unsigned AGPRSubReg = AMDGPU::NoSubRegister; in tryFoldPhiAGPR()
1989 CopyOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64; in tryFoldPhiAGPR()
2017 TII->get(AMDGPU::COPY), PhiOut) in tryFoldPhiAGPR()
2155 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TempVGPR) in tryOptimizeAGPRPhis()
2161 TII->get(AMDGPU::COPY), TempAGPR) in tryOptimizeAGPRPhis()
2167 MO->setSubReg(AMDGPU::NoSubRegister); in tryOptimizeAGPRPhis()
2226 if (CurrentKnownM0Val && MI.modifiesRegister(AMDGPU::M0, TRI)) in runOnMachineFunction()