Lines Matching refs:TII (llvm/lib/Target/AMDGPU/SIFoldOperands.cpp)
73 const SIInstrInfo *TII; member in (anonymous namespace)::SIFoldOperands
186 if (TII->isMUBUF(UseMI)) in frameIndexMayFold()
188 if (!TII->isFLATScratch(UseMI)) in frameIndexMayFold()
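The two hits at 186/188 gate frame-index folding on the memory-instruction class: MUBUF and FLAT-scratch address their stack operand through differently named operands. A minimal sketch of that shape, assuming the usual SIFoldOperands.cpp includes; the function body is illustrative, not the actual implementation:

    // Illustrative reduction of the frameIndexMayFold() class check: a frame
    // index can only fold into the address operand, and MUBUF vs. FLAT-scratch
    // name that operand differently (vaddr vs. saddr).
    static bool frameIndexMayFoldSketch(const SIInstrInfo *TII,
                                        const MachineInstr &UseMI, int OpNo) {
      if (TII->isMUBUF(UseMI))
        return OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
                                                  AMDGPU::OpName::vaddr);
      if (TII->isFLATScratch(UseMI))
        return OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(),
                                                  AMDGPU::OpName::saddr);
      return false; // other memory classes never fold a frame index
    }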
217 uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType; in canUseImmWithOpSel()
238 uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType; in tryFoldImmWithOpSel()
348 MI->setDesc(TII->get(NegOpcode)); in tryFoldImmWithOpSel()
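Lines 217 and 238 both read the operand's encoded type straight from the opcode's MCInstrDesc. A minimal fragment of that lookup, with Opcode and OpNo assumed in scope; the packed-f16 comparison is added here for illustration:

    // The MCInstrDesc stores a target-defined operand type per operand;
    // op_sel immediate folding keys off the packed-16-bit categories.
    uint8_t OpType = TII->get(Opcode).operands()[OpNo].OperandType;
    bool IsPackedF16 = OpType == AMDGPU::OPERAND_REG_IMM_V2FP16 ||
                       OpType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16;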
370 if (!TII->isOperandLegal(*MI, OpNo, &New)) in updateOperand()
394 MachineInstr *Inst32 = TII->buildShrunkInst(*MI, Op32); in updateOperand()
397 BuildMI(*MBB, MI, MI->getDebugLoc(), TII->get(AMDGPU::COPY), in updateOperand()
411 MI->setDesc(TII->get(AMDGPU::IMPLICIT_DEF)); in updateOperand()
414 TII->commuteInstruction(*Inst32, false); in updateOperand()
425 MI->setDesc(TII->get(NewMFMAOpc)); in updateOperand()
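updateOperand() mutates the use in place: line 370 legal-checks the replacement, 394 falls back to building the shrunk 32-bit encoding, and 414 may commute the shrunk result. A minimal sketch of the legality-then-shrink shape; MI and New are assumed in scope, and the real pass drives this through a fold-candidate list rather than directly:

    // Sketch: fold New into operand OpNo if the encoding accepts it there;
    // otherwise try the 32-bit (e32) form of the instruction.
    if (TII->isOperandLegal(*MI, OpNo, &New)) {
      MI->getOperand(OpNo).ChangeToImmediate(New.getImm());
    } else if (int Op32 = AMDGPU::getVOPe32(MI->getOpcode()); Op32 != -1) {
      // buildShrunkInst() emits the e32 encoding; the caller is expected to
      // erase the original wide instruction afterwards.
      MachineInstr *Inst32 = TII->buildShrunkInst(*MI, Op32);
      (void)Inst32;
    }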
478 MI->setDesc(TII->get(NewOpc)); in tryAddToFoldList()
502 MI->setDesc(TII->get(Opc)); in tryAddToFoldList()
506 bool IsLegal = TII->isOperandLegal(*MI, OpNo, OpToFold); in tryAddToFoldList()
518 MI->setDesc(TII->get(NewOpc)); in tryAddToFoldList()
530 MI->setDesc(TII->get(Opc)); in tryAddToFoldList()
548 MI->setDesc(TII->get(ImmOpc)); in tryAddToFoldList()
563 bool CanCommute = TII->findCommutedOpIndices(*MI, OpNo, CommuteOpNo); in tryAddToFoldList()
574 if (!TII->commuteInstruction(*MI, false, OpNo, CommuteOpNo)) in tryAddToFoldList()
578 if (!TII->isOperandLegal(*MI, CommuteOpNo, OpToFold)) { in tryAddToFoldList()
582 TII->commuteInstruction(*MI, false, OpNo, CommuteOpNo); in tryAddToFoldList()
590 !TII->getRegisterInfo().isVGPR(*MRI, OtherOp.getReg())) in tryAddToFoldList()
607 !OpToFold->isReg() && !TII->isInlineConstant(*OpToFold)) { in tryAddToFoldList()
611 TII->isInlineConstant(*MI, MI->getOperand(OpNo), OpImm)) in tryAddToFoldList()
628 if (TII->isSALU(MI->getOpcode())) { in tryAddToFoldList()
633 if (!OpToFold->isReg() && !TII->isInlineConstant(*OpToFold, OpInfo)) { in tryAddToFoldList()
638 !TII->isInlineConstant(Op, InstDesc.operands()[i])) in tryAddToFoldList()
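Lines 563-582 implement the try-commute-then-verify idiom: find the commutable operand pair, commute, re-check legality, and commute back on failure since commuteInstruction() mutates MI. A condensed sketch as a standalone helper, assuming the SIFoldOperands.cpp context:

    // Sketch of the commute-and-verify idiom from tryAddToFoldList().
    static bool tryFoldCommuted(const SIInstrInfo *TII, MachineInstr &MI,
                                unsigned OpNo, MachineOperand *OpToFold) {
      unsigned CommuteOpNo = TargetInstrInfo::CommuteAnyOperandIndex;
      if (!TII->findCommutedOpIndices(MI, OpNo, CommuteOpNo))
        return false;
      if (!TII->commuteInstruction(MI, /*NewMI=*/false, OpNo, CommuteOpNo))
        return false;
      if (!TII->isOperandLegal(MI, CommuteOpNo, OpToFold)) {
        // Still illegal after commuting: restore the original operand order.
        TII->commuteInstruction(MI, /*NewMI=*/false, OpNo, CommuteOpNo);
        return false;
      }
      return true; // caller folds into CommuteOpNo instead of OpNo
    }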
651 return !TII->isSDWA(MI); in isUseSafeToFold()
670 !Sub->getSubReg() && TII->isFoldableCopy(*SubDef); in getRegSeqInit()
674 if (TII->isInlineConstant(*Op, OpTy)) in getRegSeqInit()
700 if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) && in tryToFoldACImm()
701 TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) { in tryToFoldACImm()
719 if (!UseOp.getSubReg() && Def && TII->isFoldableCopy(*Def)) { in tryToFoldACImm()
721 if (DefOp.isImm() && TII->isInlineConstant(DefOp, OpTy) && in tryToFoldACImm()
722 TII->isOperandLegal(*UseMI, UseOpIdx, &DefOp)) { in tryToFoldACImm()
741 if (!TII->isInlineConstant(*Op, OpTy) || in tryToFoldACImm()
742 !TII->isOperandLegal(*UseMI, UseOpIdx, Op)) in tryToFoldACImm()
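Lines 700-701 (and the mirror at 741-742) show the recurring two-part gate for immediate folds: the value must be an inline constant for the operand's type, and the encoding must accept an immediate at that index. A minimal sketch; applying the fold directly is a simplification, since the pass defers it via its fold-candidate list:

    // Sketch: fold an immediate into UseMI's operand only when it is an
    // inline constant for that operand type AND legal at that position.
    uint8_t OpTy = TII->get(UseMI->getOpcode()).operands()[UseOpIdx].OperandType;
    if (OpToFold.isImm() && TII->isInlineConstant(OpToFold, OpTy) &&
        TII->isOperandLegal(*UseMI, UseOpIdx, &OpToFold)) {
      UseMI->getOperand(UseOpIdx).ChangeToImmediate(OpToFold.getImm());
      return true;
    }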
805 if (TII->isMUBUF(*UseMI)) { in foldOperand()
806 if (TII->getNamedOperand(*UseMI, AMDGPU::OpName::srsrc)->getReg() != in foldOperand()
813 *TII->getNamedOperand(*UseMI, AMDGPU::OpName::soffset); in foldOperand()
823 if (TII->isFLATScratch(*UseMI) && in foldOperand()
827 UseMI->setDesc(TII->get(NewOpc)); in foldOperand()
852 TII->isInlineConstant(OpToFold, AMDGPU::OPERAND_REG_INLINE_C_INT32)) { in foldOperand()
853 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64)); in foldOperand()
863 unsigned MovOp = TII->getMovOpcode(DestRC); in foldOperand()
874 UseMI->setDesc(TII->get(MovOp)); in foldOperand()
893 unsigned Size = TII->getOpSize(*UseMI, 1); in foldOperand()
914 UseMI->setDesc(TII->get(AMDGPU::REG_SEQUENCE)); in foldOperand()
925 TII->isInlineConstant(*Def, AMDGPU::OPERAND_REG_INLINE_C_INT32)) { in foldOperand()
930 TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addImm(Imm); in foldOperand()
956 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Tmp).add(*Def); in foldOperand()
967 BuildMI(MBB, UseMI, DL, TII->get(AMDGPU::COPY), Vgpr).add(*Def); in foldOperand()
972 TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), Tmp).addReg(Vgpr); in foldOperand()
988 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_WRITE_B32_e64)); in foldOperand()
990 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64)); in foldOperand()
993 UseMI->setDesc(TII->get(AMDGPU::V_ACCVGPR_MOV_B32)); in foldOperand()
1013 UseMI->setDesc(TII->get(AMDGPU::S_MOV_B32)); in foldOperand()
1034 UseMI->setDesc(TII->get(AMDGPU::COPY)); in foldOperand()
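The foldOperand() hits from 852 through 1034 all rewrite a COPY in place via setDesc(), choosing the mov flavor from the register banks involved (AGPR vs. VGPR vs. SGPR). A condensed sketch of that dispatch; DstIsAGPR/SrcIsAGPR are assumed precomputed from TRI, and the plain-VGPR fallback is illustrative:

    // Sketch: pick the raw mov opcode for a COPY based on which register
    // banks the source and destination live in, then mutate the COPY.
    unsigned MovOp;
    if (DstIsAGPR && SrcIsAGPR)
      MovOp = AMDGPU::V_ACCVGPR_MOV_B32;        // AGPR <- AGPR
    else if (DstIsAGPR)
      MovOp = AMDGPU::V_ACCVGPR_WRITE_B32_e64;  // AGPR <- VGPR / inline imm
    else if (SrcIsAGPR)
      MovOp = AMDGPU::V_ACCVGPR_READ_B32_e64;   // VGPR <- AGPR
    else
      MovOp = AMDGPU::V_MOV_B32_e32;            // plain VGPR mov
    UseMI->setDesc(TII->get(MovOp));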
1224 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_NOT_B32))); in tryConstantFoldOp()
1250 mutateCopyOp(*MI, TII->get(getMovOpc(IsSGPR))); in tryConstantFoldOp()
1269 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
1273 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_OR_B32))); in tryConstantFoldOp()
1285 mutateCopyOp(*MI, TII->get(getMovOpc(Opc == AMDGPU::S_AND_B32))); in tryConstantFoldOp()
1289 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
1301 mutateCopyOp(*MI, TII->get(AMDGPU::COPY)); in tryConstantFoldOp()
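tryConstantFoldOp() (1224-1301) evaluates the scalar bit ops against known immediates and demotes the instruction to a mov or COPY; mutateCopyOp() and getMovOpc() are file-local helpers in SIFoldOperands.cpp. A sketch of the and-with-constant cases, with the operand-index bookkeeping simplified:

    // Sketch: s_and_b32 dst, x, -1  ==>  COPY dst, x       (identity)
    //         s_and_b32 dst, x, 0   ==>  s_mov_b32 dst, 0  (annihilator)
    if (Opc == AMDGPU::S_AND_B32 && Src1->isImm()) {
      if (Src1->getImm() == -1) {
        MI->removeOperand(Src1Idx);  // keep the register source
        mutateCopyOp(*MI, TII->get(AMDGPU::COPY));
      } else if (Src1->getImm() == 0) {
        MI->removeOperand(Src0Idx);  // keep the zero immediate
        mutateCopyOp(*MI, TII->get(getMovOpc(/*IsScalar=*/true)));
      }
    }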
1316 MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in tryFoldCndMask()
1317 MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in tryFoldCndMask()
1335 TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false)); in tryFoldCndMask()
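tryFoldCndMask() (1316-1335) fetches both sources by name; when they are identical the select result cannot depend on the condition, so the instruction degrades to a COPY or mov. A trimmed sketch; the removal of the condition and modifier operands is elided:

    MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (Src0->isIdenticalTo(*Src1)) {
      // v_cndmask_b32 dst, x, x, cc  ==>  copy/mov dst, x
      const MCInstrDesc &NewDesc =
          TII->get(Src0->isReg() ? (unsigned)AMDGPU::COPY : getMovOpc(false));
      // ... strip src1, the condition, and the modifier operands, then:
      MI.setDesc(NewDesc);
    }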
1439 TII->commuteInstruction(*Fold.UseMI, false); in foldInstOperand()
1499 if (!InstToErase || !TII->isFoldableCopy(*InstToErase)) in tryFoldFoldableCopy()
1527 if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm()) in isClamp()
1531 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isClamp()
1532 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isClamp()
1540 if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) in isClamp()
1544 = TII->getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm(); in isClamp()
1546 = TII->getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm(); in isClamp()
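isClamp() (1527-1546) pattern-matches max(x, x) with the clamp bit set, using getNamedOperand() for the value operands and hasModifiersSet() to reject anything that would change the result; the analogous isOMod() checks below follow the same pattern. A trimmed sketch, with the src-modifier comparison at 1544/1546 abbreviated:

    // Reject unless MI is max(x, x) with clamp set and no output modifier.
    if (!TII->getNamedOperand(MI, AMDGPU::OpName::clamp)->getImm())
      return nullptr;                                  // clamp bit not set
    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
    if (!Src0->isIdenticalTo(*Src1))
      return nullptr;                                  // not max(x, x)
    if (TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
      return nullptr;                                  // omod alters the value
    return Src0;                                       // the clamped operand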
1570 if (TII->getClampMask(*Def) != TII->getClampMask(MI)) in tryFoldClamp()
1576 MachineOperand *DefClamp = TII->getNamedOperand(*Def, AMDGPU::OpName::clamp); in tryFoldClamp()
1590 BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), TII->get(AMDGPU::COPY), in tryFoldClamp()
1601 if (TII->convertToThreeAddress(*Def, nullptr, nullptr)) in tryFoldClamp()
1679 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isOMod()
1680 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isOMod()
1692 TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || in isOMod()
1693 TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || in isOMod()
1694 TII->hasModifiersSet(MI, AMDGPU::OpName::omod) || in isOMod()
1695 TII->hasModifiersSet(MI, AMDGPU::OpName::clamp)) in isOMod()
1716 const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0); in isOMod()
1717 const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1); in isOMod()
1721 !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) && in isOMod()
1722 !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) && in isOMod()
1723 !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) && in isOMod()
1724 !TII->hasModifiersSet(MI, AMDGPU::OpName::omod)) in isOMod()
1745 MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod); in tryFoldOMod()
1754 if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp)) in tryFoldOMod()
1766 if (TII->convertToThreeAddress(*Def, nullptr, nullptr)) in tryFoldOMod()
1815 TII->getRegClass(InstDesc, OpIdx, TRI, *MI.getMF()); in tryFoldRegSequence()
1822 TII->get(AMDGPU::REG_SEQUENCE), Dst); in tryFoldRegSequence()
1837 if (!TII->isOperandLegal(*UseMI, OpIdx, Op)) { in tryFoldRegSequence()
2001 TII->get(CopyOpc), NewReg) in tryFoldPhiAGPR()
2017 TII->get(AMDGPU::COPY), PhiOut) in tryFoldPhiAGPR()
2065 if (!TII->isOperandLegal(MI, 0, &Def)) { in tryFoldLoad()
2155 TII->get(AMDGPU::V_ACCVGPR_READ_B32_e64), TempVGPR) in tryOptimizeAGPRPhis()
2161 TII->get(AMDGPU::COPY), TempAGPR) in tryOptimizeAGPRPhis()
2183 TII = ST->getInstrInfo(); in runOnMachineFunction()
2184 TRI = &TII->getRegisterInfo(); in runOnMachineFunction()
2220 if (TII->isFoldableCopy(MI)) { in runOnMachineFunction()
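runOnMachineFunction() caches the subtarget tables once (2183-2184) and then scans every instruction, using isFoldableCopy() (2220) to find fold candidates. A minimal sketch of that setup with the per-copy folding body elided:

    // Per-function setup: cache TII/TRI from the GCN subtarget, then walk
    // all instructions looking for foldable copies.
    const GCNSubtarget *ST = &MF.getSubtarget<GCNSubtarget>();
    const SIInstrInfo *TII = ST->getInstrInfo();
    const SIRegisterInfo *TRI = &TII->getRegisterInfo();
    (void)TRI; // used by the register-class checks elided below
    for (MachineBasicBlock &MBB : MF)
      for (MachineInstr &MI : make_early_inc_range(MBB))
        if (TII->isFoldableCopy(MI)) {
          // ... try to fold MI's source into every use of its destination
        }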