Lines Matching refs:Src0
(Cross-reference listing; each entry reads: <line in file> <matching source line> in <enclosing function>. The functions shown are from LLVM's AMDGPU backend, SIInstrInfo.cpp.)
2719 MachineOperand &Src0, in swapSourceModifiers() argument
2787 MachineOperand &Src0 = MI.getOperand(Src0Idx); in commuteInstructionImpl() local
2791 if (Src0.isReg() && Src1.isReg()) { in commuteInstructionImpl()
2792 if (isOperandLegal(MI, Src1Idx, &Src0)) { in commuteInstructionImpl()
2798 } else if (Src0.isReg() && !Src1.isReg()) { in commuteInstructionImpl()
2801 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); in commuteInstructionImpl()
2802 } else if (!Src0.isReg() && Src1.isReg()) { in commuteInstructionImpl()
2803 if (isOperandLegal(MI, Src1Idx, &Src0)) in commuteInstructionImpl()
2804 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); in commuteInstructionImpl()
2811 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, in commuteInstructionImpl()
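The cluster above is commuteInstructionImpl deciding how to swap src0 and src1: a reg/reg pair swaps in place if src0 is legal in the src1 slot, mixed reg/imm pairs go through swapRegAndNonRegOperand, and the neg/abs source modifiers are swapped alongside. A minimal standalone model of that decision tree, using plain structs rather than LLVM's MachineOperand (Operand and isOperandLegalInSrc1Slot here are illustrative stand-ins):

```cpp
#include <optional>
#include <utility>

// Illustrative stand-in only; not LLVM's MachineOperand.
struct Operand {
  bool IsReg = false;
  int Value = 0; // register id or immediate payload
};

bool isOperandLegalInSrc1Slot(const Operand &Op) {
  (void)Op; // the real check consults register classes and encodings
  return true;
}

std::optional<std::pair<Operand, Operand>>
commuteSources(const Operand &Src0, const Operand &Src1) {
  if (Src0.IsReg && Src1.IsReg) {
    // reg/reg: swap only if Src0 is legal in the src1 slot.
    if (!isOperandLegalInSrc1Slot(Src0))
      return std::nullopt;
  } else if (Src0.IsReg && !Src1.IsReg) {
    // reg/imm: swapRegAndNonRegOperand territory in the real code.
  } else if (!Src0.IsReg && Src1.IsReg) {
    // imm/reg: again gated on Src0 being legal where src1 sits.
    if (!isOperandLegalInSrc1Slot(Src0))
      return std::nullopt;
  }
  // neg/abs source modifiers travel with their operands; that is the
  // separate swapSourceModifiers step in the cluster above.
  return std::make_pair(Src1, Src0);
}
```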
3511 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); in foldImmediate() local
3514 if (isInlineConstant(UseMI, *Src0, *ImmOp)) in foldImmediate()
3527 if ((Src0->isReg() && Src0->getReg() == Reg) || in foldImmediate()
3530 Src1->isReg() && Src1->getReg() == Reg ? Src0 : Src1; in foldImmediate()
3567 const int64_t Imm = getImmFor(RegSrc == Src1 ? *Src0 : *Src1); in foldImmediate()
3574 Src0->setReg(SrcReg); in foldImmediate()
3575 Src0->setSubReg(SrcSubReg); in foldImmediate()
3576 Src0->setIsKill(RegSrc->isKill()); in foldImmediate()
3602 if (Src0->isReg()) { in foldImmediate()
3606 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); in foldImmediate()
3609 MRI->hasOneUse(Src0->getReg())) { in foldImmediate()
3610 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); in foldImmediate()
3613 RI.isSGPRReg(*MRI, Src0->getReg())) { in foldImmediate()
3625 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); in foldImmediate()
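foldImmediate only folds a materialized constant when it is cheap at its new position, which is what the isInlineConstant query above decides. A hedged sketch of the AMDGPU inline-constant rule for 32-bit operands: integers in [-16, 64] and a small set of float bit patterns encode for free (some subtargets also accept 1/(2*pi); omitted here), while anything else costs a literal slot:

```cpp
#include <cstdint>

// Integer inline constants span -16..64 inclusive.
bool isInlineConstantI32(int64_t Imm) {
  return Imm >= -16 && Imm <= 64;
}

// Common 32-bit float inline constants, by bit pattern.
bool isInlineConstantF32(uint32_t Bits) {
  switch (Bits) {
  case 0x00000000: // 0.0
  case 0x3F000000: // 0.5
  case 0xBF000000: // -0.5
  case 0x3F800000: // 1.0
  case 0xBF800000: // -1.0
  case 0x40000000: // 2.0
  case 0xC0000000: // -2.0
  case 0x40800000: // 4.0
  case 0xC0800000: // -4.0
    return true;
  default:
    return false;
  }
}
```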
3916 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); in convertToThreeAddress() local
3917 if (!Src0->isReg() && !Src0->isImm()) in convertToThreeAddress()
3920 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) in convertToThreeAddress()
3929 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); in convertToThreeAddress() local
3945 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() || in convertToThreeAddress()
3946 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) { in convertToThreeAddress()
3972 .add(*Src0) in convertToThreeAddress()
3992 .add(*Src0) in convertToThreeAddress()
4003 if (Src0Literal || getFoldableImm(Src0, Imm, &DefMI)) { in convertToThreeAddress()
4005 Imm = Src0->getImm(); in convertToThreeAddress()
4047 .add(*Src0) in convertToThreeAddress()
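convertToThreeAddress is best known for rewriting the two-address v_mac (whose destination doubles as the accumulator) into the three-address v_mad/v_fma with an explicit addend, subject to the inline-constant and constant-bus checks quoted above. A toy arithmetic model of the equivalence, with plain floats standing in for machine operands:

```cpp
#include <cassert>

// v_mac: dst = a*b + dst, with dst tied to the accumulator.
float mac(float Dst, float A, float B) { return A * B + Dst; }
// v_mad: dst = a*b + c, addend passed as an explicit third source.
float mad(float A, float B, float C) { return A * B + C; }

int main() {
  float Acc = 3.0f;
  // The three-address form passes the old accumulator as src2.
  assert(mac(Acc, 2.0f, 4.0f) == mad(2.0f, 4.0f, Acc));
  return 0;
}
```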
5004 const MachineOperand &Src0 = MI.getOperand(Src0Idx); in verifyInstruction() local
5007 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { in verifyInstruction()
5008 if (!compareMachineOp(Src0, Src1) && in verifyInstruction()
5009 !compareMachineOp(Src0, Src2)) { in verifyInstruction()
5026 const MachineOperand &Src0 = MI.getOperand(Src0Idx); in verifyInstruction() local
5029 if (!Src0.isReg() && !Src1.isReg() && in verifyInstruction()
5030 !isInlineConstant(Src0, Desc.operands()[Src0Idx]) && in verifyInstruction()
5032 !Src0.isIdenticalTo(Src1)) { in verifyInstruction()
5095 const MachineOperand &Src0 = MI.getOperand(Src0Idx); in verifyInstruction() local
5099 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { in verifyInstruction()
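Both verifyInstruction checks above are encoding rules. The first (all three sources registers, src0 matching neither src1 nor src2) resembles the v_div_scale constraint that src0 must equal src1 or src2; the second rejects two distinct non-inline literals, since a single encoding carries at most one 32-bit literal. A hedged sketch of the literal rule with simplified stand-in types:

```cpp
#include <cstdint>

bool isInlineConstantI32(int64_t Imm) { return Imm >= -16 && Imm <= 64; }

// Simplified stand-in for a source operand.
struct Src {
  bool IsReg;
  int64_t Imm; // meaningful only when !IsReg
};

bool literalsAreLegal(Src Src0, Src Src1) {
  if (Src0.IsReg || Src1.IsReg)
    return true; // at most one literal in play
  if (isInlineConstantI32(Src0.Imm) || isInlineConstantI32(Src1.Imm))
    return true; // inline constants don't consume the literal slot
  return Src0.Imm == Src1.Imm; // identical literals share one slot
}
```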
5854 MachineOperand &Src0 = MI.getOperand(Src0Idx); in legalizeOperandsVOP2() local
5862 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && Src0.isReg() && in legalizeOperandsVOP2()
5863 RI.isSGPRReg(MRI, Src0.getReg())) in legalizeOperandsVOP2()
5871 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { in legalizeOperandsVOP2()
5874 .add(Src0); in legalizeOperandsVOP2()
5875 Src0.ChangeToRegister(Reg, false); in legalizeOperandsVOP2()
5888 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) in legalizeOperandsVOP2()
5934 !isLegalRegOperand(MRI, InstrDesc.operands()[Src1Idx], Src0)) { in legalizeOperandsVOP2()
5947 Register Src0Reg = Src0.getReg(); in legalizeOperandsVOP2()
5948 unsigned Src0SubReg = Src0.getSubReg(); in legalizeOperandsVOP2()
5949 bool Src0Kill = Src0.isKill(); in legalizeOperandsVOP2()
5952 Src0.ChangeToImmediate(Src1.getImm()); in legalizeOperandsVOP2()
5954 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); in legalizeOperandsVOP2()
5955 Src0.setSubReg(Src1.getSubReg()); in legalizeOperandsVOP2()
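legalizeOperandsVOP2 enforces the VOP2 constraint that src1 must be a VGPR: an illegal src1 is fixed either by the manual operand swap quoted above (the ChangeToImmediate/ChangeToRegister sequence plus subreg/kill bookkeeping) or by copying the value into a fresh VGPR. A simplified standalone sketch of that strategy (Kind, Op, and copyToVGPR are illustrative stand-ins; the real code additionally worries about the constant-bus limit and implicit SGPR reads):

```cpp
#include <utility>

enum class Kind { VGPR, SGPR, Imm };
struct Op { Kind K; int Val; };

static int NextVGPR = 256; // hypothetical fresh-register counter
Op copyToVGPR(const Op &O) {
  (void)O; // the real code emits a v_mov_b32 / COPY here
  return {Kind::VGPR, NextVGPR++};
}

void legalizeVOP2(Op &Src0, Op &Src1) {
  if (Src1.K == Kind::VGPR)
    return; // src1 already satisfies the VOP2 constraint
  if (Src0.K == Kind::VGPR) {
    // Mirror of the manual swap above: src0 and src1 trade contents.
    std::swap(Src0, Src1);
    return;
  }
  // Both sources are scalar or immediate: materialize src1 in a VGPR.
  Src1 = copyToVGPR(Src1);
}
```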
6671 Register Src0 = MI.getOperand(1).getReg(); in legalizeOperands() local
6673 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); in legalizeOperands()
6759 MachineOperand &Src0 = MI.getOperand(Src0Idx); in legalizeOperands() local
6761 .add(Src0); in legalizeOperands()
6762 Src0.ChangeToRegister(Reg, false); in legalizeOperands()
7226 MachineOperand &Src0 = Inst.getOperand(2); in moveToVALUImpl() local
7237 .add(Src0) in moveToVALUImpl()
7551 MachineOperand &Src0 = Inst.getOperand(1); in lowerSelect() local
7562 if (!IsSCC && Src0.isImm() && (Src0.getImm() == -1) && Src1.isImm() && in lowerSelect()
7611 .add(Src0) // True in lowerSelect()
7617 .add(Src0) // True in lowerSelect()
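The lowerSelect special case at 7562 above tests for a select between the immediates -1 and 0: across lanes, select(cond, -1, 0) simply reproduces the condition bitmask, so no v_cndmask is needed. A small demonstration with plain integers modeling the lane masks:

```cpp
#include <cassert>
#include <cstdint>

// Per-lane select: for each bit, pick T's bit if Cond is set, else F's.
uint64_t laneSelect(uint64_t Cond, uint64_t T, uint64_t F) {
  return (Cond & T) | (~Cond & F);
}

int main() {
  uint64_t Cond = 0xF0F0F0F0F0F0F0F0ull;
  // select(cond, -1, 0) == cond: the mask passes through unchanged.
  assert(laneSelect(Cond, ~0ull, 0ull) == Cond);
  return 0;
}
```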
7660 MachineOperand &Src0 = Inst.getOperand(1); in lowerScalarXnor() local
7665 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); in lowerScalarXnor()
7669 .add(Src0) in lowerScalarXnor()
7679 bool Src0IsSGPR = Src0.isReg() && in lowerScalarXnor()
7680 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); in lowerScalarXnor()
7691 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); in lowerScalarXnor()
7698 .add(Src0) in lowerScalarXnor()
7702 .add(Src0) in lowerScalarXnor()
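lowerScalarXnor exploits the identity ~(a ^ b) == (~a) ^ b: on targets without a v_xnor instruction, the s_not_b32 quoted above is applied to one operand so the NOT stays on the scalar unit while the xor moves to the VALU. A quick check of the identity:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint32_t A = 0xDEADBEEF, B = 0x12345678;
  // The NOT folds into either xor operand without changing the result.
  assert(~(A ^ B) == ((~A) ^ B));
  return 0;
}
```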
7726 MachineOperand &Src0 = Inst.getOperand(1); in splitScalarNotBinop() local
7733 .add(Src0) in splitScalarNotBinop()
7755 MachineOperand &Src0 = Inst.getOperand(1); in splitScalarBinOpN2() local
7765 .add(Src0) in splitScalarBinOpN2()
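The two helpers above decompose composite scalar ops that have no direct VALU counterpart: splitScalarNotBinop rewrites s_nand/s_nor as the base op followed by a NOT, and splitScalarBinOpN2 rewrites s_andn2/s_orn2 as a NOT on the second operand followed by the base op. Both splits verified on plain 32-bit integers:

```cpp
#include <cassert>
#include <cstdint>

uint32_t nand32(uint32_t A, uint32_t B) { return ~(A & B); }
uint32_t andn2_32(uint32_t A, uint32_t B) { return A & ~B; }

int main() {
  uint32_t A = 0xCAFEBABE, B = 0x0F0F0F0F;
  // splitScalarNotBinop: s_nand -> s_and into a temp, then s_not.
  uint32_t Temp = A & B;
  assert(nand32(A, B) == ~Temp);
  // splitScalarBinOpN2: s_andn2 -> s_not on src1, then s_and.
  uint32_t NotB = ~B;
  assert(andn2_32(A, B) == (A & NotB));
  return 0;
}
```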
7782 MachineOperand &Src0 = Inst.getOperand(1); in splitScalar64BitUnaryOp() local
7788 const TargetRegisterClass *Src0RC = Src0.isReg() ? in splitScalar64BitUnaryOp()
7789 MRI.getRegClass(Src0.getReg()) : in splitScalar64BitUnaryOp()
7795 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, in splitScalar64BitUnaryOp()
7806 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, in splitScalar64BitUnaryOp()
7847 MachineOperand &Src0 = Inst.getOperand(1); in splitScalarSMulU64() local
7852 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); in splitScalarSMulU64()
7866 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); in splitScalarSMulU64()
7870 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); in splitScalarSMulU64()
7956 MachineOperand &Src0 = Inst.getOperand(1); in splitScalarSMulPseudo() local
7961 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); in splitScalarSMulPseudo()
7975 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); in splitScalarSMulPseudo()
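splitScalarSMulU64 (and the pseudo variant below it) lowers a 64-bit multiply using the 32-bit halves extracted via sub0/sub1 above: one full 32x32 product for the low words plus two cross products that only touch the upper half. The decomposition, checked against a native 64-bit multiply:

```cpp
#include <cassert>
#include <cstdint>

uint64_t mul64ViaHalves(uint64_t A, uint64_t B) {
  uint32_t ALo = (uint32_t)A, AHi = (uint32_t)(A >> 32);
  uint32_t BLo = (uint32_t)B, BHi = (uint32_t)(B >> 32);
  uint64_t Lo = (uint64_t)ALo * BLo; // the mul_lo/mul_hi pair
  // Cross products land in the upper 32 bits; AHi*BHi overflows out.
  uint64_t Cross = (uint64_t)ALo * BHi + (uint64_t)AHi * BLo;
  return Lo + (Cross << 32);
}

int main() {
  uint64_t A = 0x123456789ABCDEF0ull, B = 0xFEDCBA9876543210ull;
  assert(mul64ViaHalves(A, B) == A * B);
  return 0;
}
```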
8015 MachineOperand &Src0 = Inst.getOperand(1); in splitScalar64BitBinaryOp() local
8022 const TargetRegisterClass *Src0RC = Src0.isReg() ? in splitScalar64BitBinaryOp()
8023 MRI.getRegClass(Src0.getReg()) : in splitScalar64BitBinaryOp()
8035 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, in splitScalar64BitBinaryOp()
8039 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, in splitScalar64BitBinaryOp()
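splitScalar64BitBinaryOp (and splitScalar64BitUnaryOp above it) rely on bitwise ops having no cross-bit carries: a 64-bit and/or/xor decomposes into two independent 32-bit ops on the sub0 and sub1 halves, which is exactly what the buildExtractSubRegOrImm calls feed. Sketched for and:

```cpp
#include <cassert>
#include <cstdint>

uint64_t and64ViaHalves(uint64_t A, uint64_t B) {
  uint32_t Lo = (uint32_t)A & (uint32_t)B;                  // op on sub0
  uint32_t Hi = (uint32_t)(A >> 32) & (uint32_t)(B >> 32);  // op on sub1
  return ((uint64_t)Hi << 32) | Lo; // the REG_SEQUENCE rejoin
}

int main() {
  uint64_t A = 0xFF00FF00FF00FF00ull, B = 0x0FF00FF00FF00FF0ull;
  assert(and64ViaHalves(A, B) == (A & B));
  return 0;
}
```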
8082 MachineOperand &Src0 = Inst.getOperand(1); in splitScalar64BitXnor() local
8095 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { in splitScalar64BitXnor()
8096 Op0 = &Src0; in splitScalar64BitXnor()
8100 Op1 = &Src0; in splitScalar64BitXnor()
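splitScalar64BitXnor chooses which operand receives the NOT: since ~(a ^ b) equals both (~a) ^ b and a ^ (~b), the code above (8095-8100) puts the NOT on whichever side is an SGPR so that half stays on the scalar unit. Both forms of the identity:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  uint64_t A = 0x0123456789ABCDEFull, B = 0xFEDCBA9876543210ull;
  assert(~(A ^ B) == ((~A) ^ B)); // NOT on the first operand
  assert(~(A ^ B) == (A ^ (~B))); // NOT on the second operand
  return 0;
}
```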
8312 MachineOperand &Src0 = Inst.getOperand(1); in movePackToVALU() local
8328 .add(Src0); in movePackToVALU()
8342 .add(Src0) in movePackToVALU()
8350 .add(Src0); in movePackToVALU()
8362 .add(Src0); in movePackToVALU()
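movePackToVALU expands the s_pack family on the vector unit: for the ll variant, src0 lands in the low 16 bits and src1 in the high 16 bits of a 32-bit register, built from a shift and an or (the .add(Src0) uses above feed those building blocks). A sketch of the ll packing:

```cpp
#include <cassert>
#include <cstdint>

// s_pack_ll_b32_b16 semantics: src0 -> low half, src1 -> high half.
uint32_t packLoLo(uint16_t Src0, uint16_t Src1) {
  return (uint32_t)Src0 | ((uint32_t)Src1 << 16);
}

int main() {
  assert(packLoLo(0xBEEF, 0xDEAD) == 0xDEADBEEFu);
  return 0;
}
```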