/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/
MachineIRBuilder.h
    1149  MachineInstrBuilder buildInsertSubvector(const DstOp &Res, const SrcOp &Src0,
    1673  MachineInstrBuilder buildAdd(const DstOp &Dst, const SrcOp &Src0,
    1676    return buildInstr(TargetOpcode::G_ADD, {Dst}, {Src0, Src1}, Flags);
    1690  MachineInstrBuilder buildSub(const DstOp &Dst, const SrcOp &Src0,
    1693    return buildInstr(TargetOpcode::G_SUB, {Dst}, {Src0, Src1}, Flags);
    1706  MachineInstrBuilder buildMul(const DstOp &Dst, const SrcOp &Src0,
    1709    return buildInstr(TargetOpcode::G_MUL, {Dst}, {Src0, Src1}, Flags);
    1712  MachineInstrBuilder buildUMulH(const DstOp &Dst, const SrcOp &Src0,
    1715    return buildInstr(TargetOpcode::G_UMULH, {Dst}, {Src0, Src1}, Flags);
    1718  MachineInstrBuilder buildSMulH(const DstOp &Dst, const SrcOp &Src0,
  [all …]
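The buildAdd/buildSub/buildMul family shown above are thin wrappers around buildInstr() with the corresponding generic opcode. A minimal usage sketch, purely illustrative and not taken from the indexed tree, assuming a MachineIRBuilder B that the caller has already positioned and two s32 virtual registers A and Bv:

    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

    using namespace llvm;

    // Emit %sum = G_ADD %A, %Bv and %prod = G_MUL %sum, %A at B's current
    // insertion point, returning the register that holds the product.
    static Register emitMulOfSum(MachineIRBuilder &B, Register A, Register Bv) {
      LLT S32 = LLT::scalar(32);
      auto Sum = B.buildAdd(S32, A, Bv);   // forwards to buildInstr(G_ADD, ...)
      auto Prod = B.buildMul(S32, Sum, A); // forwards to buildInstr(G_MUL, ...)
      return Prod.getReg(0);
    }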
GISelKnownBits.h
    38  void computeKnownBitsMin(Register Src0, Register Src1, KnownBits &Known,
    42  unsigned computeNumSignBitsMin(Register Src0, Register Src1,
MIPatternMatch.h
    728  Src0Ty Src0;
    732  TernaryOp_match(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2)
    733      : Src0(Src0), Src1(Src1), Src2(Src2) {}
    739    return (Src0.match(MRI, TmpMI->getOperand(1).getReg()) &&
    750  m_GInsertVecElt(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
    752      TargetOpcode::G_INSERT_VECTOR_ELT>(Src0, Src1, Src2);
    757  m_GISelect(const Src0Ty &Src0, const Src1Ty &Src1, const Src2Ty &Src2) {
    759      Src0, Src1, Src2);
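TernaryOp_match above is the matcher behind three-operand helpers such as m_GInsertVecElt and m_GISelect; it binds operands 1 through 3 of the matched generic instruction. A hedged sketch of how a combine might use it (the helper name and the fold are invented for illustration):

    #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;
    using namespace MIPatternMatch;

    // True if Reg is defined by G_SELECT %cond, %x, %x, i.e. a select whose
    // two value operands are the same register and which can be replaced by %x.
    static bool isSelectOfSameReg(Register Reg, const MachineRegisterInfo &MRI) {
      Register Cond, TVal, FVal;
      if (!mi_match(Reg, MRI, m_GISelect(m_Reg(Cond), m_Reg(TVal), m_Reg(FVal))))
        return false;
      return TVal == FVal;
    }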
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
R600ExpandSpecialInstrs.cpp
  in runOnMachineFunction():
    146  Register Src0 =   [local]
    152  (void) Src0;
    154  if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
    156  assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
    198  Register Src0 =   [local]
    211  Src0 = TRI.getSubReg(Src0, SubRegIndex);
    217  Src1 = TRI.getSubReg(Src0, SubRegIndex1);
    218  Src0 = TRI.getSubReg(Src0, SubRegIndex0);
    252  TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);
SIShrinkInstructions.cpp
  in foldImmediates():
    96   MachineOperand &Src0 = MI.getOperand(Src0Idx);   [local]
    97   if (Src0.isReg()) {
    98   Register Reg = Src0.getReg();
    107  Src0.ChangeToImmediate(MovSrc.getImm());
    110  Src0.ChangeToFrameIndex(MovSrc.getIndex());
    113  Src0.ChangeToGA(MovSrc.getGlobal(), MovSrc.getOffset(),
  in shrinkScalarCompare():
    243  const MachineOperand &Src0 = MI.getOperand(0);   [local]
    244  if (!Src0.isReg())
  in shrinkMadFma():
    418  MachineOperand &Src0 = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);   [local]
    429  else if (Src0.isReg() && TRI->isVGPR(*MRI, Src0.getReg()))
  [all …]
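The foldImmediates() excerpts above show the usual shape of this optimization: look through the MOV that defines a source register and rewrite the operand in place via ChangeToImmediate, ChangeToFrameIndex, or ChangeToGA. A simplified, target-neutral sketch of the immediate case; the helper and its MovOpcode parameter are inventions for illustration, and the real pass additionally checks kill flags and operand legality:

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // If Src is a virtual register whose SSA definition is a MovOpcode of an
    // immediate, rewrite Src in place to use that immediate directly.
    static bool foldDefIntoUse(MachineOperand &Src, MachineRegisterInfo &MRI,
                               unsigned MovOpcode) {
      if (!Src.isReg() || !Src.getReg().isVirtual())
        return false;
      MachineInstr *Def = MRI.getVRegDef(Src.getReg());
      if (!Def || Def->getOpcode() != MovOpcode)
        return false;
      const MachineOperand &MovSrc = Def->getOperand(1);
      if (!MovSrc.isImm())
        return false;
      Src.ChangeToImmediate(MovSrc.getImm());
      return true;
    }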
GCNVOPDUtils.cpp
  in checkVOPDRegConstraints():
    83  const MachineOperand &Src0 = MI.getOperand(VOPD::Component::SRC0);   [local]
    84  if (Src0.isReg()) {
    85  if (!TRI->isVectorRegister(MRI, Src0.getReg())) {
    86  if (!is_contained(UniqueScalarRegs, Src0.getReg()))
    87  UniqueScalarRegs.push_back(Src0.getReg());
    91  addLiteral(Src0);
AMDGPUInstCombineIntrinsic.cpp
  in fmed3AMDGCN():
    45  static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,   [argument]
    47  APFloat Max3 = maxnum(maxnum(Src0, Src1), Src2);
    49  APFloat::cmpResult Cmp0 = Max3.compare(Src0);
    57  return maxnum(Src0, Src2);
    59  return maxnum(Src0, Src1);
  in instCombineIntrinsic():
    601  Value *Src0 = II.getArgOperand(0);   [local]
    606  II.getModule(), Intrinsic::is_fpclass, Src0->getType()));
    615  if (isa<PoisonValue>(Src0) || isa<PoisonValue>(Src1))
    623  if (IC.getSimplifyQuery().isUndefValue(Src0)) {
    631  Value *Src0 = II.getArgOperand(0);   [local]
  [all …]
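fmed3AMDGCN() above constant-folds the amdgcn.fmed3 intrinsic by finding the maximum of the three inputs and returning the larger of the remaining pair. The same median can be written as a clamp; a standalone sketch over APFloat (an equivalent formulation, not the excerpted code):

    #include "llvm/ADT/APFloat.h"

    using llvm::APFloat;

    // Median of three values: clamp C into the interval [min(A,B), max(A,B)],
    // i.e. med3(a, b, c) == max(min(a, b), min(max(a, b), c)).
    static APFloat med3(const APFloat &A, const APFloat &B, const APFloat &C) {
      APFloat Lo = llvm::minnum(A, B);
      APFloat Hi = llvm::maxnum(A, B);
      return llvm::maxnum(Lo, llvm::minnum(Hi, C));
    }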
SIOptimizeExecMasking.cpp
  in optimizeExecSequence():
    536  MachineOperand &Src0 = SaveExecInst->getOperand(1);   [local]
    541  if (Src0.isReg() && Src0.getReg() == CopyFromExec) {
    547  OtherOp = &Src0;
  in optimizeVCMPSaveExecSequence():
    583  MachineOperand *Src0 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src0);   [local]
    609  Builder.add(*Src0);
    617  if (Src0->isReg())
    618  MRI->clearKillFlags(Src0->getReg());
  in tryRecordVCmpxAndSaveexecSequence():
    681  MachineOperand *Src0 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src0);   [local]
    682  if (Src0->isReg() && TRI->isSGPRReg(*MRI, Src0->getReg()) &&
    683  MI.modifiesRegister(Src0->getReg(), TRI))
  [all …]
AMDGPUCombinerHelper.cpp
  in matchExpandPromotedF16FMed3():
    419  Register Src0,   [argument]
    427  return isFPExtFromF16OrConst(MRI, Src0) && isFPExtFromF16OrConst(MRI, Src1) &&
  in applyExpandPromotedF16FMed3():
    432  Register Src0,   [argument]
    437  Src0 = Builder.buildFPTrunc(LLT::scalar(16), Src0).getReg(0);
    441  LLT Ty = MRI.getType(Src0);
    442  auto A1 = Builder.buildFMinNumIEEE(Ty, Src0, Src1);
    443  auto B1 = Builder.buildFMaxNumIEEE(Ty, Src0, Src1);
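applyExpandPromotedF16FMed3() above narrows the fpext'ed sources back to f16 and rebuilds the median out of IEEE min/max nodes. A condensed sketch of that ladder, assuming a positioned MachineIRBuilder and three sources that the matcher has already proven to be fpext-from-f16 or constants:

    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

    using namespace llvm;

    static Register buildF16Med3(MachineIRBuilder &B, Register S0, Register S1,
                                 Register S2) {
      LLT S16 = LLT::scalar(16);
      // Truncate the promoted operands back to half precision.
      Register A = B.buildFPTrunc(S16, S0).getReg(0);
      Register Bv = B.buildFPTrunc(S16, S1).getReg(0);
      Register C = B.buildFPTrunc(S16, S2).getReg(0);
      // med3(a, b, c) = max(min(a, b), min(max(a, b), c))
      auto Lo = B.buildFMinNumIEEE(S16, A, Bv);
      auto Hi = B.buildFMaxNumIEEE(S16, A, Bv);
      auto Mid = B.buildFMinNumIEEE(S16, Hi, C);
      return B.buildFMaxNumIEEE(S16, Lo, Mid).getReg(0);
    }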
SIFoldOperands.cpp
  in tryConstantFoldOp():
    1218  MachineOperand *Src0 = getImmOrMaterializedImm(MI->getOperand(Src0Idx));   [local]
    1222  Src0->isImm()) {
    1223  MI->getOperand(1).ChangeToImmediate(~Src0->getImm());
    1233  if (!Src0->isImm() && !Src1->isImm())
    1239  if (Src0->isImm() && Src1->isImm()) {
    1241  if (!evalBinaryInstruction(Opc, NewImm, Src0->getImm(), Src1->getImm()))
    1257  if (Src0->isImm() && !Src1->isImm()) {
    1258  std::swap(Src0, Src1);
  in tryFoldCndMask():
    1316  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);   [local]
    1318  if (!Src1->isIdenticalTo(*Src0)) {
  [all …]
AMDGPUPostLegalizerCombiner.cpp
  in matchCvtF32UByteN():
    324  Register Src0;   [local]
    326  IsShr = mi_match(SrcReg, MRI, m_GLShr(m_Reg(Src0), m_ICst(ShiftAmt)));
    327  if (IsShr || mi_match(SrcReg, MRI, m_GShl(m_Reg(Src0), m_ICst(ShiftAmt)))) {
    336  MatchInfo.CvtVal = Src0;
  in matchCombine_s_mul_u64():
    419  Register Src0 = MI.getOperand(1).getReg();   [local]
    421  if (MRI.getType(Src0) != LLT::scalar(64))
    425  KB->getKnownBits(Src0).countMinLeadingZeros() >= 32) {
    431  KB->computeNumSignBits(Src0) >= 33) {
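The matchCombine_s_mul_u64() lines above gate a narrowing of 64-bit multiplies on known-bits facts: if both operands have at least 32 known leading zero bits, the full product fits a widening unsigned 32x32 multiply, and the >= 33 sign-bit check covers the signed variant. A hedged sketch of just the unsigned test, assuming a GISelKnownBits analysis named KB:

    #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"

    using namespace llvm;

    // True if a 64-bit multiply of Src0 and Src1 can be done as an unsigned
    // 32x32->64 multiply: neither operand has any of its top 32 bits set.
    static bool fitsUnsignedMul32(GISelKnownBits &KB, Register Src0,
                                  Register Src1) {
      return KB.getKnownBits(Src0).countMinLeadingZeros() >= 32 &&
             KB.getKnownBits(Src1).countMinLeadingZeros() >= 32;
    }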
GCNDPPCombine.cpp
  in createDPPInst():
    290  auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);   [local]
    291  assert(Src0);
    293  if (!TII->isOperandLegal(*DPPInst.getInstr(), NumOperands, Src0)) {
    298  DPPInst.add(*Src0);
  in combineDPPMov():
    684  auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0);   [local]
    686  if (Use != Src0 && !(Use == Src1 && OrigMI.isCommutable())) { // [1]
    692  assert(Src0 && "Src1 without Src0?");
    693  if ((Use == Src0 && ((Src1 && Src1->isIdenticalTo(*Src0)) ||
    694  (Src2 && Src2->isIdenticalTo(*Src0)))) ||
    695  (Use == Src1 && (Src1->isIdenticalTo(*Src0) ||
  [all …]
SIPeepholeSDWA.cpp
  in matchSDWAOperand():
    582  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);   [local]
    583  auto Imm = foldToImm(*Src0);
    622  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);   [local]
    623  auto Imm = foldToImm(*Src0);
    689  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);   [local]
    692  if (!Src0->isReg() || Src0->getReg().isPhysical() ||
    697  Src0, Dst, SrcSel, false, false, Opcode != AMDGPU::V_BFE_U32_e64);
    706  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);   [local]
    709  auto Imm = foldToImm(*Src0);
    713  ValSrc = Src0;
  [all …]
AMDGPUCombinerHelper.h
    30  bool matchExpandPromotedF16FMed3(MachineInstr &MI, Register Src0,
    32  void applyExpandPromotedF16FMed3(MachineInstr &MI, Register Src0,
SIInstrInfo.cpp
  in swapSourceModifiers():
    2719  MachineOperand &Src0,   [argument]
  in commuteInstructionImpl():
    2787  MachineOperand &Src0 = MI.getOperand(Src0Idx);   [local]
    2791  if (Src0.isReg() && Src1.isReg()) {
    2792  if (isOperandLegal(MI, Src1Idx, &Src0)) {
    2798  } else if (Src0.isReg() && !Src1.isReg()) {
    2801  CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
    2802  } else if (!Src0.isReg() && Src1.isReg()) {
    2803  if (isOperandLegal(MI, Src1Idx, &Src0))
    2804  CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
    2811  swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
  [all …]
AMDGPURegBankCombiner.cpp
  in matchFPMed3ToClamp():
    316  MachineInstr *Src0 = getDefIgnoringCopies(MI.getOperand(1).getReg(), MRI);   [local]
    320  if (isFCst(Src0) && !isFCst(Src1))
    321  std::swap(Src0, Src1);
    324  if (isFCst(Src0) && !isFCst(Src1))
    325  std::swap(Src0, Src1);
    329  Register Val = Src0->getOperand(0).getReg();
SIModeRegister.cpp
  in getInstructionMode():
    182  MachineOperand Src0 = MI.getOperand(1);   [local]
    185  B.add(Src0); // re-add src0 operand
    199  MachineOperand Src0 = MI.getOperand(1);   [local]
    202  B.add(Src0); // re-add src0 operand
SIISelLowering.cpp
  in EmitInstrWithCustomInserter():
    4975  MachineOperand &Src0 = MI.getOperand(2);   [local]
    4981  BuildMI(*BB, MI, DL, TII->get(Opc), Dest0.getReg()).add(Src0).add(Src1);
    4998  MachineOperand &Src0 = MI.getOperand(1);   [local]
    5004  .add(Src0)
    5014  MI, MRI, Src0, BoolRC, AMDGPU::sub0, &AMDGPU::SReg_32RegClass);
    5016  MI, MRI, Src0, BoolRC, AMDGPU::sub1, &AMDGPU::SReg_32RegClass);
    5050  MachineOperand &Src0 = MI.getOperand(1);   [local]
    5056  .add(Src0)
    5072  const TargetRegisterClass *Src0RC = Src0.isReg()
    5073  ? MRI.getRegClass(Src0.getReg())
  [all …]
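Line 4981 above shows the usual pseudo-expansion pattern in EmitInstrWithCustomInserter(): build the real instruction with BuildMI and transfer the pseudo's operands verbatim with .add(). A generic sketch of that shape; the helper, its fixed operand layout, and the Opc parameter are illustrative, while the real code chooses the opcode per pseudo and handles multi-result cases:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // Replace a one-def, two-source pseudo MI with the real opcode Opc,
    // copying its operands (registers, immediates, flags) unchanged.
    static void expandBinaryPseudo(MachineBasicBlock &MBB, MachineInstr &MI,
                                   const TargetInstrInfo &TII, unsigned Opc) {
      const DebugLoc &DL = MI.getDebugLoc();
      MachineOperand &Dest = MI.getOperand(0);
      MachineOperand &Src0 = MI.getOperand(1);
      MachineOperand &Src1 = MI.getOperand(2);
      BuildMI(MBB, MI, DL, TII.get(Opc), Dest.getReg()).add(Src0).add(Src1);
      MI.eraseFromParent();
    }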
SIFixSGPRCopies.cpp
  in runOnMachineFunction():
    708  MachineOperand &Src0 = MI.getOperand(Src0Idx);   [local]
    712  if ((Src0.isReg() && TRI->isSGPRReg(*MRI, Src0.getReg()) &&
    713  Src0.getReg() != AMDGPU::M0) &&
    722  for (MachineOperand *MO : {&Src0, &Src1}) {
SILoadStoreOptimizer.cpp
  in copyFromSrcRegs():
    1263  const auto *Src0 = TII->getNamedOperand(*CI.I, OpName);   [local]
    1267  .add(*Src0)
  in processBaseWithConstOffset():
    2051  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);   [local]
    2054  auto Offset0P = extractConstOffset(*Src0);
    2060  BaseLo = *Src0;
    2063  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
    2066  if (Src0->isImm())
    2067  std::swap(Src0, Src1);
    2069  if (!Src1->isImm() || Src0->isImm())
    2073  BaseHi = *Src0;
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/
AArch64AdvSIMDScalarPass.cpp
  in transformInstruction():
    298  unsigned Src0 = 0, SubReg0;   [local]
    309  Src0 = MOSrc0->getReg();
    341  if (!Src0) {
    343  Src0 = MRI->createVirtualRegister(&AArch64::FPR64RegClass);
    344  insertCopy(TII, MI, Src0, OrigSrc0, KillSrc0);
    363  .addReg(Src0, getKillRegState(KillSrc0), SubReg0)
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/
PPCExpandAtomicPseudoInsts.cpp
  in PairedCopy():
    53  Register Dest0, Register Dest1, Register Src0,   [argument]
    57  if (Dest0 == Src1 && Dest1 == Src0) {
    62  } else if (Dest0 != Src0 || Dest1 != Src1) {
    63  if (Dest0 == Src1 || Dest1 != Src0) {
    65  BuildMI(MBB, MBBI, DL, OR, Dest0).addReg(Src0).addReg(Src0);
    67  BuildMI(MBB, MBBI, DL, OR, Dest0).addReg(Src0).addReg(Src0);
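PairedCopy() above has to order two register-to-register copies so that neither one reads a source the other has already overwritten, falling back to a swap sequence when the destination and source pairs alias crosswise. A self-contained sketch of that decision with a std::map standing in for the register file; purely illustrative, since the real function emits OR-based copies with BuildMI:

    #include <map>

    // Perform Dest0 <- Src0 and Dest1 <- Src1 over a toy register file,
    // picking an order (or a temporary) that never reads a clobbered source.
    static void pairedCopy(std::map<int, int> &RF, int Dest0, int Dest1,
                           int Src0, int Src1) {
      if (Dest0 == Src1 && Dest1 == Src0) {
        // Crosswise aliasing: no ordering works, so park one value first.
        int SavedSrc1 = RF[Src1];
        RF[Dest0] = RF[Src0];
        RF[Dest1] = SavedSrc1;
      } else if (Dest0 != Src0 || Dest1 != Src1) {
        if (Dest0 == Src1) {
          // Writing Dest0 first would clobber Src1, so copy Dest1 first.
          RF[Dest1] = RF[Src1];
          RF[Dest0] = RF[Src0];
        } else {
          RF[Dest0] = RF[Src0];
          RF[Dest1] = RF[Src1];
        }
      }
      // else: both copies are no-ops.
    }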
/freebsd/contrib/llvm-project/llvm/lib/Transforms/InstCombine/
InstCombineCalls.cpp
  in foldIntrinsicIsFPClass():
    907  Value *Src0 = II.getArgOperand(0);   [local]
    920  if (match(Src0, m_FNeg(m_Value(FNegSrc)))) {
    928  if (match(Src0, m_FAbs(m_Value(FAbsSrc)))) {
    939  Constant *Inf = ConstantFP::getInfinity(Src0->getType());
    945  Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);
    958  ConstantFP::getInfinity(Src0->getType(), OrderedMask == fcNegInf);
    959  Value *EqInf = IsUnordered ? Builder.CreateFCmpUEQ(Src0, Inf)
    960  : Builder.CreateFCmpOEQ(Src0, Inf);
    972  Constant *Inf = ConstantFP::getInfinity(Src0->getType(),
    974  Value *NeInf = IsUnordered ? Builder.CreateFCmpUNE(Src0, Inf)
  [all …]
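The foldIntrinsicIsFPClass() lines above turn class-mask queries into cheaper IR: fneg and fabs are peeled off the source, and infinity masks become comparisons against a ConstantFP infinity. A minimal sketch of the "is it any infinity" case, assuming a positioned IRBuilder; it mirrors the fabs-plus-fcmp idea rather than reproducing the exact folds above:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"

    using namespace llvm;

    // llvm.is.fpclass(x, fcInf)  <=>  fcmp oeq (fabs x), +inf
    // (an ordered compare, so NaN inputs correctly yield false).
    static Value *lowerIsAnyInf(IRBuilder<> &IRB, Value *Src0) {
      Value *Fabs = IRB.CreateUnaryIntrinsic(Intrinsic::fabs, Src0);
      Constant *PosInf = ConstantFP::getInfinity(Src0->getType());
      return IRB.CreateFCmpOEQ(Fabs, PosInf, "is.inf");
    }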
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/
GISelKnownBits.cpp
  in computeKnownBitsMin():
    112  void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1,   [argument]
    124  computeKnownBitsImpl(Src0, Known2, DemandedElts, Depth);
  in computeNumSignBitsMin():
    619  unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1,   [argument]
    626  return std::min(computeNumSignBits(Src0, DemandedElts, Depth), Src1SignBits);
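computeKnownBitsMin() above computes known bits for both sources and keeps only the facts that hold for each of them, which is the conservative answer for instructions that may forward either operand (min/max and similar). In KnownBits terms that combining step is an intersection; a tiny sketch with the depth and demanded-elements plumbing omitted:

    #include "llvm/Support/KnownBits.h"

    using llvm::KnownBits;

    // A bit is known in the result only if both operands agree on its value.
    static KnownBits knownForEitherOperand(const KnownBits &K0,
                                           const KnownBits &K1) {
      return K0.intersectWith(K1);
    }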
CSEMIRBuilder.cpp
  in buildInstr():
    261  const SrcOp &Src0 = SrcOps[0];   [local]
    264  ConstantFoldExtOp(Opc, Src0.getReg(), Src1.getImm(), *getMRI())