/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/

MachineIRBuilder.h
  1790  MachineInstrBuilder buildAnd(const DstOp &Dst, const SrcOp &Src0,  in buildAnd()
  1812  MachineInstrBuilder buildXor(const DstOp &Dst, const SrcOp &Src0,  in buildXor()
  1820  MachineInstrBuilder buildNot(const DstOp &Dst, const SrcOp &Src0) {  in buildNot()
  1828  MachineInstrBuilder buildNeg(const DstOp &Dst, const SrcOp &Src0) {  in buildNeg()
  1834  MachineInstrBuilder buildCTPOP(const DstOp &Dst, const SrcOp &Src0) {  in buildCTPOP()
  1839  MachineInstrBuilder buildCTLZ(const DstOp &Dst, const SrcOp &Src0) {  in buildCTLZ()
  1844  MachineInstrBuilder buildCTLZ_ZERO_UNDEF(const DstOp &Dst, const SrcOp &Src0) {  in buildCTLZ_ZERO_UNDEF()
  1849  MachineInstrBuilder buildCTTZ(const DstOp &Dst, const SrcOp &Src0) {  in buildCTTZ()
  1854  MachineInstrBuilder buildCTTZ_ZERO_UNDEF(const DstOp &Dst, const SrcOp &Src0) {  in buildCTTZ_ZERO_UNDEF()
  1859  MachineInstrBuilder buildBSwap(const DstOp &Dst, const SrcOp &Src0) {  in buildBSwap()
  [all …]
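For orientation, a minimal usage sketch of these builder helpers, assuming a MachineIRBuilder already positioned at an insertion point and two s32 virtual registers (the function and register names are hypothetical):

    #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

    using namespace llvm;

    // Sketch only: computes ctpop(Reg0 & ~Reg1) with the helpers indexed above.
    void emitMaskedPopCount(MachineIRBuilder &MIRBuilder, Register Reg0,
                            Register Reg1) {
      const LLT S32 = LLT::scalar(32);
      auto NotR1 = MIRBuilder.buildNot(S32, Reg1);         // emits G_XOR Reg1, -1
      auto Masked = MIRBuilder.buildAnd(S32, Reg0, NotR1); // emits G_AND
      MIRBuilder.buildCTPOP(S32, Masked);                  // emits G_CTPOP
    }

Each helper returns the MachineInstrBuilder for the new instruction, so one result can be fed directly into the next call as a SrcOp, as above.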
/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/

SIShrinkInstructions.cpp
  96    MachineOperand &Src0 = MI.getOperand(Src0Idx);  in foldImmediates() local
  243   const MachineOperand &Src0 = MI.getOperand(0);  in shrinkScalarCompare() local
  418   MachineOperand &Src0 = *TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in shrinkMadFma() local
  512   MachineOperand *Src0 = &MI.getOperand(1);  in shrinkScalarLogicOp() local
  847   MachineOperand *Src0 = &MI.getOperand(1);  in runOnMachineFunction() local
R600ExpandSpecialInstrs.cpp
  146   Register Src0 =  in runOnMachineFunction() local
  198   Register Src0 =  in runOnMachineFunction() local
SIPeepholeSDWA.cpp
  582   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in matchSDWAOperand() local
  622   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in matchSDWAOperand() local
  689   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in matchSDWAOperand() local
  706   MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in matchSDWAOperand() local
  1005  if (MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0)) {  in isConvertibleToSDWA() local
  1058  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in convertToSDWA() local
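The AMDGPU entries here and below share one idiom: src0 is looked up by operand name through SIInstrInfo rather than by a hard-coded index, because the operand's position can vary across encodings (VOP1/VOP2/VOP3/SDWA). A hedged sketch of that pattern; SIInstrInfo and AMDGPU::OpName::src0 are the real target-internal names, while the wrapper function itself is illustrative:

    // Compiles only inside the AMDGPU backend, which provides SIInstrInfo.h
    // and the generated AMDGPU::OpName enum.
    #include "SIInstrInfo.h"

    using namespace llvm;

    // Returns true if the instruction has a src0 operand and it is an immediate.
    static bool src0IsImmediate(const SIInstrInfo *TII, const MachineInstr &MI) {
      // getNamedOperand returns nullptr when the opcode has no src0 operand,
      // so callers in the passes listed here guard it the same way.
      const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
      return Src0 && Src0->isImm();
    }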
AMDGPUCombinerHelper.cpp
  419   Register Src0,  in matchExpandPromotedF16FMed3()
  432   Register Src0,  in applyExpandPromotedF16FMed3()
AMDGPUInstCombineIntrinsic.cpp
  45    static APFloat fmed3AMDGCN(const APFloat &Src0, const APFloat &Src1,  in fmed3AMDGCN()
  601   Value *Src0 = II.getArgOperand(0);  in instCombineIntrinsic() local
  631   Value *Src0 = II.getArgOperand(0);  in instCombineIntrinsic() local
  660   Value *Src0 = II.getArgOperand(0);  in instCombineIntrinsic() local
  763   Value *Src0 = II.getArgOperand(0);  in instCombineIntrinsic() local
  852   Value *Src0 = II.getArgOperand(0);  in instCombineIntrinsic() local
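fmed3AMDGCN constant-folds the amdgcn.fmed3 intrinsic, the median of three floats. A stand-alone sketch of that computation on APFloat using the minnum/maxnum free functions; the helper name is made up, and the in-tree folder additionally quiets signaling NaNs, which is omitted here:

    #include "llvm/ADT/APFloat.h"

    using namespace llvm;

    // med3(a, b, c) = max(min(a, b), min(max(a, b), c))
    static APFloat med3Sketch(const APFloat &Src0, const APFloat &Src1,
                              const APFloat &Src2) {
      APFloat Lo = minnum(Src0, Src1);
      APFloat Hi = maxnum(Src0, Src1);
      return maxnum(Lo, minnum(Hi, Src2));
    }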
SIOptimizeExecMasking.cpp
  536   MachineOperand &Src0 = SaveExecInst->getOperand(1);  in optimizeExecSequence() local
  583   MachineOperand *Src0 = TII->getNamedOperand(VCmp, AMDGPU::OpName::src0);  in optimizeVCMPSaveExecSequence() local
  681   MachineOperand *Src0 = TII->getNamedOperand(*VCmp, AMDGPU::OpName::src0);  in tryRecordVCmpxAndSaveexecSequence() local
SIModeRegister.cpp
  182   MachineOperand Src0 = MI.getOperand(1);  in getInstructionMode() local
  199   MachineOperand Src0 = MI.getOperand(1);  in getInstructionMode() local
SIFoldOperands.cpp
  1218  MachineOperand *Src0 = getImmOrMaterializedImm(MI->getOperand(Src0Idx));  in tryConstantFoldOp() local
  1316  MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in tryFoldCndMask() local
  1354  MachineOperand *Src0 = getImmOrMaterializedImm(MI.getOperand(1));  in tryFoldZeroHighBits() local
  1531  const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in isClamp() local
  1679  const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in isOMod() local
  1716  const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);  in isOMod() local
GCNDPPCombine.cpp
  290   auto *Src0 = TII->getNamedOperand(MovMI, AMDGPU::OpName::src0);  in createDPPInst() local
  684   auto *Src0 = TII->getNamedOperand(OrigMI, AMDGPU::OpName::src0);  in combineDPPMov() local
GCNVOPDUtils.cpp
  83    const MachineOperand &Src0 = MI.getOperand(VOPD::Component::SRC0);  in checkVOPDRegConstraints() local
AMDGPUPostLegalizerCombiner.cpp
  324   Register Src0;  in matchCvtF32UByteN() local
  419   Register Src0 = MI.getOperand(1).getReg();  in matchCombine_s_mul_u64() local
SIInstrInfo.cpp
  2719  MachineOperand &Src0,  in swapSourceModifiers()
  2787  MachineOperand &Src0 = MI.getOperand(Src0Idx);  in commuteInstructionImpl() local
  3511  MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);  in foldImmediate() local
  3916  const MachineOperand *Src0 = &MI.getOperand(Src0Idx);  in convertToThreeAddress() local
  3929  const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);  in convertToThreeAddress() local
  5004  const MachineOperand &Src0 = MI.getOperand(Src0Idx);  in verifyInstruction() local
  5026  const MachineOperand &Src0 = MI.getOperand(Src0Idx);  in verifyInstruction() local
  5095  const MachineOperand &Src0 = MI.getOperand(Src0Idx);  in verifyInstruction() local
  5854  MachineOperand &Src0 = MI.getOperand(Src0Idx);  in legalizeOperandsVOP2() local
  6671  Register Src0 = MI.getOperand(1).getReg();  in legalizeOperands() local
  [all …]
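The commuteInstructionImpl and swapSourceModifiers hits are the AMDGPU side of operand commutation: swapping src0 with src1 also swaps the matching src0_modifiers/src1_modifiers operands. Other passes reach this through the generic TargetInstrInfo entry point; a hedged sketch of such a call site (the wrapper and its motivation are illustrative, the hook itself is real):

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // Try to commute the two source operands, e.g. so an immediate lands in a
    // position where it can later be folded; returns false if not commutable.
    static bool tryCommuteSources(const TargetInstrInfo &TII, MachineInstr &MI,
                                  unsigned Src0Idx, unsigned Src1Idx) {
      return TII.commuteInstruction(MI, /*NewMI=*/false, Src0Idx, Src1Idx) != nullptr;
    }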
AMDGPURegBankCombiner.cpp
  316   MachineInstr *Src0 = getDefIgnoringCopies(MI.getOperand(1).getReg(), MRI);  in matchFPMed3ToClamp() local
SIISelLowering.cpp
  4975  MachineOperand &Src0 = MI.getOperand(2);  in EmitInstrWithCustomInserter() local
  4998  MachineOperand &Src0 = MI.getOperand(1);  in EmitInstrWithCustomInserter() local
  5050  MachineOperand &Src0 = MI.getOperand(1);  in EmitInstrWithCustomInserter() local
  5132  MachineOperand &Src0 = MI.getOperand(2);  in EmitInstrWithCustomInserter() local
  5289  const MachineOperand &Src0 = MI.getOperand(1);  in EmitInstrWithCustomInserter() local
  6055  SDValue Src0 = N->getOperand(1);  in lowerFCMPIntrinsic() local
  6124  SDValue Src2, MVT ValT) -> SDValue {  in lowerLaneOp()
  6160  SDValue Src0 = N->getOperand(1);  in lowerLaneOp() local
  6312  SDValue Src0 = N->getOperand(1);  in ReplaceNodeResults() local
  6324  SDValue Src0 = N->getOperand(1);  in ReplaceNodeResults() local
  [all …]
SILoadStoreOptimizer.cpp
  1263  const auto *Src0 = TII->getNamedOperand(*CI.I, OpName);  in copyFromSrcRegs() local
  2051  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);  in processBaseWithConstOffset() local
SIFixSGPRCopies.cpp
  708   MachineOperand &Src0 = MI.getOperand(Src0Idx);  in runOnMachineFunction() local
AMDGPULegalizerInfo.cpp
  3733  Register Src0 = MI.getOperand(1).getReg();  in legalizeFPow() local
  3842  Register Src0 = MI.getOperand(1).getReg();  in legalizeBuildVector() local
  3869  ArrayRef<Register> Src0,  in buildMultiply()
  4129  Register Src0 = MI.getOperand(1).getReg();  in legalizeMul() local
  5431  Register Src2, LLT VT) -> Register {  in legalizeLaneOp()
  5459  Register Src0 = MI.getOperand(2).getReg();  in legalizeLaneOp() local
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/

GISelKnownBits.cpp
  112   void GISelKnownBits::computeKnownBitsMin(Register Src0, Register Src1,  in computeKnownBitsMin()
  619   unsigned GISelKnownBits::computeNumSignBitsMin(Register Src0, Register Src1,  in computeNumSignBitsMin()
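Both "Min" helpers keep only the information shared by the two source registers, the conservative answer when either operand could be selected (min/max-like operations); the sign-bit variant takes the smaller of the two counts. A hedged sketch of the known-bits half, written against the public KnownBits members:

    #include "llvm/Support/KnownBits.h"

    using namespace llvm;

    // A bit of the result is known zero (or one) only if it is known zero
    // (or one) for both possible sources.
    static KnownBits intersectKnownBits(const KnownBits &K0, const KnownBits &K1) {
      KnownBits Result(K0.getBitWidth());
      Result.Zero = K0.Zero & K1.Zero;
      Result.One = K0.One & K1.One;
      return Result;
    }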
CSEMIRBuilder.cpp
  261   const SrcOp &Src0 = SrcOps[0];  in buildInstr() local
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/

AArch64AdvSIMDScalarPass.cpp
  298   unsigned Src0 = 0, SubReg0;  in transformInstruction() local
/freebsd/contrib/llvm-project/llvm/lib/Target/PowerPC/

PPCExpandAtomicPseudoInsts.cpp
  53    Register Dest0, Register Dest1, Register Src0,  in PairedCopy()
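PairedCopy moves a register pair while expanding PowerPC atomic pseudo-instructions. A generic sketch of emitting two plain COPYs with BuildMI; the in-tree version additionally orders the copies so overlapping source and destination registers are not clobbered, which this sketch ignores:

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/CodeGen/MachineInstrBuilder.h"
    #include "llvm/CodeGen/TargetInstrInfo.h"

    using namespace llvm;

    // Copy Src0 -> Dest0 and Src1 -> Dest1 in front of MI.
    static void emitPairedCopy(const TargetInstrInfo &TII, MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI, const DebugLoc &DL,
                               Register Dest0, Register Dest1,
                               Register Src0, Register Src1) {
      const MCInstrDesc &Copy = TII.get(TargetOpcode::COPY);
      BuildMI(MBB, MI, DL, Copy, Dest0).addReg(Src0);
      BuildMI(MBB, MI, DL, Copy, Dest1).addReg(Src1);
    }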
/freebsd/contrib/llvm-project/llvm/lib/Transforms/Scalar/

ScalarizeMaskedMemIntrin.cpp
  148   Value *Src0 = CI->getArgOperand(3);  in scalarizeMaskedLoad() local
  410   Value *Src0 = CI->getArgOperand(3);  in scalarizeMaskedGather() local
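In both functions Src0 is the passthru operand, argument 3 of llvm.masked.load and llvm.masked.gather: lanes whose mask bit is clear keep the corresponding passthru element, and the pass scalarizes the call into per-lane conditional loads merged over that value. A small sketch of creating such a call through IRBuilder (names and the fixed alignment are illustrative; the CreateMaskedLoad signature shown is the recent one that takes the result type first):

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Emit %v = llvm.masked.load(ptr, align, mask, passthru).
    static Value *emitMaskedLoad(IRBuilder<> &B, FixedVectorType *VecTy,
                                 Value *Ptr, Value *Mask, Value *PassThru) {
      return B.CreateMaskedLoad(VecTy, Ptr, Align(16), Mask, PassThru,
                                "masked.load");
    }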
InferAddressSpaces.cpp
  929   Value *Src0 = Op.getOperand(1);  in updateAddressSpace() local
/freebsd/contrib/llvm-project/clang/lib/CodeGen/

CGBuiltin.cpp
  497   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));  in emitUnaryMaybeConstrainedFPBuiltin() local
  514   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));  in emitBinaryMaybeConstrainedFPBuiltin() local
  531   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));  in emitBinaryExpMaybeConstrainedFPBuiltin() local
  551   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));  in emitTernaryMaybeConstrainedFPBuiltin() local
  603   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));  in emitFPIntBuiltin() local
  616   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));  in emitMaybeConstrainedFPToIntRoundBuiltin() local
  632   llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));  in emitFrexpBuiltin() local
  3624  llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));  in EmitBuiltinExpr() local
  18552 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));  in EmitAMDGPUBuiltinExpr() local
  18647 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));  in EmitAMDGPUBuiltinExpr() local
  [all …]
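The CGBuiltin.cpp helpers share one shape: evaluate the builtin call's arguments to scalar llvm::Values, then emit either the plain intrinsic or its llvm.experimental.constrained.* counterpart depending on the floating-point environment. A hedged, paraphrased sketch of the unary case (CodeGenFunction.h is a clang-internal header and the real helper is templated, so treat the signature as an assumption):

    #include "clang/AST/Expr.h"
    #include "CodeGenFunction.h"   // clang/lib/CodeGen internal header

    using namespace clang;
    using namespace clang::CodeGen;

    static llvm::Value *emitUnaryFPBuiltinSketch(CodeGenFunction &CGF,
                                                 const CallExpr *E,
                                                 unsigned IntrinsicID,
                                                 unsigned ConstrainedIntrinsicID) {
      llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
      if (CGF.Builder.getIsFPConstrained()) {
        // Strict FP: use the constrained intrinsic so rounding/exception
        // behaviour is modelled explicitly.
        llvm::Function *F =
            CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
        return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
      }
      llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
      return CGF.Builder.CreateCall(F, {Src0});
    }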