/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/
AMDGPUGlobalISelUtils.cpp
    43  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset)))   in getBaseWithConstantOffset()
    47  if (mi_match(Def->getOperand(2).getReg(), MRI, m_Copy(m_ICst(Offset))))   in getBaseWithConstantOffset()
    52  if (KnownBits && mi_match(Reg, MRI, m_GOr(m_Reg(Base), m_ICst(Offset))) &&   in getBaseWithConstantOffset()
    59  if (mi_match(Def->getOperand(1).getReg(), MRI,   in getBaseWithConstantOffset()
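The getBaseWithConstantOffset() hits above all peel a constant addend off an address computation so it can later be folded into an addressing mode. A minimal sketch of that idiom follows; splitBaseAndConstantOffset is a made-up name, not the in-tree helper, and the in-tree AMDGPU version additionally accepts a G_OR when known-bits analysis proves it behaves like an add (the line 52 hit).

    #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include <utility>

    using namespace llvm;
    using namespace llvm::MIPatternMatch;

    // If Reg is defined by "G_ADD %base, cst" (or G_PTR_ADD for pointers),
    // return the base register and the constant; otherwise return (Reg, 0).
    static std::pair<Register, int64_t>
    splitBaseAndConstantOffset(Register Reg, const MachineRegisterInfo &MRI) {
      Register Base;
      int64_t Offset;
      if (mi_match(Reg, MRI, m_GAdd(m_Reg(Base), m_ICst(Offset))))
        return {Base, Offset};
      if (mi_match(Reg, MRI, m_GPtrAdd(m_Reg(Base), m_ICst(Offset))))
        return {Base, Offset};
      return {Reg, 0};
    }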
AMDGPUPreLegalizerCombiner.cpp
    151  if (mi_match(MI.getOperand(1).getReg(), MRI,   in matchClampI64ToI16()
    153  if (mi_match(Base, MRI,   in matchClampI64ToI16()
    159  if (mi_match(MI.getOperand(1).getReg(), MRI,   in matchClampI64ToI16()
    161  if (mi_match(Base, MRI,   in matchClampI64ToI16()
AMDGPUCombinerHelper.cpp
    158  if (mi_match(Reg, MRI, m_GFCstOrSplat(FPValReg))) {   in isConstantCostlierToNegate()
    279  if (!mi_match(Reg, MRI, m_GFNeg(m_Reg(Reg))))   in applyFoldableFneg()
    288  if (mi_match(XReg, MRI, m_GFNeg(m_Reg(XReg))))   in applyFoldableFneg()
    290  else if (mi_match(YReg, MRI, m_GFNeg(m_Reg(YReg))))   in applyFoldableFneg()
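Worth noting in the applyFoldableFneg() hits: the same register is used both as the match root and as the m_Reg() binding, so a successful match rebinds the variable to the G_FNEG source, i.e. it looks through the negation in place. A small sketch of that trick, under the same headers and usings as the sketch above; stripFNeg is a hypothetical name:

    // Returns true and rewrites Reg to the un-negated value when Reg is
    // defined by a G_FNEG; otherwise leaves Reg untouched.
    static bool stripFNeg(Register &Reg, const MachineRegisterInfo &MRI) {
      // On success m_Reg(Reg) rebinds Reg to the G_FNEG source operand.
      return mi_match(Reg, MRI, m_GFNeg(m_Reg(Reg)));
    }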
AMDGPUPostLegalizerCombiner.cpp
    269  mi_match(MI.getOperand(0).getReg(), MRI, m_GFSqrt(m_MInstr(SqrtSrcMI)));   in matchRcpSqrtToRsq()
    322  bool IsShr = mi_match(SrcReg, MRI, m_GZExt(m_Reg(SrcReg)));   in matchCvtF32UByteN()
    326  IsShr = mi_match(SrcReg, MRI, m_GLShr(m_Reg(Src0), m_ICst(ShiftAmt)));   in matchCvtF32UByteN()
    327  if (IsShr || mi_match(SrcReg, MRI, m_GShl(m_Reg(Src0), m_ICst(ShiftAmt)))) {   in matchCvtF32UByteN()
AMDGPUInstructionSelector.cpp
    734   bool Shift0 = mi_match(   in selectG_BUILD_VECTOR()
    737   bool Shift1 = mi_match(   in selectG_BUILD_VECTOR()
    2493  if (mi_match(In, MRI,   in isExtractHiElt()
    3281  if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))   in matchZeroExtendFromS32()
    3291  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {   in matchZeroExtendFromS32()
    3941  if (!mi_match(El, MRI, m_GFabs(m_Reg(FabsSrc))))   in selectWMMAModsNegAbs()
    4001  if (!mi_match(CV->getSourceReg(i), *MRI, m_GFNeg(m_Reg(FNegSrc))))   in selectWMMAModsF16Neg()
    4054  if (mi_match(Root.getReg(), *MRI, m_GFCstOrSplat(FPValReg))) {   in selectWMMAVISrc()
    4066  if (mi_match(Root.getReg(), *MRI, m_ICstOrSplat(ICst))) {   in selectWMMAVISrc()
    4084  if (mi_match(Src, *MRI, m_GLShr(m_Reg(ShiftSrc), m_GCst(ShiftAmt))) &&   in selectSWMMACIndex8()
    [all …]
AMDGPURegBankCombiner.cpp
    181  return mi_match(   in matchMed()
VOP3Instructions.td
    516  if (!mi_match(MI.getOperand(2).getReg(), MRI, m_ICst(Imm)) &&
    517  !mi_match(MI.getOperand(2).getReg(), MRI, m_Copy(m_ICst(Imm))))
AMDGPURegisterBankInfo.cpp
    1578  if (mi_match(Src2, MRI, m_ZeroInt()))   in applyMappingMAD_64_32()
    1784  if (mi_match(Reg, MRI, m_ICst(Const)))   in getBaseWithConstantOffset()
    1788  if (mi_match(Reg, MRI, m_GAdd(m_Reg(Base), m_ICst(Const))))   in getBaseWithConstantOffset()
SIISelLowering.cpp
    12808  if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) {   in isCanonicalized()
/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/
CombinerHelper.cpp
    1005  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))   in matchSextTruncSextLoad()
    1269  if (!mi_match(Addr, MRI, m_GPtrAdd(m_Reg(Base), m_Reg(Offset))) ||   in findPreIndexCandidate()
    1986  if (!mi_match(SrcReg, MRI,   in matchCommuteShift()
    1992  if (!mi_match(C1, MRI, m_ICstOrSplat(C1Val)) ||   in matchCommuteShift()
    1993  !mi_match(ShiftReg, MRI, m_ICstOrSplat(C2Val)))   in matchCommuteShift()
    2042  if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&   in matchCombineShlOfExtend()
    2043  !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&   in matchCombineShlOfExtend()
    2044  !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))   in matchCombineShlOfExtend()
    2107  while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))   in peekThroughBitcast()
    2259  if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))   in matchCombineUnmergeZExtToZExt()
    [all …]
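The peekThroughBitcast() hit (line 2107) shows the looping form of the in-place rebinding trick: because m_Reg(Reg) rewrites Reg on every successful match, the while loop walks an arbitrarily long chain of G_BITCASTs back to the first non-bitcast definition. A sketch, assuming the same headers and usings as the first sketch; the function name is made up:

    // Walk through any chain of G_BITCAST instructions and return the
    // underlying register that feeds the chain.
    static Register lookThroughBitcasts(Register Reg,
                                        const MachineRegisterInfo &MRI) {
      while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
        ;  // each iteration rebinds Reg to the bitcast's source
      return Reg;
    }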
LoadStoreOpt.cpp
    88   if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(BaseReg), m_Reg(PtrAddRHS)))) {   in getPointerInfo()
    207  if (!mi_match(LS->getPointerReg(), MRI,   in instMayAlias()
    654  if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))   in getTruncStoreByteOffset()
    665  if (!mi_match(TruncVal, MRI,   in getTruncStoreByteOffset()
    746  if (!mi_match(LastStore.getPointerReg(), *MRI,   in mergeTruncStore()
    791  if (!mi_match(NewStore->getPointerReg(), *MRI,   in mergeTruncStore()
Utils.cpp
    1381  return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));   in isBuildVectorConstantSplat()
LegalizerHelper.cpp
    4147  if (mi_match(IdxReg, *B.getMRI(), m_ICst(IdxVal))) {   in clampVectorIndex()
    7541  if (mi_match(Idx, MRI, m_ICst(IdxVal)) && IdxVal <= NumElts) {   in lowerExtractInsertVectorElt()
    7574  if (mi_match(Idx, MRI, m_ICst(IdxVal))) {   in lowerExtractInsertVectorElt()
/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/
MIPatternMatch.h
    25   [[nodiscard]] bool mi_match(Reg R, const MachineRegisterInfo &MRI,   in mi_match() function
    31   [[nodiscard]] bool mi_match(MachineInstr &MI, const MachineRegisterInfo &MRI,   in mi_match() function
    190  return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;   in match()
    222  if (mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal)   in match()
    378  if (mi_match(Reg, MRI, m_MInstr(TmpMI)))
    400  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
    424  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
    560  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
    654  if (!mi_match(Op, MRI, m_MInstr(TmpMI)) || TmpMI->getOpcode() != Opcode)
    737  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
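These are the two entry points all of the other hits go through: one overload starts from a virtual register and walks to its defining instruction, the other starts from a MachineInstr directly; both return true only when the whole pattern matches, binding any m_Reg/m_ICst/m_MInstr captures on success. Matchers compose by nesting, so a single call can follow a chain of definitions. A small usage sketch, same headers and usings as the first sketch; the surrounding function and scenario are made up:

    // Recognize "%dst = G_SHL (G_ZEXT %src), cst" starting from %dst.
    static bool matchShlOfZExt(Register Dst, const MachineRegisterInfo &MRI,
                               Register &Src, int64_t &ShAmt) {
      // The outer m_GShl walks to the G_SHL defining Dst, the inner m_GZExt
      // walks on to the definition of its first operand, and the leaves
      // bind Src and ShAmt when everything lines up.
      return mi_match(Dst, MRI,
                      m_GShl(m_GZExt(m_Reg(Src)), m_ICst(ShAmt)));
    }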
LegalizationArtifactCombiner.h
    72   if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {   in tryCombineAnyExt()
    87   if (mi_match(SrcReg, MRI,   in tryCombineAnyExt()
    133  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||   in tryCombineZExt()
    134  mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {   in tryCombineZExt()
    167  if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {   in tryCombineZExt()
    206  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {   in tryCombineSExt()
    231  if (mi_match(SrcReg, MRI,   in tryCombineSExt()
    344  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {   in tryCombineTrunc()
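The artifact-combiner hits all have the same shape: an extend whose source is a truncate (or another extend) left behind by legalization. The match step is just mi_match with m_GTrunc; what happens next (reuse the wide value, insert an explicit mask, etc.) depends on opcodes and types. A sketch of the recognition step only, under the same headers and usings as the first sketch plus <optional>; combineZExtOfTrunc is a made-up name:

    // If ZExtSrc, the source of some G_ZEXT, is itself a G_TRUNC of a wider
    // value, return that wider register so the caller can decide whether the
    // zext/trunc pair can collapse (possibly behind an explicit mask).
    static std::optional<Register>
    combineZExtOfTrunc(Register ZExtSrc, const MachineRegisterInfo &MRI) {
      Register TruncSrc;
      if (mi_match(ZExtSrc, MRI, m_GTrunc(m_Reg(TruncSrc))))
        return TruncSrc;
      return std::nullopt;
    }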
/freebsd/contrib/llvm-project/llvm/lib/Target/RISCV/GISel/
RISCVInstructionSelector.cpp
    174  if (mi_match(ShAmtReg, MRI, m_GZExt(m_Reg(ZExtSrcReg)))) {   in selectShiftMask()
    194  if (mi_match(ShAmtReg, MRI, m_GAnd(m_Reg(AndSrcReg), m_ICst(AndMask)))) {   in selectShiftMask()
    209  if (mi_match(ShAmtReg, MRI, m_GAdd(m_Reg(Reg), m_ICst(Imm)))) {   in selectShiftMask()
    214  } else if (mi_match(ShAmtReg, MRI, m_GSub(m_ICst(Imm), m_Reg(Reg)))) {   in selectShiftMask()
    258  if (mi_match(RootReg, MRI,   in selectSHXADDOp()
    262  else if (mi_match(RootReg, MRI,   in selectSHXADDOp()
    304  if (mi_match(RootReg, MRI,   in selectSHXADDOp()
    309  else if (mi_match(RootReg, MRI,   in selectSHXADDOp()
    358  if (mi_match(   in selectSHXADD_UWOp()
    449  if (!mi_match(CondReg, MRI, m_GICmp(m_Pred(Pred), m_Reg(LHS), m_Reg(RHS)))) {   in getOperandsForBranch()
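selectShiftMask() relies on the shifter only reading the low log2(width) bits of the amount, so an explicit AND that keeps at least those bits is redundant and the un-masked value can be selected instead. A simplified sketch of that check (the in-tree condition is more elaborate), assuming the same headers and usings as the first sketch, a power-of-two ShiftWidth, and a hypothetical helper name:

    // Return the register to use as the shift amount: if ShAmtReg is
    // "G_AND %x, mask" and the mask keeps every bit the shifter actually
    // reads, the AND is redundant and %x can be used directly.
    static Register stripRedundantShiftAmountMask(Register ShAmtReg,
                                                  uint64_t ShiftWidth,
                                                  const MachineRegisterInfo &MRI) {
      Register AndSrc;
      int64_t AndMask;
      uint64_t NeededBits = ShiftWidth - 1; // e.g. 63 for a 64-bit shift
      if (mi_match(ShAmtReg, MRI, m_GAnd(m_Reg(AndSrc), m_ICst(AndMask))) &&
          (uint64_t(AndMask) & NeededBits) == NeededBits)
        return AndSrc;
      return ShAmtReg;
    }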
/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/
AArch64PreLegalizerCombiner.cpp
    92   if (!mi_match(LHS, MRI, m_GTrunc(m_Reg(WideReg))) ||   in matchICmpRedundantTrunc()
    93   !mi_match(RHS, MRI, m_SpecificICst(0)))   in matchICmpRedundantTrunc()
    644  if (!mi_match(DefOp0->getParent(), MRI, m_GTrunc(m_Reg(Op0Wide))) ||   in tryToSimplifyUADDO()
    645  !mi_match(DefOp1->getParent(), MRI, m_GTrunc(m_Reg(Op1Wide))))   in tryToSimplifyUADDO()
    711  if (mi_match(U.getParent(), MRI, m_GZExt(m_Reg(WideReg)))) {   in tryToSimplifyUADDO()
AArch64PostLegalizerCombiner.cpp
    264  return mi_match(Merge.getSourceReg(1), MRI, m_SpecificICst(0));   in matchFoldMergeToZext()
    292  mi_match(Src, MRI,   in matchMutateAnyExtToZExt()
    352  if (!mi_match(MI, MRI,   in matchOrToBSP()
    728  if (mi_match(   in optimizeConsecutiveMemOpAddressing()
AArch64PostLegalizerLowering.cpp
    278   if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ZeroInt()))   in matchDupFromInsertVectorElt()
    1058  if (!mi_match(DstReg, MRI, m_GTrunc(m_Reg(SrcReg))))   in matchFormTruncstore()
AArch64InstructionSelector.cpp
    1212  if (mi_match(Reg, MRI, m_Neg(m_Reg(MatchReg)))) {   in emitSelect()
    1229  if (mi_match(Reg, MRI, m_Not(m_Reg(MatchReg)))) {   in emitSelect()
    1246  if (mi_match(Reg, MRI,   in emitSelect()
    2184  if (!mi_match(I.getOperand(2).getReg(), MRI, m_Neg(m_Reg(NegatedReg))))   in convertPtrAddToAdd()
    2360  if (!mi_match(Reg, MRI,   in earlySelect()
    2408  if (!mi_match(   in earlySelect()
    5190  if (!Extract || !mi_match(Extract->getOperand(2).getReg(), MRI, m_ICst(Lane)))   in selectUSMovFromExtend()
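m_Neg and m_Not in the emitSelect() hits are convenience matchers rather than opcodes: m_Neg(x) matches "G_SUB 0, x" and m_Not(x) matches "G_XOR x, -1", which is how emitSelect() spots values it can fold into negated or inverted conditional selects. A sketch of the classification step only, same headers and usings as the first sketch; the enum and helper are made up:

    enum class SelectSrcKind { Plain, Negated, Inverted };

    // Classify a select input: a plain value, a negation (G_SUB 0, x), or a
    // bitwise inversion (G_XOR x, -1). On a non-plain result, Src is bound
    // to the underlying value.
    static SelectSrcKind classifySelectSrc(Register Reg, Register &Src,
                                           const MachineRegisterInfo &MRI) {
      if (mi_match(Reg, MRI, m_Neg(m_Reg(Src))))
        return SelectSrcKind::Negated;
      if (mi_match(Reg, MRI, m_Not(m_Reg(Src))))
        return SelectSrcKind::Inverted;
      Src = Reg;
      return SelectSrcKind::Plain;
    }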
AArch64LegalizerInfo.cpp
    1721  if (mi_match(Root, MRI, m_GPtrAdd(m_Reg(NewBase), m_ICst(NewOffset))) &&   in matchLDPSTPAddrMode()