/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/
AArch64PromoteConstant.cpp
  272  static bool shouldConvertUse(const Constant *Cst, const Instruction *Instr,   in shouldConvertUse() argument
  338  static bool shouldConvertImpl(const Constant *Cst) {   in shouldConvertImpl() argument
  339  if (isa<const UndefValue>(Cst))   in shouldConvertImpl()
  349  if (Cst->isZeroValue())   in shouldConvertImpl()
  356  if (Cst->getType()->isVectorTy())   in shouldConvertImpl()
  358  return isConstantUsingVectorTy(Cst->getType());   in shouldConvertImpl()
  565  Constant *Cst = dyn_cast<Constant>(U);   in runOnFunction() local
  570  if (!Cst || isa<GlobalValue>(Cst) || !containsOnlyConstantData(Cst))   in runOnFunction()
  574  if (!shouldConvert(*Cst, PromotionCache))   in runOnFunction()
  579  if (!shouldConvertUse(Cst, &I, OpNo))   in runOnFunction()
  [all …]

AArch64StackTagging.cpp
  176  uint64_t Cst = 0x0101010101010101UL;   in applyMemSet() local
  179  Cst = (Cst >> LowBits) << LowBits;   in applyMemSet()
  182  Cst = (Cst << HighBits) >> HighBits;   in applyMemSet()
  184  ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());   in applyMemSet()

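The applyMemSet hits above show the usual byte-splat trick: a 64-bit constant with 0x01 in every byte lane is trimmed to the byte range being set and then multiplied by the zero-extended fill byte, so a single multiply replicates that byte across all remaining lanes. A minimal standalone sketch of that arithmetic, assuming byte-aligned LowBits/HighBits below 64 (the names are mine, not the pass's):

    #include <cstdint>
    #include <cstdio>

    // Replicate the fill byte V across a 64-bit word, clearing the low
    // `LowBits` bits and the high `HighBits` bits first, mirroring the
    // masking in applyMemSet above.
    uint64_t splatMemSetByte(uint8_t V, unsigned LowBits, unsigned HighBits) {
      uint64_t Cst = 0x0101010101010101ULL;   // 0x01 in every byte lane
      Cst = (Cst >> LowBits) << LowBits;      // drop lanes below the start offset
      Cst = (Cst << HighBits) >> HighBits;    // drop lanes past the end
      return Cst * static_cast<uint64_t>(V);  // replicate V into the kept lanes
    }

    int main() {
      // Fill byte 0xAB over the middle six byte lanes of the word.
      std::printf("%016llx\n",
                  (unsigned long long)splatMemSetByte(0xAB, 8, 8));
      // Prints 00abababababab00.
    }
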
/freebsd/contrib/llvm-project/llvm/include/llvm/MC/ |
MCValue.h
  38  int64_t Cst = 0;   variable
  43  int64_t getConstant() const { return Cst; }   in getConstant()
  63  R.Cst = Val;
  72  R.Cst = Val;   in get()

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/GlobalISel/ |
CSEMIRBuilder.cpp
  187  if (std::optional<SmallVector<APInt>> Cst =   in buildInstr() local
  191  return buildBuildVectorConstant(DstOps[0], *Cst);   in buildInstr()
  192  return buildConstant(DstOps[0], Cst->front());   in buildInstr()
  232  if (std::optional<APInt> Cst = ConstantFoldBinOp(   in buildInstr() local
  234  return buildConstant(DstOps[0], *Cst);   in buildInstr()
  252  if (std::optional<APFloat> Cst = ConstantFoldFPBinOp(   in buildInstr() local
  254  return buildFConstant(DstOps[0], *Cst);   in buildInstr()
  273  if (std::optional<APFloat> Cst = ConstantFoldIntToFloat(   in buildInstr() local
  275  return buildFConstant(DstOps[0], *Cst);   in buildInstr()
  295  for (unsigned Cst : *MaybeCsts)   in buildInstr()
  [all …]

CombinerHelper.cpp
  1694  const ConstantFP *Cst) {   in applyCombineConstantFoldFpUnary() argument
  1695  APFloat Folded = constantFoldFpUnary(MI, MRI, Cst->getValue());   in applyCombineConstantFoldFpUnary()
  2483  APInt Cst;   in matchCombineConstPtrAddToI2P() local
  2484  if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {   in matchCombineConstPtrAddToI2P()
  2487  NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits());   in matchCombineConstPtrAddToI2P()
  2769  auto Cst =   in matchConstantSelectCmp() local
  2771  if (!Cst)   in matchConstantSelectCmp()
  2773  OpIdx = Cst->isZero() ? 3 : 2;   in matchConstantSelectCmp()
  3388  int64_t Cst, bool IsVector, bool IsFP) {   in isConstValidTrue() argument
  3390  return (ScalarSizeBits == 1 && Cst == -1) ||   in isConstValidTrue()
  [all …]

IRTranslator.cpp
  884  auto Cst = getOrCreateVReg(   in emitJumpTableHeader() local
  886  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);   in emitJumpTableHeader()
  887  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);   in emitJumpTableHeader()
  2391  ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));   in translateKnownIntrinsic() local
  2394  ? Cst->isZero() ? TargetOpcode::G_CTTZ   in translateKnownIntrinsic()
  2396  : Cst->isZero() ? TargetOpcode::G_CTLZ   in translateKnownIntrinsic()

MachineIRBuilder.cpp
  223  auto Cst = buildConstant(ValueTy, Value);   in materializePtrAdd() local
  224  return buildPtrAdd(Res, Op0, Cst.getReg(0));   in materializePtrAdd()

/freebsd/contrib/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/ |
Utils.h
  392  int64_t Cst;   variable
  398  explicit RegOrConstant(int64_t Cst) : Cst(Cst), IsReg(false) {}   in RegOrConstant() argument
  407  return Cst;   in getCst()

MIPatternMatch.h
  93  inline ConstantMatch<APInt> m_ICst(APInt &Cst) {   in m_ICst() argument
  94  return ConstantMatch<APInt>(Cst);   in m_ICst()
  96  inline ConstantMatch<int64_t> m_ICst(int64_t &Cst) {   in m_ICst() argument
  97  return ConstantMatch<int64_t>(Cst);   in m_ICst()
  134  inline ICstOrSplatMatch<APInt> m_ICstOrSplat(APInt &Cst) {   in m_ICstOrSplat() argument
  135  return ICstOrSplatMatch<APInt>(Cst);   in m_ICstOrSplat()
  138  inline ICstOrSplatMatch<int64_t> m_ICstOrSplat(int64_t &Cst) {   in m_ICstOrSplat() argument
  139  return ICstOrSplatMatch<int64_t>(Cst);   in m_ICstOrSplat()

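m_ICst is the GlobalISel analogue of IR-level m_ConstantInt: it matches a vreg defined by G_CONSTANT and binds its value to the given APInt or int64_t. A hedged sketch of how a combine might use it (the helper and its names are illustrative, not part of the header):

    #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;
    using namespace llvm::MIPatternMatch;

    // Returns true if Reg is defined by a G_ADD of some register and a
    // constant, binding the register to Src and the constant to Imm.
    static bool matchAddOfConstant(Register Reg, const MachineRegisterInfo &MRI,
                                   Register &Src, int64_t &Imm) {
      // m_ICst(Imm) succeeds only when the operand is a G_CONSTANT whose
      // value fits in an int64_t.
      return mi_match(Reg, MRI, m_GAdd(m_Reg(Src), m_ICst(Imm)));
    }
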
CombinerHelper.h
  361  void applyCombineConstantFoldFpUnary(MachineInstr &MI, const ConstantFP *Cst);

/freebsd/contrib/llvm-project/llvm/lib/Target/AArch64/GISel/ |
AArch64PostLegalizerLowering.cpp
  397  auto Cst =   in applyEXT() local
  400  {MatchInfo.SrcOps[0], MatchInfo.SrcOps[1], Cst});   in applyEXT()
  515  auto Cst = getAArch64VectorSplatScalar(*MI, MRI);   in isVShiftRImm() local
  516  if (!Cst)   in isVShiftRImm()
  518  Cnt = *Cst;   in isVShiftRImm()
  681  auto Cst = MIB.buildConstant(MRI.cloneVirtualRegister(RHS.getReg()),   in applyAdjustICmpImmAndPred() local
  684  RHS.setReg(Cst->getOperand(0).getReg());   in applyAdjustICmpImmAndPred()
  800  int64_t Cst = Splat->getCst();   in matchBuildVectorToDup() local
  801  return (Cst != 0 && Cst != -1);   in matchBuildVectorToDup()

AArch64PostLegalizerCombiner.cpp
  73  auto Cst = getIConstantVRegValWithLookThrough(Src2, MRI);   in matchExtractVecEltPairwiseAdd() local
  74  if (!Cst || Cst->Value != 0)   in matchExtractVecEltPairwiseAdd()

AArch64PreLegalizerCombiner.cpp
  159  auto Cst = getIConstantVRegValWithLookThrough(   in matchFoldGlobalOffset() local
  161  if (!Cst)   in matchFoldGlobalOffset()
  163  MinOffset = std::min(MinOffset, Cst->Value.getZExtValue());   in matchFoldGlobalOffset()

AArch64InstructionSelector.cpp
  5467  auto Cst = getIConstantVRegVal(Offset, MRI);   in selectIndexedExtLoad() local
  5468  if (!Cst)   in selectIndexedExtLoad()
  5472  .addImm(Cst->getSExtValue());   in selectIndexedExtLoad()
  5532  auto Cst = getIConstantVRegVal(Offset, MRI);   in selectIndexedLoad() local
  5533  if (!Cst)   in selectIndexedLoad()
  5536  MIB.buildInstr(Opc, {WriteBack, Dst}, {Base}).addImm(Cst->getSExtValue());   in selectIndexedLoad()
  5579  auto Cst = getIConstantVRegVal(Offset, MRI);   in selectIndexedStore() local
  5580  if (!Cst)   in selectIndexedStore()
  5583  MIB.buildInstr(Opc, {Dst}, {Val, Base}).addImm(Cst->getSExtValue());   in selectIndexedStore()

/freebsd/contrib/llvm-project/llvm/lib/Target/AMDGPU/ |
R600ISelDAGToDAG.cpp
  81  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) {   in SelectGlobalValueConstantOffset() local
  83  CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, SDLoc(Addr), true);   in SelectGlobalValueConstantOffset()

R600ISelLowering.cpp
  778  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op))   in isZero() local
  779  return Cst->isZero();   in isZero()
  2006  ConstantSDNode *Cst = cast<ConstantSDNode>(CstOffset);   in FoldOperand() local
  2007  Consts.push_back(Cst->getZExtValue());   in FoldOperand()

/freebsd/contrib/llvm-project/llvm/lib/Transforms/Scalar/ |
Reassociate.cpp
  1895  Constant *Cst = nullptr;   in OptimizeExpression() local
  1899  if (!Cst) {   in OptimizeExpression()
  1901  Cst = C;   in OptimizeExpression()
  1904  if (Constant *Res = ConstantFoldBinaryOpOperands(Opcode, C, Cst, DL)) {   in OptimizeExpression()
  1906  Cst = Res;   in OptimizeExpression()
  1914  return Cst;   in OptimizeExpression()
  1919  if (Cst && Cst != ConstantExpr::getBinOpIdentity(Opcode, I->getType())) {   in OptimizeExpression()
  1920  if (Cst == ConstantExpr::getBinOpAbsorber(Opcode, I->getType()))   in OptimizeExpression()
  1921  return Cst;   in OptimizeExpression()
  1922  Ops.push_back(ValueEntry(0, Cst));   in OptimizeExpression()

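The OptimizeExpression hits fold every constant operand of a reassociated chain into a single Cst, drop it when it is the operation's identity, and return it immediately when it is the absorber (e.g. 0 for mul). A tiny standalone illustration of that shape on plain integers, not the pass itself:

    #include <optional>
    #include <vector>

    // Operands are either a known constant or an opaque value.
    struct Operand {
      bool IsConst;
      long Value;   // meaningful only when IsConst is true
    };

    // Fold all constants of a multiply chain into one, drop it if it is the
    // identity (1), and collapse the whole chain if it is the absorber (0).
    std::optional<long> foldMulConstants(std::vector<Operand> &Ops) {
      long Cst = 1;                    // identity for multiply
      std::vector<Operand> Rest;
      for (const Operand &Op : Ops) {
        if (Op.IsConst)
          Cst *= Op.Value;             // fold into the running constant
        else
          Rest.push_back(Op);
      }
      if (Cst == 0)
        return 0;                      // absorber: the product is just 0
      if (Cst != 1)                    // the identity contributes nothing
        Rest.push_back({true, Cst});
      Ops = std::move(Rest);
      return std::nullopt;
    }
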
JumpThreading.cpp
  598  Constant *Cst;   in computeValueKnownInPredecessorsImpl() local
  599  if (!PredCst && match(V, m_Cmp(Pred, m_Value(Val), m_Constant(Cst))))   in computeValueKnownInPredecessorsImpl()
  600  PredCst = LVI->getPredicateOnEdge(Pred, Val, Cst, P, BB, CxtI);   in computeValueKnownInPredecessorsImpl()
  1506  if (Constant *Cst = dyn_cast<Constant>(V)) {   in evaluateOnPredecessorEdge() local
  1507  return Cst;   in evaluateOnPredecessorEdge()

/freebsd/contrib/llvm-project/llvm/lib/Target/Mips/ |
MipsISelLowering.h
  464  SDValue Cst = DAG.getConstant(16, DL, MVT::i32);   in getAddrNonPICSym64() local
  465  SDValue Shift = DAG.getNode(ISD::SHL, DL, Ty, HigherPart, Cst);   in getAddrNonPICSym64()
  468  SDValue Shift2 = DAG.getNode(ISD::SHL, DL, Ty, Add, Cst);   in getAddrNonPICSym64()

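The getAddrNonPICSym64 hits build a 64-bit symbol address piecewise, interleaving SHL-by-16 nodes with adds of the next 16-bit chunk. A standalone sketch of that shift/add skeleton, ignoring the relocation and sign-extension details of the real %highest/%higher/%hi/%lo operators:

    #include <cstdint>
    #include <cstdio>

    // Rebuild a 64-bit value from four raw 16-bit pieces using only
    // shift-by-16 and add steps, the same shape as the repeated
    // ISD::SHL-by-16 nodes above.
    uint64_t rebuildAddr(uint16_t Highest, uint16_t Higher, uint16_t Hi,
                         uint16_t Lo) {
      uint64_t V = Highest;
      V = (V << 16) + Higher;   // first shift + add
      V = (V << 16) + Hi;       // second shift + add
      V = (V << 16) + Lo;       // final shift + add
      return V;
    }

    int main() {
      uint64_t Addr = 0x123456789abcdef0ULL;
      std::printf("%d\n", rebuildAddr(0x1234, 0x5678, 0x9abc, 0xdef0) == Addr);
      // Prints 1.
    }
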
/freebsd/contrib/llvm-project/llvm/lib/Target/M68k/ |
M68kISelDAGToDAG.cpp
  455  if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))   in matchAddressRecursively() local
  456  if (foldOffsetIntoAddress(Cst->getSExtValue(), AM))   in matchAddressRecursively()

/freebsd/contrib/llvm-project/llvm/lib/Target/X86/ |
X86ISelDAGToDAG.cpp
  2476  if (auto *Cst = dyn_cast<ConstantSDNode>(N))   in matchAddressRecursively() local
  2477  if (!foldOffsetIntoAddress(Cst->getSExtValue(), AM))   in matchAddressRecursively()
  4418  auto *Cst = dyn_cast<ConstantSDNode>(N1);   in tryShrinkShlLogicImm() local
  4419  if (!Cst)   in tryShrinkShlLogicImm()
  4422  int64_t Val = Cst->getSExtValue();   in tryShrinkShlLogicImm()
  4491  unsigned ZExtWidth = Cst->getAPIntValue().getActiveBits();   in tryShrinkShlLogicImm()
  4497  NeededMask &= ~Cst->getAPIntValue();   in tryShrinkShlLogicImm()
  5386  auto *Cst = dyn_cast<ConstantSDNode>(N1);   in Select() local
  5387  if (!Cst)   in Select()
  5390  int64_t Val = Cst->getSExtValue();   in Select()

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/ |
CodeGenPrepare.cpp
  4539  if (const auto *Cst = dyn_cast<ConstantInt>(Inst->getOperand(1)))   in canGetThrough() local
  4540  if (!Cst->getValue().isAllOnes())   in canGetThrough()
  4562  const auto *Cst = dyn_cast<ConstantInt>(AndInst->getOperand(1));   in canGetThrough() local
  4563  if (Cst &&   in canGetThrough()
  4564  Cst->getValue().isIntN(Inst->getType()->getIntegerBitWidth()))   in canGetThrough()
  4741  if (const ConstantInt *Cst = dyn_cast<ConstantInt>(Opnd)) {   in promoteOperandForOther() local
  4744  APInt CstVal = IsSExt ? Cst->getValue().sext(BitWidth)   in promoteOperandForOther()
  4745  : Cst->getValue().zext(BitWidth);   in promoteOperandForOther()

/freebsd/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/ |
SelectionDAG.cpp
  376  auto *Cst = dyn_cast<ConstNodeType>(Op.getOperand(i));   in matchUnaryPredicateImpl() local
  377  if (!Cst || Cst->getValueType(0) != SVT || !Match(Cst))   in matchUnaryPredicateImpl()
  3022  if (auto *Cst = dyn_cast<ConstantSDNode>(V.getOperand(1))) {   in getValidShiftAmountRange() local
  3023  const APInt &ShAmt = Cst->getAPIntValue();   in getValidShiftAmountRange()
  3716  const Constant *Cst = TLI->getTargetConstantFromLoad(LD);   in computeKnownBits() local
  3717  if (ISD::isNON_EXTLoad(LD) && Cst) {   in computeKnownBits()
  3719  Type *CstTy = Cst->getType();   in computeKnownBits()
  3725  if (const Constant *Splat = Cst->getSplatValue()) {   in computeKnownBits()
  3726  Cst = Splat;   in computeKnownBits()
  3727  CstTy = Cst->getType();   in computeKnownBits()
  [all …]

DAGCombiner.cpp
  8356  ConstantSDNode *Cst = isConstOrConstSplat(Op.getOperand(1));   in MatchFunnelPosNeg() local
  8357  return Cst && (Cst->getAPIntValue() == Imm);   in MatchFunnelPosNeg()
  10794  if (ConstantSDNode *Cst = isConstOrConstSplat(N2)) {   in visitFunnelShift() local
  10798  if (Cst->getAPIntValue().uge(BitWidth)) {   in visitFunnelShift()
  10799  uint64_t RotAmt = Cst->getAPIntValue().urem(BitWidth);   in visitFunnelShift()
  10804  unsigned ShAmt = Cst->getZExtValue();   in visitFunnelShift()
  15549  SDValue Cst = DAG.getBitcast(VT, N0.getOperand(0));   in visitBITCAST() local
  15550  AddToWorklist(Cst.getNode());   in visitBITCAST()
  15553  SDValue XorResult = DAG.getNode(ISD::XOR, SDLoc(N0), VT, Cst, X);   in visitBITCAST()
  15567  return DAG.getNode(ISD::XOR, SDLoc(N), VT, Cst, FlipBits);   in visitBITCAST()
  [all …]

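The visitFunnelShift hits reduce an out-of-range shift amount with urem(BitWidth) when the funnel shift is really a rotate (both inputs are the same value). That is valid because for a rotate only the amount modulo the bit width matters. A minimal standalone illustration for 32-bit rotates:

    #include <cstdint>
    #include <cstdio>

    // Rotate-left where only Amt % 32 matters, the same reduction the
    // APInt::urem call above performs.
    uint32_t rotl32(uint32_t X, unsigned Amt) {
      Amt %= 32;
      if (Amt == 0)                    // avoid the undefined shift by 32
        return X;
      return (X << Amt) | (X >> (32 - Amt));
    }

    int main() {
      uint32_t X = 0x80000001u;
      std::printf("%d\n", rotl32(X, 33) == rotl32(X, 1));   // prints 1
    }
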
/freebsd/contrib/llvm-project/llvm/lib/Transforms/InstCombine/ |
InstCombineCasts.cpp
  860  ConstantInt *Cst;   in visitTrunc() local
  861  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {   in visitTrunc()
  869  uint64_t VecOpIdx = Cst->getZExtValue();   in visitTrunc()

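The visitTrunc hit uses IR-level PatternMatch to recognize a single-use extractelement with a constant lane index feeding the trunc. A hedged sketch of the same matching idiom (the helper is illustrative, not InstCombine's actual code):

    #include <cstdint>
    #include <optional>
    #include "llvm/IR/PatternMatch.h"

    using namespace llvm;
    using namespace llvm::PatternMatch;

    // Return the constant lane index if Src is a single-use extractelement
    // with a ConstantInt index, as in the visitTrunc hit above.
    static std::optional<uint64_t> getConstExtractIndex(Value *Src) {
      Value *VecOp;
      ConstantInt *Cst;
      if (match(Src,
                m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst)))))
        return Cst->getZExtValue();
      return std::nullopt;
    }
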