Lines matching "depth" in llvm/lib/Analysis/ValueTracking.cpp

1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
88 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
95 if (unsigned BitWidth = Ty->getScalarSizeInBits()) in getBitWidth()
106 if (CxtI && CxtI->getParent()) in safeCxtI()
109 // If the value is really an already-inserted instruction, then use that. in safeCxtI()
111 if (CxtI && CxtI->getParent()) in safeCxtI()
120 if (CxtI && CxtI->getParent()) in safeCxtI()
123 // If the value is really an already-inserted instruction, then use that. in safeCxtI()
125 if (CxtI && CxtI->getParent()) in safeCxtI()
129 if (CxtI && CxtI->getParent()) in safeCxtI()
138 if (isa<ScalableVectorType>(Shuf->getType())) { in getShuffleDemandedElts()
145 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements(); in getShuffleDemandedElts()
146 return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(), in getShuffleDemandedElts()
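The split above can be pictured with a small standalone sketch (plain C++, not the LLVM API; it assumes the usual shufflevector mask convention: -1 means undef, indices below NumElts pick the first operand, the rest pick the second):

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical helper, for illustration only: split the demanded output
// elements of a shuffle into demanded elements of its two source operands.
static void splitShuffleDemanded(unsigned NumElts, const std::vector<int> &Mask,
                                 uint64_t DemandedOut, uint64_t &DemandedLHS,
                                 uint64_t &DemandedRHS) {
  DemandedLHS = DemandedRHS = 0;
  for (unsigned I = 0; I != Mask.size(); ++I) {
    if (!(DemandedOut & (1ull << I)) || Mask[I] < 0)
      continue; // output lane not demanded, or an undef lane
    if ((unsigned)Mask[I] < NumElts)
      DemandedLHS |= 1ull << Mask[I];
    else
      DemandedRHS |= 1ull << ((unsigned)Mask[I] - NumElts);
  }
}

int main() {
  // shufflevector <2 x i32> %a, <2 x i32> %b, <0, 3>: demanding both output
  // lanes demands a[0] from the LHS and b[1] from the RHS.
  uint64_t L, R;
  splitShuffleDemanded(2, {0, 3}, 0b11, L, R);
  std::printf("LHS=%llx RHS=%llx\n", (unsigned long long)L, (unsigned long long)R);
}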
151 KnownBits &Known, unsigned Depth,
154 void llvm::computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, in computeKnownBits() argument
159 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); in computeKnownBits()
161 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in computeKnownBits()
162 ::computeKnownBits(V, DemandedElts, Known, Depth, Q); in computeKnownBits()
166 const DataLayout &DL, unsigned Depth, in computeKnownBits() argument
170 V, Known, Depth, in computeKnownBits()
175 unsigned Depth, AssumptionCache *AC, in computeKnownBits() argument
179 V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)); in computeKnownBits()
183 const DataLayout &DL, unsigned Depth, in computeKnownBits() argument
187 V, DemandedElts, Depth, in computeKnownBits()
207 // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern in haveNoCommonBitsSetSpecialCases()
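Why the special case is sound: ((X & Y) ^ Y) simplifies to Y & ~X, which by construction shares no set bits with X. A brute-force check over all 8-bit pairs (illustration only):

#include <cassert>

int main() {
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned Y = 0; Y < 256; ++Y)
      assert((X & ((X & Y) ^ Y)) == 0); // (X & Y) ^ Y == Y & ~X, disjoint from X
  return 0;
}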
242 assert(LHS->getType() == RHS->getType() && in haveNoCommonBitsSet()
244 assert(LHS->getType()->isIntOrIntVectorTy() && in haveNoCommonBitsSet()
256 return !I->user_empty() && all_of(I->users(), [](const User *U) { in isOnlyUsedInZeroComparison()
263 return !I->user_empty() && all_of(I->users(), [](const User *U) { in isOnlyUsedInZeroEqualityComparison()
269 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
273 bool OrZero, unsigned Depth, in isKnownToBeAPowerOfTwo() argument
277 V, OrZero, Depth, in isKnownToBeAPowerOfTwo()
282 const SimplifyQuery &Q, unsigned Depth);
285 unsigned Depth) { in isKnownNonNegative() argument
286 return computeKnownBits(V, Depth, SQ).isNonNegative(); in isKnownNonNegative()
290 unsigned Depth) { in isKnownPositive() argument
292 return CI->getValue().isStrictlyPositive(); in isKnownPositive()
296 KnownBits Known = computeKnownBits(V, Depth, SQ); in isKnownPositive()
298 (Known.isNonZero() || isKnownNonZero(V, SQ, Depth)); in isKnownPositive()
302 unsigned Depth) { in isKnownNegative() argument
303 return computeKnownBits(V, Depth, SQ).isNegative(); in isKnownNegative()
307 const APInt &DemandedElts, unsigned Depth,
314 assert(V1->getType() == V2->getType() && in isKnownNonEqual()
315 "Testing equality of non-equal types!"); in isKnownNonEqual()
316 auto *FVTy = dyn_cast<FixedVectorType>(V1->getType()); in isKnownNonEqual()
318 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in isKnownNonEqual()
325 const SimplifyQuery &SQ, unsigned Depth) { in MaskedValueIsZero() argument
327 computeKnownBits(V, Known, Depth, SQ); in MaskedValueIsZero()
332 unsigned Depth, const SimplifyQuery &Q);
334 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, in ComputeNumSignBits() argument
336 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); in ComputeNumSignBits()
338 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in ComputeNumSignBits()
339 return ComputeNumSignBits(V, DemandedElts, Depth, Q); in ComputeNumSignBits()
343 unsigned Depth, AssumptionCache *AC, in ComputeNumSignBits() argument
347 V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)); in ComputeNumSignBits()
351 unsigned Depth, AssumptionCache *AC, in ComputeMaxSignificantBits() argument
354 unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT); in ComputeMaxSignificantBits()
355 return V->getType()->getScalarSizeInBits() - SignBits + 1; in ComputeMaxSignificantBits()
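The formula reads off directly: a value with SignBits redundant copies of the sign bit needs only BitWidth - SignBits + 1 bits in two's complement. A standalone illustration (hypothetical helper, not the LLVM one; assumes arithmetic right shift):

#include <cstdint>
#include <cstdio>

// Count leading bits equal to the sign bit (at least 1).
static unsigned numSignBits(int32_t V) {
  unsigned N = 1;
  while (N < 32 && ((V >> (31 - N)) & 1) == ((V >> 31) & 1))
    ++N;
  return N;
}

int main() {
  int32_t V = -3;                     // 0xFFFFFFFD: bits 31..2 all copy the sign
  unsigned SignBits = numSignBits(V); // 30
  // -3 needs 32 - 30 + 1 = 3 bits: 0b101 is -3 in 3-bit two's complement.
  std::printf("max significant bits = %u\n", 32 - SignBits + 1);
}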
362 unsigned Depth, const SimplifyQuery &Q) { in computeKnownBitsAddSub() argument
363 computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q); in computeKnownBitsAddSub()
370 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsAddSub()
376 KnownBits &Known2, unsigned Depth, in computeKnownBitsMul() argument
378 computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q); in computeKnownBitsMul()
379 computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsMul()
386 // The product of a number with itself is non-negative. in computeKnownBitsMul()
393 // The product of two numbers with the same sign is non-negative. in computeKnownBitsMul()
396 // The product of a negative number and a non-negative number is either in computeKnownBitsMul()
409 isGuaranteedNotToBeUndef(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1); in computeKnownBitsMul()
412 // Only make use of no-wrap flags if we failed to compute the sign bit in computeKnownBitsMul()
437 ConstantRange Range(Lower->getValue(), Upper->getValue()); in computeKnownBitsFromRangeMetadata()
456 // non-ephemeral users). See r246696's test case for an example. in isEphemeralValueOf()
457 if (is_contained(I->operands(), E)) in isEphemeralValueOf()
466 if (llvm::all_of(V->users(), [&](const User *U) { in isEphemeralValueOf()
473 !cast<Instruction>(V)->mayHaveSideEffects() && in isEphemeralValueOf()
474 !cast<Instruction>(V)->isTerminator())) { in isEphemeralValueOf()
477 append_range(WorkSet, U->operands()); in isEphemeralValueOf()
488 return CI->isAssumeLikeIntrinsic(); in isAssumeLikeIntrinsic()
505 if (Inv->getParent() == CxtI->getParent()) { in isValidAssumeForContext()
508 if (Inv->comesBefore(CxtI)) in isValidAssumeForContext()
511 // Don't let an assume affect itself - this would cause the problems in isValidAssumeForContext()
521 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so in isValidAssumeForContext()
523 auto Range = make_range(CxtI->getIterator(), Inv->getIterator()); in isValidAssumeForContext()
532 if (DT->dominates(Inv, CxtI)) in isValidAssumeForContext()
534 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) { in isValidAssumeForContext()
542 // TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but
543 // we still have enough information about `RHS` to conclude non-zero. For
552 // Special-case v != 0 to also handle v != null. in cmpExcludesZero()
556 // All other predicates - rely on generic ConstantRange handling. in cmpExcludesZero()
558 auto Zero = APInt::getZero(RHS->getType()->getScalarSizeInBits()); in cmpExcludesZero()
568 for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem; in cmpExcludesZero()
571 Pred, VC->getElementAsAPInt(ElemIdx)); in cmpExcludesZero()
579 // Use of assumptions is context-sensitive. If we don't have a context, we in isKnownNonZeroFromAssume()
584 for (AssumptionCache::ResultElem &Elem : Q.AC->assumptionsFor(V)) { in isKnownNonZeroFromAssume()
589 assert(I->getFunction() == Q.CxtI->getFunction() && in isKnownNonZeroFromAssume()
593 if (!V->getType()->isPointerTy()) in isKnownNonZeroFromAssume()
596 *I, I->bundle_op_info_begin()[Elem.Index])) { in isKnownNonZeroFromAssume()
600 !NullPointerIsDefined(Q.CxtI->getFunction(), in isKnownNonZeroFromAssume()
601 V->getType()->getPointerAddressSpace()))) && in isKnownNonZeroFromAssume()
615 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS)))) in isKnownNonZeroFromAssume()
628 if (RHS->getType()->isPointerTy()) { in computeKnownBitsFromCmp()
716 // X & Y u> C -> X u> C && Y u> C in computeKnownBitsFromCmp()
717 // X nuw- Y u> C -> X u> C in computeKnownBitsFromCmp()
724 // X | Y u< C -> X u< C && Y u< C in computeKnownBitsFromCmp()
725 // X nuw+ Y u< C -> X u< C && Y u< C in computeKnownBitsFromCmp()
729 (*C - (Pred == ICmpInst::ICMP_ULT)).countLeadingZeros()); in computeKnownBitsFromCmp()
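The u< branch can be sanity-checked exhaustively: X u<= (X | Y), so X | Y u< C bounds each operand by C - 1, and the top countLeadingZeros(C - 1) bits of each operand must be zero. An 8-bit brute-force sketch (illustration only):

#include <cassert>
#include <cstdint>

static unsigned clz8(uint8_t V) {
  unsigned N = 0;
  for (int B = 7; B >= 0 && !((V >> B) & 1); --B)
    ++N;
  return N;
}

int main() {
  for (unsigned C = 1; C < 256; ++C) {
    unsigned HighZeros = clz8((uint8_t)(C - 1)); // known-zero high bits for X u< C
    for (unsigned X = 0; X < C; ++X)             // every X with X u< C
      assert((X >> (8 - HighZeros)) == 0);
  }
  return 0;
}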
741 Invert ? Cmp->getInversePredicate() : Cmp->getPredicate(); in computeKnownBitsFromICmpCond()
742 Value *LHS = Cmp->getOperand(0); in computeKnownBitsFromICmpCond()
743 Value *RHS = Cmp->getOperand(1); in computeKnownBitsFromICmpCond()
747 KnownBits DstKnown(LHS->getType()->getScalarSizeInBits()); in computeKnownBitsFromICmpCond()
757 KnownBits &Known, unsigned Depth, in computeKnownBitsFromCond() argument
760 if (Depth < MaxAnalysisRecursionDepth && in computeKnownBitsFromCond()
764 computeKnownBitsFromCond(V, A, Known2, Depth + 1, SQ, Invert); in computeKnownBitsFromCond()
765 computeKnownBitsFromCond(V, B, Known3, Depth + 1, SQ, Invert); in computeKnownBitsFromCond()
779 unsigned Depth, const SimplifyQuery &Q) { in computeKnownBitsFromContext() argument
781 if (Q.CC && Q.CC->AffectedValues.contains(V)) in computeKnownBitsFromContext()
782 computeKnownBitsFromCond(V, Q.CC->Cond, Known, Depth, Q, Q.CC->Invert); in computeKnownBitsFromContext()
789 for (BranchInst *BI : Q.DC->conditionsFor(V)) { in computeKnownBitsFromContext()
790 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0)); in computeKnownBitsFromContext()
791 if (Q.DT->dominates(Edge0, Q.CxtI->getParent())) in computeKnownBitsFromContext()
792 computeKnownBitsFromCond(V, BI->getCondition(), Known, Depth, Q, in computeKnownBitsFromContext()
795 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1)); in computeKnownBitsFromContext()
796 if (Q.DT->dominates(Edge1, Q.CxtI->getParent())) in computeKnownBitsFromContext()
797 computeKnownBitsFromCond(V, BI->getCondition(), Known, Depth, Q, in computeKnownBitsFromContext()
813 for (AssumptionCache::ResultElem &Elem : Q.AC->assumptionsFor(V)) { in computeKnownBitsFromContext()
818 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() && in computeKnownBitsFromContext()
822 if (!V->getType()->isPointerTy()) in computeKnownBitsFromContext()
825 *I, I->bundle_op_info_begin()[Elem.Index])) { in computeKnownBitsFromContext()
838 Value *Arg = I->getArgOperand(0); in computeKnownBitsFromContext()
855 if (Depth == MaxAnalysisRecursionDepth) in computeKnownBitsFromContext()
875 /// non-constant shift amount. Known is the output of this function. Known2 is a
876 /// pre-allocated temporary with the same bit width as Known and on return
878 /// operator-specific function that, given the known-bits and a shift amount,
879 /// computes the implied known-bits of the shift operator's result respectively in computeKnownBitsFromShiftOperator()
884 KnownBits &Known2, unsigned Depth, const SimplifyQuery &Q, in computeKnownBitsFromShiftOperator() argument
886 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromShiftOperator()
887 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromShiftOperator()
888 // To limit compile-time impact, only query isKnownNonZero() if we know at in computeKnownBitsFromShiftOperator()
893 isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth + 1)); in computeKnownBitsFromShiftOperator()
900 unsigned Depth, const SimplifyQuery &Q) { in getKnownBitsFromAndXorOr() argument
907 switch (I->getOpcode()) { in getKnownBitsFromAndXorOr()
911 // and(x, -x) is a common idiom that will clear all but the lowest set in getKnownBitsFromAndXorOr()
915 // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x). in getKnownBitsFromAndXorOr()
917 // -(-x) == x so using whichever (LHS/RHS) gets us a better result. in getKnownBitsFromAndXorOr()
929 // xor(x, x-1) is a common idiom that will clear all bits above the lowest set in getKnownBitsFromAndXorOr()
932 // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C != in getKnownBitsFromAndXorOr()
933 // -1 but for the purpose of demanded bits (xor(x, x-C) & in getKnownBitsFromAndXorOr()
934 // Demanded) == (xor(x, x-1) & Demanded). Extend the xor pattern in getKnownBitsFromAndXorOr()
935 // to use arbitrary C if xor(x, x-C) is the same as xor(x, x-1). in getKnownBitsFromAndXorOr()
938 const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS; in getKnownBitsFromAndXorOr()
946 // and(x, add (x, -1)) is a common idiom that always clears the low bit; in getKnownBitsFromAndXorOr()
947 // xor/or(x, add (x, -1)) is an idiom that will always set the low bit. in getKnownBitsFromAndXorOr()
957 computeKnownBits(Y, DemandedElts, KnownY, Depth + 1, Q); in getKnownBitsFromAndXorOr()
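The x-and-x±1 idioms above are easy to verify by brute force: decrementing flips the low bit (borrowing through trailing zeros), so and(x, x-1) always has bit 0 clear while or/xor(x, x-1) always have it set. Illustration only:

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned X = 0; X < 256; ++X) {
    uint8_t Dec = (uint8_t)(X - 1); // x - 1 with i8 wrapping
    assert(((X & Dec) & 1) == 0);   // and(x, x-1): low bit always cleared
    assert(((X | Dec) & 1) == 1);   // or(x, x-1):  low bit always set
    assert(((X ^ Dec) & 1) == 1);   // xor(x, x-1): low bit always set
  }
  return 0;
}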
969 const Operator *I, const APInt &DemandedElts, unsigned Depth, in computeKnownBitsForHorizontalOperation() argument
974 getHorizDemandedEltsForFirstOperand(Q.DL.getTypeSizeInBits(I->getType()), in computeKnownBitsForHorizontalOperation()
979 [Depth, &Q, KnownBitsFunc](const Value *Op, APInt &DemandedEltsOp) { in computeKnownBitsForHorizontalOperation()
981 computeKnownBits(Op, DemandedEltsOp, Depth + 1, Q), in computeKnownBitsForHorizontalOperation()
982 computeKnownBits(Op, DemandedEltsOp << 1, Depth + 1, Q)); in computeKnownBitsForHorizontalOperation()
986 return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS); in computeKnownBitsForHorizontalOperation()
988 return ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS); in computeKnownBitsForHorizontalOperation()
990 return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS) in computeKnownBitsForHorizontalOperation()
991 .intersectWith(ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS)); in computeKnownBitsForHorizontalOperation()
998 unsigned Depth, in analyzeKnownBitsFromAndXorOr() argument
1000 auto *FVTy = dyn_cast<FixedVectorType>(I->getType()); in analyzeKnownBitsFromAndXorOr()
1002 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in analyzeKnownBitsFromAndXorOr()
1004 return getKnownBitsFromAndXorOr(I, DemandedElts, KnownLHS, KnownRHS, Depth, in analyzeKnownBitsFromAndXorOr()
1009 Attribute Attr = F->getFnAttribute(Attribute::VScaleRange); in getVScaleRange()
1010 // Without vscale_range, we only know that vscale is non-zero. in getVScaleRange()
1028 Value *Arm, bool Invert, unsigned Depth, in adjustKnownBitsForSelectArm() argument
1036 computeKnownBitsFromCond(Arm, Cond, CondRes, Depth + 1, Q, Invert); in adjustKnownBitsForSelectArm()
1053 if (!isGuaranteedNotToBeUndef(Arm, Q.AC, Q.CxtI, Q.DT, Depth + 1)) in adjustKnownBitsForSelectArm()
1063 KnownBits &Known, unsigned Depth, in computeKnownBitsFromOperator() argument
1068 switch (I->getOpcode()) { in computeKnownBitsFromOperator()
1076 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1077 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1079 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q); in computeKnownBitsFromOperator()
1082 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1083 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1085 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q); in computeKnownBitsFromOperator()
1088 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1089 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1091 Known = getKnownBitsFromAndXorOr(I, DemandedElts, Known2, Known, Depth, Q); in computeKnownBitsFromOperator()
1095 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts, in computeKnownBitsFromOperator()
1096 Known, Known2, Depth, Q); in computeKnownBitsFromOperator()
1100 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1101 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1107 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1108 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1116 computeKnownBits(Arm, DemandedElts, Res, Depth + 1, Q); in computeKnownBitsFromOperator()
1117 adjustKnownBitsForSelectArm(Res, I->getOperand(0), Arm, Invert, Depth, Q); in computeKnownBitsFromOperator()
1122 ComputeForArm(I->getOperand(1), /*Invert=*/false) in computeKnownBitsFromOperator()
1123 .intersectWith(ComputeForArm(I->getOperand(2), /*Invert=*/true)); in computeKnownBitsFromOperator()
1139 Type *SrcTy = I->getOperand(0)->getType(); in computeKnownBitsFromOperator()
1144 Type *ScalarTy = SrcTy->getScalarType(); in computeKnownBitsFromOperator()
1145 SrcBitWidth = ScalarTy->isPointerTy() ? in computeKnownBitsFromOperator()
1151 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1153 Inst && Inst->hasNonNeg() && !Known.isNegative()) in computeKnownBitsFromOperator()
1159 Type *SrcTy = I->getOperand(0)->getType(); in computeKnownBitsFromOperator()
1160 if (SrcTy->isIntOrPtrTy() && in computeKnownBitsFromOperator()
1163 !I->getType()->isVectorTy()) { in computeKnownBitsFromOperator()
1164 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1171 V->getType()->isFPOrFPVectorTy()) { in computeKnownBitsFromOperator()
1172 Type *FPType = V->getType()->getScalarType(); in computeKnownBitsFromOperator()
1174 computeKnownFPClass(V, DemandedElts, fcAllFlags, Depth + 1, Q); in computeKnownBitsFromOperator()
1187 APFloat::getInf(FPType->getFltSemantics()).bitcastToAPInt())); in computeKnownBitsFromOperator()
1191 APInt::getZero(FPType->getScalarSizeInBits()))); in computeKnownBitsFromOperator()
1209 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() || in computeKnownBitsFromOperator()
1210 !I->getType()->isIntOrIntVectorTy() || in computeKnownBitsFromOperator()
1211 isa<ScalableVectorType>(I->getType())) in computeKnownBitsFromOperator()
1215 // Examples: v4i32 -> v2i64, v3i8 -> i24 in computeKnownBitsFromOperator()
1216 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits(); in computeKnownBitsFromOperator()
1222 // For this bitcast, each demanded element of the output is sub-divided in computeKnownBitsFromOperator()
1225 // bits for each sub-element sequentially. This is done by shifting the in computeKnownBitsFromOperator()
1226 // one-set-bit demanded elements parameter across the sub-elements for in computeKnownBitsFromOperator()
1230 // The known bits of each sub-element are then inserted into place in computeKnownBitsFromOperator()
1242 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc, in computeKnownBitsFromOperator()
1243 Depth + 1, Q); in computeKnownBitsFromOperator()
1244 unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i; in computeKnownBitsFromOperator()
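The ShiftElt selection mirrors where each source sub-element lands in memory order. A standalone endianness sketch (illustration only) for bitcasting <2 x i16> to i32:

#include <cstdint>
#include <cstdio>

int main() {
  uint16_t Elts[2] = {0x00FF, 0xAB00};
  // Little-endian layout: source element i occupies bits [16*i, 16*i + 16).
  uint32_t LE = (uint32_t)Elts[0] | ((uint32_t)Elts[1] << 16);
  // Big-endian layout: the sub-element order is reversed.
  uint32_t BE = ((uint32_t)Elts[0] << 16) | (uint32_t)Elts[1];
  std::printf("LE=%08x BE=%08x\n", (unsigned)LE, (unsigned)BE); // ab0000ff 00ffab00
}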
1252 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); in computeKnownBitsFromOperator()
1255 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1268 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, in computeKnownBitsFromOperator()
1270 // Trailing zeros of a left-shifted constant never decrease. in computeKnownBitsFromOperator()
1272 if (match(I->getOperand(0), m_APInt(C))) in computeKnownBitsFromOperator()
1273 Known.Zero.setLowBits(C->countr_zero()); in computeKnownBitsFromOperator()
1282 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, in computeKnownBitsFromOperator()
1284 // Leading zeros of a right-shifted constant never decrease. in computeKnownBitsFromOperator()
1286 if (match(I->getOperand(0), m_APInt(C))) in computeKnownBitsFromOperator()
1287 Known.Zero.setHighBits(C->countl_zero()); in computeKnownBitsFromOperator()
1296 computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q, in computeKnownBitsFromOperator()
1303 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, NUW, in computeKnownBitsFromOperator()
1304 DemandedElts, Known, Known2, Depth, Q); in computeKnownBitsFromOperator()
1310 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, NUW, in computeKnownBitsFromOperator()
1311 DemandedElts, Known, Known2, Depth, Q); in computeKnownBitsFromOperator()
1315 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1316 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1321 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1322 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1326 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign())); in computeKnownBitsFromOperator()
1331 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1337 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) { in computeKnownBitsFromOperator()
1338 // TrailZ can only become smaller, short-circuit if we hit zero. in computeKnownBitsFromOperator()
1342 Value *Index = I->getOperand(i); in computeKnownBitsFromOperator()
1346 if (CIndex && CIndex->isZeroValue()) in computeKnownBitsFromOperator()
1355 if (CIndex->getType()->isVectorTy()) in computeKnownBitsFromOperator()
1356 Index = CIndex->getSplatValue(); in computeKnownBitsFromOperator()
1358 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); in computeKnownBitsFromOperator()
1360 uint64_t Offset = SL->getElementOffset(Idx); in computeKnownBitsFromOperator()
1367 if (!IndexedTy->isSized()) { in computeKnownBitsFromOperator()
1372 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits(); in computeKnownBitsFromOperator()
1374 computeKnownBits(Index, IndexBits, Depth + 1, Q); in computeKnownBitsFromOperator()
1397 // to the language reference we need to sign-extend or truncate them in computeKnownBitsFromOperator()
1418 // Handle the case of a simple two-predecessor recurrence PHI. in computeKnownBitsFromOperator()
1421 unsigned Opcode = BO->getOpcode(); in computeKnownBitsFromOperator()
1428 BO->getOperand(0) == I) { in computeKnownBitsFromOperator()
1440 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ); in computeKnownBitsFromOperator()
1473 unsigned OpNum = P->getOperand(0) == R ? 0 : 1; in computeKnownBitsFromOperator()
1474 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator(); in computeKnownBitsFromOperator()
1475 Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator(); in computeKnownBitsFromOperator()
1480 computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ); in computeKnownBitsFromOperator()
1485 computeKnownBits(L, DemandedElts, Known3, Depth + 1, RecQ); in computeKnownBitsFromOperator()
1499 // (add non-negative, non-negative) --> non-negative in computeKnownBitsFromOperator()
1500 // (add negative, negative) --> negative in computeKnownBitsFromOperator()
1508 // (sub nsw non-negative, negative) --> non-negative in computeKnownBitsFromOperator()
1509 // (sub nsw negative, non-negative) --> negative in computeKnownBitsFromOperator()
1510 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) { in computeKnownBitsFromOperator()
1517 // (mul nsw non-negative, non-negative) --> non-negative in computeKnownBitsFromOperator()
1527 // Unreachable blocks may have zero-operand PHI nodes. in computeKnownBitsFromOperator()
1528 if (P->getNumIncomingValues() == 0) in computeKnownBitsFromOperator()
1533 if (Depth < MaxAnalysisRecursionDepth - 1 && Known.isUnknown()) { in computeKnownBitsFromOperator()
1535 if (isa_and_nonnull<UndefValue>(P->hasConstantValue())) in computeKnownBitsFromOperator()
1540 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) { in computeKnownBitsFromOperator()
1541 Value *IncValue = P->getIncomingValue(u); in computeKnownBitsFromOperator()
1550 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator(); in computeKnownBitsFromOperator()
1559 MaxAnalysisRecursionDepth - 1, RecQ); in computeKnownBitsFromOperator()
1572 if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) { in computeKnownBitsFromOperator()
1574 if (FalseSucc == P->getParent()) in computeKnownBitsFromOperator()
1613 if (std::optional<ConstantRange> Range = CB->getRange()) in computeKnownBitsFromOperator()
1614 Known = Known.unionWith(Range->toKnownBits()); in computeKnownBitsFromOperator()
1616 if (const Value *RV = CB->getReturnedArgOperand()) { in computeKnownBitsFromOperator()
1617 if (RV->getType() == I->getType()) { in computeKnownBitsFromOperator()
1618 computeKnownBits(RV, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1629 switch (II->getIntrinsicID()) { in computeKnownBitsFromOperator()
1633 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1634 bool IntMinIsPoison = match(II->getArgOperand(1), m_One()); in computeKnownBitsFromOperator()
1639 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1644 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1649 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1653 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) in computeKnownBitsFromOperator()
1654 PossibleLZ = std::min(PossibleLZ, BitWidth - 1); in computeKnownBitsFromOperator()
1660 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1664 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) in computeKnownBitsFromOperator()
1665 PossibleTZ = std::min(PossibleTZ, BitWidth - 1); in computeKnownBitsFromOperator()
1671 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1684 if (!match(I->getOperand(2), m_APInt(SA))) in computeKnownBitsFromOperator()
1688 uint64_t ShiftAmt = SA->urem(BitWidth); in computeKnownBitsFromOperator()
1689 if (II->getIntrinsicID() == Intrinsic::fshr) in computeKnownBitsFromOperator()
1690 ShiftAmt = BitWidth - ShiftAmt; in computeKnownBitsFromOperator()
1693 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1694 computeKnownBits(I->getOperand(1), DemandedElts, Known3, Depth + 1, Q); in computeKnownBitsFromOperator()
1697 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt); in computeKnownBitsFromOperator()
1699 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt); in computeKnownBitsFromOperator()
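With a constant amount, a funnel shift is just a shift-and-or of its two operands, which is why the known zeros combine as Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt). A standalone sketch of the i32 semantics (illustration only):

#include <cstdint>
#include <cstdio>

static uint32_t fshl32(uint32_t Hi, uint32_t Lo, unsigned Amt) {
  Amt %= 32; // mirrors SA->urem(BitWidth)
  return Amt ? (Hi << Amt) | (Lo >> (32 - Amt)) : Hi;
}

static uint32_t fshr32(uint32_t Hi, uint32_t Lo, unsigned Amt) {
  Amt %= 32; // fshr reuses the fshl form with the amount rewritten as 32 - Amt
  return Amt ? (Hi << (32 - Amt)) | (Lo >> Amt) : Lo;
}

int main() {
  std::printf("%08x\n", (unsigned)fshl32(0x000000AB, 0xCD000000, 8)); // 0000abcd
  std::printf("%08x\n", (unsigned)fshr32(0x000000AB, 0xCD0000EF, 8)); // abcd0000
}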
1703 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1704 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1708 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1709 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1713 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1714 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1718 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1719 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1724 computeKnownBits(I->getOperand(0), DemandedElts.reverseBits(), Known, in computeKnownBitsFromOperator()
1725 Depth + 1, Q); in computeKnownBitsFromOperator()
1735 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1738 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1742 auto *VecTy = cast<VectorType>(I->getOperand(0)->getType()); in computeKnownBitsFromOperator()
1744 bool EvenCnt = VecTy->getElementCount().isKnownEven(); in computeKnownBitsFromOperator()
1748 if (VecTy->isScalableTy() || EvenCnt) in computeKnownBitsFromOperator()
1753 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1754 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1758 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1759 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1763 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1764 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1768 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1769 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1773 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1775 const Value *Mask = I->getOperand(1); in computeKnownBitsFromOperator()
1776 Known2 = KnownBits(Mask->getType()->getScalarSizeInBits()); in computeKnownBitsFromOperator()
1777 computeKnownBits(Mask, DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1778 // TODO: 1-extend would be more precise. in computeKnownBitsFromOperator()
1785 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1786 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1792 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1793 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1804 I, DemandedElts, Depth, Q, in computeKnownBitsFromOperator()
1814 Known = computeKnownBitsForHorizontalOperation(I, DemandedElts, Depth, in computeKnownBitsFromOperator()
1823 I, DemandedElts, Depth, Q, in computeKnownBitsFromOperator()
1833 Known = computeKnownBitsForHorizontalOperation(I, DemandedElts, Depth, in computeKnownBitsFromOperator()
1839 bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli; in computeKnownBitsFromOperator()
1840 const ConstantRange Range = getVScaleRange(II->getFunction(), BitWidth); in computeKnownBitsFromOperator()
1842 cast<ConstantInt>(II->getArgOperand(HasAVL))->getZExtValue()); in computeKnownBitsFromOperator()
1844 cast<ConstantInt>(II->getArgOperand(1 + HasAVL))->getZExtValue()); in computeKnownBitsFromOperator()
1851 if (auto *CI = dyn_cast<ConstantInt>(II->getArgOperand(0))) in computeKnownBitsFromOperator()
1852 MaxVL = std::min(MaxVL, CI->getZExtValue()); in computeKnownBitsFromOperator()
1860 if (!II->getParent() || !II->getFunction()) in computeKnownBitsFromOperator()
1863 Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits(); in computeKnownBitsFromOperator()
1887 const Value *LHS = Shuf->getOperand(0); in computeKnownBitsFromOperator()
1888 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1894 const Value *RHS = Shuf->getOperand(1); in computeKnownBitsFromOperator()
1895 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1901 if (isa<ScalableVectorType>(I->getType())) { in computeKnownBitsFromOperator()
1905 const Value *Vec = I->getOperand(0); in computeKnownBitsFromOperator()
1906 const Value *Elt = I->getOperand(1); in computeKnownBitsFromOperator()
1907 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); in computeKnownBitsFromOperator()
1912 if (CIdx && CIdx->getValue().ult(NumElts)) { in computeKnownBitsFromOperator()
1913 DemandedVecElts.clearBit(CIdx->getZExtValue()); in computeKnownBitsFromOperator()
1914 NeedsElt = DemandedElts[CIdx->getZExtValue()]; in computeKnownBitsFromOperator()
1920 computeKnownBits(Elt, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1927 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1933 // Look through extract element. If the index is non-constant or in computeKnownBitsFromOperator()
1934 // out-of-range, demand all elements; otherwise just the extracted element. in computeKnownBitsFromOperator()
1935 const Value *Vec = I->getOperand(0); in computeKnownBitsFromOperator()
1936 const Value *Idx = I->getOperand(1); in computeKnownBitsFromOperator()
1938 if (isa<ScalableVectorType>(Vec->getType())) { in computeKnownBitsFromOperator()
1943 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); in computeKnownBitsFromOperator()
1945 if (CIdx && CIdx->getValue().ult(NumElts)) in computeKnownBitsFromOperator()
1946 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); in computeKnownBitsFromOperator()
1947 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1951 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { in computeKnownBitsFromOperator()
1953 if (EVI->getNumIndices() != 1) break; in computeKnownBitsFromOperator()
1954 if (EVI->getIndices()[0] == 0) { in computeKnownBitsFromOperator()
1955 switch (II->getIntrinsicID()) { in computeKnownBitsFromOperator()
1960 true, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false, in computeKnownBitsFromOperator()
1961 /* NUW=*/false, DemandedElts, Known, Known2, Depth, Q); in computeKnownBitsFromOperator()
1966 false, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false, in computeKnownBitsFromOperator()
1967 /* NUW=*/false, DemandedElts, Known, Known2, Depth, Q); in computeKnownBitsFromOperator()
1971 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false, in computeKnownBitsFromOperator()
1972 DemandedElts, Known, Known2, Depth, Q); in computeKnownBitsFromOperator()
1979 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, in computeKnownBitsFromOperator()
1980 Depth + 1)) in computeKnownBitsFromOperator()
1981 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1989 unsigned Depth, const SimplifyQuery &Q) { in computeKnownBits() argument
1990 KnownBits Known(getBitWidth(V->getType(), Q.DL)); in computeKnownBits()
1991 ::computeKnownBits(V, DemandedElts, Known, Depth, Q); in computeKnownBits()
1997 KnownBits llvm::computeKnownBits(const Value *V, unsigned Depth, in computeKnownBits() argument
1999 KnownBits Known(getBitWidth(V->getType(), Q.DL)); in computeKnownBits()
2000 computeKnownBits(V, Known, Depth, Q); in computeKnownBits()
2010 /// optimized based on the contradictory assumption that it is non-zero.
2020 KnownBits &Known, unsigned Depth, in computeKnownBits() argument
2029 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); in computeKnownBits()
2032 Type *Ty = V->getType(); in computeKnownBits()
2035 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) && in computeKnownBits()
2040 FVTy->getNumElements() == DemandedElts.getBitWidth() && in computeKnownBits()
2047 Type *ScalarTy = Ty->getScalarType(); in computeKnownBits()
2048 if (ScalarTy->isPointerTy()) { in computeKnownBits()
2063 // Null and aggregate-zero are all-zeros. in computeKnownBits()
2071 assert(!isa<ScalableVectorType>(V->getType())); in computeKnownBits()
2075 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) { in computeKnownBits()
2078 APInt Elt = CDV->getElementAsAPInt(i); in computeKnownBits()
2088 assert(!isa<ScalableVectorType>(V->getType())); in computeKnownBits()
2092 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { in computeKnownBits()
2095 Constant *Element = CV->getAggregateElement(i); in computeKnownBits()
2103 const APInt &Elt = ElementCI->getValue(); in computeKnownBits()
2124 if (std::optional<ConstantRange> Range = A->getRange()) in computeKnownBits()
2125 Known = Range->toKnownBits(); in computeKnownBits()
2127 // All recursive calls that increase depth must come after this. in computeKnownBits()
2128 if (Depth == MaxAnalysisRecursionDepth) in computeKnownBits()
2131 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has in computeKnownBits()
2134 if (!GA->isInterposable()) in computeKnownBits()
2135 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); in computeKnownBits()
2140 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q); in computeKnownBits()
2142 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) in computeKnownBits()
2143 Known = CR->toKnownBits(); in computeKnownBits()
2146 // Aligned pointers have trailing zeros - refine Known.Zero set in computeKnownBits()
2147 if (isa<PointerType>(V->getType())) { in computeKnownBits()
2148 Align Alignment = V->getPointerAlignment(Q.DL); in computeKnownBits()
2156 computeKnownBitsFromContext(V, Known, Depth, Q); in computeKnownBits()
2162 unsigned Depth, SimplifyQuery &Q) { in isPowerOfTwoRecurrence() argument
2169 for (const Use &U : PN->operands()) { in isPowerOfTwoRecurrence()
2173 Q.CxtI = PN->getIncomingBlock(U)->getTerminator(); in isPowerOfTwoRecurrence()
2174 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q)) in isPowerOfTwoRecurrence()
2181 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step) in isPowerOfTwoRecurrence()
2184 Q.CxtI = BO->getParent()->getTerminator(); in isPowerOfTwoRecurrence()
2185 switch (BO->getOpcode()) { in isPowerOfTwoRecurrence()
2190 isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q); in isPowerOfTwoRecurrence()
2199 // If OrZero is false, we cannot guarantee the induction variable is non-zero after in isPowerOfTwoRecurrence()
2202 isKnownToBeAPowerOfTwo(Step, false, Depth, Q); in isPowerOfTwoRecurrence()
2220 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, in isKnownToBeAPowerOfTwo() argument
2222 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); in isKnownToBeAPowerOfTwo()
2228 if (OrZero && V->getType()->getScalarSizeInBits() == 1) in isKnownToBeAPowerOfTwo()
2236 const Function *F = Q.CxtI->getFunction(); in isKnownToBeAPowerOfTwo()
2237 // The vscale_range indicates vscale is a power-of-two. in isKnownToBeAPowerOfTwo()
2238 return F->hasFnAttribute(Attribute::VScaleRange); in isKnownToBeAPowerOfTwo()
2252 if (Depth++ == MaxAnalysisRecursionDepth) in isKnownToBeAPowerOfTwo()
2255 switch (I->getOpcode()) { in isKnownToBeAPowerOfTwo()
2257 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2259 return OrZero && isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2262 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2266 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2270 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2273 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) && in isKnownToBeAPowerOfTwo()
2274 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q) && in isKnownToBeAPowerOfTwo()
2275 (OrZero || isKnownNonZero(I, Q, Depth)); in isKnownToBeAPowerOfTwo()
2279 (isKnownToBeAPowerOfTwo(I->getOperand(1), /*OrZero*/ true, Depth, Q) || in isKnownToBeAPowerOfTwo()
2280 isKnownToBeAPowerOfTwo(I->getOperand(0), /*OrZero*/ true, Depth, Q))) in isKnownToBeAPowerOfTwo()
2282 // X & (-X) is always a power of two or zero. in isKnownToBeAPowerOfTwo()
2283 if (match(I->getOperand(0), m_Neg(m_Specific(I->getOperand(1)))) || in isKnownToBeAPowerOfTwo()
2284 match(I->getOperand(1), m_Neg(m_Specific(I->getOperand(0))))) in isKnownToBeAPowerOfTwo()
2285 return OrZero || isKnownNonZero(I->getOperand(0), Q, Depth); in isKnownToBeAPowerOfTwo()
2288 // Adding a power-of-two or zero to the same power-of-two or zero yields in isKnownToBeAPowerOfTwo()
2289 // either the original power-of-two, a larger power-of-two or zero. in isKnownToBeAPowerOfTwo()
2293 if (match(I->getOperand(0), in isKnownToBeAPowerOfTwo()
2294 m_c_And(m_Specific(I->getOperand(1)), m_Value())) && in isKnownToBeAPowerOfTwo()
2295 isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q)) in isKnownToBeAPowerOfTwo()
2297 if (match(I->getOperand(1), in isKnownToBeAPowerOfTwo()
2298 m_c_And(m_Specific(I->getOperand(0)), m_Value())) && in isKnownToBeAPowerOfTwo()
2299 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q)) in isKnownToBeAPowerOfTwo()
2302 unsigned BitWidth = V->getType()->getScalarSizeInBits(); in isKnownToBeAPowerOfTwo()
2304 computeKnownBits(I->getOperand(0), LHSBits, Depth, Q); in isKnownToBeAPowerOfTwo()
2307 computeKnownBits(I->getOperand(1), RHSBits, Depth, Q); in isKnownToBeAPowerOfTwo()
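The addition path above has a one-line justification: if X is a power of two or zero, then (X & Y) is either 0 or X, so (X & Y) + X is 0, X, or 2 * X, each again a power of two or zero (2 * X wraps to 0 at the top bit). A brute-force 8-bit check (illustration only):

#include <cassert>
#include <cstdint>

static bool pow2OrZero(uint8_t V) { return (V & (uint8_t)(V - 1)) == 0; }

int main() {
  for (unsigned X = 0; X < 256; ++X) {
    if (!pow2OrZero((uint8_t)X))
      continue;
    for (unsigned Y = 0; Y < 256; ++Y)
      assert(pow2OrZero((uint8_t)((X & Y) + X))); // wrapping i8 add included
  }
  return 0;
}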
2325 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) && in isKnownToBeAPowerOfTwo()
2326 isKnownToBeAPowerOfTwo(I->getOperand(2), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2335 if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ)) in isKnownToBeAPowerOfTwo()
2340 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); in isKnownToBeAPowerOfTwo()
2341 return llvm::all_of(PN->operands(), [&](const Use &U) { in isKnownToBeAPowerOfTwo()
2348 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); in isKnownToBeAPowerOfTwo()
2355 switch (II->getIntrinsicID()) { in isKnownToBeAPowerOfTwo()
2360 return isKnownToBeAPowerOfTwo(II->getArgOperand(1), OrZero, Depth, Q) && in isKnownToBeAPowerOfTwo()
2361 isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2363 // thus don't change pow2/non-pow2 status. in isKnownToBeAPowerOfTwo()
2366 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2370 if (II->getArgOperand(0) == II->getArgOperand(1)) in isKnownToBeAPowerOfTwo()
2371 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2384 /// Test whether a GEP's result is known to be non-null.
2387 /// to be non-null.
2390 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth, in isGEPKnownNonNull() argument
2394 F = I->getFunction(); in isGEPKnownNonNull()
2398 if (!GEP->hasNoUnsignedWrap() && in isGEPKnownNonNull()
2399 !(GEP->isInBounds() && in isGEPKnownNonNull()
2400 !NullPointerIsDefined(F, GEP->getPointerAddressSpace()))) in isGEPKnownNonNull()
2403 // FIXME: Support vector-GEPs. in isGEPKnownNonNull()
2404 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); in isGEPKnownNonNull()
2406 // If the base pointer is non-null, we cannot walk to a null address with an in isGEPKnownNonNull()
2408 if (isKnownNonZero(GEP->getPointerOperand(), Q, Depth)) in isGEPKnownNonNull()
2411 // Walk the GEP operands and see if any operand introduces a non-zero offset. in isGEPKnownNonNull()
2416 // Struct types are easy -- they must always be indexed by a constant. in isGEPKnownNonNull()
2419 unsigned ElementIdx = OpC->getZExtValue(); in isGEPKnownNonNull()
2421 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); in isGEPKnownNonNull()
2427 // If we have a zero-sized type, the index doesn't matter. Keep looping. in isGEPKnownNonNull()
2432 // increment Depth when just zipping down an all-constant GEP. in isGEPKnownNonNull()
2434 if (!OpC->isZero()) in isGEPKnownNonNull()
2439 // We post-increment Depth here because while isKnownNonZero increments it in isGEPKnownNonNull()
2443 // of depth. in isGEPKnownNonNull()
2444 if (Depth++ >= MaxAnalysisRecursionDepth) in isGEPKnownNonNull()
2447 if (isKnownNonZero(GTI.getOperand(), Q, Depth)) in isGEPKnownNonNull()
2463 for (const auto *U : V->users()) { in isKnownNonNullFromDominatingCondition()
2470 // attributes may provide an answer about null-ness. in isKnownNonNullFromDominatingCondition()
2472 if (auto *CalledFunc = CB->getCalledFunction()) in isKnownNonNullFromDominatingCondition()
2473 for (const Argument &Arg : CalledFunc->args()) in isKnownNonNullFromDominatingCondition()
2474 if (CB->getArgOperand(Arg.getArgNo()) == V && in isKnownNonNullFromDominatingCondition()
2476 DT->dominates(CB, CtxI)) in isKnownNonNullFromDominatingCondition()
2482 if (!NullPointerIsDefined(I->getFunction(), in isKnownNonNullFromDominatingCondition()
2483 V->getType()->getPointerAddressSpace()) && in isKnownNonNullFromDominatingCondition()
2484 DT->dominates(I, CtxI)) in isKnownNonNullFromDominatingCondition()
2509 for (const auto *CmpU : U->users()) { in isKnownNonNullFromDominatingCondition()
2523 for (const auto *CurrU : Curr->users()) in isKnownNonNullFromDominatingCondition()
2530 assert(BI->isConditional() && "uses a comparison!"); in isKnownNonNullFromDominatingCondition()
2533 BI->getSuccessor(NonNullIfTrue ? 0 : 1); in isKnownNonNullFromDominatingCondition()
2534 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); in isKnownNonNullFromDominatingCondition()
2535 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) in isKnownNonNullFromDominatingCondition()
2538 DT->dominates(cast<Instruction>(Curr), CtxI)) { in isKnownNonNullFromDominatingCondition()
2552 const unsigned NumRanges = Ranges->getNumOperands() / 2; in rangeMetadataExcludesValue()
2556 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0)); in rangeMetadataExcludesValue()
2558 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1)); in rangeMetadataExcludesValue()
2559 ConstantRange Range(Lower->getValue(), Upper->getValue()); in rangeMetadataExcludesValue()
2567 /// non-zero starting value. These are common as induction variables.
2573 !match(Start, m_APInt(StartC)) || StartC->isZero()) in isNonZeroRecurrence()
2576 switch (BO->getOpcode()) { in isNonZeroRecurrence()
2578 // Starting from non-zero and stepping away from zero can never wrap back in isNonZeroRecurrence()
2580 return BO->hasNoUnsignedWrap() || in isNonZeroRecurrence()
2581 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) && in isNonZeroRecurrence()
2582 StartC->isNegative() == StepC->isNegative()); in isNonZeroRecurrence()
2584 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) && in isNonZeroRecurrence()
2585 match(Step, m_APInt(StepC)) && !StepC->isZero(); in isNonZeroRecurrence()
2587 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap(); in isNonZeroRecurrence()
2590 return BO->isExact(); in isNonZeroRecurrence()
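For the nuw addition case: without unsigned wrap the recurrence is monotonically non-decreasing, so a non-zero start can never return to zero. A small i8 simulation (illustration only; wrapping steps are skipped, as nuw would forbid them):

#include <cassert>

int main() {
  for (unsigned Start = 1; Start < 256; ++Start)
    for (unsigned Step = 0; Step < 256; ++Step) {
      unsigned V = Start;
      for (int I = 0; I < 16; ++I) {
        if (V + Step > 255)
          break;       // this iteration would wrap in i8, which nuw rules out
        V += Step;
        assert(V != 0);
      }
    }
  return 0;
}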
2603 static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth, in isNonZeroAdd() argument
2611 return isKnownNonZero(Y, DemandedElts, Q, Depth) || in isNonZeroAdd()
2612 isKnownNonZero(X, DemandedElts, Q, Depth); in isNonZeroAdd()
2614 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); in isNonZeroAdd()
2615 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); in isNonZeroAdd()
2617 // If X and Y are both non-negative (as signed values) then their sum is not in isNonZeroAdd()
2620 if (isKnownNonZero(Y, DemandedElts, Q, Depth) || in isNonZeroAdd()
2621 isKnownNonZero(X, DemandedElts, Q, Depth)) in isNonZeroAdd()
2638 // The sum of a non-negative number and a power of two is not zero. in isNonZeroAdd()
2640 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) in isNonZeroAdd()
2643 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) in isNonZeroAdd()
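The power-of-two fact above needs no wrap flags: with X non-negative (top bit clear) and Y a power of two, the sum lies in [1, 2^w - 1], so even a wrapping add cannot produce zero. A brute-force i8 illustration:

#include <cassert>
#include <cstdint>

static bool isPow2(uint8_t V) { return V != 0 && (V & (uint8_t)(V - 1)) == 0; }

int main() {
  for (unsigned X = 0; X <= 127; ++X)    // non-negative i8 values
    for (unsigned Y = 1; Y < 256; ++Y) {
      if (!isPow2((uint8_t)Y))
        continue;                        // Y ranges over i8 powers of two
      assert((uint8_t)(X + Y) != 0);     // even a wrapping i8 add is non-zero
    }
  return 0;
}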
2650 static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth, in isNonZeroSub() argument
2653 // (X - (X != 0)) is non-zero in isNonZeroSub()
2654 // ((X != 0) - X) is non-zero in isNonZeroSub()
2660 if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Q, Depth)) in isNonZeroSub()
2663 return ::isKnownNonEqual(X, Y, DemandedElts, Depth, Q); in isNonZeroSub()
2666 static bool isNonZeroMul(const APInt &DemandedElts, unsigned Depth, in isNonZeroMul() argument
2669 // If X and Y are non-zero then so is X * Y as long as the multiplication in isNonZeroMul()
2672 return isKnownNonZero(X, DemandedElts, Q, Depth) && in isNonZeroMul()
2673 isKnownNonZero(Y, DemandedElts, Q, Depth); in isNonZeroMul()
2675 // If either X or Y is odd, then if the other is non-zero the result can't in isNonZeroMul()
2677 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); in isNonZeroMul()
2679 return isKnownNonZero(Y, DemandedElts, Q, Depth); in isNonZeroMul()
2681 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); in isNonZeroMul()
2683 return XKnown.isNonZero() || isKnownNonZero(X, DemandedElts, Q, Depth); in isNonZeroMul()
2686 // non-zero, then X * Y is non-zero. We can find sX and sY by just taking in isNonZeroMul()
2687 // the lowest known One of X and Y. If they are non-zero, the result in isNonZeroMul()
2688 // must be non-zero. We can check if LSB(X) * LSB(Y) != 0 by doing in isNonZeroMul()
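The lowest-one trick works because lowest set bits multiply: writing X = 2^i * a and Y = 2^j * b with a, b odd gives X * Y = 2^(i+j) * ab with ab odd, so bit i + j of the product is set whenever i + j fits in the bit width. An 8-bit brute-force check (illustration only):

#include <cassert>
#include <cstdint>

static int ctz8(uint8_t V) {
  int N = 0;
  while (N < 8 && !((V >> N) & 1))
    ++N;
  return N;
}

int main() {
  for (unsigned X = 1; X < 256; ++X)
    for (unsigned Y = 1; Y < 256; ++Y) {
      int I = ctz8((uint8_t)X), J = ctz8((uint8_t)Y);
      if (I + J < 8)                                 // lowest ones still in range
        assert(((uint8_t)(X * Y) >> (I + J)) & 1);   // product non-zero mod 2^8
    }
  return 0;
}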
2695 unsigned Depth, const SimplifyQuery &Q, in isNonZeroShift() argument
2698 switch (I->getOpcode()) { in isNonZeroShift()
2711 switch (I->getOpcode()) { in isNonZeroShift()
2726 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); in isNonZeroShift()
2736 // non-zero then at least one non-zero bit must remain. in isNonZeroShift()
2737 if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift) in isNonZeroShift()
2738 .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) && in isNonZeroShift()
2739 isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth)) in isNonZeroShift()
2747 unsigned Depth, const SimplifyQuery &Q) { in isKnownNonZeroFromOperator() argument
2748 unsigned BitWidth = getBitWidth(I->getType()->getScalarType(), Q.DL); in isKnownNonZeroFromOperator()
2749 switch (I->getOpcode()) { in isKnownNonZeroFromOperator()
2752 return I->getType()->getPointerAddressSpace() == 0; in isKnownNonZeroFromOperator()
2754 if (I->getType()->isPointerTy()) in isKnownNonZeroFromOperator()
2755 return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q); in isKnownNonZeroFromOperator()
2765 // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a in isKnownNonZeroFromOperator()
2766 // <4 x i8> requires that all 4 i8 elements be non-zero which isn't in isKnownNonZeroFromOperator()
2781 // This is always safe as non-zero in the 4 i8 elements implies in isKnownNonZeroFromOperator()
2782 // non-zero in the combination of any two adjacent ones. Since i8 is a in isKnownNonZeroFromOperator()
2784 // This all implies the 2 i16 elements are non-zero. in isKnownNonZeroFromOperator()
2785 Type *FromTy = I->getOperand(0)->getType(); in isKnownNonZeroFromOperator()
2786 if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) && in isKnownNonZeroFromOperator()
2787 (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0) in isKnownNonZeroFromOperator()
2788 return isKnownNonZero(I->getOperand(0), Q, Depth); in isKnownNonZeroFromOperator()
2794 if (!isa<ScalableVectorType>(I->getType()) && in isKnownNonZeroFromOperator()
2795 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= in isKnownNonZeroFromOperator()
2796 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) in isKnownNonZeroFromOperator()
2797 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2801 // is a no-op or an extend and not a truncate. in isKnownNonZeroFromOperator()
2802 if (!isa<ScalableVectorType>(I->getType()) && in isKnownNonZeroFromOperator()
2803 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= in isKnownNonZeroFromOperator()
2804 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) in isKnownNonZeroFromOperator()
2805 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2808 // nuw/nsw trunc preserves zero/non-zero status of input. in isKnownNonZeroFromOperator()
2810 if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap()) in isKnownNonZeroFromOperator()
2811 return isKnownNonZero(TI->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2815 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), in isKnownNonZeroFromOperator()
2816 I->getOperand(1)); in isKnownNonZeroFromOperator()
2819 if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1))) in isKnownNonZeroFromOperator()
2824 if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1))) in isKnownNonZeroFromOperator()
2827 return isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth) || in isKnownNonZeroFromOperator()
2828 isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2832 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2835 // shl nsw/nuw can't remove any non-zero bits. in isKnownNonZeroFromOperator()
2838 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2843 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q); in isKnownNonZeroFromOperator()
2847 return isNonZeroShift(I, DemandedElts, Depth, Q, Known); in isKnownNonZeroFromOperator()
2853 if (BO->isExact()) in isKnownNonZeroFromOperator()
2854 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2859 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
2863 return isNonZeroShift(I, DemandedElts, Depth, Q, Known); in isKnownNonZeroFromOperator()
2869 if (cast<PossiblyExactOperator>(I)->isExact()) in isKnownNonZeroFromOperator()
2870 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2873 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
2880 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
2881 if (I->getOpcode() == Instruction::SDiv) { in isKnownNonZeroFromOperator()
2888 // If X is totally unknown or X u< Y we won't be able to prove non-zero in isKnownNonZeroFromOperator()
2895 // If the add has the nuw flag, then if either X or Y is non-zero the result is in isKnownNonZeroFromOperator()
2896 // non-zero. in isKnownNonZeroFromOperator()
2898 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), in isKnownNonZeroFromOperator()
2899 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO), in isKnownNonZeroFromOperator()
2904 return isNonZeroMul(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), in isKnownNonZeroFromOperator()
2905 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO), in isKnownNonZeroFromOperator()
2911 // First check if the arm is non-zero using `isKnownNonZero`. If that fails, in isKnownNonZeroFromOperator()
2912 // then see if the select condition implies the arm is non-zero. For example in isKnownNonZeroFromOperator()
2913 // (X != 0 ? X : Y), we know the true arm is non-zero as the `X` "return" is in isKnownNonZeroFromOperator()
2917 Op = IsTrueArm ? I->getOperand(1) : I->getOperand(2); in isKnownNonZeroFromOperator()
2918 // Op is trivially non-zero. in isKnownNonZeroFromOperator()
2919 if (isKnownNonZero(Op, DemandedElts, Q, Depth)) in isKnownNonZeroFromOperator()
2923 // condition implies that a given arm is non-zero. in isKnownNonZeroFromOperator()
2926 if (!match(I->getOperand(0), m_c_ICmp(Pred, m_Specific(Op), m_Value(X)))) in isKnownNonZeroFromOperator()
2945 // Check if all incoming values are non-zero using recursion. in isKnownNonZeroFromOperator()
2947 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); in isKnownNonZeroFromOperator()
2948 return llvm::all_of(PN->operands(), [&](const Use &U) { in isKnownNonZeroFromOperator()
2951 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); in isKnownNonZeroFromOperator()
2960 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) { in isKnownNonZeroFromOperator()
2962 if (FalseSucc == PN->getParent()) in isKnownNonZeroFromOperator()
2973 if (isa<ScalableVectorType>(I->getType())) in isKnownNonZeroFromOperator()
2976 const Value *Vec = I->getOperand(0); in isKnownNonZeroFromOperator()
2977 const Value *Elt = I->getOperand(1); in isKnownNonZeroFromOperator()
2978 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); in isKnownNonZeroFromOperator()
2984 if (CIdx && CIdx->getValue().ult(NumElts)) { in isKnownNonZeroFromOperator()
2985 DemandedVecElts.clearBit(CIdx->getZExtValue()); in isKnownNonZeroFromOperator()
2986 SkipElt = !DemandedElts[CIdx->getZExtValue()]; in isKnownNonZeroFromOperator()
2989 // Result is zero if Elt is non-zero and rest of the demanded elts in Vec in isKnownNonZeroFromOperator()
2990 // are non-zero. in isKnownNonZeroFromOperator()
2991 return (SkipElt || isKnownNonZero(Elt, Q, Depth)) && in isKnownNonZeroFromOperator()
2993 isKnownNonZero(Vec, DemandedVecElts, Q, Depth)); in isKnownNonZeroFromOperator()
2997 const Value *Vec = EEI->getVectorOperand(); in isKnownNonZeroFromOperator()
2998 const Value *Idx = EEI->getIndexOperand(); in isKnownNonZeroFromOperator()
3000 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { in isKnownNonZeroFromOperator()
3001 unsigned NumElts = VecTy->getNumElements(); in isKnownNonZeroFromOperator()
3003 if (CIdx && CIdx->getValue().ult(NumElts)) in isKnownNonZeroFromOperator()
3004 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); in isKnownNonZeroFromOperator()
3005 return isKnownNonZero(Vec, DemandedVecElts, Q, Depth); in isKnownNonZeroFromOperator()
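// Standalone sketch (a plain bitmask standing in for APInt) of the
// demanded-elements narrowing above: extracting lane CIdx only requires that
// single lane of the source vector to be known non-zero.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned NumElts = 4, CIdx = 2;
  uint32_t DemandedVecElts = CIdx < NumElts
                                 ? (1u << CIdx)           // just lane CIdx
                                 : ((1u << NumElts) - 1); // unknown index: all
  assert(DemandedVecElts == 0b0100u);
  return 0;
}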
3018 // If demanded elements for both vecs are non-zero, the shuffle is non-zero. in isKnownNonZeroFromOperator()
3020 isKnownNonZero(Shuf->getOperand(1), DemandedRHS, Q, Depth)) && in isKnownNonZeroFromOperator()
3022 isKnownNonZero(Shuf->getOperand(0), DemandedLHS, Q, Depth)); in isKnownNonZeroFromOperator()
3025 return isKnownNonZero(I->getOperand(0), Q, Depth) && in isKnownNonZeroFromOperator()
3026 isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, in isKnownNonZeroFromOperator()
3027 Depth); in isKnownNonZeroFromOperator()
3032 if (auto *PtrT = dyn_cast<PointerType>(I->getType())) { in isKnownNonZeroFromOperator()
3035 !NullPointerIsDefined(LI->getFunction(), PtrT->getAddressSpace()))) in isKnownNonZeroFromOperator()
3048 switch (WO->getBinaryOp()) { in isKnownNonZeroFromOperator()
3052 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, in isKnownNonZeroFromOperator()
3053 WO->getArgOperand(0), WO->getArgOperand(1), in isKnownNonZeroFromOperator()
3057 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, in isKnownNonZeroFromOperator()
3058 WO->getArgOperand(0), WO->getArgOperand(1)); in isKnownNonZeroFromOperator()
3060 return isNonZeroMul(DemandedElts, Depth, Q, BitWidth, in isKnownNonZeroFromOperator()
3061 WO->getArgOperand(0), WO->getArgOperand(1), in isKnownNonZeroFromOperator()
3071 if (I->getType()->isPointerTy()) { in isKnownNonZeroFromOperator()
3072 if (Call->isReturnNonNull()) in isKnownNonZeroFromOperator()
3075 return isKnownNonZero(RP, Q, Depth); in isKnownNonZeroFromOperator()
3079 if (std::optional<ConstantRange> Range = Call->getRange()) { in isKnownNonZeroFromOperator()
3080 const APInt ZeroValue(Range->getBitWidth(), 0); in isKnownNonZeroFromOperator()
3081 if (!Range->contains(ZeroValue)) in isKnownNonZeroFromOperator()
3084 if (const Value *RV = Call->getReturnedArgOperand()) in isKnownNonZeroFromOperator()
3085 if (RV->getType() == I->getType() && isKnownNonZero(RV, Q, Depth)) in isKnownNonZeroFromOperator()
3090 switch (II->getIntrinsicID()) { in isKnownNonZeroFromOperator()
3097 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
3099 // non-zero, we will fold it to `sub nuw` in InstCombine. in isKnownNonZeroFromOperator()
3101 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, in isKnownNonZeroFromOperator()
3102 II->getArgOperand(0), II->getArgOperand(1)); in isKnownNonZeroFromOperator()
3104 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, in isKnownNonZeroFromOperator()
3105 II->getArgOperand(0), II->getArgOperand(1), in isKnownNonZeroFromOperator()
3107 // Vec reverse preserves zero/non-zero status from input vec. in isKnownNonZeroFromOperator()
3109 return isKnownNonZero(II->getArgOperand(0), DemandedElts.reverseBits(), in isKnownNonZeroFromOperator()
3110 Q, Depth); in isKnownNonZeroFromOperator()
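// Sketch of the lane remapping behind DemandedElts.reverseBits() above
// (standalone C++, 4 lanes assumed): demanding lane i of the reversed vector
// demands lane N-1-i of the input, i.e. a bit-reversal of the mask.
#include <cassert>
#include <cstdint>

int main() {
  const unsigned N = 4;
  uint32_t Demanded = 0b0011; // lanes 0 and 1 of the reversed vector
  uint32_t Reversed = 0;
  for (unsigned I = 0; I != N; ++I)
    if (Demanded & (1u << I))
      Reversed |= 1u << (N - 1 - I);
  assert(Reversed == 0b1100u); // lanes 2 and 3 of the original vector
  return 0;
}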
3111   // umin/umax/smin/smax/or of all non-zero elements is always non-zero. in isKnownNonZeroFromOperator()
3117 return isKnownNonZero(II->getArgOperand(0), Q, Depth); in isKnownNonZeroFromOperator()
3122 if (matchOpWithOpEqZero(II->getArgOperand(0), II->getArgOperand(1))) in isKnownNonZeroFromOperator()
3125 return isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth) || in isKnownNonZeroFromOperator()
3126 isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
3128 // If either arg is strictly positive the result is non-zero. Otherwise in isKnownNonZeroFromOperator()
3129 // the result is non-zero if both ops are non-zero. in isKnownNonZeroFromOperator()
3134 isKnownNonZero(Op, DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
3137 // Avoid re-computing isKnownNonZero. in isKnownNonZeroFromOperator()
3140 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
3142 IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known)) in isKnownNonZeroFromOperator()
3145 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
3147 IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known)) in isKnownNonZeroFromOperator()
3149 return IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known) && in isKnownNonZeroFromOperator()
3150 IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known); in isKnownNonZeroFromOperator()
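// Worked signed-max instance (standalone, i8 values) of the rule above: one
// strictly positive operand forces a positive maximum; failing that, two
// non-zero operands are both negative, so the maximum stays non-zero.
#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  int8_t A = -3, B = 7;
  assert(std::max(A, B) == 7);  // strictly positive arg dominates
  int8_t C = -3, D = -1;
  assert(std::max(C, D) == -1); // neither positive, both non-zero: max != 0
  return 0;
}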
3153 // If either arg is negative the result is non-zero. Otherwise in isKnownNonZeroFromOperator()
3154 // the result is non-zero if both ops are non-zero. in isKnownNonZeroFromOperator()
3156 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
3160 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
3169 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth) && in isKnownNonZeroFromOperator()
3170 isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
3172 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) in isKnownNonZeroFromOperator()
3175 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) in isKnownNonZeroFromOperator()
3180 if (II->getArgOperand(0) == II->getArgOperand(1)) in isKnownNonZeroFromOperator()
3181 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
3186 return isKnownNonZero(I->getOperand(0), Q, Depth); in isKnownNonZeroFromOperator()
3198 computeKnownBits(I, DemandedElts, Known, Depth, Q); in isKnownNonZeroFromOperator()
3202 /// Return true if the given value is known to be non-zero when defined. For
3203 /// vectors, return true if every demanded element is known to be non-zero when
3205 /// specified, perform context-sensitive analysis and return true if the
3209 const SimplifyQuery &Q, unsigned Depth) { in isKnownNonZero() argument
3210 Type *Ty = V->getType(); in isKnownNonZero()
3213 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); in isKnownNonZero()
3217 FVTy->getNumElements() == DemandedElts.getBitWidth() && in isKnownNonZero()
3226 if (C->isNullValue()) in isKnownNonZero()
3229 // Must be non-zero due to null test above. in isKnownNonZero()
3233 // non-zero to determine that the whole vector is known non-zero. in isKnownNonZero()
3235 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { in isKnownNonZero()
3238 Constant *Elt = C->getAggregateElement(i); in isKnownNonZero()
3239 if (!Elt || Elt->isNullValue()) in isKnownNonZero()
3249 return isKnownNonZero(CPA->getPointer(), DemandedElts, Q, Depth); in isKnownNonZero()
3255 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && in isKnownNonZero()
3256 GV->getType()->getAddressSpace() == 0) in isKnownNonZero()
3266 if (std::optional<ConstantRange> Range = A->getRange()) { in isKnownNonZero()
3267 const APInt ZeroValue(Range->getBitWidth(), 0); in isKnownNonZero()
3268 if (!Range->contains(ZeroValue)) in isKnownNonZero()
3276 if (Depth++ >= MaxAnalysisRecursionDepth) in isKnownNonZero()
3282   // A byval or inalloca argument may not be null in a non-default address space. A in isKnownNonZero()
3285 if (((A->hasPassPointeeByValueCopyAttr() && in isKnownNonZero()
3286 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) || in isKnownNonZero()
3287 A->hasNonNullAttr())) in isKnownNonZero()
3293 if (isKnownNonZeroFromOperator(I, DemandedElts, Depth, Q)) in isKnownNonZero()
3304 unsigned Depth) { in isKnownNonZero() argument
3305 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); in isKnownNonZero()
3307 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in isKnownNonZero()
3308 return ::isKnownNonZero(V, DemandedElts, Q, Depth); in isKnownNonZero()
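// Caller-side sketch (hypothetical pass code, assuming the declarations in
// llvm/Analysis/ValueTracking.h and llvm/Analysis/SimplifyQuery.h) of the
// scalar wrapper above; the wrapper supplies the all-ones DemandedElts mask,
// so a caller only needs the value and a SimplifyQuery.
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Returns true when V (e.g. a divisor) is provably non-zero at CtxI.
static bool divisorIsKnownNonZero(const Value *V, const DataLayout &DL,
                                  const Instruction *CtxI) {
  SimplifyQuery Q(DL, CtxI);
  return isKnownNonZero(V, Q);
}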
3313 /// return std::nullopt. An invertible function is one that is 1-to-1 and maps
3320 if (Op1->getOpcode() != Op2->getOpcode()) in getInvertibleOperands()
3323 auto getOperands = [&](unsigned OpNum) -> auto { in getInvertibleOperands()
3324 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum)); in getInvertibleOperands()
3327 switch (Op1->getOpcode()) { in getInvertibleOperands()
3331 if (!cast<PossiblyDisjointInst>(Op1)->isDisjoint() || in getInvertibleOperands()
3332 !cast<PossiblyDisjointInst>(Op2)->isDisjoint()) in getInvertibleOperands()
3338 if (match(Op2, m_c_BinOp(m_Specific(Op1->getOperand(0)), m_Value(Other)))) in getInvertibleOperands()
3339 return std::make_pair(Op1->getOperand(1), Other); in getInvertibleOperands()
3340 if (match(Op2, m_c_BinOp(m_Specific(Op1->getOperand(1)), m_Value(Other)))) in getInvertibleOperands()
3341 return std::make_pair(Op1->getOperand(0), Other); in getInvertibleOperands()
3345 if (Op1->getOperand(0) == Op2->getOperand(0)) in getInvertibleOperands()
3347 if (Op1->getOperand(1) == Op2->getOperand(1)) in getInvertibleOperands()
3352     // and N is the bitwidth. The nsw case is non-obvious, but proven by in getInvertibleOperands()
3356 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && in getInvertibleOperands()
3357 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) in getInvertibleOperands()
3361 if (Op1->getOperand(1) == Op2->getOperand(1) && in getInvertibleOperands()
3362 isa<ConstantInt>(Op1->getOperand(1)) && in getInvertibleOperands()
3363 !cast<ConstantInt>(Op1->getOperand(1))->isZero()) in getInvertibleOperands()
3369 // for a non-zero multiply. Shifts always multiply by non-zero. in getInvertibleOperands()
3372 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && in getInvertibleOperands()
3373 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) in getInvertibleOperands()
3376 if (Op1->getOperand(1) == Op2->getOperand(1)) in getInvertibleOperands()
3384 if (!PEO1->isExact() || !PEO2->isExact()) in getInvertibleOperands()
3387 if (Op1->getOperand(1) == Op2->getOperand(1)) in getInvertibleOperands()
3393 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType()) in getInvertibleOperands()
3407 if (PN1->getParent() != PN2->getParent() || in getInvertibleOperands()
3418 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V in getInvertibleOperands()
3419 // * X_i = Y_i = X_(i-1) OP Y_(i-1) in getInvertibleOperands()
3422 if (Values->first != PN1 || Values->second != PN2) in getInvertibleOperands()
3431 /// Return true if V1 == (binop V2, X), where X is known non-zero.
3432 /// Only handle a small subset of binops where (binop V2, X) with non-zero X
3435 const APInt &DemandedElts, unsigned Depth, in isModifyingBinopOfNonZero() argument
3440 switch (BO->getOpcode()) { in isModifyingBinopOfNonZero()
3444 if (!cast<PossiblyDisjointInst>(V1)->isDisjoint()) in isModifyingBinopOfNonZero()
3450 if (V2 == BO->getOperand(0)) in isModifyingBinopOfNonZero()
3451 Op = BO->getOperand(1); in isModifyingBinopOfNonZero()
3452 else if (V2 == BO->getOperand(1)) in isModifyingBinopOfNonZero()
3453 Op = BO->getOperand(0); in isModifyingBinopOfNonZero()
3456 return isKnownNonZero(Op, DemandedElts, Q, Depth + 1); in isModifyingBinopOfNonZero()
3461 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
3464 const APInt &DemandedElts, unsigned Depth, in isNonEqualMul() argument
3469 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && in isNonEqualMul()
3470 !C->isZero() && !C->isOne() && in isNonEqualMul()
3471 isKnownNonZero(V1, DemandedElts, Q, Depth + 1); in isNonEqualMul()
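// Standalone arithmetic check of the rule above: if V1 != 0 and C is neither
// 0 nor 1, then V1 * C (with no wrap) cannot equal V1, since V1 * C == V1
// would force V1 * (C - 1) == 0 with both factors non-zero.
#include <cassert>
#include <cstdint>

int main() {
  int64_t V1 = 5, C = 3;     // wide enough that no wrap occurs
  int64_t V2 = V1 * C;
  assert(V2 != V1);          // 15 != 5
  assert(V1 * (C - 1) != 0); // the contradiction the proof relies on
  return 0;
}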
3476 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
3479 const APInt &DemandedElts, unsigned Depth, in isNonEqualShl() argument
3484 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && in isNonEqualShl()
3485 !C->isZero() && isKnownNonZero(V1, DemandedElts, Q, Depth + 1); in isNonEqualShl()
3491 const APInt &DemandedElts, unsigned Depth, in isNonEqualPHIs() argument
3494 if (PN1->getParent() != PN2->getParent()) in isNonEqualPHIs()
3499 for (const BasicBlock *IncomBB : PN1->blocks()) { in isNonEqualPHIs()
3502 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB); in isNonEqualPHIs()
3503 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB); in isNonEqualPHIs()
3513 RecQ.CxtI = IncomBB->getTerminator(); in isNonEqualPHIs()
3514 if (!isKnownNonEqual(IV1, IV2, DemandedElts, Depth + 1, RecQ)) in isNonEqualPHIs()
3522 const APInt &DemandedElts, unsigned Depth, in isNonEqualSelect() argument
3529 const Value *Cond1 = SI1->getCondition(); in isNonEqualSelect()
3530 const Value *Cond2 = SI2->getCondition(); in isNonEqualSelect()
3532 return isKnownNonEqual(SI1->getTrueValue(), SI2->getTrueValue(), in isNonEqualSelect()
3533 DemandedElts, Depth + 1, Q) && in isNonEqualSelect()
3534 isKnownNonEqual(SI1->getFalseValue(), SI2->getFalseValue(), in isNonEqualSelect()
3535 DemandedElts, Depth + 1, Q); in isNonEqualSelect()
3537 return isKnownNonEqual(SI1->getTrueValue(), V2, DemandedElts, Depth + 1, Q) && in isNonEqualSelect()
3538 isKnownNonEqual(SI1->getFalseValue(), V2, DemandedElts, Depth + 1, Q); in isNonEqualSelect()
3548 if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy()) in isNonEqualPointersWithRecursiveGEP()
3552 if (!GEPA || GEPA->getNumIndices() != 1 || !isa<Constant>(GEPA->idx_begin())) in isNonEqualPointersWithRecursiveGEP()
3556 auto *PN = dyn_cast<PHINode>(GEPA->getPointerOperand()); in isNonEqualPointersWithRecursiveGEP()
3557 if (!PN || PN->getNumIncomingValues() != 2) in isNonEqualPointersWithRecursiveGEP()
3564 if (PN->getIncomingValue(0) == Step) in isNonEqualPointersWithRecursiveGEP()
3565 Start = PN->getIncomingValue(1); in isNonEqualPointersWithRecursiveGEP()
3566 else if (PN->getIncomingValue(1) == Step) in isNonEqualPointersWithRecursiveGEP()
3567 Start = PN->getIncomingValue(0); in isNonEqualPointersWithRecursiveGEP()
3574   // They are non-equal if the above conditions hold. in isNonEqualPointersWithRecursiveGEP()
3577 unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(Start->getType()); in isNonEqualPointersWithRecursiveGEP()
3579 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StartOffset); in isNonEqualPointersWithRecursiveGEP()
3581 Step = Step->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StepOffset); in isNonEqualPointersWithRecursiveGEP()
3587 B = B->stripAndAccumulateInBoundsConstantOffsets(Q.DL, OffsetB); in isNonEqualPointersWithRecursiveGEP()
3595 const APInt &DemandedElts, unsigned Depth, in isKnownNonEqual() argument
3599 if (V1->getType() != V2->getType()) in isKnownNonEqual()
3603 if (Depth >= MaxAnalysisRecursionDepth) in isKnownNonEqual()
3607 // requires our operation be 1-to-1 and map every input value to exactly in isKnownNonEqual()
3611 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) { in isKnownNonEqual()
3613 return isKnownNonEqual(Values->first, Values->second, DemandedElts, in isKnownNonEqual()
3614 Depth + 1, Q); in isKnownNonEqual()
3620 if (isNonEqualPHIs(PN1, PN2, DemandedElts, Depth, Q)) in isKnownNonEqual()
3625 if (isModifyingBinopOfNonZero(V1, V2, DemandedElts, Depth, Q) || in isKnownNonEqual()
3626 isModifyingBinopOfNonZero(V2, V1, DemandedElts, Depth, Q)) in isKnownNonEqual()
3629 if (isNonEqualMul(V1, V2, DemandedElts, Depth, Q) || in isKnownNonEqual()
3630 isNonEqualMul(V2, V1, DemandedElts, Depth, Q)) in isKnownNonEqual()
3633 if (isNonEqualShl(V1, V2, DemandedElts, Depth, Q) || in isKnownNonEqual()
3634 isNonEqualShl(V2, V1, DemandedElts, Depth, Q)) in isKnownNonEqual()
3637 if (V1->getType()->isIntOrIntVectorTy()) { in isKnownNonEqual()
3640 KnownBits Known1 = computeKnownBits(V1, DemandedElts, Depth, Q); in isKnownNonEqual()
3642 KnownBits Known2 = computeKnownBits(V2, DemandedElts, Depth, Q); in isKnownNonEqual()
3649 if (isNonEqualSelect(V1, V2, DemandedElts, Depth, Q) || in isKnownNonEqual()
3650 isNonEqualSelect(V2, V1, DemandedElts, Depth, Q)) in isKnownNonEqual()
3662 return isKnownNonEqual(A, B, DemandedElts, Depth + 1, Q); in isKnownNonEqual()
3672 cast<Operator>(Select)->getOpcode() == Instruction::Select && in isSignedMinMaxClamp()
3695 return CLow->sle(*CHigh); in isSignedMinMaxClamp()
3701 assert((II->getIntrinsicID() == Intrinsic::smin || in isSignedMinMaxIntrinsicClamp()
3702 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax"); in isSignedMinMaxIntrinsicClamp()
3704 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID()); in isSignedMinMaxIntrinsicClamp()
3705 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0)); in isSignedMinMaxIntrinsicClamp()
3706 if (!InnerII || InnerII->getIntrinsicID() != InverseID || in isSignedMinMaxIntrinsicClamp()
3707 !match(II->getArgOperand(1), m_APInt(CLow)) || in isSignedMinMaxIntrinsicClamp()
3708 !match(InnerII->getArgOperand(1), m_APInt(CHigh))) in isSignedMinMaxIntrinsicClamp()
3711 if (II->getIntrinsicID() == Intrinsic::smin) in isSignedMinMaxIntrinsicClamp()
3713 return CLow->sle(*CHigh); in isSignedMinMaxIntrinsicClamp()
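// Standalone model of the clamp pattern recognized above:
// smax(smin(X, CHigh), CLow) with CLow <= CHigh pins X into [CLow, CHigh], so
// the result keeps at least min(signbits(CLow), signbits(CHigh)) sign bits --
// 25 for the i32 range [-128, 127] used here.
#include <algorithm>
#include <cassert>
#include <cstdint>

static int32_t clampSigned(int32_t X, int32_t CLow, int32_t CHigh) {
  return std::max(std::min(X, CHigh), CLow); // smax(smin(X, CHigh), CLow)
}

int main() {
  assert(clampSigned(1000, -128, 127) == 127);
  assert(clampSigned(-1000, -128, 127) == -128);
  assert(clampSigned(42, -128, 127) == 42);
  return 0;
}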
3724 if (!CV || !isa<FixedVectorType>(CV->getType())) in computeNumSignBitsVectorConstant()
3728 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements(); in computeNumSignBitsVectorConstant()
3732 // If we find a non-ConstantInt, bail out. in computeNumSignBitsVectorConstant()
3733 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); in computeNumSignBitsVectorConstant()
3737 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); in computeNumSignBitsVectorConstant()
3745 unsigned Depth, const SimplifyQuery &Q);
3748 unsigned Depth, const SimplifyQuery &Q) { in ComputeNumSignBits() argument
3749 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q); in ComputeNumSignBits()
3763 unsigned Depth, const SimplifyQuery &Q) { in ComputeNumSignBitsImpl() argument
3764 Type *Ty = V->getType(); in ComputeNumSignBitsImpl()
3766 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); in ComputeNumSignBitsImpl()
3770 FVTy->getNumElements() == DemandedElts.getBitWidth() && in ComputeNumSignBitsImpl()
3780 // same behavior for poison though -- that's a FIXME today. in ComputeNumSignBitsImpl()
3782 Type *ScalarTy = Ty->getScalarType(); in ComputeNumSignBitsImpl()
3783 unsigned TyBits = ScalarTy->isPointerTy() ? in ComputeNumSignBitsImpl()
3793 if (Depth == MaxAnalysisRecursionDepth) in ComputeNumSignBitsImpl()
3800 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); in ComputeNumSignBitsImpl()
3801 return ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q) + in ComputeNumSignBitsImpl()
3806 // sdiv X, C -> adds log(C) sign bits. in ComputeNumSignBitsImpl()
3807 if (match(U->getOperand(1), m_APInt(Denominator))) { in ComputeNumSignBitsImpl()
3809 // Ignore non-positive denominator. in ComputeNumSignBitsImpl()
3810 if (!Denominator->isStrictlyPositive()) in ComputeNumSignBitsImpl()
3815 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3818 return std::min(TyBits, NumBits + Denominator->logBase2()); in ComputeNumSignBitsImpl()
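// Worked example (standalone) of the sdiv rule above: dividing by a positive
// constant C adds log2(C) sign bits. With C = 16, even the worst i32 input
// (a single known sign bit) yields a quotient with 1 + 4 = 5.
#include <cassert>
#include <cstdint>

int main() {
  int32_t X = INT32_MIN; // worst case: one sign bit
  int32_t Q = X / 16;    // exactly -(2^27)
  assert(Q == -(1 << 27));
  // -(2^27) is 0xF8000000: bits 31..27 repeat the sign, i.e. 5 sign bits.
  return 0;
}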
3824 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3827 // srem X, C -> we know that the result is within [-C+1,C) when C is a in ComputeNumSignBitsImpl()
3830 if (match(U->getOperand(1), m_APInt(Denominator))) { in ComputeNumSignBitsImpl()
3832 // Ignore non-positive denominator. in ComputeNumSignBitsImpl()
3833 if (Denominator->isStrictlyPositive()) { in ComputeNumSignBitsImpl()
3841 // 2. The numerator is negative. Then the result range is (-C,0] and in ComputeNumSignBitsImpl()
3842 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). in ComputeNumSignBitsImpl()
3844 // Thus a lower bound on the number of sign bits is `TyBits - in ComputeNumSignBitsImpl()
3847 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); in ComputeNumSignBitsImpl()
3855 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3856 // ashr X, C -> adds C sign bits. Vectors too. in ComputeNumSignBitsImpl()
3858 if (match(U->getOperand(1), m_APInt(ShAmt))) { in ComputeNumSignBitsImpl()
3859 if (ShAmt->uge(TyBits)) in ComputeNumSignBitsImpl()
3861 unsigned ShAmtLimited = ShAmt->getZExtValue(); in ComputeNumSignBitsImpl()
3870 if (match(U->getOperand(1), m_APInt(ShAmt))) { in ComputeNumSignBitsImpl()
3872 if (ShAmt->uge(TyBits)) in ComputeNumSignBitsImpl()
3876 if (match(U->getOperand(0), m_ZExt(m_Value(X))) && in ComputeNumSignBitsImpl()
3877 ShAmt->uge(TyBits - X->getType()->getScalarSizeInBits())) { in ComputeNumSignBitsImpl()
3878 Tmp = ComputeNumSignBits(X, DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3879 Tmp += TyBits - X->getType()->getScalarSizeInBits(); in ComputeNumSignBitsImpl()
3882 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3883 if (ShAmt->uge(Tmp)) in ComputeNumSignBitsImpl()
3885 Tmp2 = ShAmt->getZExtValue(); in ComputeNumSignBitsImpl()
3886 return Tmp - Tmp2; in ComputeNumSignBitsImpl()
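// Standalone check of the shl accounting above: shifting left by C destroys
// C sign bits (here 31 known sign bits drop to 27 after a shift by 4).
#include <cassert>
#include <cstdint>

static unsigned numSignBits(int32_t V) {
  const uint32_t U = static_cast<uint32_t>(V);
  const uint32_t Sign = U >> 31;
  unsigned N = 0;
  for (int Bit = 31; Bit >= 0 && ((U >> Bit) & 1) == Sign; --Bit)
    ++N;
  return N;
}

int main() {
  assert(numSignBits(0x00000001) == 31);
  assert(numSignBits(0x00000001 << 4) == 27); // 31 - 4
  return 0;
}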
3894 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3896 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3910 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); in ComputeNumSignBitsImpl()
3912 Tmp = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3915 Tmp2 = ComputeNumSignBits(U->getOperand(2), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3922 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); in ComputeNumSignBitsImpl()
3925 // Special case decrementing a value (ADD X, -1): in ComputeNumSignBitsImpl()
3926 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) in ComputeNumSignBitsImpl()
3927 if (CRHS->isAllOnesValue()) { in ComputeNumSignBitsImpl()
3929 computeKnownBits(U->getOperand(0), DemandedElts, Known, Depth + 1, Q); in ComputeNumSignBitsImpl()
3931 // If the input is known to be 0 or 1, the output is 0/-1, which is in ComputeNumSignBitsImpl()
3942 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3945 return std::min(Tmp, Tmp2) - 1; in ComputeNumSignBitsImpl()
3948 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3953 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) in ComputeNumSignBitsImpl()
3954 if (CLHS->isNullValue()) { in ComputeNumSignBitsImpl()
3956 computeKnownBits(U->getOperand(1), DemandedElts, Known, Depth + 1, Q); in ComputeNumSignBitsImpl()
3957 // If the input is known to be 0 or 1, the output is 0/-1, which is in ComputeNumSignBitsImpl()
3973 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3976 return std::min(Tmp, Tmp2) - 1; in ComputeNumSignBitsImpl()
3982 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3986 ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3990 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); in ComputeNumSignBitsImpl()
3991 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; in ComputeNumSignBitsImpl()
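// Worked instance (standalone) of the multiply formula above: in i32, two
// operands with 20 known sign bits each occupy 32-20+1 = 13 "valid" bits, the
// product needs 13+13 = 26, leaving 32-26+1 = 7 known sign bits.
#include <cassert>

int main() {
  const unsigned TyBits = 32, SignBitsOp0 = 20, SignBitsOp1 = 20;
  unsigned OutValidBits =
      (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
  unsigned Result = OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
  assert(Result == 7);
  // Sanity: such operands lie in [-2^12, 2^12), so |product| <= 2^24, and
  // 2^24 as an i32 still has 7 leading sign bits.
  return 0;
}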
3996 unsigned NumIncomingValues = PN->getNumIncomingValues(); in ComputeNumSignBitsImpl()
3997 // Don't analyze large in-degree PHIs. in ComputeNumSignBitsImpl()
3999 // Unreachable blocks may have zero-operand PHI nodes. in ComputeNumSignBitsImpl()
4003 // because of our depth threshold. in ComputeNumSignBitsImpl()
4008 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); in ComputeNumSignBitsImpl()
4009 Tmp = std::min(Tmp, ComputeNumSignBits(PN->getIncomingValue(i), in ComputeNumSignBitsImpl()
4010 DemandedElts, Depth + 1, RecQ)); in ComputeNumSignBitsImpl()
4019 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); in ComputeNumSignBitsImpl()
4020 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits(); in ComputeNumSignBitsImpl()
4021 if (Tmp > (OperandTyBits - TyBits)) in ComputeNumSignBitsImpl()
4022 return Tmp - (OperandTyBits - TyBits); in ComputeNumSignBitsImpl()
4032 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); in ComputeNumSignBitsImpl()
4049 const Value *LHS = Shuf->getOperand(0); in ComputeNumSignBitsImpl()
4050 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q); in ComputeNumSignBitsImpl()
4053 // fall-back. in ComputeNumSignBitsImpl()
4057 const Value *RHS = Shuf->getOperand(1); in ComputeNumSignBitsImpl()
4058 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q); in ComputeNumSignBitsImpl()
4062 // fall-back. in ComputeNumSignBitsImpl()
4070 switch (II->getIntrinsicID()) { in ComputeNumSignBitsImpl()
4075 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
4080 return Tmp - 1; in ComputeNumSignBitsImpl()
4085 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); in ComputeNumSignBitsImpl()
4103 computeKnownBits(V, DemandedElts, Known, Depth, Q); in ComputeNumSignBitsImpl()
4116 if (F->isIntrinsic()) in getIntrinsicForCallSite()
4117 return F->getIntrinsicID(); in getIntrinsicForCallSite()
4123 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) || in getIntrinsicForCallSite()
4222 Ty = Ty->getScalarType(); in inputDenormalIsIEEE()
4223 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE; in inputDenormalIsIEEE()
4227 Ty = Ty->getScalarType(); in inputDenormalIsIEEEOrPosZero()
4228 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics()); in inputDenormalIsIEEEOrPosZero()
4234 Ty = Ty->getScalarType(); in outputDenormalIsIEEEOrPosZero()
4235 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics()); in outputDenormalIsIEEEOrPosZero()
4260 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics()); in isKnownNeverLogicalPosZero()
4288 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics()); in propagateDenormal()
4320 case ICmpInst::ICMP_SLE: // True if LHS s<= -1 in isSignBitCheck()
4323 case ICmpInst::ICMP_SGT: // True if LHS s> -1 in isSignBitCheck()
4330 // True if LHS u> RHS and RHS == sign-bit-mask - 1 in isSignBitCheck()
4334 // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc) in isSignBitCheck()
4338 // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc) in isSignBitCheck()
4342 // True if LHS u<= RHS and RHS == sign-bit-mask - 1 in isSignBitCheck()
4400 // fcmp o__ x, nan -> false in fcmpImpliesClass()
4401 // fcmp u__ x, nan -> true in fcmpImpliesClass()
4405 // fcmp ord x, zero|normal|subnormal|inf -> ~fcNan in fcmpImpliesClass()
4409 // fcmp uno x, zero|normal|subnormal|inf -> fcNan in fcmpImpliesClass()
4423 if (!inputDenormalIsIEEE(F, LHS->getType())) in fcmpImpliesClass()
4437 // non-canonical other non-NaN constants or LHS == RHS. in fcmpImpliesClass()
4475 // fcmp oeq x, +inf -> is_fpclass x, fcPosInf in fcmpImpliesClass()
4476 // fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf in fcmpImpliesClass()
4477 // fcmp oeq x, -inf -> is_fpclass x, fcNegInf in fcmpImpliesClass()
4478 // fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false in fcmpImpliesClass()
4480 // fcmp une x, +inf -> is_fpclass x, ~fcPosInf in fcmpImpliesClass()
4481 // fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf in fcmpImpliesClass()
4482 // fcmp une x, -inf -> is_fpclass x, ~fcNegInf in fcmpImpliesClass()
4483 // fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true in fcmpImpliesClass()
4498 // fcmp one x, -inf -> is_fpclass x, fcNegInf in fcmpImpliesClass()
4499 // fcmp one fabs(x), -inf -> is_fpclass x, ~fcNegInf & ~fcNan in fcmpImpliesClass()
4500 // fcmp one x, +inf -> is_fpclass x, ~fcNegInf & ~fcNan in fcmpImpliesClass()
4501 // fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & fcNan in fcmpImpliesClass()
4503 // fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan in fcmpImpliesClass()
4504 // fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan in fcmpImpliesClass()
4505 // fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan in fcmpImpliesClass()
4506 // fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan in fcmpImpliesClass()
4524 // fcmp olt x, -inf -> false in fcmpImpliesClass()
4525 // fcmp uge x, -inf -> true in fcmpImpliesClass()
4530 // fcmp olt fabs(x), +inf -> fcFinite in fcmpImpliesClass()
4531 // fcmp uge fabs(x), +inf -> ~fcFinite in fcmpImpliesClass()
4532 // fcmp olt x, +inf -> fcFinite|fcNegInf in fcmpImpliesClass()
4533 // fcmp uge x, +inf -> ~(fcFinite|fcNegInf) in fcmpImpliesClass()
4542 // fcmp oge x, -inf -> ~fcNan in fcmpImpliesClass()
4543 // fcmp oge fabs(x), -inf -> ~fcNan in fcmpImpliesClass()
4544 // fcmp ult x, -inf -> fcNan in fcmpImpliesClass()
4545 // fcmp ult fabs(x), -inf -> fcNan in fcmpImpliesClass()
4550 // fcmp oge fabs(x), +inf -> fcInf in fcmpImpliesClass()
4551 // fcmp oge x, +inf -> fcPosInf in fcmpImpliesClass()
4552 // fcmp ult fabs(x), +inf -> ~fcInf in fcmpImpliesClass()
4553 // fcmp ult x, +inf -> ~fcPosInf in fcmpImpliesClass()
4562 // fcmp ogt x, -inf -> fcmp one x, -inf in fcmpImpliesClass()
4563 // fcmp ogt fabs(x), -inf -> fcmp ord x, x in fcmpImpliesClass()
4564 // fcmp ule x, -inf -> fcmp ueq x, -inf in fcmpImpliesClass()
4565 // fcmp ule fabs(x), -inf -> fcmp uno x, x in fcmpImpliesClass()
4581 // fcmp ole x, +inf -> fcmp ord x, x in fcmpImpliesClass()
4582 // fcmp ole fabs(x), +inf -> fcmp ord x, x in fcmpImpliesClass()
4583 // fcmp ole x, -inf -> fcmp oeq x, -inf in fcmpImpliesClass()
4584 // fcmp ole fabs(x), -inf -> false in fcmpImpliesClass()
4622 // fabs(x) o> -k -> fcmp ord x, x in fcmpImpliesClass()
4623 // fabs(x) u> -k -> true in fcmpImpliesClass()
4624 // fabs(x) o< -k -> false in fcmpImpliesClass()
4625 // fabs(x) u< -k -> fcmp uno x, x in fcmpImpliesClass()
4718 // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero in fcmpImpliesClass()
4719 // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero in fcmpImpliesClass()
4720 // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf in fcmpImpliesClass()
4721 // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero) in fcmpImpliesClass()
4730 // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf in fcmpImpliesClass()
4731 // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal in fcmpImpliesClass()
4732 // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf) in fcmpImpliesClass()
4733 // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal) in fcmpImpliesClass()
4761 // TODO: Just call computeKnownFPClass for RHS to handle non-constants. in fcmpImpliesClass()
4776 Pred, *CxtI->getParent()->getParent(), LHS, *CRHS, LHS != V); in computeKnownFPClassFromCond()
4804 for (BranchInst *BI : Q.DC->conditionsFor(V)) { in computeKnownFPClassFromContext()
4805 Value *Cond = BI->getCondition(); in computeKnownFPClassFromContext()
4807 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0)); in computeKnownFPClassFromContext()
4808 if (Q.DT->dominates(Edge0, Q.CxtI->getParent())) in computeKnownFPClassFromContext()
4812 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1)); in computeKnownFPClassFromContext()
4813 if (Q.DT->dominates(Edge1, Q.CxtI->getParent())) in computeKnownFPClassFromContext()
4822 // Try to restrict the floating-point classes based on information from in computeKnownFPClassFromContext()
4824 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) { in computeKnownFPClassFromContext()
4829 assert(I->getFunction() == Q.CxtI->getParent()->getParent() && in computeKnownFPClassFromContext()
4831 assert(I->getIntrinsicID() == Intrinsic::assume && in computeKnownFPClassFromContext()
4837 computeKnownFPClassFromCond(V, I->getArgOperand(0), /*CondIsTrue=*/true, in computeKnownFPClassFromContext()
4846 unsigned Depth, const SimplifyQuery &Q);
4849 FPClassTest InterestedClasses, unsigned Depth, in computeKnownFPClass() argument
4851 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); in computeKnownFPClass()
4853 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in computeKnownFPClass()
4854 computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q); in computeKnownFPClass()
4860 KnownFPClass &Known, unsigned Depth, in computeKnownFPClassForFPTrunc() argument
4867 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClassForFPTrunc()
4868 KnownSrc, Depth + 1, Q); in computeKnownFPClassForFPTrunc()
4882 unsigned Depth, const SimplifyQuery &Q) { in computeKnownFPClass() argument
4891 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); in computeKnownFPClass()
4894 Known.KnownFPClasses = CFP->getValueAPF().classify(); in computeKnownFPClass()
4895 Known.SignBit = CFP->isNegative(); in computeKnownFPClass()
4912 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); in computeKnownFPClass()
4920 unsigned NumElts = VFVTy->getNumElements(); in computeKnownFPClass()
4925 Constant *Elt = CV->getAggregateElement(i); in computeKnownFPClass()
4938 const APFloat &C = CElt->getValueAPF(); in computeKnownFPClass()
4952 KnownNotFromFlags |= CB->getRetNoFPClass(); in computeKnownFPClass()
4954 KnownNotFromFlags |= Arg->getNoFPClass(); in computeKnownFPClass()
4958 if (FPOp->hasNoNaNs()) in computeKnownFPClass()
4960 if (FPOp->hasNoInfs()) in computeKnownFPClass()
4984 // All recursive calls that increase depth must come after this. in computeKnownFPClass()
4985 if (Depth == MaxAnalysisRecursionDepth) in computeKnownFPClass()
4988 const unsigned Opc = Op->getOpcode(); in computeKnownFPClass()
4991 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
4992 Known, Depth + 1, Q); in computeKnownFPClass()
4997 Value *Cond = Op->getOperand(0); in computeKnownFPClass()
4998 Value *LHS = Op->getOperand(1); in computeKnownFPClass()
4999 Value *RHS = Op->getOperand(2); in computeKnownFPClass()
5008 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5038 Depth + 1, Q); in computeKnownFPClass()
5042 Known2, Depth + 1, Q); in computeKnownFPClass()
5050 const Intrinsic::ID IID = II->getIntrinsicID(); in computeKnownFPClass()
5056 computeKnownFPClass(II->getArgOperand(0), DemandedElts, in computeKnownFPClass()
5057 InterestedClasses, Known, Depth + 1, Q); in computeKnownFPClass()
5066 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5067 Known, Depth + 1, Q); in computeKnownFPClass()
5068 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses, in computeKnownFPClass()
5069 KnownSign, Depth + 1, Q); in computeKnownFPClass()
5078 if (II->getArgOperand(0) != II->getArgOperand(1)) in computeKnownFPClass()
5081 // The multiply cannot be -0 and therefore the add can't be -0 in computeKnownFPClass()
5084 // x * x + y is non-negative if y is non-negative. in computeKnownFPClass()
5086 computeKnownFPClass(II->getArgOperand(2), DemandedElts, InterestedClasses, in computeKnownFPClass()
5087 KnownAddend, Depth + 1, Q); in computeKnownFPClass()
5100 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5101 KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5108 // Any negative value besides -0 returns a nan. in computeKnownFPClass()
5112 // The only negative value that can be returned is -0 for -0 inputs. in computeKnownFPClass()
5117 const Function *F = II->getFunction(); in computeKnownFPClass()
5119 (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType()))) in computeKnownFPClass()
5128 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5129 KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5140 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5141 KnownLHS, Depth + 1, Q); in computeKnownFPClass()
5142 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses, in computeKnownFPClass()
5143 KnownRHS, Depth + 1, Q); in computeKnownFPClass()
5191 const Function *Parent = II->getFunction(); in computeKnownFPClass()
5195 DenormalMode Mode = Parent->getDenormalMode( in computeKnownFPClass()
5196 II->getType()->getScalarType()->getFltSemantics()); in computeKnownFPClass()
5225 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5226 KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5233 // denormal mode to preserve known-not-0 knowledge. in computeKnownFPClass()
5243 const Function *F = II->getFunction(); in computeKnownFPClass()
5250 II->getType()->getScalarType()->getFltSemantics(); in computeKnownFPClass()
5251 DenormalMode DenormMode = F->getDenormalMode(FPType); in computeKnownFPClass()
5276 Known = computeKnownFPClass(II->getArgOperand(0), II->getFastMathFlags(), in computeKnownFPClass()
5277 InterestedClasses, Depth + 1, Q); in computeKnownFPClass()
5286 II->getArgOperand(0), DemandedElts.reverseBits(), in computeKnownFPClass()
5287 II->getFastMathFlags(), InterestedClasses, Depth + 1, Q); in computeKnownFPClass()
5302 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5303 KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5312 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) { in computeKnownFPClass()
5319       // Negative values that round up to 0 produce -0 in computeKnownFPClass()
5335 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5336 KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5346 Depth, Q); in computeKnownFPClass()
5355 // log(+inf) -> +inf in computeKnownFPClass()
5356 // log([+-]0.0) -> -inf in computeKnownFPClass()
5357 // log(-inf) -> nan in computeKnownFPClass()
5358 // log(-x) -> nan in computeKnownFPClass()
5369 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5370 KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5378 const Function *F = II->getFunction(); in computeKnownFPClass()
5379 if (F && KnownSrc.isKnownNeverLogicalZero(*F, II->getType())) in computeKnownFPClass()
5388 const Value *Exp = II->getArgOperand(1); in computeKnownFPClass()
5389 Type *ExpTy = Exp->getType(); in computeKnownFPClass()
5390 unsigned BitWidth = ExpTy->getScalarType()->getIntegerBitWidth(); in computeKnownFPClass()
5393 ExponentKnownBits, Depth + 1, Q); in computeKnownFPClass()
5403 // pow(-x, exp) --> negative if exp is odd and x is negative. in computeKnownFPClass()
5404 // pow(-0, exp) --> -inf if exp is negative odd. in computeKnownFPClass()
5405 // pow(-0, exp) --> -0 if exp is positive odd. in computeKnownFPClass()
5406 // pow(-inf, exp) --> -0 if exp is negative odd. in computeKnownFPClass()
5407 // pow(-inf, exp) --> -inf if exp is positive odd. in computeKnownFPClass()
5409 computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative, in computeKnownFPClass()
5410 KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5417 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5418 KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5440 II->getType()->getScalarType()->getFltSemantics(); in computeKnownFPClass()
5442 const Value *ExpArg = II->getArgOperand(1); in computeKnownFPClass()
5444 ExpArg, true, Q.IIQ.UseInstrInfo, Q.AC, Q.CxtI, Q.DT, Depth + 1); in computeKnownFPClass()
5446 const int MantissaBits = Precision - 1; in computeKnownFPClass()
5450 const Function *F = II->getFunction(); in computeKnownFPClass()
5452 if (ConstVal && ConstVal->isZero()) { in computeKnownFPClass()
5453 // ldexp(x, 0) -> x, so propagate everything. in computeKnownFPClass()
5454 Known.propagateCanonicalizingSrc(KnownSrc, *F, II->getType()); in computeKnownFPClass()
5467 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, II->getType())) in computeKnownFPClass()
5469 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType())) in computeKnownFPClass()
5476 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5477 Known, Depth + 1, Q); in computeKnownFPClass()
5506 Op->getOpcode() == Instruction::FAdd && in computeKnownFPClass()
5519 computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5520 KnownRHS, Depth + 1, Q); in computeKnownFPClass()
5528 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5529 KnownLHS, Depth + 1, Q); in computeKnownFPClass()
5537 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5539 if (Op->getOpcode() == Instruction::FAdd) { in computeKnownFPClass()
5546 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. in computeKnownFPClass()
5547 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) || in computeKnownFPClass()
5548 KnownRHS.isKnownNeverLogicalNegZero(*F, Op->getType())) && in computeKnownFPClass()
5549 // Make sure output negative denormal can't flush to -0 in computeKnownFPClass()
5550 outputDenormalIsIEEEOrPosZero(*F, Op->getType())) in computeKnownFPClass()
5556 // Only fsub -0, +0 can return -0 in computeKnownFPClass()
5557 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) || in computeKnownFPClass()
5558 KnownRHS.isKnownNeverLogicalPosZero(*F, Op->getType())) && in computeKnownFPClass()
5559 // Make sure output negative denormal can't flush to -0 in computeKnownFPClass()
5560 outputDenormalIsIEEEOrPosZero(*F, Op->getType())) in computeKnownFPClass()
5568 // X * X is always non-negative or a NaN. in computeKnownFPClass()
5569 if (Op->getOperand(0) == Op->getOperand(1)) in computeKnownFPClass()
5579 computeKnownFPClass(Op->getOperand(1), DemandedElts, NeedForNan, KnownRHS, in computeKnownFPClass()
5580 Depth + 1, Q); in computeKnownFPClass()
5584 computeKnownFPClass(Op->getOperand(0), DemandedElts, NeedForNan, KnownLHS, in computeKnownFPClass()
5585 Depth + 1, Q); in computeKnownFPClass()
5596     // 0 * +/-inf produces NaN. in computeKnownFPClass()
5602 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5607 KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) && in computeKnownFPClass()
5609 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType()))) in computeKnownFPClass()
5616 if (Op->getOperand(0) == Op->getOperand(1)) { in computeKnownFPClass()
5618 if (Op->getOpcode() == Instruction::FDiv) { in computeKnownFPClass()
5622 // X % X is always exactly [+-]0.0 or a NaN. in computeKnownFPClass()
5638 computeKnownFPClass(Op->getOperand(1), DemandedElts, in computeKnownFPClass()
5640 Depth + 1, Q); in computeKnownFPClass()
5650 computeKnownFPClass(Op->getOperand(0), DemandedElts, in computeKnownFPClass()
5652 Depth + 1, Q); in computeKnownFPClass()
5655 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5657 if (Op->getOpcode() == Instruction::FDiv) { in computeKnownFPClass()
5662 ((F && KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) || in computeKnownFPClass()
5663 (F && KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))) { in computeKnownFPClass()
5667 // X / -0.0 is -Inf (or NaN). in computeKnownFPClass()
5675 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) { in computeKnownFPClass()
5696 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5697 Known, Depth + 1, Q); in computeKnownFPClass()
5700 Op->getType()->getScalarType()->getFltSemantics(); in computeKnownFPClass()
5702 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics(); in computeKnownFPClass()
5720 Depth, Q); in computeKnownFPClass()
5733 if (Op->getOpcode() == Instruction::UIToFP) in computeKnownFPClass()
5740 int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits(); in computeKnownFPClass()
5741 if (Op->getOpcode() == Instruction::SIToFP) in computeKnownFPClass()
5742 --IntSize; in computeKnownFPClass()
5746 Type *FPTy = Op->getType()->getScalarType(); in computeKnownFPClass()
5747 if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize) in computeKnownFPClass()
5754 // Look through extract element. If the index is non-constant or in computeKnownFPClass()
5755 // out-of-range demand all elements, otherwise just the extracted element. in computeKnownFPClass()
5756 const Value *Vec = Op->getOperand(0); in computeKnownFPClass()
5757 const Value *Idx = Op->getOperand(1); in computeKnownFPClass()
5760 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { in computeKnownFPClass()
5761 unsigned NumElts = VecTy->getNumElements(); in computeKnownFPClass()
5763 if (CIdx && CIdx->getValue().ult(NumElts)) in computeKnownFPClass()
5764 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); in computeKnownFPClass()
5766 Depth + 1, Q); in computeKnownFPClass()
5772 if (isa<ScalableVectorType>(Op->getType())) in computeKnownFPClass()
5775 const Value *Vec = Op->getOperand(0); in computeKnownFPClass()
5776 const Value *Elt = Op->getOperand(1); in computeKnownFPClass()
5777 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2)); in computeKnownFPClass()
5782 if (CIdx && CIdx->getValue().ult(NumElts)) { in computeKnownFPClass()
5783 DemandedVecElts.clearBit(CIdx->getZExtValue()); in computeKnownFPClass()
5784 NeedsElt = DemandedElts[CIdx->getZExtValue()]; in computeKnownFPClass()
5789 computeKnownFPClass(Elt, Known, InterestedClasses, Depth + 1, Q); in computeKnownFPClass()
5801 Depth + 1, Q); in computeKnownFPClass()
5816 const Value *LHS = Shuf->getOperand(0); in computeKnownFPClass()
5818 Depth + 1, Q); in computeKnownFPClass()
5829 const Value *RHS = Shuf->getOperand(1); in computeKnownFPClass()
5831 Depth + 1, Q); in computeKnownFPClass()
5839 ArrayRef<unsigned> Indices = Extract->getIndices(); in computeKnownFPClass()
5840 const Value *Src = Extract->getAggregateOperand(); in computeKnownFPClass()
5841 if (isa<StructType>(Src->getType()) && Indices.size() == 1 && in computeKnownFPClass()
5844 switch (II->getIntrinsicID()) { in computeKnownFPClass()
5849 computeKnownFPClass(II->getArgOperand(0), DemandedElts, in computeKnownFPClass()
5850 InterestedClasses, KnownSrc, Depth + 1, Q); in computeKnownFPClass()
5852 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5857 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, Op->getType())) in computeKnownFPClass()
5866 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, Op->getType())) in computeKnownFPClass()
5881 computeKnownFPClass(Src, DemandedElts, InterestedClasses, Known, Depth + 1, in computeKnownFPClass()
5887 // Unreachable blocks may have zero-operand PHI nodes. in computeKnownFPClass()
5888 if (P->getNumIncomingValues() == 0) in computeKnownFPClass()
5893 const unsigned PhiRecursionLimit = MaxAnalysisRecursionDepth - 2; in computeKnownFPClass()
5895 if (Depth < PhiRecursionLimit) { in computeKnownFPClass()
5897 if (isa_and_nonnull<UndefValue>(P->hasConstantValue())) in computeKnownFPClass()
5902 for (const Use &U : P->operands()) { in computeKnownFPClass()
5910 // to waste time spinning around in loops. We need at least depth 2 to in computeKnownFPClass()
5915 P->getIncomingBlock(U)->getTerminator())); in computeKnownFPClass()
5939 unsigned Depth, in computeKnownFPClass() argument
5942 ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth, in computeKnownFPClass()
5949 unsigned Depth, in computeKnownFPClass() argument
5952 ::computeKnownFPClass(V, Known, InterestedClasses, Depth, SQ); in computeKnownFPClass()
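// Caller-side sketch (hypothetical, assuming the KnownFPClass-returning
// overload declared in llvm/Analysis/ValueTracking.h): passing only the
// classes of interest lets the analysis skip irrelevant sub-queries.
#include "llvm/Analysis/ValueTracking.h"

using namespace llvm;

// Returns true when V is provably never a NaN at the query point.
static bool neverNaN(const Value *V, const SimplifyQuery &SQ) {
  KnownFPClass Known =
      computeKnownFPClass(V, /*InterestedClasses=*/fcNan, /*Depth=*/0, SQ);
  return Known.isKnownNeverNaN();
}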
5958 // All byte-wide stores are splatable, even of arbitrary variables. in isBytewiseValue()
5959 if (V->getType()->isIntegerTy(8)) in isBytewiseValue()
5962 LLVMContext &Ctx = V->getContext(); in isBytewiseValue()
5969 // Return Undef for zero-sized type. in isBytewiseValue()
5970 if (DL.getTypeStoreSize(V->getType()).isZero()) in isBytewiseValue()
5985 if (C->isNullValue()) in isBytewiseValue()
5988 // Constant floating-point values can be handled as integer values if the in isBytewiseValue()
5992 if (CFP->getType()->isHalfTy()) in isBytewiseValue()
5994 else if (CFP->getType()->isFloatTy()) in isBytewiseValue()
5996 else if (CFP->getType()->isDoubleTy()) in isBytewiseValue()
6005 if (CI->getBitWidth() % 8 == 0) { in isBytewiseValue()
6006 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); in isBytewiseValue()
6007 if (!CI->getValue().isSplat(8)) in isBytewiseValue()
6009 return ConstantInt::get(Ctx, CI->getValue().trunc(8)); in isBytewiseValue()
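// Standalone model of the splat test above: a wide constant is "bytewise"
// exactly when every 8-bit slice is identical, and then its low byte is the
// memset-able representative.
#include <cassert>
#include <cstdint>

static bool isSplat8(uint32_t V) {
  const uint8_t B = V & 0xFF;
  return V == 0x01010101u * B; // replicate the low byte across all 4 bytes
}

int main() {
  assert(isSplat8(0xABABABABu));  // representable as the single byte 0xAB
  assert(!isSplat8(0xABABAB00u)); // slices differ: not a bytewise value
  return 0;
}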
6014 if (CE->getOpcode() == Instruction::IntToPtr) { in isBytewiseValue()
6015 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) { in isBytewiseValue()
6016 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace()); in isBytewiseValue()
6018 CE->getOperand(0), Type::getIntNTy(Ctx, BitWidth), false, DL)) in isBytewiseValue()
6024 auto Merge = [&](Value *LHS, Value *RHS) -> Value * { in isBytewiseValue()
6038 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I) in isBytewiseValue()
6039 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL)))) in isBytewiseValue()
6046 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) in isBytewiseValue()
6047 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL)))) in isBytewiseValue()
6071 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { in BuildSubAggregate()
6075 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, in BuildSubAggregate()
6082 PrevTo = Del->getAggregateOperand(); in BuildSubAggregate()
6083 Del->eraseFromParent(); in BuildSubAggregate()
6123 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), in BuildSubAggregate()
6146 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && in FindInsertedValue()
6148 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && in FindInsertedValue()
6152 C = C->getAggregateElement(idx_range[0]); in FindInsertedValue()
6161 for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); in FindInsertedValue()
6186 return FindInsertedValue(I->getAggregateOperand(), idx_range, in FindInsertedValue()
6192 return FindInsertedValue(I->getInsertedValueOperand(), in FindInsertedValue()
6202 unsigned size = I->getNumIndices() + idx_range.size(); in FindInsertedValue()
6207 Idxs.append(I->idx_begin(), I->idx_end()); in FindInsertedValue()
6215 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); in FindInsertedValue()
6225 if (GEP->getNumOperands() != 3) in isGEPBasedOnPointerToString()
6228 // Make sure the index-ee is a pointer to array of \p CharSize integers. in isGEPBasedOnPointerToString()
6230 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType()); in isGEPBasedOnPointerToString()
6231 if (!AT || !AT->getElementType()->isIntegerTy(CharSize)) in isGEPBasedOnPointerToString()
6236 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); in isGEPBasedOnPointerToString()
6237 if (!FirstIdx || !FirstIdx->isZero()) in isGEPBasedOnPointerToString()
6261 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) in getConstantDataArrayInfo()
6265 const DataLayout &DL = GV->getDataLayout(); in getConstantDataArrayInfo()
6266 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0); in getConstantDataArrayInfo()
6268 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off, in getConstantDataArrayInfo()
6287 if (GV->getInitializer()->isNullValue()) { in getConstantDataArrayInfo()
6288 Type *GVTy = GV->getValueType(); in getConstantDataArrayInfo()
6295 // transform even undefined library calls into simpler, well-defined in getConstantDataArrayInfo()
6298 Slice.Length = Length < Offset ? 0 : Length - Offset; in getConstantDataArrayInfo()
6302 auto *Init = const_cast<Constant *>(GV->getInitializer()); in getConstantDataArrayInfo()
6304 Type *InitElTy = ArrayInit->getElementType(); in getConstantDataArrayInfo()
6305 if (InitElTy->isIntegerTy(ElementSize)) { in getConstantDataArrayInfo()
6309 ArrayTy = ArrayInit->getType(); in getConstantDataArrayInfo()
6326 ArrayTy = dyn_cast<ArrayType>(Init->getType()); in getConstantDataArrayInfo()
6329 uint64_t NumElts = ArrayTy->getArrayNumElements(); in getConstantDataArrayInfo()
6335 Slice.Length = NumElts - Offset; in getConstantDataArrayInfo()
6340 /// not be a nul-terminated string. On success, store the bytes in Str and
6351 // Return a nul-terminated string even for an empty Slice. This is in getConstantStringInfo()
6370 Str = Slice.Array->getAsString(); in getConstantStringInfo()
6377 // some other way that the string is length-bound. in getConstantStringInfo()
6393 V = V->stripPointerCasts(); in GetStringLengthH()
6403 for (Value *IncValue : PN->incoming_values()) { in GetStringLengthH()
6405 if (Len == 0) return 0; // Unknown length -> unknown. in GetStringLengthH()
6410 return 0; // Disagree -> unknown. in GetStringLengthH()
6418 // strlen(select(c,x,y)) -> strlen(x) ^ strlen(y) in GetStringLengthH()
6420 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize); in GetStringLengthH()
6422 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize); in GetStringLengthH()
6445 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0) in GetStringLengthH()
6455 if (!V->getType()->isPointerTy()) in GetStringLength()
6470 if (const Value *RV = Call->getReturnedArgOperand()) in getArgumentAliasingToReturnedPointer()
6475 return Call->getArgOperand(0); in getArgumentAliasingToReturnedPointer()
6481 switch (Call->getIntrinsicID()) { in isIntrinsicReturningPointerAliasingArgumentWithoutCapturing()
6487 // input pointer (and thus preserve null-ness for the purposes of escape in isIntrinsicReturningPointerAliasingArgumentWithoutCapturing()
6502 return !Call->getParent()->getParent()->isPresplitCoroutine(); in isIntrinsicReturningPointerAliasingArgumentWithoutCapturing()
6508 /// \p PN defines a loop-variant pointer to an object. Check if the
6512 // Find the loop-defined value. in isSameUnderlyingObjectInLoop()
6513 Loop *L = LI->getLoopFor(PN->getParent()); in isSameUnderlyingObjectInLoop()
6514 if (PN->getNumIncomingValues() != 2) in isSameUnderlyingObjectInLoop()
6518 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); in isSameUnderlyingObjectInLoop()
6519 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) in isSameUnderlyingObjectInLoop()
6520 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); in isSameUnderlyingObjectInLoop()
6521 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) in isSameUnderlyingObjectInLoop()
6530 if (!L->isLoopInvariant(Load->getPointerOperand())) in isSameUnderlyingObjectInLoop()
6536 if (!V->getType()->isPointerTy()) in getUnderlyingObject()
6540 V = GEP->getPointerOperand(); in getUnderlyingObject()
6543 Value *NewV = cast<Operator>(V)->getOperand(0); in getUnderlyingObject()
6544 if (!NewV->getType()->isPointerTy()) in getUnderlyingObject()
6548 if (GA->isInterposable()) in getUnderlyingObject()
6550 V = GA->getAliasee(); in getUnderlyingObject()
6553 // Look through single-arg phi nodes created by LCSSA. in getUnderlyingObject()
6554 if (PHI->getNumIncomingValues() == 1) { in getUnderlyingObject()
6555 V = PHI->getIncomingValue(0); in getUnderlyingObject()
6576 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); in getUnderlyingObject()
6595 Worklist.push_back(SI->getTrueValue()); in getUnderlyingObjects()
6596 Worklist.push_back(SI->getFalseValue()); in getUnderlyingObjects()
6611 if (!LI || !LI->isLoopHeader(PN->getParent()) || in getUnderlyingObjects()
6613 append_range(Worklist, PN->incoming_values()); in getUnderlyingObjects()
6646 Worklist.push_back(SI->getTrueValue()); in getUnderlyingObjectAggressive()
6647 Worklist.push_back(SI->getFalseValue()); in getUnderlyingObjectAggressive()
6652 append_range(Worklist, PN->incoming_values()); in getUnderlyingObjectAggressive()
6672 if (U->getOpcode() == Instruction::PtrToInt) in getUnderlyingObjectFromInt()
6673 return U->getOperand(0); in getUnderlyingObjectFromInt()
6680 if (U->getOpcode() != Instruction::Add || in getUnderlyingObjectFromInt()
6681 (!isa<ConstantInt>(U->getOperand(1)) && in getUnderlyingObjectFromInt()
6682 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && in getUnderlyingObjectFromInt()
6683 !isa<PHINode>(U->getOperand(1)))) in getUnderlyingObjectFromInt()
6685 V = U->getOperand(0); in getUnderlyingObjectFromInt()
6689 assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); in getUnderlyingObjectFromInt()
6711 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0)); in getUnderlyingObjectsForCodeGen()
6712 if (O->getType()->isPointerTy()) { in getUnderlyingObjectsForCodeGen()
6749 AddWork(CI->getOperand(0)); in findAllocaForValue()
6751 for (Value *IncValue : PN->incoming_values()) in findAllocaForValue()
6754 AddWork(SI->getTrueValue()); in findAllocaForValue()
6755 AddWork(SI->getFalseValue()); in findAllocaForValue()
6757 if (OffsetZero && !GEP->hasAllZeroIndices()) in findAllocaForValue()
6759 AddWork(GEP->getPointerOperand()); in findAllocaForValue()
6761 Value *Returned = CB->getReturnedArgOperand(); in findAllocaForValue()
6776 for (const User *U : V->users()) { in onlyUsedByLifetimeMarkersOrDroppableInstsHelper()
6781 if (AllowLifetime && II->isLifetimeStartOrEnd()) in onlyUsedByLifetimeMarkersOrDroppableInstsHelper()
6784 if (AllowDroppable && II->isDroppable()) in onlyUsedByLifetimeMarkersOrDroppableInstsHelper()
6818 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI, in isSafeToSpeculativelyExecute()
6827 if (Inst->getOpcode() != Opcode) { in isSafeToSpeculativelyExecuteWithOpcode()
6831 if (Inst->getNumOperands() < NumLeadingOperands) in isSafeToSpeculativelyExecuteWithOpcode()
6833 const Type *ExpectedType = Inst->getType(); in isSafeToSpeculativelyExecuteWithOpcode()
6835 if (Inst->getOperand(ItOp)->getType() != ExpectedType) in isSafeToSpeculativelyExecuteWithOpcode()
6853 if (match(Inst->getOperand(1), m_APInt(V))) in isSafeToSpeculativelyExecuteWithOpcode()
6859 // x / y is undefined if y == 0 or x == INT_MIN and y == -1 in isSafeToSpeculativelyExecuteWithOpcode()
6861 if (!match(Inst->getOperand(1), m_APInt(Denominator))) in isSafeToSpeculativelyExecuteWithOpcode()
6866 // It's safe to hoist if the denominator is not 0 or -1. in isSafeToSpeculativelyExecuteWithOpcode()
6867 if (!Denominator->isAllOnes()) in isSafeToSpeculativelyExecuteWithOpcode()
6869 // At this point we know that the denominator is -1. It is safe to hoist as in isSafeToSpeculativelyExecuteWithOpcode()
6871 if (match(Inst->getOperand(0), m_APInt(Numerator))) in isSafeToSpeculativelyExecuteWithOpcode()
6872 return !Numerator->isMinSignedValue(); in isSafeToSpeculativelyExecuteWithOpcode()
6885 const DataLayout &DL = LI->getDataLayout(); in isSafeToSpeculativelyExecuteWithOpcode()
6886 return isDereferenceableAndAlignedPointer(LI->getPointerOperand(), in isSafeToSpeculativelyExecuteWithOpcode()
6887 LI->getType(), LI->getAlign(), DL, in isSafeToSpeculativelyExecuteWithOpcode()
6894 const Function *Callee = CI->getCalledFunction(); in isSafeToSpeculativelyExecuteWithOpcode()
6896 // The called function could have undefined behavior or side-effects, even in isSafeToSpeculativelyExecuteWithOpcode()
6898 return Callee && Callee->isSpeculatable(); in isSafeToSpeculativelyExecuteWithOpcode()
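The sdiv/srem case above encodes the two ways signed division traps: a zero divisor, or INT_MIN divided by -1. The same decision procedure over plain 32-bit values, assuming (as in the code above) that the interesting operands are known constants:

#include <cstdint>
#include <limits>

// Safe-to-speculate test for signed division: divide-by-zero is always
// out; any divisor other than -1 is fine; divisor -1 is fine unless the
// numerator could be INT_MIN, since INT_MIN / -1 overflows.
static bool safeToSpeculateSDiv(int32_t Numerator, int32_t Denominator) {
  if (Denominator == 0)
    return false;
  if (Denominator != -1)
    return true;
  return Numerator != std::numeric_limits<int32_t>::min();
}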
6934 // 1) Can't reorder two inf-loop calls, even if readonly in mayHaveNonDefUseDependency()
6935 // 2) Also can't reorder an inf-loop call below an instruction which isn't in mayHaveNonDefUseDependency()
6973 KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, SQ); in computeOverflowForUnsignedMul()
6974 KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, SQ); in computeOverflowForUnsignedMul()
6976 // mul nsw of two non-negative numbers is also nuw. in computeOverflowForUnsignedMul()
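The unsigned-multiply check above rests on a leading-zeros argument (the significant-bits rule from Hacker's Delight): a value with Z guaranteed leading zeros is below 2^(w-Z), so when the operands' leading zeros sum to at least the bit width, the product is below 2^w and cannot wrap. A sketch of just that test (C++20 <bit>), using each value's own clz where the analysis would use the KnownBits-guaranteed minimum:

#include <bit>     // C++20: std::countl_zero
#include <cstdint>

// Significant-bits test for a 32-bit unsigned multiply: if
// clz(L) + clz(R) >= 32 then L < 2^(32-clzL) and R < 2^(32-clzR),
// so L*R < 2^32 and the multiply cannot wrap.
static bool mulCannotOverflowU32(uint32_t L, uint32_t R) {
  return std::countl_zero(L) + std::countl_zero(R) >= 32;
}

Using the guaranteed minimum can only underestimate the zero bits, so the test stays conservative: it may miss a safe multiply but never blesses a wrapping one.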
6994 unsigned BitWidth = LHS->getType()->getScalarSizeInBits(); in computeOverflowForSignedMul()
7016 KnownBits LHSKnown = computeKnownBits(LHS, /*Depth=*/0, SQ); in computeOverflowForSignedMul()
7017 KnownBits RHSKnown = computeKnownBits(RHS, /*Depth=*/0, SQ); in computeOverflowForSignedMul()
7039 if (Add && Add->hasNoSignedWrap()) { in computeOverflowForSignedAdd()
7085 computeKnownBitsFromContext(Add, AddKnown, /*Depth=*/0, SQ); in computeOverflowForSignedAdd()
7097 // X - (X % ?) in computeOverflowForUnsignedSub()
7101 // X - (X -nuw ?) in computeOverflowForUnsignedSub()
7104 // then determining no-overflow may allow other transforms. in computeOverflowForUnsignedSub()
7133 // X - (X % ?) in computeOverflowForSignedSub()
7137 // X - (X -nsw ?) in computeOverflowForSignedSub()
7140 // then determining no-overflow may allow other transforms. in computeOverflowForSignedSub()
7164 for (const User *U : WO->users()) { in isOverflowIntrinsicNoWrap()
7166 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type"); in isOverflowIntrinsicNoWrap()
7168 if (EVI->getIndices()[0] == 0) in isOverflowIntrinsicNoWrap()
7171 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type"); in isOverflowIntrinsicNoWrap()
7173 for (const auto *U : EVI->users()) in isOverflowIntrinsicNoWrap()
7175 assert(B->isConditional() && "How else is it using an i1?"); in isOverflowIntrinsicNoWrap()
7187 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1)); in isOverflowIntrinsicNoWrap()
7191 // Check if all users of the add are provably no-wrap. in isOverflowIntrinsicNoWrap()
7195 if (DT.dominates(NoWrapEdge, Result->getParent())) in isOverflowIntrinsicNoWrap()
7198 for (const auto &RU : Result->uses()) in isOverflowIntrinsicNoWrap()
7217 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) { in shiftAmountKnownInRange()
7218 unsigned NumElts = FVTy->getNumElements(); in shiftAmountKnownInRange()
7220 ShiftAmounts.push_back(C->getAggregateElement(i)); in shiftAmountKnownInRange()
7221 } else if (isa<ScalableVectorType>(C->getType())) in shiftAmountKnownInRange()
7228 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth()); in shiftAmountKnownInRange()
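For vector shifts the helper above insists that every lane of the shift amount be a constant strictly below the element bit width; a non-constant lane, or any scalable vector, makes it give up. The same per-lane test over a plain container, with std::nullopt standing in for a lane whose value is not a constant (a sketch, not the LLVM API):

#include <cstdint>
#include <optional>
#include <vector>

// Per-lane "shift amount known in range" test: every lane must hold a
// known constant strictly less than the element bit width.
static bool shiftAmountsInRange(
    const std::vector<std::optional<uint64_t>> &Lanes, unsigned BitWidth) {
  for (const std::optional<uint64_t> &Amt : Lanes)
    if (!Amt || *Amt >= BitWidth)
      return false; // unknown or out-of-range lane: shift may be poison
  return true;
}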
7252 Op->hasPoisonGeneratingAnnotations()) in canCreateUndefOrPoison()
7255 unsigned Opcode = Op->getOpcode(); in canCreateUndefOrPoison()
7257 // Check whether opcode is a poison/undef-generating operation in canCreateUndefOrPoison()
7262 return includesPoison(Kind) && !shiftAmountKnownInRange(Op->getOperand(1)); in canCreateUndefOrPoison()
7270 switch (II->getIntrinsicID()) { in canCreateUndefOrPoison()
7275 if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue()) in canCreateUndefOrPoison()
7304 !shiftAmountKnownInRange(II->getArgOperand(1)); in canCreateUndefOrPoison()
7351 return !CB->hasRetAttr(Attribute::NoUndef); in canCreateUndefOrPoison()
7356 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType()); in canCreateUndefOrPoison()
7357 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1; in canCreateUndefOrPoison()
7358 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp)); in canCreateUndefOrPoison()
7361 Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()); in canCreateUndefOrPoison()
7366 ? cast<ConstantExpr>(Op)->getShuffleMask() in canCreateUndefOrPoison()
7367 : cast<ShuffleVectorInst>(Op)->getShuffleMask(); in canCreateUndefOrPoison()
7392 if (isa<CastInst>(Op) || (CE && CE->isCast())) in canCreateUndefOrPoison()
7414 unsigned Depth) { in directlyImpliesPoison() argument
7419 if (Depth >= MaxDepth) in directlyImpliesPoison()
7423 if (any_of(I->operands(), [=](const Use &Op) { in directlyImpliesPoison()
7425 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1); in directlyImpliesPoison()
7435 llvm::is_contained(II->args(), ValAssumedPoison))) in directlyImpliesPoison()
7442 unsigned Depth) { in impliesPoison() argument
7446 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0)) in impliesPoison()
7450 if (Depth >= MaxDepth) in impliesPoison()
7455 return all_of(I->operands(), [=](const Value *Op) { in impliesPoison()
7456 return impliesPoison(Op, V, Depth + 1); in impliesPoison()
7463 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0); in impliesPoison()
7470 const DominatorTree *DT, unsigned Depth, UndefPoisonKind Kind) { in isGuaranteedNotToBeUndefOrPoison() argument
7471 if (Depth >= MaxAnalysisRecursionDepth) in isGuaranteedNotToBeUndefOrPoison()
7478 if (A->hasAttribute(Attribute::NoUndef) || in isGuaranteedNotToBeUndefOrPoison()
7479 A->hasAttribute(Attribute::Dereferenceable) || in isGuaranteedNotToBeUndefOrPoison()
7480 A->hasAttribute(Attribute::DereferenceableOrNull)) in isGuaranteedNotToBeUndefOrPoison()
7495 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C)) { in isGuaranteedNotToBeUndefOrPoison()
7496 if (includesUndef(Kind) && C->containsUndefElement()) in isGuaranteedNotToBeUndefOrPoison()
7498 if (includesPoison(Kind) && C->containsPoisonElement()) in isGuaranteedNotToBeUndefOrPoison()
7500 return !C->containsConstantExpression(); in isGuaranteedNotToBeUndefOrPoison()
7511 // well. We believe that such an addrspacecast is equivalent to a no-op. in isGuaranteedNotToBeUndefOrPoison()
7512 auto *StrippedV = V->stripPointerCastsSameRepresentation(); in isGuaranteedNotToBeUndefOrPoison()
7518 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1, Kind); in isGuaranteedNotToBeUndefOrPoison()
7528 if (CB->hasRetAttr(Attribute::NoUndef) || in isGuaranteedNotToBeUndefOrPoison()
7529 CB->hasRetAttr(Attribute::Dereferenceable) || in isGuaranteedNotToBeUndefOrPoison()
7530 CB->hasRetAttr(Attribute::DereferenceableOrNull)) in isGuaranteedNotToBeUndefOrPoison()
7535 unsigned Num = PN->getNumIncomingValues(); in isGuaranteedNotToBeUndefOrPoison()
7538 auto *TI = PN->getIncomingBlock(i)->getTerminator(); in isGuaranteedNotToBeUndefOrPoison()
7539 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI, in isGuaranteedNotToBeUndefOrPoison()
7540 DT, Depth + 1, Kind)) { in isGuaranteedNotToBeUndefOrPoison()
7549 all_of(Opr->operands(), OpCheck)) in isGuaranteedNotToBeUndefOrPoison()
7554 if (I->hasMetadata(LLVMContext::MD_noundef) || in isGuaranteedNotToBeUndefOrPoison()
7555 I->hasMetadata(LLVMContext::MD_dereferenceable) || in isGuaranteedNotToBeUndefOrPoison()
7556 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null)) in isGuaranteedNotToBeUndefOrPoison()
7563 if (!CtxI || !CtxI->getParent() || !DT) in isGuaranteedNotToBeUndefOrPoison()
7566 auto *DNode = DT->getNode(CtxI->getParent()); in isGuaranteedNotToBeUndefOrPoison()
7576 auto *Dominator = DNode->getIDom(); in isGuaranteedNotToBeUndefOrPoison()
7579 if (!includesUndef(Kind) || V->getType()->isIntegerTy()) in isGuaranteedNotToBeUndefOrPoison()
7581 auto *TI = Dominator->getBlock()->getTerminator(); in isGuaranteedNotToBeUndefOrPoison()
7585 if (BI->isConditional()) in isGuaranteedNotToBeUndefOrPoison()
7586 Cond = BI->getCondition(); in isGuaranteedNotToBeUndefOrPoison()
7588 Cond = SI->getCondition(); in isGuaranteedNotToBeUndefOrPoison()
7597 if (any_of(Opr->operands(), [V](const Use &U) { in isGuaranteedNotToBeUndefOrPoison()
7604 Dominator = Dominator->getIDom(); in isGuaranteedNotToBeUndefOrPoison()
7616 unsigned Depth) { in isGuaranteedNotToBeUndefOrPoison() argument
7617 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, in isGuaranteedNotToBeUndefOrPoison()
7623 const DominatorTree *DT, unsigned Depth) { in isGuaranteedNotToBePoison() argument
7624 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, in isGuaranteedNotToBePoison()
7630 const DominatorTree *DT, unsigned Depth) { in isGuaranteedNotToBeUndef() argument
7631 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, in isGuaranteedNotToBeUndef()
7659 if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo)) in mustExecuteUBIfPoisonOnPathTo()
7664 if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) { in mustExecuteUBIfPoisonOnPathTo()
7670 for (const User *User : I->users()) in mustExecuteUBIfPoisonOnPathTo()
7674 // Might be non-UB, or might have a path we couldn't prove must execute on in mustExecuteUBIfPoisonOnPathTo()
7681 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1), in computeOverflowForSignedAdd()
7708 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) { in isGuaranteedToTransferExecutionToSuccessor()
7721 return !I->mayThrow() && I->willReturn(); in isGuaranteedToTransferExecutionToSuccessor()
7742 assert(ScanLimit && "scan limit must be non-zero"); in isGuaranteedToTransferExecutionToSuccessor()
7746 if (--ScanLimit == 0) in isGuaranteedToTransferExecutionToSuccessor()
7760 if (I->getParent() != L->getHeader()) return false; in isGuaranteedToExecuteForEveryIteration()
7762 for (const Instruction &LI : *L->getHeader()) { in isGuaranteedToExecuteForEveryIteration()
7771 switch (I->getOpcode()) { in propagatesPoison()
7780 switch (II->getIntrinsicID()) { in propagatesPoison()
7831 switch (I->getOpcode()) { in handleGuaranteedWellDefinedOps()
7833 if (Handle(cast<StoreInst>(I)->getPointerOperand())) in handleGuaranteedWellDefinedOps()
7838 if (Handle(cast<LoadInst>(I)->getPointerOperand())) in handleGuaranteedWellDefinedOps()
7845 if (Handle(cast<AtomicCmpXchgInst>(I)->getPointerOperand())) in handleGuaranteedWellDefinedOps()
7850 if (Handle(cast<AtomicRMWInst>(I)->getPointerOperand())) in handleGuaranteedWellDefinedOps()
7857 if (CB->isIndirectCall() && Handle(CB->getCalledOperand())) in handleGuaranteedWellDefinedOps()
7859 for (unsigned i = 0; i < CB->arg_size(); ++i) in handleGuaranteedWellDefinedOps()
7860 if ((CB->paramHasAttr(i, Attribute::NoUndef) || in handleGuaranteedWellDefinedOps()
7861 CB->paramHasAttr(i, Attribute::Dereferenceable) || in handleGuaranteedWellDefinedOps()
7862 CB->paramHasAttr(i, Attribute::DereferenceableOrNull)) && in handleGuaranteedWellDefinedOps()
7863 Handle(CB->getArgOperand(i))) in handleGuaranteedWellDefinedOps()
7868 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef) && in handleGuaranteedWellDefinedOps()
7869 Handle(I->getOperand(0))) in handleGuaranteedWellDefinedOps()
7873 if (Handle(cast<SwitchInst>(I)->getCondition())) in handleGuaranteedWellDefinedOps()
7878 if (BR->isConditional() && Handle(BR->getCondition())) in handleGuaranteedWellDefinedOps()
7903 switch (I->getOpcode()) { in handleGuaranteedNonPoisonOps()
7909 return Handle(I->getOperand(1)); in handleGuaranteedNonPoisonOps()
7936 // this, look out for the distinction between post-dominance and strong in programUndefinedIfUndefOrPoison()
7937 // post-dominance. in programUndefinedIfUndefOrPoison()
7941 BB = Inst->getParent(); in programUndefinedIfUndefOrPoison()
7942 Begin = Inst->getIterator(); in programUndefinedIfUndefOrPoison()
7945 if (Arg->getParent()->isDeclaration()) in programUndefinedIfUndefOrPoison()
7947 BB = &Arg->getParent()->getEntryBlock(); in programUndefinedIfUndefOrPoison()
7948 Begin = BB->begin(); in programUndefinedIfUndefOrPoison()
7956 BasicBlock::const_iterator End = BB->end(); in programUndefinedIfUndefOrPoison()
7961 // well-defined operands. in programUndefinedIfUndefOrPoison()
7966 if (--ScanLimit == 0) in programUndefinedIfUndefOrPoison()
7992 if (--ScanLimit == 0) in programUndefinedIfUndefOrPoison()
8017 BB = BB->getSingleSuccessor(); in programUndefinedIfUndefOrPoison()
8021 Begin = BB->getFirstNonPHI()->getIterator(); in programUndefinedIfUndefOrPoison()
8022 End = BB->end(); in programUndefinedIfUndefOrPoison()
8040 return !C->isNaN(); in isKnownNonNaN()
8043 if (!C->getElementType()->isFloatingPointTy()) in isKnownNonNaN()
8045 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) { in isKnownNonNaN()
8046 if (C->getElementAsAPFloat(I).isNaN()) in isKnownNonNaN()
8060 return !C->isZero(); in isKnownNonZero()
8063 if (!C->getElementType()->isFloatingPointTy()) in isKnownNonZero()
8065 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) { in isKnownNonZero()
8066 if (C->getElementAsAPFloat(I).isZero()) in isKnownNonZero()
8076 /// Given non-min/max outer cmp/select from the clamp pattern this
8084 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2)) in matchFastFloatClamp()
8085 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2)) in matchFastFloatClamp()
8099 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite()) in matchFastFloatClamp()
8146 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) in matchClamp()
8151 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) in matchClamp()
8156 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) in matchClamp()
8161 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) in matchClamp()
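Each row above folds a compare/select pair into nested min/max under a side condition on the constants; for example, the first signed row reads (X <s C1) ? C1 : smin(X, C2) ==> smax(smin(X, C2), C1), valid when C1 <s C2. Rewrites like these are cheap to sanity-check exhaustively at 8 bits; a self-contained sweep for that row:

#include <algorithm>
#include <cassert>

// Exhaustive 8-bit check of one matchClamp row:
//   (X <s C1) ? C1 : smin(X, C2) == smax(smin(X, C2), C1)  when C1 <s C2.
int main() {
  for (int C1 = -128; C1 <= 127; ++C1)
    for (int C2 = C1 + 1; C2 <= 127; ++C2) // side condition: C1 <s C2
      for (int X = -128; X <= 127; ++X) {
        int Sel = X < C1 ? C1 : std::min(X, C2);
        int MinMax = std::max(std::min(X, C2), C1);
        assert(Sel == MinMax);
      }
  return 0;
}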
8172 unsigned Depth) { in matchMinMaxOfMinMax() argument
8177 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); in matchMinMaxOfMinMax()
8182 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); in matchMinMaxOfMinMax()
8230 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) in matchMinMaxOfMinMax()
8231 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) in matchMinMaxOfMinMax()
8237 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) in matchMinMaxOfMinMax()
8238 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) in matchMinMaxOfMinMax()
8244 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) in matchMinMaxOfMinMax()
8245 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) in matchMinMaxOfMinMax()
8251 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) in matchMinMaxOfMinMax()
8252 // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) in matchMinMaxOfMinMax()
8263 /// splat of a constant integer, return the bitwise-not source value.
8264 /// TODO: This could be extended to handle non-splat vector integer constants.
8272 return ConstantInt::get(V->getType(), ~(*C)); in getNotValue()
8277 /// Match non-obvious integer minimum and maximum sequences.
8282 unsigned Depth) { in matchMinMax() argument
8291 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); in matchMinMax()
8334 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue()) in matchMinMax()
8338 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX in matchMinMax()
8339 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN in matchMinMax()
8340 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue()) in matchMinMax()
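The rows above rewrite a signed sign-bit test into unsigned min/max: with MINVAL = 0x80 at 8 bits, "X >s -1" (sign bit clear) is exactly "X <u MINVAL", so selecting MINVAL or X on that condition is an unsigned max or min of X and MINVAL. An exhaustive 8-bit confirmation:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Exhaustive check of the sign-bit trick: sign bit clear <=> X <u 0x80,
// so the two selects are unsigned max/min with MINVAL.
int main() {
  const uint8_t MinVal = 0x80; // INT8_MIN reinterpreted as unsigned
  for (int I = 0; I <= 255; ++I) {
    uint8_t X = static_cast<uint8_t>(I);
    bool SignClear = static_cast<int8_t>(X) > -1;
    assert((SignClear ? MinVal : X) == std::max(X, MinVal)); // UMAX row
    assert((SignClear ? X : MinVal) == std::min(X, MinVal)); // UMIN row
  }
  return 0;
}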
8356 if (NeedNSW && !BO->hasNoSignedWrap()) in isKnownNegation()
8359 auto *Zero = cast<Constant>(BO->getOperand(0)); in isKnownNegation()
8360 if (!AllowPoison && !Zero->isNullValue()) in isKnownNegation()
8366 // X = -Y or Y = -X in isKnownNegation()
8405 unsigned Depth) { in matchSelectPattern() argument
8408 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one in matchSelectPattern()
8411 // elements because those cannot be back-propagated for analysis. in matchSelectPattern()
8414 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement()) in matchSelectPattern()
8417 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement()) in matchSelectPattern()
8436 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0 in matchSelectPattern()
8437 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1) in matchSelectPattern()
8457 // When given one NaN and one non-NaN input: in matchSelectPattern()
8458 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input. in matchSelectPattern()
8459 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the in matchSelectPattern()
8460 // ordered comparison fails), which could be NaN or non-NaN. in matchSelectPattern()
8467 // Both operands are known non-NaN. in matchSelectPattern()
8474 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. in matchSelectPattern()
8486 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. in matchSelectPattern()
8530 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can in matchSelectPattern()
8537 // Set the return values. If the compare uses the negated value (-X >s 0), in matchSelectPattern()
8544 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X) in matchSelectPattern()
8545 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X) in matchSelectPattern()
8549 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X) in matchSelectPattern()
8553 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X) in matchSelectPattern()
8554 // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X) in matchSelectPattern()
8559 // Set the return values. If the compare uses the negated value (-X >s 0), in matchSelectPattern()
8566 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X) in matchSelectPattern()
8567 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X) in matchSelectPattern()
8571 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X) in matchSelectPattern()
8572 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X) in matchSelectPattern()
8579 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth); in matchSelectPattern()
8581 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar in matchSelectPattern()
8582 // may return either -0.0 or 0.0, so fcmp/select pair has stricter in matchSelectPattern()
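The NaN discussion above is the whole reason FP min/max matching is guarded: fminf/fmaxf and the naive compare/select construction agree on ordered inputs but diverge when an input is NaN, so the matcher must prove the operands non-NaN (or see the no-NaNs fast-math flag) before claiming minnum/maxnum. The divergence is easy to show directly:

#include <cmath>
#include <cstdio>

// With one NaN input, std::fmin returns the non-NaN operand, while the
// naive (a < b ? a : b) select returns 'b' because an ordered
// comparison involving NaN is false -- so the select's result depends
// on operand order.
int main() {
  float NaN = std::nanf("");
  float One = 1.0f;
  std::printf("fmin:       %f\n", std::fmin(NaN, One));       // 1.0
  std::printf("select a<b: %f\n", (NaN < One) ? NaN : One);   // 1.0 ('b')
  std::printf("select b<a: %f\n", (One < NaN) ? One : NaN);   // nan ('b')
  return 0;
}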
8612 *CastOp = Cast1->getOpcode(); in lookThroughCast()
8613 Type *SrcTy = Cast1->getSrcTy(); in lookThroughCast()
8616 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy()) in lookThroughCast()
8617 return Cast2->getOperand(0); in lookThroughCast()
8625 const DataLayout &DL = CmpI->getDataLayout(); in lookThroughCast()
8629 if (CmpI->isUnsigned()) in lookThroughCast()
8633 if (CmpI->isSigned()) in lookThroughCast()
8638 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) && in lookThroughCast()
8639 CmpConst->getType() == SrcTy) { in lookThroughCast()
8656 // select i1 %cond, x, -x. in lookThroughCast()
8663 unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt; in lookThroughCast()
8694 ConstantFoldCastOperand(*CastOp, CastedTo, C->getType(), DL); in lookThroughCast()
8703 unsigned Depth) { in matchSelectPattern() argument
8704 if (Depth >= MaxAnalysisRecursionDepth) in matchSelectPattern()
8710 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); in matchSelectPattern()
8713 Value *TrueVal = SI->getTrueValue(); in matchSelectPattern()
8714 Value *FalseVal = SI->getFalseValue(); in matchSelectPattern()
8717 CastOp, Depth); in matchSelectPattern()
8722 Instruction::CastOps *CastOp, unsigned Depth) { in matchDecomposedSelectPattern() argument
8723 CmpInst::Predicate Pred = CmpI->getPredicate(); in matchDecomposedSelectPattern()
8724 Value *CmpLHS = CmpI->getOperand(0); in matchDecomposedSelectPattern()
8725 Value *CmpRHS = CmpI->getOperand(1); in matchDecomposedSelectPattern()
8728 FMF = CmpI->getFastMathFlags(); in matchDecomposedSelectPattern()
8731 if (CmpI->isEquality()) in matchDecomposedSelectPattern()
8735 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { in matchDecomposedSelectPattern()
8738 // -0.0 because there is no corresponding integer value. in matchDecomposedSelectPattern()
8742 cast<CastInst>(TrueVal)->getOperand(0), C, in matchDecomposedSelectPattern()
8743 LHS, RHS, Depth); in matchDecomposedSelectPattern()
8747 // -0.0 because there is no corresponding integer value. in matchDecomposedSelectPattern()
8751 C, cast<CastInst>(FalseVal)->getOperand(0), in matchDecomposedSelectPattern()
8752 LHS, RHS, Depth); in matchDecomposedSelectPattern()
8756 LHS, RHS, Depth); in matchDecomposedSelectPattern()
8848 // Handle the case of a simple two-predecessor recurrence PHI. in matchSimpleRecurrence()
8851 if (P->getNumIncomingValues() != 2) in matchSimpleRecurrence()
8855 Value *L = P->getIncomingValue(i); in matchSimpleRecurrence()
8856 Value *R = P->getIncomingValue(!i); in matchSimpleRecurrence()
8860 unsigned Opcode = LU->getOpcode(); in matchSimpleRecurrence()
8865 // TODO: Expand list -- xor, div, gep, uaddo, etc. in matchSimpleRecurrence()
8875 Value *LL = LU->getOperand(0); in matchSimpleRecurrence()
8876 Value *LR = LU->getOperand(1); in matchSimpleRecurrence()
8906 P = dyn_cast<PHINode>(I->getOperand(0)); in matchSimpleRecurrence()
8908 P = dyn_cast<PHINode>(I->getOperand(1)); in matchSimpleRecurrence()
8929 return !C->isNegative(); in isTruePredicate()
8944 return CLHS->sle(*CRHS); in isTruePredicate()
8952 cast<OverflowingBinaryOperator>(RHS)->hasNoUnsignedWrap()) in isTruePredicate()
8969 if (match(LHS, m_UDiv(m_Specific(RHS), m_APInt(C))) && C->ugt(1)) in isTruePredicate()
8985 return CLHS->ule(*CRHS); in isTruePredicate()
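The fragments above prove unconditional facts such as X u<= X +nuw Y and X udiv C u<= X for C >u 1, alongside the constant-vs-constant sle/ule rows. Two of those rows verified by brute force at 8 bits, with the nuw side condition modeled explicitly:

#include <cassert>

// Brute-force 8-bit check of two isTruePredicate facts:
//   X u<= X + Y     whenever the add cannot wrap (nuw), and
//   X udiv C u<= X  whenever C >u 1.
int main() {
  for (int X = 0; X <= 255; ++X) {
    for (int Y = 0; Y <= 255; ++Y)
      if (X + Y <= 255)            // models the nuw guarantee
        assert(X <= X + Y);
    for (int C = 2; C <= 255; ++C) // C >u 1
      assert(X / C <= X);
  }
  return 0;
}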
9070 Value *L0 = LHS->getOperand(0); in isImpliedCondICmps()
9071 Value *L1 = LHS->getOperand(1); in isImpliedCondICmps()
9076 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate(); in isImpliedCondICmps()
9078 // We can have non-canonical operands, so try to normalize any common operand in isImpliedCondICmps()
9098 // See if we can infer anything if operand-0 matches and we have at least one in isImpliedCondICmps()
9108 /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1); in isImpliedCondICmps()
9111 /*CxtI=*/nullptr, /*DT=*/nullptr, MaxAnalysisRecursionDepth - 1); in isImpliedCondICmps()
9146 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) { in isImpliedCondAndOr() argument
9148 assert((LHS->getOpcode() == Instruction::And || in isImpliedCondAndOr()
9149 LHS->getOpcode() == Instruction::Or || in isImpliedCondAndOr()
9150 LHS->getOpcode() == Instruction::Select) && in isImpliedCondAndOr()
9153 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit"); in isImpliedCondAndOr()
9161 // FIXME: Make this non-recursive. in isImpliedCondAndOr()
9163 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1)) in isImpliedCondAndOr()
9166 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1)) in isImpliedCondAndOr()
9176 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) { in isImpliedCondition() argument
9178 if (Depth == MaxAnalysisRecursionDepth) in isImpliedCondition()
9183 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy()) in isImpliedCondition()
9186 assert(LHS->getType()->isIntOrIntVectorTy(1) && in isImpliedCondition()
9202 if ((LHSI->getOpcode() == Instruction::And || in isImpliedCondition()
9203 LHSI->getOpcode() == Instruction::Or || in isImpliedCondition()
9204 LHSI->getOpcode() == Instruction::Select)) in isImpliedCondition()
9206 Depth); in isImpliedCondition()
9213 bool LHSIsTrue, unsigned Depth) { in isImpliedCondition() argument
9228 LHS, RHSCmp->getPredicate(), RHSCmp->getOperand(0), in isImpliedCondition()
9229 RHSCmp->getOperand(1), DL, LHSIsTrue, Depth)) in isImpliedCondition()
9234 if (Depth == MaxAnalysisRecursionDepth) in isImpliedCondition()
9242 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1)) in isImpliedCondition()
9246 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1)) in isImpliedCondition()
9252 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1)) in isImpliedCondition()
9256 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1)) in isImpliedCondition()
9268 if (!ContextI || !ContextI->getParent()) in getDomPredecessorCondition()
9273 const BasicBlock *ContextBB = ContextI->getParent(); in getDomPredecessorCondition()
9274 const BasicBlock *PredBB = ContextBB->getSinglePredecessor(); in getDomPredecessorCondition()
9281 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB))) in getDomPredecessorCondition()
9298 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool"); in isImpliedByDomCondition()
9324 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) { in setLimitsForBinOp()
9329 // Otherwise if both no-wraps are set, use the unsigned range because it in setLimitsForBinOp()
9331 // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125]. in setLimitsForBinOp()
9339 if (C->isNegative()) { in setLimitsForBinOp()
9340 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C]. in setLimitsForBinOp()
9356 // X & -X is a power of two or zero. So we can cap the value at max power of in setLimitsForBinOp()
9370 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { in setLimitsForBinOp()
9375 unsigned ShiftAmount = Width - 1; in setLimitsForBinOp()
9376 if (!C->isZero() && IIQ.isExact(&BO)) in setLimitsForBinOp()
9377 ShiftAmount = C->countr_zero(); in setLimitsForBinOp()
9378 if (C->isNegative()) { in setLimitsForBinOp()
9379 // 'ashr C, x' produces [C, C >> (Width-1)] in setLimitsForBinOp()
9381 Upper = C->ashr(ShiftAmount) + 1; in setLimitsForBinOp()
9383 // 'ashr C, x' produces [C >> (Width-1), C] in setLimitsForBinOp()
9384 Lower = C->ashr(ShiftAmount); in setLimitsForBinOp()
9391 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { in setLimitsForBinOp()
9395 // 'lshr C, x' produces [C >> (Width-1), C]. in setLimitsForBinOp()
9396 unsigned ShiftAmount = Width - 1; in setLimitsForBinOp()
9397 if (!C->isZero() && IIQ.isExact(&BO)) in setLimitsForBinOp()
9398 ShiftAmount = C->countr_zero(); in setLimitsForBinOp()
9399 Lower = C->lshr(ShiftAmount); in setLimitsForBinOp()
9411 if (C->isNegative()) { in setLimitsForBinOp()
9412 // 'shl nsw C, x' produces [C << CLO(C)-1, C] in setLimitsForBinOp()
9413 unsigned ShiftAmount = C->countl_one() - 1; in setLimitsForBinOp()
9414 Lower = C->shl(ShiftAmount); in setLimitsForBinOp()
9417 // 'shl nsw C, x' produces [C, C << CLZ(C)-1] in setLimitsForBinOp()
9418 unsigned ShiftAmount = C->countl_zero() - 1; in setLimitsForBinOp()
9420 Upper = C->shl(ShiftAmount) + 1; in setLimitsForBinOp()
9432 Upper = APInt::getHighBitsSet(Width, C->popcount()) + 1; in setLimitsForBinOp()
9434 } else if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { in setLimitsForBinOp()
9435 Upper = APInt::getBitsSetFrom(Width, C->getZExtValue()) + 1; in setLimitsForBinOp()
9443 if (C->isAllOnes()) { in setLimitsForBinOp()
9444 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX] in setLimitsForBinOp()
9445 // where C != -1 and C != 0 and C != 1 in setLimitsForBinOp()
9448 } else if (C->countl_zero() < Width - 1) { in setLimitsForBinOp()
9450 // where C != -1 and C != 0 and C != 1 in setLimitsForBinOp()
9459 if (C->isMinSignedValue()) { in setLimitsForBinOp()
9460 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2]. in setLimitsForBinOp()
9464 // 'sdiv C, x' produces [-|C|, |C|]. in setLimitsForBinOp()
9465 Upper = C->abs() + 1; in setLimitsForBinOp()
9466 Lower = (-Upper) + 1; in setLimitsForBinOp()
9472 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) { in setLimitsForBinOp()
9483 // 'srem x, C' produces (-|C|, |C|). in setLimitsForBinOp()
9484 Upper = C->abs(); in setLimitsForBinOp()
9485 Lower = (-Upper) + 1; in setLimitsForBinOp()
9487 if (C->isNegative()) { in setLimitsForBinOp()
9488 // 'srem -|C|, x' produces [-|C|, 0]. in setLimitsForBinOp()
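The srem rows above bound the remainder by the operands' magnitudes: for a nonzero constant C, x srem C lies in (-|C|, |C|), and a non-positive dividend keeps the result in [-|C|, 0]. LLVM's srem truncates toward zero like C++'s %, so an 8-bit sweep checks both rows (assuming only that truncating semantics):

#include <cassert>
#include <cstdlib>

// Exhaustive 8-bit check of the srem range rows: for nonzero C the
// remainder lies in (-|C|, |C|), and a non-positive dividend keeps it
// non-positive.
int main() {
  for (int X = -128; X <= 127; ++X)
    for (int C = -128; C <= 127; ++C) {
      if (C == 0)
        continue; // srem by zero is undefined
      int R = X % C;
      assert(R > -std::abs(C) && R < std::abs(C));
      if (X <= 0)
        assert(R <= 0); // remainder takes the dividend's sign
    }
  return 0;
}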
9513 unsigned Width = II.getType()->getScalarSizeInBits(); in getRangeForIntrinsic()
9531 if (C->isNegative()) in getRangeForIntrinsic()
9532 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)]. in getRangeForIntrinsic()
9547 // usub.sat(x, C) produces [0, UINT_MAX - C]. in getRangeForIntrinsic()
9550 APInt::getMaxValue(Width) - *C + 1); in getRangeForIntrinsic()
9554 if (C->isNegative()) in getRangeForIntrinsic()
9555 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)]. in getRangeForIntrinsic()
9557 *C - APInt::getSignedMinValue(Width) + in getRangeForIntrinsic()
9560 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX]. in getRangeForIntrinsic()
9561 return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width), in getRangeForIntrinsic()
9564 if (C->isNegative()) in getRangeForIntrinsic()
9565 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]: in getRangeForIntrinsic()
9566 return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C, in getRangeForIntrinsic()
9569 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C]. in getRangeForIntrinsic()
9571 APInt::getSignedMaxValue(Width) - *C + in getRangeForIntrinsic()
9600 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN. in getRangeForIntrinsic()
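The saturating rows above are all instances of one idea: saturation pins one end of the range and the known constant bounds the other; e.g. usub.sat(x, C) can reach 0 but never exceed UINT_MAX - C. A self-contained 8-bit check of that row, with the saturating subtract written out (usub_sat here is a local model of the intrinsic's semantics, not the intrinsic itself):

#include <cassert>
#include <cstdint>

// Exhaustive 8-bit check of the usub.sat row: subtracting a known
// constant C with unsigned saturation clamps at 0 below and can never
// exceed UINT8_MAX - C above.
static uint8_t usub_sat(uint8_t X, uint8_t C) {
  return X >= C ? X - C : 0;
}

int main() {
  for (int C = 0; C <= 255; ++C)
    for (int X = 0; X <= 255; ++X)
      assert(usub_sat(static_cast<uint8_t>(X), static_cast<uint8_t>(C)) <=
             255 - C);
  return 0;
}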
9624 unsigned BitWidth = SI.getType()->getScalarSizeInBits(); in getRangeForSelectPattern()
9633 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN. in getRangeForSelectPattern()
9644 // The result of -abs(X) is <= 0. in getRangeForSelectPattern()
9672 unsigned BitWidth = I->getType()->getScalarSizeInBits(); in setLimitForFPToI()
9673 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy()) in setLimitForFPToI()
9676 Lower = APInt(BitWidth, -65504); in setLimitForFPToI()
9690 unsigned Depth) { in computeConstantRange() argument
9691 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction"); in computeConstantRange()
9693 if (Depth == MaxAnalysisRecursionDepth) in computeConstantRange()
9694 return ConstantRange::getFull(V->getType()->getScalarSizeInBits()); in computeConstantRange()
9697 return C->toConstantRange(); in computeConstantRange()
9699 unsigned BitWidth = V->getType()->getScalarSizeInBits(); in computeConstantRange()
9712 SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1); in computeConstantRange()
9714 SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1); in computeConstantRange()
9724 if (std::optional<ConstantRange> Range = A->getRange()) in computeConstantRange()
9732 if (std::optional<ConstantRange> Range = CB->getRange()) in computeConstantRange()
9738 for (auto &AssumeVH : AC->assumptionsFor(V)) { in computeConstantRange()
9742 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() && in computeConstantRange()
9744 assert(I->getIntrinsicID() == Intrinsic::assume && in computeConstantRange()
9749 Value *Arg = I->getArgOperand(0); in computeConstantRange()
9752 if (!Cmp || Cmp->getOperand(0) != V) in computeConstantRange()
9754 // TODO: Set "ForSigned" parameter via Cmp->isSigned()? in computeConstantRange()
9756 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false, in computeConstantRange()
9757 UseInstrInfo, AC, I, DT, Depth + 1); in computeConstantRange()
9759 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS)); in computeConstantRange()
9816 // assume(A && B) is split to -> assume(A); assume(B); in findValuesAffectedByCondition()
9817 // assume(!(A || B)) is split to -> assume(!A); assume(!B); in findValuesAffectedByCondition()
9851 // X & Y u> C -> X >u C && Y >u C in findValuesAffectedByCondition()
9852 // X | Y u< C -> X u< C && Y u< C in findValuesAffectedByCondition()
9853 // X nuw+ Y u< C -> X u< C && Y u< C in findValuesAffectedByCondition()
9860 // X nuw- Y u> C -> X u> C in findValuesAffectedByCondition()
9866 // Handle icmp slt/sgt (bitcast X to int), 0/-1, which is supported in findValuesAffectedByCondition()
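The decompositions above are what let one condition mark several values as "affected": a bound on X & Y, X | Y, or X +nuw Y transfers to each operand, because AND can only clear bits, OR can only set bits, and a no-wrap add dominates both addends. All three implications verified exhaustively at 8 bits:

#include <cassert>

// Exhaustive 8-bit check of the three decompositions: a bound on the
// combined value transfers to each operand.
int main() {
  for (int X = 0; X <= 255; ++X)
    for (int Y = 0; Y <= 255; ++Y)
      for (int C = 0; C <= 255; ++C) {
        if ((X & Y) > C)
          assert(X > C && Y > C);
        if ((X | Y) < C)
          assert(X < C && Y < C);
        if (X + Y <= 255 && X + Y < C) // "nuw": the add does not wrap
          assert(X < C && Y < C);
      }
  return 0;
}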