Lines Matching +full:riscv-v-spec

1 //===- ValueTracking.cpp - Walk computations to compute properties --------===//
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
88 static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
95 if (unsigned BitWidth = Ty->getScalarSizeInBits()) in getBitWidth()
103 static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) { in safeCxtI() argument
106 if (CxtI && CxtI->getParent()) in safeCxtI()
109 // If the value is really an already-inserted instruction, then use that. in safeCxtI()
110 CxtI = dyn_cast<Instruction>(V); in safeCxtI()
111 if (CxtI && CxtI->getParent()) in safeCxtI()
120 if (CxtI && CxtI->getParent()) in safeCxtI()
123 // If the value is really an already-inserted instruction, then use that. in safeCxtI()
125 if (CxtI && CxtI->getParent()) in safeCxtI()
129 if (CxtI && CxtI->getParent()) in safeCxtI()
138 if (isa<ScalableVectorType>(Shuf->getType())) { in getShuffleDemandedElts()
145 cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements(); in getShuffleDemandedElts()
146 return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(), in getShuffleDemandedElts()
150 static void computeKnownBits(const Value *V, const APInt &DemandedElts,
154 void llvm::computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth, in computeKnownBits() argument
159 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); in computeKnownBits()
161 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in computeKnownBits()
162 ::computeKnownBits(V, DemandedElts, Known, Depth, Q); in computeKnownBits()
165 void llvm::computeKnownBits(const Value *V, KnownBits &Known, in computeKnownBits() argument
170 V, Known, Depth, in computeKnownBits()
171 SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)); in computeKnownBits()
174 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL, in computeKnownBits() argument
179 V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)); in computeKnownBits()
182 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts, in computeKnownBits() argument
187 V, DemandedElts, Depth, in computeKnownBits()
188 SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)); in computeKnownBits()
207 // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern in haveNoCommonBitsSetSpecialCases()
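
Aside: the identity above is easy to confirm standalone (plain C++ with arbitrary test values; an illustration, not the LLVM code). ((X & Y) ^ Y) simplifies to Y & ~X, which can never share a set bit with X:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x : {0u, 1u, 0xF0u, 0xDEADBEEFu})
        for (uint32_t y : {0u, 0xFFu, 0x12345678u}) {
          uint32_t m = (x & y) ^ y; // canonical form of y & ~x
          assert(m == (y & ~x));    // the two spellings agree
          assert((m & x) == 0);     // no bit is set in both m and x
        }
    }
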
242 assert(LHS->getType() == RHS->getType() && in haveNoCommonBitsSet()
244 assert(LHS->getType()->isIntOrIntVectorTy() && in haveNoCommonBitsSet()
256 return !I->user_empty() && all_of(I->users(), [](const User *U) { in isOnlyUsedInZeroComparison()
263 return !I->user_empty() && all_of(I->users(), [](const User *U) { in isOnlyUsedInZeroEqualityComparison()
269 static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
272 bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, in isKnownToBeAPowerOfTwo() argument
277 V, OrZero, Depth, in isKnownToBeAPowerOfTwo()
278 SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)); in isKnownToBeAPowerOfTwo()
281 static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
284 bool llvm::isKnownNonNegative(const Value *V, const SimplifyQuery &SQ, in isKnownNonNegative() argument
286 return computeKnownBits(V, Depth, SQ).isNonNegative(); in isKnownNonNegative()
289 bool llvm::isKnownPositive(const Value *V, const SimplifyQuery &SQ, in isKnownPositive() argument
291 if (auto *CI = dyn_cast<ConstantInt>(V)) in isKnownPositive()
292 return CI->getValue().isStrictlyPositive(); in isKnownPositive()
296 KnownBits Known = computeKnownBits(V, Depth, SQ); in isKnownPositive()
298 (Known.isNonZero() || isKnownNonZero(V, SQ, Depth)); in isKnownPositive()
301 bool llvm::isKnownNegative(const Value *V, const SimplifyQuery &SQ, in isKnownNegative() argument
303 return computeKnownBits(V, Depth, SQ).isNegative(); in isKnownNegative()
314 assert(V1->getType() == V2->getType() && in isKnownNonEqual()
315 "Testing equality of non-equal types!"); in isKnownNonEqual()
316 auto *FVTy = dyn_cast<FixedVectorType>(V1->getType()); in isKnownNonEqual()
318 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in isKnownNonEqual()
324 bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask, in MaskedValueIsZero() argument
327 computeKnownBits(V, Known, Depth, SQ); in MaskedValueIsZero()
331 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
334 static unsigned ComputeNumSignBits(const Value *V, unsigned Depth, in ComputeNumSignBits() argument
336 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); in ComputeNumSignBits()
338 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in ComputeNumSignBits()
339 return ComputeNumSignBits(V, DemandedElts, Depth, Q); in ComputeNumSignBits()
342 unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL, in ComputeNumSignBits() argument
347 V, Depth, SimplifyQuery(DL, DT, AC, safeCxtI(V, CxtI), UseInstrInfo)); in ComputeNumSignBits()
350 unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL, in ComputeMaxSignificantBits() argument
354 unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT); in ComputeMaxSignificantBits()
355 return V->getType()->getScalarSizeInBits() - SignBits + 1; in ComputeMaxSignificantBits()
386 // The product of a number with itself is non-negative. in computeKnownBitsMul()
393 // The product of two numbers with the same sign is non-negative. in computeKnownBitsMul()
396 // The product of a negative number and a non-negative number is either in computeKnownBitsMul()
412 // Only make use of no-wrap flags if we failed to compute the sign bit in computeKnownBitsMul()
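
Aside: a minimal sketch of the sign case analysis above (plain C++ with a hypothetical helper, not the KnownBits API). Matching known operand signs make an nsw product non-negative; a negative times a non-negative may be zero, so only "non-positive" follows and the sign bit stays unknown:

    #include <optional>

    // true = known negative, false = known non-negative, nullopt = unknown.
    std::optional<bool> productKnownNonNegative(std::optional<bool> LHSNeg,
                                                std::optional<bool> RHSNeg) {
      if (LHSNeg && RHSNeg && *LHSNeg == *RHSNeg)
        return true;       // same known signs: nsw product is non-negative
      return std::nullopt; // mixed or unknown signs: not provable
    }

    int main() { return productKnownNonNegative(true, true) ? 0 : 1; }
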
437 ConstantRange Range(Lower->getValue(), Upper->getValue()); in computeKnownBitsFromRangeMetadata()
456 // non-ephemeral users). See r246696's test case for an example. in isEphemeralValueOf()
457 if (is_contained(I->operands(), E)) in isEphemeralValueOf()
461 const Value *V = WorkSet.pop_back_val(); in isEphemeralValueOf() local
462 if (!Visited.insert(V).second) in isEphemeralValueOf()
466 if (llvm::all_of(V->users(), [&](const User *U) { in isEphemeralValueOf()
469 if (V == E) in isEphemeralValueOf()
472 if (V == I || (isa<Instruction>(V) && in isEphemeralValueOf()
473 !cast<Instruction>(V)->mayHaveSideEffects() && in isEphemeralValueOf()
474 !cast<Instruction>(V)->isTerminator())) { in isEphemeralValueOf()
475 EphValues.insert(V); in isEphemeralValueOf()
476 if (const User *U = dyn_cast<User>(V)) in isEphemeralValueOf()
477 append_range(WorkSet, U->operands()); in isEphemeralValueOf()
488 return CI->isAssumeLikeIntrinsic(); in isAssumeLikeIntrinsic()
505 if (Inv->getParent() == CxtI->getParent()) { in isValidAssumeForContext()
508 if (Inv->comesBefore(CxtI)) in isValidAssumeForContext()
511 // Don't let an assume affect itself - this would cause the problems in isValidAssumeForContext()
521 // to avoid a compile-time explosion. This limit is chosen arbitrarily, so in isValidAssumeForContext()
523 auto Range = make_range(CxtI->getIterator(), Inv->getIterator()); in isValidAssumeForContext()
532 if (DT->dominates(Inv, CxtI)) in isValidAssumeForContext()
534 } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) { in isValidAssumeForContext()
542 // TODO: cmpExcludesZero misses many cases where `RHS` is non-constant but
543 // we still have enough information about `RHS` to conclude non-zero. For
548 // v u> y implies v != 0. in cmpExcludesZero()
552 // Special-case v != 0 to also handle v != null. in cmpExcludesZero()
556 // All other predicates - rely on generic ConstantRange handling. in cmpExcludesZero()
558 auto Zero = APInt::getZero(RHS->getType()->getScalarSizeInBits()); in cmpExcludesZero()
568 for (unsigned ElemIdx = 0, NElem = VC->getNumElements(); ElemIdx < NElem; in cmpExcludesZero()
571 Pred, VC->getElementAsAPInt(ElemIdx)); in cmpExcludesZero()
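
Aside: the first special case above can be checked exhaustively over small unsigned values (standalone C++, hypothetical ranges): v u> y forces v to be non-zero, because no unsigned value is below zero:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t y = 0; y < 64; ++y)
        for (uint32_t v = 0; v < 64; ++v)
          if (v > y)        // the predicate "v u> y"
            assert(v != 0); // excludes zero for every possible y
    }
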
578 static bool isKnownNonZeroFromAssume(const Value *V, const SimplifyQuery &Q) { in isKnownNonZeroFromAssume() argument
579 // Use of assumptions is context-sensitive. If we don't have a context, we in isKnownNonZeroFromAssume()
584 for (AssumptionCache::ResultElem &Elem : Q.AC->assumptionsFor(V)) { in isKnownNonZeroFromAssume()
589 assert(I->getFunction() == Q.CxtI->getFunction() && in isKnownNonZeroFromAssume()
593 if (!V->getType()->isPointerTy()) in isKnownNonZeroFromAssume()
596 *I, I->bundle_op_info_begin()[Elem.Index])) { in isKnownNonZeroFromAssume()
597 if (RK.WasOn == V && in isKnownNonZeroFromAssume()
600 !NullPointerIsDefined(Q.CxtI->getFunction(), in isKnownNonZeroFromAssume()
601 V->getType()->getPointerAddressSpace()))) && in isKnownNonZeroFromAssume()
614 auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V))); in isKnownNonZeroFromAssume()
615 if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS)))) in isKnownNonZeroFromAssume()
625 static void computeKnownBitsFromCmp(const Value *V, CmpInst::Predicate Pred, in computeKnownBitsFromCmp() argument
628 if (RHS->getType()->isPointerTy()) { in computeKnownBitsFromCmp()
631 if (LHS == V && match(RHS, m_Zero())) { in computeKnownBitsFromCmp()
652 m_CombineOr(m_Specific(V), m_PtrToIntSameSize(Q.DL, m_Specific(V))); in computeKnownBitsFromCmp()
659 // assume(V = C) in computeKnownBitsFromCmp()
662 // assume(V & Mask = C) in computeKnownBitsFromCmp()
665 // For one bits in Mask, we can propagate bits from C to V. in computeKnownBitsFromCmp()
669 // assume(V | Mask = C) in computeKnownBitsFromCmp()
671 // For zero bits in Mask, we can propagate bits from C to V. in computeKnownBitsFromCmp()
675 // assume(V ^ Mask = C) in computeKnownBitsFromCmp()
678 // Equivalent to assume(V == Mask ^ C) in computeKnownBitsFromCmp()
680 // assume(V << ShAmt = C) in computeKnownBitsFromCmp()
684 // bits in V shifted to the right by ShAmt. in computeKnownBitsFromCmp()
689 // assume(V >> ShAmt = C) in computeKnownBitsFromCmp()
694 // bits in V shifted to the left by ShAmt. in computeKnownBitsFromCmp()
700 // assume (V & B != 0) where B is a power of 2 in computeKnownBitsFromCmp()
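
Aside: a standalone model of the mask-propagation rules above (plain C++; the Known pair is a hypothetical stand-in for LLVM's KnownBits). From (v & mask) == c, each 1 bit of mask pins the matching bit of v:

    #include <cassert>
    #include <cstdint>

    struct Known {
      uint32_t Zero = 0, One = 0; // bits of v known to be 0 / known to be 1
    };

    Known fromMaskedEq(uint32_t mask, uint32_t c) {
      Known K;
      K.One = mask & c;   // mask bit 1, c bit 1 -> v bit is 1
      K.Zero = mask & ~c; // mask bit 1, c bit 0 -> v bit is 0
      return K;
    }

    int main() {
      Known K = fromMaskedEq(0x0F, 0x05);
      assert(K.One == 0x05 && K.Zero == 0x0A); // low nibble of v is 0101
    }
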
716 // X & Y u> C -> X u> C && Y u> C in computeKnownBitsFromCmp()
717 // X nuw- Y u> C -> X u> C in computeKnownBitsFromCmp()
724 // X | Y u< C -> X u< C && Y u< C in computeKnownBitsFromCmp()
725 // X nuw+ Y u< C -> X u< C && Y u< C in computeKnownBitsFromCmp()
729 (*C - (Pred == ICmpInst::ICMP_ULT)).countLeadingZeros()); in computeKnownBitsFromCmp()
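
Aside: the countLeadingZeros clamp above can be sanity-checked standalone (C++20 <bit>, arbitrary constant). x u< c implies x <= c-1, so x inherits every leading zero bit of c-1; for u<= the code uses c itself, hence the `Pred == ICMP_ULT` adjustment:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t c = 0x1000;
      uint32_t knownZero = ~0u << (32 - std::countl_zero(c - 1)); // high bits
      for (uint32_t x = 0; x < c; ++x)
        assert((x & knownZero) == 0);
    }
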
737 static void computeKnownBitsFromICmpCond(const Value *V, ICmpInst *Cmp, in computeKnownBitsFromICmpCond() argument
741 Invert ? Cmp->getInversePredicate() : Cmp->getPredicate(); in computeKnownBitsFromICmpCond()
742 Value *LHS = Cmp->getOperand(0); in computeKnownBitsFromICmpCond()
743 Value *RHS = Cmp->getOperand(1); in computeKnownBitsFromICmpCond()
745 // Handle icmp pred (trunc V), C in computeKnownBitsFromICmpCond()
746 if (match(LHS, m_Trunc(m_Specific(V)))) { in computeKnownBitsFromICmpCond()
747 KnownBits DstKnown(LHS->getType()->getScalarSizeInBits()); in computeKnownBitsFromICmpCond()
753 computeKnownBitsFromCmp(V, Pred, LHS, RHS, Known, SQ); in computeKnownBitsFromICmpCond()
756 static void computeKnownBitsFromCond(const Value *V, Value *Cond, in computeKnownBitsFromCond() argument
764 computeKnownBitsFromCond(V, A, Known2, Depth + 1, SQ, Invert); in computeKnownBitsFromCond()
765 computeKnownBitsFromCond(V, B, Known3, Depth + 1, SQ, Invert); in computeKnownBitsFromCond()
775 computeKnownBitsFromICmpCond(V, Cmp, Known, SQ, Invert); in computeKnownBitsFromCond()
778 void llvm::computeKnownBitsFromContext(const Value *V, KnownBits &Known, in computeKnownBitsFromContext() argument
781 if (Q.CC && Q.CC->AffectedValues.contains(V)) in computeKnownBitsFromContext()
782 computeKnownBitsFromCond(V, Q.CC->Cond, Known, Depth, Q, Q.CC->Invert); in computeKnownBitsFromContext()
789 for (BranchInst *BI : Q.DC->conditionsFor(V)) { in computeKnownBitsFromContext()
790 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0)); in computeKnownBitsFromContext()
791 if (Q.DT->dominates(Edge0, Q.CxtI->getParent())) in computeKnownBitsFromContext()
792 computeKnownBitsFromCond(V, BI->getCondition(), Known, Depth, Q, in computeKnownBitsFromContext()
795 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1)); in computeKnownBitsFromContext()
796 if (Q.DT->dominates(Edge1, Q.CxtI->getParent())) in computeKnownBitsFromContext()
797 computeKnownBitsFromCond(V, BI->getCondition(), Known, Depth, Q, in computeKnownBitsFromContext()
813 for (AssumptionCache::ResultElem &Elem : Q.AC->assumptionsFor(V)) { in computeKnownBitsFromContext()
818 assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() && in computeKnownBitsFromContext()
822 if (!V->getType()->isPointerTy()) in computeKnownBitsFromContext()
825 *I, I->bundle_op_info_begin()[Elem.Index])) { in computeKnownBitsFromContext()
826 if (RK.WasOn == V && RK.AttrKind == Attribute::Alignment && in computeKnownBitsFromContext()
838 Value *Arg = I->getArgOperand(0); in computeKnownBitsFromContext()
840 if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) { in computeKnownBitsFromContext()
846 if (match(Arg, m_Not(m_Specific(V))) && in computeKnownBitsFromContext()
865 computeKnownBitsFromICmpCond(V, Cmp, Known, Q, /*Invert=*/false); in computeKnownBitsFromContext()
875 /// non-constant shift amount. Known is the output of this function. Known2 is a
876 /// pre-allocated temporary with the same bit width as Known and on return
878 /// operator-specific function that, given the known-bits and a shift amount,
879 /// computes the implied known-bits of the shift operator's result respectively
886 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromShiftOperator()
887 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromShiftOperator()
888 // To limit compile-time impact, only query isKnownNonZero() if we know at in computeKnownBitsFromShiftOperator()
893 isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth + 1)); in computeKnownBitsFromShiftOperator()
907 switch (I->getOpcode()) { in getKnownBitsFromAndXorOr()
911 // and(x, -x) is a common idiom that will clear all but the lowest set in getKnownBitsFromAndXorOr()
915 // this pattern. Try to match and(x, and(-x, y)) / and(and(x, y), -x). in getKnownBitsFromAndXorOr()
917 // -(-x) == x so using whichever (LHS/RHS) gets us a better result. in getKnownBitsFromAndXorOr()
929 // xor(x, x-1) is a common idiom that will clear all but the lowest set in getKnownBitsFromAndXorOr()
932 // TODO: xor(x, x-1) is often rewritten as xor(x, x-C) where C != in getKnownBitsFromAndXorOr()
933 // -1 but for the purpose of demanded bits (xor(x, x-C) & in getKnownBitsFromAndXorOr()
934 // Demanded) == (xor(x, x-1) & Demanded). Extend the xor pattern in getKnownBitsFromAndXorOr()
935 // to use arbitrary C if xor(x, x-C) is the same as xor(x, x-1). in getKnownBitsFromAndXorOr()
938 const KnownBits &XBits = I->getOperand(0) == X ? KnownLHS : KnownRHS; in getKnownBitsFromAndXorOr()
946 // and(x, add (x, -1)) is a common idiom that always clears the low bit; in getKnownBitsFromAndXorOr()
947 // xor/or(x, add (x, -1)) is an idiom that will always set the low bit. in getKnownBitsFromAndXorOr()
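
Aside: all three idioms above are quick to verify standalone (plain C++, arbitrary non-zero test values):

    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t x : {1u, 2u, 12u, 0x80000000u, 0xDEADBEEFu}) {
        uint32_t low = x & ~(x - 1);                // lowest set bit of x
        assert((x & (x - 1) & low) == 0);           // and(x, x-1) clears it
        assert((x ^ (x - 1)) == (low | (low - 1))); // xor(x, x-1): ones up to it
        assert(((x | (x - 1)) & 1u) == 1u);         // or(x, x-1) sets the low bit
      }
    }
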
974 getHorizDemandedEltsForFirstOperand(Q.DL.getTypeSizeInBits(I->getType()), in computeKnownBitsForHorizontalOperation()
986 return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS); in computeKnownBitsForHorizontalOperation()
988 return ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS); in computeKnownBitsForHorizontalOperation()
990 return ComputeForSingleOpFunc(I->getOperand(0), DemandedEltsLHS) in computeKnownBitsForHorizontalOperation()
991 .intersectWith(ComputeForSingleOpFunc(I->getOperand(1), DemandedEltsRHS)); in computeKnownBitsForHorizontalOperation()
1000 auto *FVTy = dyn_cast<FixedVectorType>(I->getType()); in analyzeKnownBitsFromAndXorOr()
1002 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in analyzeKnownBitsFromAndXorOr()
1009 Attribute Attr = F->getFnAttribute(Attribute::VScaleRange); in getVScaleRange()
1010 // Without vscale_range, we only know that vscale is non-zero. in getVScaleRange()
1068 switch (I->getOpcode()) { in computeKnownBitsFromOperator()
1076 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1077 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1082 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1083 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1088 computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1089 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1095 computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts, in computeKnownBitsFromOperator()
1100 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1101 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1107 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1108 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1117 adjustKnownBitsForSelectArm(Res, I->getOperand(0), Arm, Invert, Depth, Q); in computeKnownBitsFromOperator()
1122 ComputeForArm(I->getOperand(1), /*Invert=*/false) in computeKnownBitsFromOperator()
1123 .intersectWith(ComputeForArm(I->getOperand(2), /*Invert=*/true)); in computeKnownBitsFromOperator()
1139 Type *SrcTy = I->getOperand(0)->getType(); in computeKnownBitsFromOperator()
1144 Type *ScalarTy = SrcTy->getScalarType(); in computeKnownBitsFromOperator()
1145 SrcBitWidth = ScalarTy->isPointerTy() ? in computeKnownBitsFromOperator()
1151 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1153 Inst && Inst->hasNonNeg() && !Known.isNegative()) in computeKnownBitsFromOperator()
1159 Type *SrcTy = I->getOperand(0)->getType(); in computeKnownBitsFromOperator()
1160 if (SrcTy->isIntOrPtrTy() && in computeKnownBitsFromOperator()
1163 !I->getType()->isVectorTy()) { in computeKnownBitsFromOperator()
1164 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1168 const Value *V; in computeKnownBitsFromOperator() local
1170 if (match(I, m_ElementWiseBitCast(m_Value(V))) && in computeKnownBitsFromOperator()
1171 V->getType()->isFPOrFPVectorTy()) { in computeKnownBitsFromOperator()
1172 Type *FPType = V->getType()->getScalarType(); in computeKnownBitsFromOperator()
1174 computeKnownFPClass(V, DemandedElts, fcAllFlags, Depth + 1, Q); in computeKnownBitsFromOperator()
1187 APFloat::getInf(FPType->getFltSemantics()).bitcastToAPInt())); in computeKnownBitsFromOperator()
1191 APInt::getZero(FPType->getScalarSizeInBits()))); in computeKnownBitsFromOperator()
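
Aside: the infinity case above leans on the fixed IEEE-754 encoding, confirmable standalone (C++20 bit_cast): +inf maps to exactly one integer pattern, so a known infinity class pins every bit except, possibly, the sign:

    #include <bit>
    #include <cassert>
    #include <cstdint>
    #include <limits>

    int main() {
      uint32_t bits =
          std::bit_cast<uint32_t>(std::numeric_limits<float>::infinity());
      assert(bits == 0x7F800000u); // sign 0, exponent all ones, mantissa 0
    }
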
1209 if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() || in computeKnownBitsFromOperator()
1210 !I->getType()->isIntOrIntVectorTy() || in computeKnownBitsFromOperator()
1211 isa<ScalableVectorType>(I->getType())) in computeKnownBitsFromOperator()
1215 // Examples: v4i32 -> v2i64, v3i8 -> i24 in computeKnownBitsFromOperator()
1216 unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits(); in computeKnownBitsFromOperator()
1222 // For this bitcast, each demanded element of the output is sub-divided in computeKnownBitsFromOperator()
1225 // bits for each sub-element sequentially. This is done by shifting the in computeKnownBitsFromOperator()
1226 // one-set-bit demanded elements parameter across the sub-elements for in computeKnownBitsFromOperator()
1230 // The known bits of each sub-element are then inserted into place in computeKnownBitsFromOperator()
1242 computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc, in computeKnownBitsFromOperator()
1244 unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i; in computeKnownBitsFromOperator()
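
Aside: the ShiftElt endianness handling above mirrors how sub-elements land in memory; a standalone C++ probe (hypothetical values) shows element 0 of a <2 x i8> supplying the low byte of the i16 on little-endian hosts and the high byte on big-endian ones:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      uint8_t elts[2] = {0xAB, 0xCD};
      uint16_t wide;
      std::memcpy(&wide, elts, sizeof(wide)); // the bitcast, in C++ terms
      if (wide == 0xCDAB)
        assert((wide & 0xFF) == elts[0]); // little endian: elt 0 -> low bits
      else
        assert((wide >> 8) == elts[0]);   // big endian: elt 0 -> high bits
    }
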
1252 unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits(); in computeKnownBitsFromOperator()
1255 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1270 // Trailing zeros of a left-shifted constant never decrease. in computeKnownBitsFromOperator()
1272 if (match(I->getOperand(0), m_APInt(C))) in computeKnownBitsFromOperator()
1273 Known.Zero.setLowBits(C->countr_zero()); in computeKnownBitsFromOperator()
1284 // Leading zeros of a right-shifted constant never decrease. in computeKnownBitsFromOperator()
1286 if (match(I->getOperand(0), m_APInt(C))) in computeKnownBitsFromOperator()
1287 Known.Zero.setHighBits(C->countl_zero()); in computeKnownBitsFromOperator()
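
Aside: both shift facts above check out exhaustively for a sample constant (C++20 <bit>): shifting left cannot disturb low zero bits, shifting right cannot disturb high ones:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t c = 0x00F0A500; // arbitrary constant, zeros at both ends
      for (unsigned s = 0; s < 32; ++s) {
        assert(std::countr_zero(c << s) >= std::countr_zero(c)); // shl case
        assert(std::countl_zero(c >> s) >= std::countl_zero(c)); // lshr case
      }
    }
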
1303 computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW, NUW, in computeKnownBitsFromOperator()
1310 computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW, NUW, in computeKnownBitsFromOperator()
1315 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1316 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1321 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1322 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1326 Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign())); in computeKnownBitsFromOperator()
1331 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1337 for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) { in computeKnownBitsFromOperator()
1338 // TrailZ can only become smaller, short-circuit if we hit zero. in computeKnownBitsFromOperator()
1342 Value *Index = I->getOperand(i); in computeKnownBitsFromOperator()
1346 if (CIndex && CIndex->isZeroValue()) in computeKnownBitsFromOperator()
1355 if (CIndex->getType()->isVectorTy()) in computeKnownBitsFromOperator()
1356 Index = CIndex->getSplatValue(); in computeKnownBitsFromOperator()
1358 unsigned Idx = cast<ConstantInt>(Index)->getZExtValue(); in computeKnownBitsFromOperator()
1360 uint64_t Offset = SL->getElementOffset(Idx); in computeKnownBitsFromOperator()
1367 if (!IndexedTy->isSized()) { in computeKnownBitsFromOperator()
1372 unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits(); in computeKnownBitsFromOperator()
1397 // to the language reference we need to sign-extend or truncate them in computeKnownBitsFromOperator()
1418 // Handle the case of a simple two-predecessor recurrence PHI. in computeKnownBitsFromOperator()
1421 unsigned Opcode = BO->getOpcode(); in computeKnownBitsFromOperator()
1428 BO->getOperand(0) == I) { in computeKnownBitsFromOperator()
1473 unsigned OpNum = P->getOperand(0) == R ? 0 : 1; in computeKnownBitsFromOperator()
1474 Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator(); in computeKnownBitsFromOperator()
1475 Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator(); in computeKnownBitsFromOperator()
1499 // (add non-negative, non-negative) --> non-negative in computeKnownBitsFromOperator()
1500 // (add negative, negative) --> negative in computeKnownBitsFromOperator()
1508 // (sub nsw non-negative, negative) --> non-negative in computeKnownBitsFromOperator()
1509 // (sub nsw negative, non-negative) --> negative in computeKnownBitsFromOperator()
1510 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) { in computeKnownBitsFromOperator()
1517 // (mul nsw non-negative, non-negative) --> non-negative in computeKnownBitsFromOperator()
1527 // Unreachable blocks may have zero-operand PHI nodes. in computeKnownBitsFromOperator()
1528 if (P->getNumIncomingValues() == 0) in computeKnownBitsFromOperator()
1533 if (Depth < MaxAnalysisRecursionDepth - 1 && Known.isUnknown()) { in computeKnownBitsFromOperator()
1535 if (isa_and_nonnull<UndefValue>(P->hasConstantValue())) in computeKnownBitsFromOperator()
1540 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) { in computeKnownBitsFromOperator()
1541 Value *IncValue = P->getIncomingValue(u); in computeKnownBitsFromOperator()
1550 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator(); in computeKnownBitsFromOperator()
1559 MaxAnalysisRecursionDepth - 1, RecQ); in computeKnownBitsFromOperator()
1572 if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) { in computeKnownBitsFromOperator()
1574 if (FalseSucc == P->getParent()) in computeKnownBitsFromOperator()
1613 if (std::optional<ConstantRange> Range = CB->getRange()) in computeKnownBitsFromOperator()
1614 Known = Known.unionWith(Range->toKnownBits()); in computeKnownBitsFromOperator()
1616 if (const Value *RV = CB->getReturnedArgOperand()) { in computeKnownBitsFromOperator()
1617 if (RV->getType() == I->getType()) { in computeKnownBitsFromOperator()
1629 switch (II->getIntrinsicID()) { in computeKnownBitsFromOperator()
1633 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1634 bool IntMinIsPoison = match(II->getArgOperand(1), m_One()); in computeKnownBitsFromOperator()
1639 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1644 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1649 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1653 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) in computeKnownBitsFromOperator()
1654 PossibleLZ = std::min(PossibleLZ, BitWidth - 1); in computeKnownBitsFromOperator()
1660 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1664 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext())) in computeKnownBitsFromOperator()
1665 PossibleTZ = std::min(PossibleTZ, BitWidth - 1); in computeKnownBitsFromOperator()
1671 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1684 if (!match(I->getOperand(2), m_APInt(SA))) in computeKnownBitsFromOperator()
1688 uint64_t ShiftAmt = SA->urem(BitWidth); in computeKnownBitsFromOperator()
1689 if (II->getIntrinsicID() == Intrinsic::fshr) in computeKnownBitsFromOperator()
1690 ShiftAmt = BitWidth - ShiftAmt; in computeKnownBitsFromOperator()
1693 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1694 computeKnownBits(I->getOperand(1), DemandedElts, Known3, Depth + 1, Q); in computeKnownBitsFromOperator()
1697 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt); in computeKnownBitsFromOperator()
1699 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt); in computeKnownBitsFromOperator()
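
Aside: a plain-C++ model of the funnel shift handled above (hypothetical helper, not the intrinsic itself): fshl takes x shifted left with y's top bits shifted in from the right, which is exactly why known zeros and ones combine via the two OR expressions:

    #include <cassert>
    #include <cstdint>

    uint32_t fshl32(uint32_t x, uint32_t y, unsigned sa) {
      sa %= 32; // the urem on the shift amount, as in the code above
      return sa == 0 ? x : (x << sa) | (y >> (32 - sa));
    }

    int main() {
      assert(fshl32(0x000000FF, 0xAB000000, 8) == 0x0000FFAB);
    }
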
1703 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1704 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1708 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1709 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1713 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1714 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1718 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1719 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1724 computeKnownBits(I->getOperand(0), DemandedElts.reverseBits(), Known, in computeKnownBitsFromOperator()
1735 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1738 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1742 auto *VecTy = cast<VectorType>(I->getOperand(0)->getType()); in computeKnownBitsFromOperator()
1744 bool EvenCnt = VecTy->getElementCount().isKnownEven(); in computeKnownBitsFromOperator()
1748 if (VecTy->isScalableTy() || EvenCnt) in computeKnownBitsFromOperator()
1753 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1754 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1758 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1759 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1763 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1764 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1768 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1769 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1773 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1775 const Value *Mask = I->getOperand(1); in computeKnownBitsFromOperator()
1776 Known2 = KnownBits(Mask->getType()->getScalarSizeInBits()); in computeKnownBitsFromOperator()
1778 // TODO: 1-extend would be more precise. in computeKnownBitsFromOperator()
1785 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1786 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1792 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1793 computeKnownBits(I->getOperand(1), DemandedElts, Known2, Depth + 1, Q); in computeKnownBitsFromOperator()
1839 bool HasAVL = II->getIntrinsicID() == Intrinsic::riscv_vsetvli; in computeKnownBitsFromOperator()
1840 const ConstantRange Range = getVScaleRange(II->getFunction(), BitWidth); in computeKnownBitsFromOperator()
1842 cast<ConstantInt>(II->getArgOperand(HasAVL))->getZExtValue()); in computeKnownBitsFromOperator()
1844 cast<ConstantInt>(II->getArgOperand(1 + HasAVL))->getZExtValue()); in computeKnownBitsFromOperator()
1846 Range.getUnsignedMax().getZExtValue() * RISCV::RVVBitsPerBlock; in computeKnownBitsFromOperator()
1851 if (auto *CI = dyn_cast<ConstantInt>(II->getArgOperand(0))) in computeKnownBitsFromOperator()
1852 MaxVL = std::min(MaxVL, CI->getZExtValue()); in computeKnownBitsFromOperator()
1860 if (!II->getParent() || !II->getFunction()) in computeKnownBitsFromOperator()
1863 Known = getVScaleRange(II->getFunction(), BitWidth).toKnownBits(); in computeKnownBitsFromOperator()
1887 const Value *LHS = Shuf->getOperand(0); in computeKnownBitsFromOperator()
1894 const Value *RHS = Shuf->getOperand(1); in computeKnownBitsFromOperator()
1901 if (isa<ScalableVectorType>(I->getType())) { in computeKnownBitsFromOperator()
1905 const Value *Vec = I->getOperand(0); in computeKnownBitsFromOperator()
1906 const Value *Elt = I->getOperand(1); in computeKnownBitsFromOperator()
1907 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); in computeKnownBitsFromOperator()
1912 if (CIdx && CIdx->getValue().ult(NumElts)) { in computeKnownBitsFromOperator()
1913 DemandedVecElts.clearBit(CIdx->getZExtValue()); in computeKnownBitsFromOperator()
1914 NeedsElt = DemandedElts[CIdx->getZExtValue()]; in computeKnownBitsFromOperator()
1933 // Look through extract element. If the index is non-constant or in computeKnownBitsFromOperator()
1934 // out-of-range demand all elements, otherwise just the extracted element. in computeKnownBitsFromOperator()
1935 const Value *Vec = I->getOperand(0); in computeKnownBitsFromOperator()
1936 const Value *Idx = I->getOperand(1); in computeKnownBitsFromOperator()
1938 if (isa<ScalableVectorType>(Vec->getType())) { in computeKnownBitsFromOperator()
1943 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements(); in computeKnownBitsFromOperator()
1945 if (CIdx && CIdx->getValue().ult(NumElts)) in computeKnownBitsFromOperator()
1946 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); in computeKnownBitsFromOperator()
1951 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) { in computeKnownBitsFromOperator()
1953 if (EVI->getNumIndices() != 1) break; in computeKnownBitsFromOperator()
1954 if (EVI->getIndices()[0] == 0) { in computeKnownBitsFromOperator()
1955 switch (II->getIntrinsicID()) { in computeKnownBitsFromOperator()
1960 true, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false, in computeKnownBitsFromOperator()
1966 false, II->getArgOperand(0), II->getArgOperand(1), /*NSW=*/false, in computeKnownBitsFromOperator()
1971 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false, in computeKnownBitsFromOperator()
1979 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, in computeKnownBitsFromOperator()
1981 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); in computeKnownBitsFromOperator()
1986 /// Determine which bits of V are known to be either zero or one and return
1988 KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts, in computeKnownBits() argument
1990 KnownBits Known(getBitWidth(V->getType(), Q.DL)); in computeKnownBits()
1991 ::computeKnownBits(V, DemandedElts, Known, Depth, Q); in computeKnownBits()
1995 /// Determine which bits of V are known to be either zero or one and return
1997 KnownBits llvm::computeKnownBits(const Value *V, unsigned Depth, in computeKnownBits() argument
1999 KnownBits Known(getBitWidth(V->getType(), Q.DL)); in computeKnownBits()
2000 computeKnownBits(V, Known, Depth, Q); in computeKnownBits()
2004 /// Determine which bits of V are known to be either zero or one and return
2010 /// optimized based on the contradictory assumption that it is non-zero.
2016 /// where V is a vector, known zero, and known one values are the
2019 void computeKnownBits(const Value *V, const APInt &DemandedElts, in computeKnownBits() argument
2028 assert(V && "No Value?"); in computeKnownBits()
2032 Type *Ty = V->getType(); in computeKnownBits()
2035 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) && in computeKnownBits()
2040 FVTy->getNumElements() == DemandedElts.getBitWidth() && in computeKnownBits()
2047 Type *ScalarTy = Ty->getScalarType(); in computeKnownBits()
2048 if (ScalarTy->isPointerTy()) { in computeKnownBits()
2050 "V and Known should have same BitWidth"); in computeKnownBits()
2053 "V and Known should have same BitWidth"); in computeKnownBits()
2058 if (match(V, m_APInt(C))) { in computeKnownBits()
2063 // Null and aggregate-zero are all-zeros. in computeKnownBits()
2064 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) { in computeKnownBits()
2070 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) { in computeKnownBits()
2071 assert(!isa<ScalableVectorType>(V->getType())); in computeKnownBits()
2075 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) { in computeKnownBits()
2078 APInt Elt = CDV->getElementAsAPInt(i); in computeKnownBits()
2087 if (const auto *CV = dyn_cast<ConstantVector>(V)) { in computeKnownBits()
2088 assert(!isa<ScalableVectorType>(V->getType())); in computeKnownBits()
2092 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { in computeKnownBits()
2095 Constant *Element = CV->getAggregateElement(i); in computeKnownBits()
2103 const APInt &Elt = ElementCI->getValue(); in computeKnownBits()
2116 if (isa<UndefValue>(V)) in computeKnownBits()
2121 assert(!isa<ConstantData>(V) && "Unhandled constant data!"); in computeKnownBits()
2123 if (const auto *A = dyn_cast<Argument>(V)) in computeKnownBits()
2124 if (std::optional<ConstantRange> Range = A->getRange()) in computeKnownBits()
2125 Known = Range->toKnownBits(); in computeKnownBits()
2131 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has in computeKnownBits()
2133 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) { in computeKnownBits()
2134 if (!GA->isInterposable()) in computeKnownBits()
2135 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q); in computeKnownBits()
2139 if (const Operator *I = dyn_cast<Operator>(V)) in computeKnownBits()
2141 else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { in computeKnownBits()
2142 if (std::optional<ConstantRange> CR = GV->getAbsoluteSymbolRange()) in computeKnownBits()
2143 Known = CR->toKnownBits(); in computeKnownBits()
2146 // Aligned pointers have trailing zeros - refine Known.Zero set in computeKnownBits()
2147 if (isa<PointerType>(V->getType())) { in computeKnownBits()
2148 Align Alignment = V->getPointerAlignment(Q.DL); in computeKnownBits()
2156 computeKnownBitsFromContext(V, Known, Depth, Q); in computeKnownBits()
2169 for (const Use &U : PN->operands()) { in isPowerOfTwoRecurrence()
2173 Q.CxtI = PN->getIncomingBlock(U)->getTerminator(); in isPowerOfTwoRecurrence()
2181 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step) in isPowerOfTwoRecurrence()
2184 Q.CxtI = BO->getParent()->getTerminator(); in isPowerOfTwoRecurrence()
2185 switch (BO->getOpcode()) { in isPowerOfTwoRecurrence()
2199 // If OrZero is false, we cannot guarantee the induction variable is non-zero after in isPowerOfTwoRecurrence()
2220 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, in isKnownToBeAPowerOfTwo() argument
2224 if (isa<Constant>(V)) in isKnownToBeAPowerOfTwo()
2225 return OrZero ? match(V, m_Power2OrZero()) : match(V, m_Power2()); in isKnownToBeAPowerOfTwo()
2228 if (OrZero && V->getType()->getScalarSizeInBits() == 1) in isKnownToBeAPowerOfTwo()
2231 auto *I = dyn_cast<Instruction>(V); in isKnownToBeAPowerOfTwo()
2235 if (Q.CxtI && match(V, m_VScale())) { in isKnownToBeAPowerOfTwo()
2236 const Function *F = Q.CxtI->getFunction(); in isKnownToBeAPowerOfTwo()
2237 // The vscale_range indicates vscale is a power-of-two. in isKnownToBeAPowerOfTwo()
2238 return F->hasFnAttribute(Attribute::VScaleRange); in isKnownToBeAPowerOfTwo()
2255 switch (I->getOpcode()) { in isKnownToBeAPowerOfTwo()
2257 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2259 return OrZero && isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2262 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2266 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2270 return isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2273 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) && in isKnownToBeAPowerOfTwo()
2274 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q) && in isKnownToBeAPowerOfTwo()
2279 (isKnownToBeAPowerOfTwo(I->getOperand(1), /*OrZero*/ true, Depth, Q) || in isKnownToBeAPowerOfTwo()
2280 isKnownToBeAPowerOfTwo(I->getOperand(0), /*OrZero*/ true, Depth, Q))) in isKnownToBeAPowerOfTwo()
2282 // X & (-X) is always a power of two or zero. in isKnownToBeAPowerOfTwo()
2283 if (match(I->getOperand(0), m_Neg(m_Specific(I->getOperand(1)))) || in isKnownToBeAPowerOfTwo()
2284 match(I->getOperand(1), m_Neg(m_Specific(I->getOperand(0))))) in isKnownToBeAPowerOfTwo()
2285 return OrZero || isKnownNonZero(I->getOperand(0), Q, Depth); in isKnownToBeAPowerOfTwo()
2288 // Adding a power-of-two or zero to the same power-of-two or zero yields in isKnownToBeAPowerOfTwo()
2289 // either the original power-of-two, a larger power-of-two or zero. in isKnownToBeAPowerOfTwo()
2290 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); in isKnownToBeAPowerOfTwo()
2293 if (match(I->getOperand(0), in isKnownToBeAPowerOfTwo()
2294 m_c_And(m_Specific(I->getOperand(1)), m_Value())) && in isKnownToBeAPowerOfTwo()
2295 isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q)) in isKnownToBeAPowerOfTwo()
2297 if (match(I->getOperand(1), in isKnownToBeAPowerOfTwo()
2298 m_c_And(m_Specific(I->getOperand(0)), m_Value())) && in isKnownToBeAPowerOfTwo()
2299 isKnownToBeAPowerOfTwo(I->getOperand(0), OrZero, Depth, Q)) in isKnownToBeAPowerOfTwo()
2302 unsigned BitWidth = V->getType()->getScalarSizeInBits(); in isKnownToBeAPowerOfTwo()
2304 computeKnownBits(I->getOperand(0), LHSBits, Depth, Q); in isKnownToBeAPowerOfTwo()
2307 computeKnownBits(I->getOperand(1), RHSBits, Depth, Q); in isKnownToBeAPowerOfTwo()
2308 // If i8 V is a power of two or zero: in isKnownToBeAPowerOfTwo()
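
Aside: the nuw/nsw add rule above is easy to spot-check standalone (C++20, widening to model no-unsigned-wrap): a power of two plus the same power of two is the next power of two, and adding zero preserves the input:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    int main() {
      for (uint32_t p = 1; p != 0; p <<= 1) {
        uint64_t s = uint64_t(p) + p;      // widened: nuw means no wrap
        assert(std::has_single_bit(s));    // still a single power of two
        assert(std::has_single_bit(uint64_t(p) + 0)); // p + 0 is unchanged
      }
    }
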
2325 return isKnownToBeAPowerOfTwo(I->getOperand(1), OrZero, Depth, Q) && in isKnownToBeAPowerOfTwo()
2326 isKnownToBeAPowerOfTwo(I->getOperand(2), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2340 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); in isKnownToBeAPowerOfTwo()
2341 return llvm::all_of(PN->operands(), [&](const Use &U) { in isKnownToBeAPowerOfTwo()
2348 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); in isKnownToBeAPowerOfTwo()
2355 switch (II->getIntrinsicID()) { in isKnownToBeAPowerOfTwo()
2360 return isKnownToBeAPowerOfTwo(II->getArgOperand(1), OrZero, Depth, Q) && in isKnownToBeAPowerOfTwo()
2361 isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2363 // thus don't change pow2/non-pow2 status. in isKnownToBeAPowerOfTwo()
2366 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2370 if (II->getArgOperand(0) == II->getArgOperand(1)) in isKnownToBeAPowerOfTwo()
2371 return isKnownToBeAPowerOfTwo(II->getArgOperand(0), OrZero, Depth, Q); in isKnownToBeAPowerOfTwo()
2384 /// Test whether a GEP's result is known to be non-null.
2387 /// to be non-null.
2394 F = I->getFunction(); in isGEPKnownNonNull()
2398 if (!GEP->hasNoUnsignedWrap() && in isGEPKnownNonNull()
2399 !(GEP->isInBounds() && in isGEPKnownNonNull()
2400 !NullPointerIsDefined(F, GEP->getPointerAddressSpace()))) in isGEPKnownNonNull()
2403 // FIXME: Support vector-GEPs. in isGEPKnownNonNull()
2404 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP"); in isGEPKnownNonNull()
2406 // If the base pointer is non-null, we cannot walk to a null address with an in isGEPKnownNonNull()
2408 if (isKnownNonZero(GEP->getPointerOperand(), Q, Depth)) in isGEPKnownNonNull()
2411 // Walk the GEP operands and see if any operand introduces a non-zero offset. in isGEPKnownNonNull()
2416 // Struct types are easy -- they must always be indexed by a constant. in isGEPKnownNonNull()
2419 unsigned ElementIdx = OpC->getZExtValue(); in isGEPKnownNonNull()
2421 uint64_t ElementOffset = SL->getElementOffset(ElementIdx); in isGEPKnownNonNull()
2427 // If we have a zero-sized type, the index doesn't matter. Keep looping. in isGEPKnownNonNull()
2432 // increment Depth when just zipping down an all-constant GEP. in isGEPKnownNonNull()
2434 if (!OpC->isZero()) in isGEPKnownNonNull()
2439 // We post-increment Depth here because while isKnownNonZero increments it in isGEPKnownNonNull()
2454 static bool isKnownNonNullFromDominatingCondition(const Value *V, in isKnownNonNullFromDominatingCondition() argument
2457 assert(!isa<Constant>(V) && "Called for constant?"); in isKnownNonNullFromDominatingCondition()
2463 for (const auto *U : V->users()) { in isKnownNonNullFromDominatingCondition()
2470 // attributes may provide an answer about null-ness. in isKnownNonNullFromDominatingCondition()
2472 if (auto *CalledFunc = CB->getCalledFunction()) in isKnownNonNullFromDominatingCondition()
2473 for (const Argument &Arg : CalledFunc->args()) in isKnownNonNullFromDominatingCondition()
2474 if (CB->getArgOperand(Arg.getArgNo()) == V && in isKnownNonNullFromDominatingCondition()
2476 DT->dominates(CB, CtxI)) in isKnownNonNullFromDominatingCondition()
2480 if (V == getLoadStorePointerOperand(U)) { in isKnownNonNullFromDominatingCondition()
2482 if (!NullPointerIsDefined(I->getFunction(), in isKnownNonNullFromDominatingCondition()
2483 V->getType()->getPointerAddressSpace()) && in isKnownNonNullFromDominatingCondition()
2484 DT->dominates(I, CtxI)) in isKnownNonNullFromDominatingCondition()
2488 if ((match(U, m_IDiv(m_Value(), m_Specific(V))) || in isKnownNonNullFromDominatingCondition()
2489 match(U, m_IRem(m_Value(), m_Specific(V)))) && in isKnownNonNullFromDominatingCondition()
2496 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS)))) in isKnownNonNullFromDominatingCondition()
2509 for (const auto *CmpU : U->users()) { in isKnownNonNullFromDominatingCondition()
2523 for (const auto *CurrU : Curr->users()) in isKnownNonNullFromDominatingCondition()
2530 assert(BI->isConditional() && "uses a comparison!"); in isKnownNonNullFromDominatingCondition()
2533 BI->getSuccessor(NonNullIfTrue ? 0 : 1); in isKnownNonNullFromDominatingCondition()
2534 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor); in isKnownNonNullFromDominatingCondition()
2535 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent())) in isKnownNonNullFromDominatingCondition()
2538 DT->dominates(cast<Instruction>(Curr), CtxI)) { in isKnownNonNullFromDominatingCondition()
2552 const unsigned NumRanges = Ranges->getNumOperands() / 2; in rangeMetadataExcludesValue()
2556 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0)); in rangeMetadataExcludesValue()
2558 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1)); in rangeMetadataExcludesValue()
2559 ConstantRange Range(Lower->getValue(), Upper->getValue()); in rangeMetadataExcludesValue()
2567 /// non-zero starting value. These are common as induction variables.
2573 !match(Start, m_APInt(StartC)) || StartC->isZero()) in isNonZeroRecurrence()
2576 switch (BO->getOpcode()) { in isNonZeroRecurrence()
2578 // Starting from non-zero and stepping away from zero can never wrap back in isNonZeroRecurrence()
2580 return BO->hasNoUnsignedWrap() || in isNonZeroRecurrence()
2581 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) && in isNonZeroRecurrence()
2582 StartC->isNegative() == StepC->isNegative()); in isNonZeroRecurrence()
2584 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) && in isNonZeroRecurrence()
2585 match(Step, m_APInt(StepC)) && !StepC->isZero(); in isNonZeroRecurrence()
2587 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap(); in isNonZeroRecurrence()
2590 return BO->isExact(); in isNonZeroRecurrence()
2617 // If X and Y are both non-negative (as signed values) then their sum is not in isNonZeroAdd()
2638 // The sum of a non-negative number and a power of two is not zero. in isNonZeroAdd()
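
Aside: the power-of-two fact above is a one-line check (plain C++, 64-bit to keep the sum in range): a non-negative value plus a power of two is at least one, hence never zero:

    #include <cassert>
    #include <cstdint>

    int main() {
      for (int64_t p = 1; p <= (int64_t(1) << 40); p <<= 1)
        for (int64_t x : {int64_t(0), int64_t(1), int64_t(12345)})
          assert(x + p != 0); // x >= 0 and p >= 1, so x + p >= 1
    }
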
2653 // (X - (X != 0)) is non-zero in isNonZeroSub()
2654 // ((X != 0) - X) is non-zero in isNonZeroSub()
2660 if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Q, Depth)) in isNonZeroSub()
2669 // If X and Y are non-zero then so is X * Y as long as the multiplication in isNonZeroMul()
2675 // If either X or Y is odd, then if the other is non-zero the result can't in isNonZeroMul()
2686 // non-zero, then X * Y is non-zero. We can find sX and sY by just taking in isNonZeroMul()
2687 // the lowest known One of X and Y. If they are non-zero, the result in isNonZeroMul()
2688 // must be non-zero. We can check if LSB(X) * LSB(Y) != 0 by doing in isNonZeroMul()
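
Aside: the LSB argument above, made concrete (C++20 <bit>, hypothetical operands): with x = 2^i * odd and y = 2^j * odd, the product keeps bit i+j set whenever i+j fits in the bit width, so two non-zero nuw/nsw operands yield a non-zero product:

    #include <bit>
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0x50, y = 0x0C; // non-zero; trailing zeros 4 and 2
      unsigned i = std::countr_zero(x), j = std::countr_zero(y);
      assert(i + j < 32);                       // 2^(i+j) is representable
      assert(std::countr_zero(x * y) == i + j); // odd * odd stays odd
      assert(x * y != 0);
    }
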
2698 switch (I->getOpcode()) { in isNonZeroShift()
2711 switch (I->getOpcode()) { in isNonZeroShift()
2726 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); in isNonZeroShift()
2736 // non-zero then at least one non-zero bit must remain. in isNonZeroShift()
2737 if (InvShiftOp(KnownVal.Zero, NumBits - MaxShift) in isNonZeroShift()
2738 .eq(InvShiftOp(APInt::getAllOnes(NumBits), NumBits - MaxShift)) && in isNonZeroShift()
2739 isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth)) in isNonZeroShift()
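
Aside: the inverse-shift guard above amounts to this standalone observation (plain C++, shl case): if every bit that could be shifted out, i.e. the top MaxShift bits, is known zero and the value is non-zero, the shift cannot produce zero:

    #include <cassert>
    #include <cstdint>

    int main() {
      const unsigned MaxShift = 8;
      uint32_t x = 0x00ABCDEF; // top 8 bits zero, value non-zero
      for (unsigned s = 0; s <= MaxShift; ++s)
        assert((x << s) != 0); // no set bit falls off the top
    }
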
2748 unsigned BitWidth = getBitWidth(I->getType()->getScalarType(), Q.DL); in isKnownNonZeroFromOperator()
2749 switch (I->getOpcode()) { in isKnownNonZeroFromOperator()
2752 return I->getType()->getPointerAddressSpace() == 0; in isKnownNonZeroFromOperator()
2754 if (I->getType()->isPointerTy()) in isKnownNonZeroFromOperator()
2765 // %NonZero can have 2 non-zero i16 elements, but isKnownNonZero on a in isKnownNonZeroFromOperator()
2766 // <4 x i8> requires that all 4 i8 elements be non-zero which isn't in isKnownNonZeroFromOperator()
2781 // This is always safe as non-zero in the 4 i8 elements implies in isKnownNonZeroFromOperator()
2782 // non-zero in the combination of any two adjacent ones. Since i8 is a in isKnownNonZeroFromOperator()
2784 // This all implies the 2 i16 elements are non-zero. in isKnownNonZeroFromOperator()
2785 Type *FromTy = I->getOperand(0)->getType(); in isKnownNonZeroFromOperator()
2786 if ((FromTy->isIntOrIntVectorTy() || FromTy->isPtrOrPtrVectorTy()) && in isKnownNonZeroFromOperator()
2787 (BitWidth % getBitWidth(FromTy->getScalarType(), Q.DL)) == 0) in isKnownNonZeroFromOperator()
2788 return isKnownNonZero(I->getOperand(0), Q, Depth); in isKnownNonZeroFromOperator()
2794 if (!isa<ScalableVectorType>(I->getType()) && in isKnownNonZeroFromOperator()
2795 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= in isKnownNonZeroFromOperator()
2796 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) in isKnownNonZeroFromOperator()
2797 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2801 // is a no-op or an extend and not a truncate. in isKnownNonZeroFromOperator()
2802 if (!isa<ScalableVectorType>(I->getType()) && in isKnownNonZeroFromOperator()
2803 Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <= in isKnownNonZeroFromOperator()
2804 Q.DL.getTypeSizeInBits(I->getType()).getFixedValue()) in isKnownNonZeroFromOperator()
2805 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2808 // nuw/nsw trunc preserves zero/non-zero status of input. in isKnownNonZeroFromOperator()
2810 if (TI->hasNoSignedWrap() || TI->hasNoUnsignedWrap()) in isKnownNonZeroFromOperator()
2811 return isKnownNonZero(TI->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2815 return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), in isKnownNonZeroFromOperator()
2816 I->getOperand(1)); in isKnownNonZeroFromOperator()
2819 if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1))) in isKnownNonZeroFromOperator()
2824 if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1))) in isKnownNonZeroFromOperator()
2827 return isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth) || in isKnownNonZeroFromOperator()
2828 isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2832 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2835 // shl nsw/nuw can't remove any non-zero bits. in isKnownNonZeroFromOperator()
2838 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2843 computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q); in isKnownNonZeroFromOperator()
2853 if (BO->isExact()) in isKnownNonZeroFromOperator()
2854 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2859 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
2869 if (cast<PossiblyExactOperator>(I)->isExact()) in isKnownNonZeroFromOperator()
2870 return isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
2873 computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
2880 computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
2881 if (I->getOpcode() == Instruction::SDiv) { in isKnownNonZeroFromOperator()
2888 // If X is totally unknown or X u< Y we won't be able to prove non-zero in isKnownNonZeroFromOperator()
2895 // If Add has nuw wrap flag, then if either X or Y is non-zero the result is in isKnownNonZeroFromOperator()
2896 // non-zero. in isKnownNonZeroFromOperator()
2898 return isNonZeroAdd(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), in isKnownNonZeroFromOperator()
2899 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO), in isKnownNonZeroFromOperator()
2904 return isNonZeroMul(DemandedElts, Depth, Q, BitWidth, I->getOperand(0), in isKnownNonZeroFromOperator()
2905 I->getOperand(1), Q.IIQ.hasNoSignedWrap(BO), in isKnownNonZeroFromOperator()
2911 // First check if the arm is non-zero using `isKnownNonZero`. If that fails, in isKnownNonZeroFromOperator()
2912 // then see if the select condition implies the arm is non-zero. For example in isKnownNonZeroFromOperator()
2913 // (X != 0 ? X : Y), we know the true arm is non-zero as the `X` "return" is in isKnownNonZeroFromOperator()
2917 Op = IsTrueArm ? I->getOperand(1) : I->getOperand(2); in isKnownNonZeroFromOperator()
2918 // Op is trivially non-zero. in isKnownNonZeroFromOperator()
2923 // condition implies that a given arm is non-zero. in isKnownNonZeroFromOperator()
2926 if (!match(I->getOperand(0), m_c_ICmp(Pred, m_Specific(Op), m_Value(X)))) in isKnownNonZeroFromOperator()
2945 // Check if all incoming values are non-zero using recursion. in isKnownNonZeroFromOperator()
2947 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1); in isKnownNonZeroFromOperator()
2948 return llvm::all_of(PN->operands(), [&](const Use &U) { in isKnownNonZeroFromOperator()
2951 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator(); in isKnownNonZeroFromOperator()
2960 if ((TrueSucc == PN->getParent()) != (FalseSucc == PN->getParent())) { in isKnownNonZeroFromOperator()
2962 if (FalseSucc == PN->getParent()) in isKnownNonZeroFromOperator()
2973 if (isa<ScalableVectorType>(I->getType())) in isKnownNonZeroFromOperator()
2976 const Value *Vec = I->getOperand(0); in isKnownNonZeroFromOperator()
2977 const Value *Elt = I->getOperand(1); in isKnownNonZeroFromOperator()
2978 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); in isKnownNonZeroFromOperator()
2984 if (CIdx && CIdx->getValue().ult(NumElts)) { in isKnownNonZeroFromOperator()
2985 DemandedVecElts.clearBit(CIdx->getZExtValue()); in isKnownNonZeroFromOperator()
2986 SkipElt = !DemandedElts[CIdx->getZExtValue()]; in isKnownNonZeroFromOperator()
2989 // Result is non-zero if Elt is non-zero and rest of the demanded elts in Vec in isKnownNonZeroFromOperator()
2990 // are non-zero. in isKnownNonZeroFromOperator()
2997 const Value *Vec = EEI->getVectorOperand(); in isKnownNonZeroFromOperator()
2998 const Value *Idx = EEI->getIndexOperand(); in isKnownNonZeroFromOperator()
3000 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { in isKnownNonZeroFromOperator()
3001 unsigned NumElts = VecTy->getNumElements(); in isKnownNonZeroFromOperator()
3003 if (CIdx && CIdx->getValue().ult(NumElts)) in isKnownNonZeroFromOperator()
3004 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); in isKnownNonZeroFromOperator()
3018 // If demanded elements for both vecs are non-zero, the shuffle is non-zero. in isKnownNonZeroFromOperator()
3020 isKnownNonZero(Shuf->getOperand(1), DemandedRHS, Q, Depth)) && in isKnownNonZeroFromOperator()
3022 isKnownNonZero(Shuf->getOperand(0), DemandedLHS, Q, Depth)); in isKnownNonZeroFromOperator()
3025 return isKnownNonZero(I->getOperand(0), Q, Depth) && in isKnownNonZeroFromOperator()
3026 isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT, in isKnownNonZeroFromOperator()
3032 if (auto *PtrT = dyn_cast<PointerType>(I->getType())) { in isKnownNonZeroFromOperator()
3035 !NullPointerIsDefined(LI->getFunction(), PtrT->getAddressSpace()))) in isKnownNonZeroFromOperator()
3048 switch (WO->getBinaryOp()) { in isKnownNonZeroFromOperator()
3053 WO->getArgOperand(0), WO->getArgOperand(1), in isKnownNonZeroFromOperator()
3058 WO->getArgOperand(0), WO->getArgOperand(1)); in isKnownNonZeroFromOperator()
3061 WO->getArgOperand(0), WO->getArgOperand(1), in isKnownNonZeroFromOperator()
3071 if (I->getType()->isPointerTy()) { in isKnownNonZeroFromOperator()
3072 if (Call->isReturnNonNull()) in isKnownNonZeroFromOperator()
3079 if (std::optional<ConstantRange> Range = Call->getRange()) { in isKnownNonZeroFromOperator()
3080 const APInt ZeroValue(Range->getBitWidth(), 0); in isKnownNonZeroFromOperator()
3081 if (!Range->contains(ZeroValue)) in isKnownNonZeroFromOperator()
3084 if (const Value *RV = Call->getReturnedArgOperand()) in isKnownNonZeroFromOperator()
3085 if (RV->getType() == I->getType() && isKnownNonZero(RV, Q, Depth)) in isKnownNonZeroFromOperator()
3090 switch (II->getIntrinsicID()) { in isKnownNonZeroFromOperator()
3097 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
3099 // non-zero, we will fold it to `sub nuw` in InstCombine. in isKnownNonZeroFromOperator()
3102 II->getArgOperand(0), II->getArgOperand(1)); in isKnownNonZeroFromOperator()
3105 II->getArgOperand(0), II->getArgOperand(1), in isKnownNonZeroFromOperator()
3107 // Vec reverse preserves zero/non-zero status from input vec. in isKnownNonZeroFromOperator()
3109 return isKnownNonZero(II->getArgOperand(0), DemandedElts.reverseBits(), in isKnownNonZeroFromOperator()
3111 // umin/umax/smin/smax/or of all non-zero elements is always non-zero. in isKnownNonZeroFromOperator()
3117 return isKnownNonZero(II->getArgOperand(0), Q, Depth); in isKnownNonZeroFromOperator()
3122 if (matchOpWithOpEqZero(II->getArgOperand(0), II->getArgOperand(1))) in isKnownNonZeroFromOperator()
3125 return isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth) || in isKnownNonZeroFromOperator()
3126 isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
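// Illustrative sketch: umax(%x, %y) and %x +usat %y are each at least as
// large (unsigned) as either operand, so proving either operand non-zero
// suffices.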
3128 // If either arg is strictly positive the result is non-zero. Otherwise in isKnownNonZeroFromOperator()
3129 // the result is non-zero if both ops are non-zero. in isKnownNonZeroFromOperator()
3137 // Avoid re-computing isKnownNonZero. in isKnownNonZeroFromOperator()
3140 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
3142 IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known)) in isKnownNonZeroFromOperator()
3145 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
3147 IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known)) in isKnownNonZeroFromOperator()
3149 return IsNonZero(II->getArgOperand(1), Op1NonZero, Op1Known) && in isKnownNonZeroFromOperator()
3150 IsNonZero(II->getArgOperand(0), Op0NonZero, Op0Known); in isKnownNonZeroFromOperator()
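// Illustrative sketch: smax(%x, 1) is always >= 1; and with no strictly
// positive operand, both operands being non-zero means both are negative,
// e.g. smax(-3, -5) == -3, which is still non-zero.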
3153 // If either arg is negative the result is non-zero. Otherwise in isKnownNonZeroFromOperator()
3154 // the result is non-zero if both ops are non-zero. in isKnownNonZeroFromOperator()
3156 computeKnownBits(II->getArgOperand(1), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
3160 computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q); in isKnownNonZeroFromOperator()
3169 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth) && in isKnownNonZeroFromOperator()
3170 isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
3172 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) in isKnownNonZeroFromOperator()
3175 return computeKnownBits(II->getArgOperand(0), DemandedElts, Depth, Q) in isKnownNonZeroFromOperator()
3180 if (II->getArgOperand(0) == II->getArgOperand(1)) in isKnownNonZeroFromOperator()
3181 return isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth); in isKnownNonZeroFromOperator()
3186 return isKnownNonZero(I->getOperand(0), Q, Depth); in isKnownNonZeroFromOperator()
3202 /// Return true if the given value is known to be non-zero when defined. For
3203 /// vectors, return true if every demanded element is known to be non-zero when
3205 /// specified, perform context-sensitive analysis and return true if the
3208 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, in isKnownNonZero() argument
3210 Type *Ty = V->getType(); in isKnownNonZero()
3217 FVTy->getNumElements() == DemandedElts.getBitWidth() && in isKnownNonZero()
3225 if (auto *C = dyn_cast<Constant>(V)) { in isKnownNonZero()
3226 if (C->isNullValue()) in isKnownNonZero()
3229 // Must be non-zero due to null test above. in isKnownNonZero()
3233 // non-zero to determine that the whole vector is known non-zero. in isKnownNonZero()
3235 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) { in isKnownNonZero()
3238 Constant *Elt = C->getAggregateElement(i); in isKnownNonZero()
3239 if (!Elt || Elt->isNullValue()) in isKnownNonZero()
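// Illustrative sketch: <2 x i32> <i32 3, i32 0> is known non-zero only when
// the zero element (index 1) is not among the demanded elements.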
3248 if (auto *CPA = dyn_cast<ConstantPtrAuth>(V)) in isKnownNonZero()
3249 return isKnownNonZero(CPA->getPointer(), DemandedElts, Q, Depth); in isKnownNonZero()
3254 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) { in isKnownNonZero()
3255 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() && in isKnownNonZero()
3256 GV->getType()->getAddressSpace() == 0) in isKnownNonZero()
3261 if (!isa<ConstantExpr>(V)) in isKnownNonZero()
3265 if (const auto *A = dyn_cast<Argument>(V)) in isKnownNonZero()
3266 if (std::optional<ConstantRange> Range = A->getRange()) { in isKnownNonZero()
3267 const APInt ZeroValue(Range->getBitWidth(), 0); in isKnownNonZero()
3268 if (!Range->contains(ZeroValue)) in isKnownNonZero()
3272 if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q)) in isKnownNonZero()
3282 // A byval or inalloca argument may not be null in a non-default address space. A in isKnownNonZero()
3284 if (const Argument *A = dyn_cast<Argument>(V)) { in isKnownNonZero()
3285 if (((A->hasPassPointeeByValueCopyAttr() && in isKnownNonZero()
3286 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) || in isKnownNonZero()
3287 A->hasNonNullAttr())) in isKnownNonZero()
3292 if (const auto *I = dyn_cast<Operator>(V)) in isKnownNonZero()
3296 if (!isa<Constant>(V) && in isKnownNonZero()
3297 isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) in isKnownNonZero()
3303 bool llvm::isKnownNonZero(const Value *V, const SimplifyQuery &Q, in isKnownNonZero() argument
3305 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); in isKnownNonZero()
3307 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in isKnownNonZero()
3308 return ::isKnownNonZero(V, DemandedElts, Q, Depth); in isKnownNonZero()
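// A minimal caller sketch (hypothetical names; assumes a Value *V and a
// DataLayout DL are in scope):
//   SimplifyQuery Q(DL);
//   bool NonZero = isKnownNonZero(V, Q); // Depth defaults to 0.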
3313 /// return std::nullopt. An invertible function is one that is 1-to-1 and maps
3320 if (Op1->getOpcode() != Op2->getOpcode()) in getInvertibleOperands()
3323 auto getOperands = [&](unsigned OpNum) -> auto { in getInvertibleOperands()
3324 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum)); in getInvertibleOperands()
3327 switch (Op1->getOpcode()) { in getInvertibleOperands()
3331 if (!cast<PossiblyDisjointInst>(Op1)->isDisjoint() || in getInvertibleOperands()
3332 !cast<PossiblyDisjointInst>(Op2)->isDisjoint()) in getInvertibleOperands()
3338 if (match(Op2, m_c_BinOp(m_Specific(Op1->getOperand(0)), m_Value(Other)))) in getInvertibleOperands()
3339 return std::make_pair(Op1->getOperand(1), Other); in getInvertibleOperands()
3340 if (match(Op2, m_c_BinOp(m_Specific(Op1->getOperand(1)), m_Value(Other)))) in getInvertibleOperands()
3341 return std::make_pair(Op1->getOperand(0), Other); in getInvertibleOperands()
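// Illustrative sketch: X + Z and Y + Z collide only when X == Y (adding Z
// is a bijection modulo 2^N), so {X, Y} is a valid invertible pair here.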
3345 if (Op1->getOperand(0) == Op2->getOperand(0)) in getInvertibleOperands()
3347 if (Op1->getOperand(1) == Op2->getOperand(1)) in getInvertibleOperands()
3352 // and N is the bitwidth. The nsw case is non-obvious, but proven by in getInvertibleOperands()
3356 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && in getInvertibleOperands()
3357 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) in getInvertibleOperands()
3361 if (Op1->getOperand(1) == Op2->getOperand(1) && in getInvertibleOperands()
3362 isa<ConstantInt>(Op1->getOperand(1)) && in getInvertibleOperands()
3363 !cast<ConstantInt>(Op1->getOperand(1))->isZero()) in getInvertibleOperands()
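// Illustrative sketch: with matching nsw (or nuw) flags, X * 5 == Y * 5
// implies X == Y, since multiplying by a non-zero constant is injective
// when no wrap occurs.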
3369 // for a non-zero multiply. Shifts always multiply by non-zero. in getInvertibleOperands()
3372 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && in getInvertibleOperands()
3373 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) in getInvertibleOperands()
3376 if (Op1->getOperand(1) == Op2->getOperand(1)) in getInvertibleOperands()
3384 if (!PEO1->isExact() || !PEO2->isExact()) in getInvertibleOperands()
3387 if (Op1->getOperand(1) == Op2->getOperand(1)) in getInvertibleOperands()
3393 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType()) in getInvertibleOperands()
3407 if (PN1->getParent() != PN2->getParent() || in getInvertibleOperands()
3418 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V in getInvertibleOperands()
3419 // * X_i = Y_i = X_(i-1) OP Y_(i-1) in getInvertibleOperands()
3422 if (Values->first != PN1 || Values->second != PN2) in getInvertibleOperands()
3431 /// Return true if V1 == (binop V2, X), where X is known non-zero.
3432 /// Only handle a small subset of binops where (binop V2, X) with non-zero X
3440 switch (BO->getOpcode()) { in isModifyingBinopOfNonZero()
3444 if (!cast<PossiblyDisjointInst>(V1)->isDisjoint()) in isModifyingBinopOfNonZero()
3450 if (V2 == BO->getOperand(0)) in isModifyingBinopOfNonZero()
3451 Op = BO->getOperand(1); in isModifyingBinopOfNonZero()
3452 else if (V2 == BO->getOperand(1)) in isModifyingBinopOfNonZero()
3453 Op = BO->getOperand(0); in isModifyingBinopOfNonZero()
3461 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
3469 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && in isNonEqualMul()
3470 !C->isZero() && !C->isOne() && in isNonEqualMul()
3476 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
3484 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) && in isNonEqualShl()
3485 !C->isZero() && isKnownNonZero(V1, DemandedElts, Q, Depth + 1); in isNonEqualShl()
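// Illustrative sketch: if %v1 is non-zero, `%v2 = shl nuw i32 %v1, 3` equals
// 8 * %v1 without wrapping, and that can never equal %v1 itself.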
3494 if (PN1->getParent() != PN2->getParent()) in isNonEqualPHIs()
3499 for (const BasicBlock *IncomBB : PN1->blocks()) { in isNonEqualPHIs()
3502 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB); in isNonEqualPHIs()
3503 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB); in isNonEqualPHIs()
3513 RecQ.CxtI = IncomBB->getTerminator(); in isNonEqualPHIs()
3529 const Value *Cond1 = SI1->getCondition(); in isNonEqualSelect()
3530 const Value *Cond2 = SI2->getCondition(); in isNonEqualSelect()
3532 return isKnownNonEqual(SI1->getTrueValue(), SI2->getTrueValue(), in isNonEqualSelect()
3534 isKnownNonEqual(SI1->getFalseValue(), SI2->getFalseValue(), in isNonEqualSelect()
3537 return isKnownNonEqual(SI1->getTrueValue(), V2, DemandedElts, Depth + 1, Q) && in isNonEqualSelect()
3538 isKnownNonEqual(SI1->getFalseValue(), V2, DemandedElts, Depth + 1, Q); in isNonEqualSelect()
3548 if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy()) in isNonEqualPointersWithRecursiveGEP()
3552 if (!GEPA || GEPA->getNumIndices() != 1 || !isa<Constant>(GEPA->idx_begin())) in isNonEqualPointersWithRecursiveGEP()
3556 auto *PN = dyn_cast<PHINode>(GEPA->getPointerOperand()); in isNonEqualPointersWithRecursiveGEP()
3557 if (!PN || PN->getNumIncomingValues() != 2) in isNonEqualPointersWithRecursiveGEP()
3564 if (PN->getIncomingValue(0) == Step) in isNonEqualPointersWithRecursiveGEP()
3565 Start = PN->getIncomingValue(1); in isNonEqualPointersWithRecursiveGEP()
3566 else if (PN->getIncomingValue(1) == Step) in isNonEqualPointersWithRecursiveGEP()
3567 Start = PN->getIncomingValue(0); in isNonEqualPointersWithRecursiveGEP()
3574 // A and B are non-equal if the conditions above hold. in isNonEqualPointersWithRecursiveGEP()
3577 unsigned IndexWidth = Q.DL.getIndexTypeSizeInBits(Start->getType()); in isNonEqualPointersWithRecursiveGEP()
3579 Start = Start->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StartOffset); in isNonEqualPointersWithRecursiveGEP()
3581 Step = Step->stripAndAccumulateInBoundsConstantOffsets(Q.DL, StepOffset); in isNonEqualPointersWithRecursiveGEP()
3587 B = B->stripAndAccumulateInBoundsConstantOffsets(Q.DL, OffsetB); in isNonEqualPointersWithRecursiveGEP()
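// Sketch of the recognized pattern (hypothetical IR):
//   %p = phi ptr [ %start, %entry ], [ %p.next, %loop ]
//   %p.next = getelementptr inbounds i8, ptr %p, i64 C   ; C > 0
// The induction pointer only steps upward from %start, so it cannot equal a
// pointer B that the offset checks below place at or before %start.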
3599 if (V1->getType() != V2->getType()) in isKnownNonEqual()
3607 // requires our operation be 1-to-1 and map every input value to exactly in isKnownNonEqual()
3611 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) { in isKnownNonEqual()
3613 return isKnownNonEqual(Values->first, Values->second, DemandedElts, in isKnownNonEqual()
3637 if (V1->getType()->isIntOrIntVectorTy()) { in isKnownNonEqual()
3672 cast<Operator>(Select)->getOpcode() == Instruction::Select && in isSignedMinMaxClamp()
3695 return CLow->sle(*CHigh); in isSignedMinMaxClamp()
3701 assert((II->getIntrinsicID() == Intrinsic::smin || in isSignedMinMaxIntrinsicClamp()
3702 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax"); in isSignedMinMaxIntrinsicClamp()
3704 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID()); in isSignedMinMaxIntrinsicClamp()
3705 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0)); in isSignedMinMaxIntrinsicClamp()
3706 if (!InnerII || InnerII->getIntrinsicID() != InverseID || in isSignedMinMaxIntrinsicClamp()
3707 !match(II->getArgOperand(1), m_APInt(CLow)) || in isSignedMinMaxIntrinsicClamp()
3708 !match(InnerII->getArgOperand(1), m_APInt(CHigh))) in isSignedMinMaxIntrinsicClamp()
3711 if (II->getIntrinsicID() == Intrinsic::smin) in isSignedMinMaxIntrinsicClamp()
3713 return CLow->sle(*CHigh); in isSignedMinMaxIntrinsicClamp()
3720 static unsigned computeNumSignBitsVectorConstant(const Value *V, in computeNumSignBitsVectorConstant() argument
3723 const auto *CV = dyn_cast<Constant>(V); in computeNumSignBitsVectorConstant()
3724 if (!CV || !isa<FixedVectorType>(CV->getType())) in computeNumSignBitsVectorConstant()
3728 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements(); in computeNumSignBitsVectorConstant()
3732 // If we find a non-ConstantInt, bail out. in computeNumSignBitsVectorConstant()
3733 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); in computeNumSignBitsVectorConstant()
3737 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); in computeNumSignBitsVectorConstant()
3743 static unsigned ComputeNumSignBitsImpl(const Value *V,
3747 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, in ComputeNumSignBits() argument
3749 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q); in ComputeNumSignBits()
3761 static unsigned ComputeNumSignBitsImpl(const Value *V, in ComputeNumSignBitsImpl() argument
3764 Type *Ty = V->getType(); in ComputeNumSignBitsImpl()
3770 FVTy->getNumElements() == DemandedElts.getBitWidth() && in ComputeNumSignBitsImpl()
3779 // in V, so for undef we have to conservatively return 1. We don't have the in ComputeNumSignBitsImpl()
3780 // same behavior for poison though -- that's a FIXME today. in ComputeNumSignBitsImpl()
3782 Type *ScalarTy = Ty->getScalarType(); in ComputeNumSignBitsImpl()
3783 unsigned TyBits = ScalarTy->isPointerTy() ? in ComputeNumSignBitsImpl()
3796 if (auto *U = dyn_cast<Operator>(V)) { in ComputeNumSignBitsImpl()
3797 switch (Operator::getOpcode(V)) { in ComputeNumSignBitsImpl()
3800 Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits(); in ComputeNumSignBitsImpl()
3801 return ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q) + in ComputeNumSignBitsImpl()
3806 // sdiv X, C -> adds log(C) sign bits. in ComputeNumSignBitsImpl()
3807 if (match(U->getOperand(1), m_APInt(Denominator))) { in ComputeNumSignBitsImpl()
3809 // Ignore non-positive denominator. in ComputeNumSignBitsImpl()
3810 if (!Denominator->isStrictlyPositive()) in ComputeNumSignBitsImpl()
3815 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3818 return std::min(TyBits, NumBits + Denominator->logBase2()); in ComputeNumSignBitsImpl()
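// Illustrative sketch: for i32, if X has 3 known sign bits, then sdiv X, 8
// has at least min(32, 3 + log2(8)) = 6 sign bits.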
3824 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3827 // srem X, C -> we know that the result is within [-C+1,C) when C is a in ComputeNumSignBitsImpl()
3830 if (match(U->getOperand(1), m_APInt(Denominator))) { in ComputeNumSignBitsImpl()
3832 // Ignore non-positive denominator. in ComputeNumSignBitsImpl()
3833 if (Denominator->isStrictlyPositive()) { in ComputeNumSignBitsImpl()
3841 // 2. The numerator is negative. Then the result range is (-C,0] and in ComputeNumSignBitsImpl()
3842 // integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)). in ComputeNumSignBitsImpl()
3844 // Thus a lower bound on the number of sign bits is `TyBits - in ComputeNumSignBitsImpl()
3847 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); in ComputeNumSignBitsImpl()
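// Illustrative sketch: for i32 srem by 16, the result lies in (-16, 16),
// guaranteeing at least 32 - ceilLog2(16) = 28 sign bits.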
3855 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3856 // ashr X, C -> adds C sign bits. Vectors too. in ComputeNumSignBitsImpl()
3858 if (match(U->getOperand(1), m_APInt(ShAmt))) { in ComputeNumSignBitsImpl()
3859 if (ShAmt->uge(TyBits)) in ComputeNumSignBitsImpl()
3861 unsigned ShAmtLimited = ShAmt->getZExtValue(); in ComputeNumSignBitsImpl()
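// Illustrative sketch: `ashr i32 %x, 5` has min(32, NumSignBits(%x) + 5)
// sign bits, since each arithmetic shift step duplicates the sign bit.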
3870 if (match(U->getOperand(1), m_APInt(ShAmt))) { in ComputeNumSignBitsImpl()
3872 if (ShAmt->uge(TyBits)) in ComputeNumSignBitsImpl()
3876 if (match(U->getOperand(0), m_ZExt(m_Value(X))) && in ComputeNumSignBitsImpl()
3877 ShAmt->uge(TyBits - X->getType()->getScalarSizeInBits())) { in ComputeNumSignBitsImpl()
3879 Tmp += TyBits - X->getType()->getScalarSizeInBits(); in ComputeNumSignBitsImpl()
3882 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3883 if (ShAmt->uge(Tmp)) in ComputeNumSignBitsImpl()
3885 Tmp2 = ShAmt->getZExtValue(); in ComputeNumSignBitsImpl()
3886 return Tmp - Tmp2; in ComputeNumSignBitsImpl()
3894 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3896 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3910 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); in ComputeNumSignBitsImpl()
3912 Tmp = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3915 Tmp2 = ComputeNumSignBits(U->getOperand(2), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3922 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); in ComputeNumSignBitsImpl()
3925 // Special case decrementing a value (ADD X, -1): in ComputeNumSignBitsImpl()
3926 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) in ComputeNumSignBitsImpl()
3927 if (CRHS->isAllOnesValue()) { in ComputeNumSignBitsImpl()
3929 computeKnownBits(U->getOperand(0), DemandedElts, Known, Depth + 1, Q); in ComputeNumSignBitsImpl()
3931 // If the input is known to be 0 or 1, the output is 0/-1, which is in ComputeNumSignBitsImpl()
3942 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3945 return std::min(Tmp, Tmp2) - 1; in ComputeNumSignBitsImpl()
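// Illustrative sketch: adding two i32 values that each carry at least 10
// sign bits yields at least 9 sign bits, since the sum can at most double
// in magnitude.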
3948 Tmp2 = ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3953 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) in ComputeNumSignBitsImpl()
3954 if (CLHS->isNullValue()) { in ComputeNumSignBitsImpl()
3956 computeKnownBits(U->getOperand(1), DemandedElts, Known, Depth + 1, Q); in ComputeNumSignBitsImpl()
3957 // If the input is known to be 0 or 1, the output is 0/-1, which is in ComputeNumSignBitsImpl()
3973 Tmp = ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3976 return std::min(Tmp, Tmp2) - 1; in ComputeNumSignBitsImpl()
3982 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3986 ComputeNumSignBits(U->getOperand(1), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
3990 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); in ComputeNumSignBitsImpl()
3991 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; in ComputeNumSignBitsImpl()
3996 unsigned NumIncomingValues = PN->getNumIncomingValues(); in ComputeNumSignBitsImpl()
3997 // Don't analyze large in-degree PHIs. in ComputeNumSignBitsImpl()
3999 // Unreachable blocks may have zero-operand PHI nodes. in ComputeNumSignBitsImpl()
4008 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); in ComputeNumSignBitsImpl()
4009 Tmp = std::min(Tmp, ComputeNumSignBits(PN->getIncomingValue(i), in ComputeNumSignBitsImpl()
4019 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); in ComputeNumSignBitsImpl()
4020 unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits(); in ComputeNumSignBitsImpl()
4021 if (Tmp > (OperandTyBits - TyBits)) in ComputeNumSignBitsImpl()
4022 return Tmp - (OperandTyBits - TyBits); in ComputeNumSignBitsImpl()
4032 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); in ComputeNumSignBitsImpl()
4049 const Value *LHS = Shuf->getOperand(0); in ComputeNumSignBitsImpl()
4053 // fall-back. in ComputeNumSignBitsImpl()
4057 const Value *RHS = Shuf->getOperand(1); in ComputeNumSignBitsImpl()
4062 // fall-back. in ComputeNumSignBitsImpl()
4070 switch (II->getIntrinsicID()) { in ComputeNumSignBitsImpl()
4075 ComputeNumSignBits(U->getOperand(0), DemandedElts, Depth + 1, Q); in ComputeNumSignBitsImpl()
4080 return Tmp - 1; in ComputeNumSignBitsImpl()
4085 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); in ComputeNumSignBitsImpl()
4099 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits)) in ComputeNumSignBitsImpl()
4103 computeKnownBits(V, DemandedElts, Known, Depth, Q); in ComputeNumSignBitsImpl()
4116 if (F->isIntrinsic()) in getIntrinsicForCallSite()
4117 return F->getIntrinsicID(); in getIntrinsicForCallSite()
4123 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) || in getIntrinsicForCallSite()
4222 Ty = Ty->getScalarType(); in inputDenormalIsIEEE()
4223 return F.getDenormalMode(Ty->getFltSemantics()).Input == DenormalMode::IEEE; in inputDenormalIsIEEE()
4227 Ty = Ty->getScalarType(); in inputDenormalIsIEEEOrPosZero()
4228 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics()); in inputDenormalIsIEEEOrPosZero()
4234 Ty = Ty->getScalarType(); in outputDenormalIsIEEEOrPosZero()
4235 DenormalMode Mode = F.getDenormalMode(Ty->getFltSemantics()); in outputDenormalIsIEEEOrPosZero()
4260 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics()); in isKnownNeverLogicalPosZero()
4288 DenormalMode Mode = F.getDenormalMode(Ty->getScalarType()->getFltSemantics()); in propagateDenormal()
4320 case ICmpInst::ICMP_SLE: // True if LHS s<= -1 in isSignBitCheck()
4323 case ICmpInst::ICMP_SGT: // True if LHS s> -1 in isSignBitCheck()
4330 // True if LHS u> RHS and RHS == sign-bit-mask - 1 in isSignBitCheck()
4334 // True if LHS u>= RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc) in isSignBitCheck()
4338 // True if LHS u< RHS and RHS == sign-bit-mask (2^7, 2^15, 2^31, etc) in isSignBitCheck()
4342 // True if LHS u<= RHS and RHS == sign-bit-mask - 1 in isSignBitCheck()
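// Illustrative sketch: for i8, (icmp ugt %x, 127) is a sign-bit test; it
// holds exactly when %x u>= 128, i.e. when the sign bit 0x80 is set.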
4376 static std::tuple<Value *, FPClassTest, FPClassTest> exactClass(Value *V, in exactClass() argument
4378 return {V, M, ~M}; in exactClass()
4400 // fcmp o__ x, nan -> false in fcmpImpliesClass()
4401 // fcmp u__ x, nan -> true in fcmpImpliesClass()
4405 // fcmp ord x, zero|normal|subnormal|inf -> ~fcNan in fcmpImpliesClass()
4409 // fcmp uno x, zero|normal|subnormal|inf -> fcNan in fcmpImpliesClass()
4423 if (!inputDenormalIsIEEE(F, LHS->getType())) in fcmpImpliesClass()
4437 // non-canonical other non-NaN constants or LHS == RHS. in fcmpImpliesClass()
4475 // fcmp oeq x, +inf -> is_fpclass x, fcPosInf in fcmpImpliesClass()
4476 // fcmp oeq fabs(x), +inf -> is_fpclass x, fcInf in fcmpImpliesClass()
4477 // fcmp oeq x, -inf -> is_fpclass x, fcNegInf in fcmpImpliesClass()
4478 // fcmp oeq fabs(x), -inf -> is_fpclass x, 0 -> false in fcmpImpliesClass()
4480 // fcmp une x, +inf -> is_fpclass x, ~fcPosInf in fcmpImpliesClass()
4481 // fcmp une fabs(x), +inf -> is_fpclass x, ~fcInf in fcmpImpliesClass()
4482 // fcmp une x, -inf -> is_fpclass x, ~fcNegInf in fcmpImpliesClass()
4483 // fcmp une fabs(x), -inf -> is_fpclass x, fcAllFlags -> true in fcmpImpliesClass()
4498 // fcmp one x, -inf -> is_fpclass x, fcNegInf in fcmpImpliesClass()
4499 // fcmp one fabs(x), -inf -> is_fpclass x, ~fcNegInf & ~fcNan in fcmpImpliesClass()
4500 // fcmp one x, +inf -> is_fpclass x, ~fcNegInf & ~fcNan in fcmpImpliesClass()
4501 // fcmp one fabs(x), +inf -> is_fpclass x, ~fcInf & fcNan in fcmpImpliesClass()
4503 // fcmp ueq x, +inf -> is_fpclass x, fcPosInf|fcNan in fcmpImpliesClass()
4504 // fcmp ueq (fabs x), +inf -> is_fpclass x, fcInf|fcNan in fcmpImpliesClass()
4505 // fcmp ueq x, -inf -> is_fpclass x, fcNegInf|fcNan in fcmpImpliesClass()
4506 // fcmp ueq fabs(x), -inf -> is_fpclass x, fcNan in fcmpImpliesClass()
4524 // fcmp olt x, -inf -> false in fcmpImpliesClass()
4525 // fcmp uge x, -inf -> true in fcmpImpliesClass()
4530 // fcmp olt fabs(x), +inf -> fcFinite in fcmpImpliesClass()
4531 // fcmp uge fabs(x), +inf -> ~fcFinite in fcmpImpliesClass()
4532 // fcmp olt x, +inf -> fcFinite|fcNegInf in fcmpImpliesClass()
4533 // fcmp uge x, +inf -> ~(fcFinite|fcNegInf) in fcmpImpliesClass()
4542 // fcmp oge x, -inf -> ~fcNan in fcmpImpliesClass()
4543 // fcmp oge fabs(x), -inf -> ~fcNan in fcmpImpliesClass()
4544 // fcmp ult x, -inf -> fcNan in fcmpImpliesClass()
4545 // fcmp ult fabs(x), -inf -> fcNan in fcmpImpliesClass()
4550 // fcmp oge fabs(x), +inf -> fcInf in fcmpImpliesClass()
4551 // fcmp oge x, +inf -> fcPosInf in fcmpImpliesClass()
4552 // fcmp ult fabs(x), +inf -> ~fcInf in fcmpImpliesClass()
4553 // fcmp ult x, +inf -> ~fcPosInf in fcmpImpliesClass()
4562 // fcmp ogt x, -inf -> fcmp one x, -inf in fcmpImpliesClass()
4563 // fcmp ogt fabs(x), -inf -> fcmp ord x, x in fcmpImpliesClass()
4564 // fcmp ule x, -inf -> fcmp ueq x, -inf in fcmpImpliesClass()
4565 // fcmp ule fabs(x), -inf -> fcmp uno x, x in fcmpImpliesClass()
4581 // fcmp ole x, +inf -> fcmp ord x, x in fcmpImpliesClass()
4582 // fcmp ole fabs(x), +inf -> fcmp ord x, x in fcmpImpliesClass()
4583 // fcmp ole x, -inf -> fcmp oeq x, -inf in fcmpImpliesClass()
4584 // fcmp ole fabs(x), -inf -> false in fcmpImpliesClass()
4622 // fabs(x) o> -k -> fcmp ord x, x in fcmpImpliesClass()
4623 // fabs(x) u> -k -> true in fcmpImpliesClass()
4624 // fabs(x) o< -k -> false in fcmpImpliesClass()
4625 // fabs(x) u< -k -> fcmp uno x, x in fcmpImpliesClass()
4718 // fcmp olt x, smallest_normal -> fcNegInf|fcNegNormal|fcSubnormal|fcZero in fcmpImpliesClass()
4719 // fcmp olt fabs(x), smallest_normal -> fcSubnormal|fcZero in fcmpImpliesClass()
4720 // fcmp uge x, smallest_normal -> fcNan|fcPosNormal|fcPosInf in fcmpImpliesClass()
4721 // fcmp uge fabs(x), smallest_normal -> ~(fcSubnormal|fcZero) in fcmpImpliesClass()
4730 // fcmp oge x, smallest_normal -> fcPosNormal | fcPosInf in fcmpImpliesClass()
4731 // fcmp oge fabs(x), smallest_normal -> fcInf | fcNormal in fcmpImpliesClass()
4732 // fcmp ult x, smallest_normal -> ~(fcPosNormal | fcPosInf) in fcmpImpliesClass()
4733 // fcmp ult fabs(x), smallest_normal -> ~(fcInf | fcNormal) in fcmpImpliesClass()
4761 // TODO: Just call computeKnownFPClass for RHS to handle non-constants. in fcmpImpliesClass()
4765 static void computeKnownFPClassFromCond(const Value *V, Value *Cond, in computeKnownFPClassFromCond() argument
4776 Pred, *CxtI->getParent()->getParent(), LHS, *CRHS, LHS != V); in computeKnownFPClassFromCond()
4777 if (CmpVal == V) in computeKnownFPClassFromCond()
4795 static KnownFPClass computeKnownFPClassFromContext(const Value *V, in computeKnownFPClassFromContext() argument
4804 for (BranchInst *BI : Q.DC->conditionsFor(V)) { in computeKnownFPClassFromContext()
4805 Value *Cond = BI->getCondition(); in computeKnownFPClassFromContext()
4807 BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0)); in computeKnownFPClassFromContext()
4808 if (Q.DT->dominates(Edge0, Q.CxtI->getParent())) in computeKnownFPClassFromContext()
4809 computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/true, Q.CxtI, in computeKnownFPClassFromContext()
4812 BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1)); in computeKnownFPClassFromContext()
4813 if (Q.DT->dominates(Edge1, Q.CxtI->getParent())) in computeKnownFPClassFromContext()
4814 computeKnownFPClassFromCond(V, Cond, /*CondIsTrue=*/false, Q.CxtI, in computeKnownFPClassFromContext()
4822 // Try to restrict the floating-point classes based on information from in computeKnownFPClassFromContext()
4824 for (auto &AssumeVH : Q.AC->assumptionsFor(V)) { in computeKnownFPClassFromContext()
4829 assert(I->getFunction() == Q.CxtI->getParent()->getParent() && in computeKnownFPClassFromContext()
4831 assert(I->getIntrinsicID() == Intrinsic::assume && in computeKnownFPClassFromContext()
4837 computeKnownFPClassFromCond(V, I->getArgOperand(0), /*CondIsTrue=*/true, in computeKnownFPClassFromContext()
4844 void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
4848 static void computeKnownFPClass(const Value *V, KnownFPClass &Known, in computeKnownFPClass() argument
4851 auto *FVTy = dyn_cast<FixedVectorType>(V->getType()); in computeKnownFPClass()
4853 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1); in computeKnownFPClass()
4854 computeKnownFPClass(V, DemandedElts, InterestedClasses, Known, Depth, Q); in computeKnownFPClass()
4867 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClassForFPTrunc()
4880 void computeKnownFPClass(const Value *V, const APInt &DemandedElts, in computeKnownFPClass() argument
4893 if (auto *CFP = dyn_cast<ConstantFP>(V)) { in computeKnownFPClass()
4894 Known.KnownFPClasses = CFP->getValueAPF().classify(); in computeKnownFPClass()
4895 Known.SignBit = CFP->isNegative(); in computeKnownFPClass()
4899 if (isa<ConstantAggregateZero>(V)) { in computeKnownFPClass()
4905 if (isa<PoisonValue>(V)) { in computeKnownFPClass()
4912 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); in computeKnownFPClass()
4913 const Constant *CV = dyn_cast<Constant>(V); in computeKnownFPClass()
4920 unsigned NumElts = VFVTy->getNumElements(); in computeKnownFPClass()
4925 Constant *Elt = CV->getAggregateElement(i); in computeKnownFPClass()
4938 const APFloat &C = CElt->getValueAPF(); in computeKnownFPClass()
4951 if (const auto *CB = dyn_cast<CallBase>(V)) in computeKnownFPClass()
4952 KnownNotFromFlags |= CB->getRetNoFPClass(); in computeKnownFPClass()
4953 else if (const auto *Arg = dyn_cast<Argument>(V)) in computeKnownFPClass()
4954 KnownNotFromFlags |= Arg->getNoFPClass(); in computeKnownFPClass()
4956 const Operator *Op = dyn_cast<Operator>(V); in computeKnownFPClass()
4958 if (FPOp->hasNoNaNs()) in computeKnownFPClass()
4960 if (FPOp->hasNoInfs()) in computeKnownFPClass()
4964 KnownFPClass AssumedClasses = computeKnownFPClassFromContext(V, Q); in computeKnownFPClass()
4988 const unsigned Opc = Op->getOpcode(); in computeKnownFPClass()
4991 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
4997 Value *Cond = Op->getOperand(0); in computeKnownFPClass()
4998 Value *LHS = Op->getOperand(1); in computeKnownFPClass()
4999 Value *RHS = Op->getOperand(2); in computeKnownFPClass()
5008 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5050 const Intrinsic::ID IID = II->getIntrinsicID(); in computeKnownFPClass()
5056 computeKnownFPClass(II->getArgOperand(0), DemandedElts, in computeKnownFPClass()
5066 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5068 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses, in computeKnownFPClass()
5078 if (II->getArgOperand(0) != II->getArgOperand(1)) in computeKnownFPClass()
5081 // The multiply cannot be -0 and therefore the add can't be -0 in computeKnownFPClass()
5084 // x * x + y is non-negative if y is non-negative. in computeKnownFPClass()
5086 computeKnownFPClass(II->getArgOperand(2), DemandedElts, InterestedClasses, in computeKnownFPClass()
5100 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5108 // Any negative value besides -0 returns a nan. in computeKnownFPClass()
5112 // The only negative value that can be returned is -0 for -0 inputs. in computeKnownFPClass()
5117 const Function *F = II->getFunction(); in computeKnownFPClass()
5119 (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType()))) in computeKnownFPClass()
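// Illustrative sketch: sqrt(-1.0) is NaN, sqrt(-0.0) is -0.0, and every
// other allowed input yields a non-negative result, which is what the
// class propagation above encodes.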
5128 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5140 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5142 computeKnownFPClass(II->getArgOperand(1), DemandedElts, InterestedClasses, in computeKnownFPClass()
5184 // As there's no spec for denormal flushing, be conservative with the in computeKnownFPClass()
5191 const Function *Parent = II->getFunction(); in computeKnownFPClass()
5195 DenormalMode Mode = Parent->getDenormalMode( in computeKnownFPClass()
5196 II->getType()->getScalarType()->getFltSemantics()); in computeKnownFPClass()
5225 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5233 // denormal mode to preserve known-not-0 knowledge. in computeKnownFPClass()
5243 const Function *F = II->getFunction(); in computeKnownFPClass()
5250 II->getType()->getScalarType()->getFltSemantics(); in computeKnownFPClass()
5251 DenormalMode DenormMode = F->getDenormalMode(FPType); in computeKnownFPClass()
5276 Known = computeKnownFPClass(II->getArgOperand(0), II->getFastMathFlags(), in computeKnownFPClass()
5286 II->getArgOperand(0), DemandedElts.reverseBits(), in computeKnownFPClass()
5287 II->getFastMathFlags(), InterestedClasses, Depth + 1, Q); in computeKnownFPClass()
5302 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5312 if (IID == Intrinsic::trunc || !V->getType()->isMultiUnitFPType()) { in computeKnownFPClass()
5319 // Negative values that round up to 0 produce -0. in computeKnownFPClass()
5335 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5355 // log(+inf) -> +inf in computeKnownFPClass()
5356 // log([+-]0.0) -> -inf in computeKnownFPClass()
5357 // log(-inf) -> nan in computeKnownFPClass()
5358 // log(-x) -> nan in computeKnownFPClass()
5369 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5378 const Function *F = II->getFunction(); in computeKnownFPClass()
5379 if (F && KnownSrc.isKnownNeverLogicalZero(*F, II->getType())) in computeKnownFPClass()
5388 const Value *Exp = II->getArgOperand(1); in computeKnownFPClass()
5389 Type *ExpTy = Exp->getType(); in computeKnownFPClass()
5390 unsigned BitWidth = ExpTy->getScalarType()->getIntegerBitWidth(); in computeKnownFPClass()
5403 // pow(-x, exp) --> negative if exp is odd and x is negative. in computeKnownFPClass()
5404 // pow(-0, exp) --> -inf if exp is negative odd. in computeKnownFPClass()
5405 // pow(-0, exp) --> -0 if exp is positive odd. in computeKnownFPClass()
5406 // pow(-inf, exp) --> -0 if exp is negative odd. in computeKnownFPClass()
5407 // pow(-inf, exp) --> -inf if exp is positive odd. in computeKnownFPClass()
5409 computeKnownFPClass(II->getArgOperand(0), DemandedElts, fcNegative, in computeKnownFPClass()
5417 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5440 II->getType()->getScalarType()->getFltSemantics(); in computeKnownFPClass()
5442 const Value *ExpArg = II->getArgOperand(1); in computeKnownFPClass()
5446 const int MantissaBits = Precision - 1; in computeKnownFPClass()
5450 const Function *F = II->getFunction(); in computeKnownFPClass()
5452 if (ConstVal && ConstVal->isZero()) { in computeKnownFPClass()
5453 // ldexp(x, 0) -> x, so propagate everything. in computeKnownFPClass()
5454 Known.propagateCanonicalizingSrc(KnownSrc, *F, II->getType()); in computeKnownFPClass()
5467 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, II->getType())) in computeKnownFPClass()
5469 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType())) in computeKnownFPClass()
5476 computeKnownFPClass(II->getArgOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5506 Op->getOpcode() == Instruction::FAdd && in computeKnownFPClass()
5519 computeKnownFPClass(Op->getOperand(1), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5528 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedSrcs, in computeKnownFPClass()
5537 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5539 if (Op->getOpcode() == Instruction::FAdd) { in computeKnownFPClass()
5546 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. in computeKnownFPClass()
5547 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) || in computeKnownFPClass()
5548 KnownRHS.isKnownNeverLogicalNegZero(*F, Op->getType())) && in computeKnownFPClass()
5549 // Make sure output negative denormal can't flush to -0 in computeKnownFPClass()
5550 outputDenormalIsIEEEOrPosZero(*F, Op->getType())) in computeKnownFPClass()
5556 // Only fsub -0, +0 can return -0 in computeKnownFPClass()
5557 if ((KnownLHS.isKnownNeverLogicalNegZero(*F, Op->getType()) || in computeKnownFPClass()
5558 KnownRHS.isKnownNeverLogicalPosZero(*F, Op->getType())) && in computeKnownFPClass()
5559 // Make sure output negative denormal can't flush to -0 in computeKnownFPClass()
5560 outputDenormalIsIEEEOrPosZero(*F, Op->getType())) in computeKnownFPClass()
5568 // X * X is always non-negative or a NaN. in computeKnownFPClass()
5569 if (Op->getOperand(0) == Op->getOperand(1)) in computeKnownFPClass()
5579 computeKnownFPClass(Op->getOperand(1), DemandedElts, NeedForNan, KnownRHS, in computeKnownFPClass()
5584 computeKnownFPClass(Op->getOperand(0), DemandedElts, NeedForNan, KnownLHS, in computeKnownFPClass()
5596 // 0 * +/-inf produces NaN. in computeKnownFPClass()
5602 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5607 KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) && in computeKnownFPClass()
5609 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType()))) in computeKnownFPClass()
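// Illustrative sketch: with no NaN operands, fmul can only create a NaN
// from 0 * +/-inf; 0.0 * 1.0e308 is 0.0, while 0.0 * inf is NaN.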
5616 if (Op->getOperand(0) == Op->getOperand(1)) { in computeKnownFPClass()
5618 if (Op->getOpcode() == Instruction::FDiv) { in computeKnownFPClass()
5622 // X % X is always exactly [+-]0.0 or a NaN. in computeKnownFPClass()
5638 computeKnownFPClass(Op->getOperand(1), DemandedElts, in computeKnownFPClass()
5650 computeKnownFPClass(Op->getOperand(0), DemandedElts, in computeKnownFPClass()
5655 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5657 if (Op->getOpcode() == Instruction::FDiv) { in computeKnownFPClass()
5662 ((F && KnownLHS.isKnownNeverLogicalZero(*F, Op->getType())) || in computeKnownFPClass()
5663 (F && KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())))) { in computeKnownFPClass()
5667 // X / -0.0 is -Inf (or NaN). in computeKnownFPClass()
5675 KnownRHS.isKnownNeverLogicalZero(*F, Op->getType())) { in computeKnownFPClass()
5696 computeKnownFPClass(Op->getOperand(0), DemandedElts, InterestedClasses, in computeKnownFPClass()
5700 Op->getType()->getScalarType()->getFltSemantics(); in computeKnownFPClass()
5702 Op->getOperand(0)->getType()->getScalarType()->getFltSemantics(); in computeKnownFPClass()
5733 if (Op->getOpcode() == Instruction::UIToFP) in computeKnownFPClass()
5740 int IntSize = Op->getOperand(0)->getType()->getScalarSizeInBits(); in computeKnownFPClass()
5741 if (Op->getOpcode() == Instruction::SIToFP) in computeKnownFPClass()
5742 --IntSize; in computeKnownFPClass()
5746 Type *FPTy = Op->getType()->getScalarType(); in computeKnownFPClass()
5747 if (ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize) in computeKnownFPClass()
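// Illustrative sketch: `sitofp i32 %x to float` can never produce an
// infinity, since ilogb of the largest float (127) covers the 31
// magnitude bits of an i32.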
5754 // Look through extract element. If the index is non-constant or in computeKnownFPClass()
5755 // out-of-range, demand all elements; otherwise just the extracted element. in computeKnownFPClass()
5756 const Value *Vec = Op->getOperand(0); in computeKnownFPClass()
5757 const Value *Idx = Op->getOperand(1); in computeKnownFPClass()
5760 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) { in computeKnownFPClass()
5761 unsigned NumElts = VecTy->getNumElements(); in computeKnownFPClass()
5763 if (CIdx && CIdx->getValue().ult(NumElts)) in computeKnownFPClass()
5764 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue()); in computeKnownFPClass()
5772 if (isa<ScalableVectorType>(Op->getType())) in computeKnownFPClass()
5775 const Value *Vec = Op->getOperand(0); in computeKnownFPClass()
5776 const Value *Elt = Op->getOperand(1); in computeKnownFPClass()
5777 auto *CIdx = dyn_cast<ConstantInt>(Op->getOperand(2)); in computeKnownFPClass()
5782 if (CIdx && CIdx->getValue().ult(NumElts)) { in computeKnownFPClass()
5783 DemandedVecElts.clearBit(CIdx->getZExtValue()); in computeKnownFPClass()
5784 NeedsElt = DemandedElts[CIdx->getZExtValue()]; in computeKnownFPClass()
5816 const Value *LHS = Shuf->getOperand(0); in computeKnownFPClass()
5829 const Value *RHS = Shuf->getOperand(1); in computeKnownFPClass()
5839 ArrayRef<unsigned> Indices = Extract->getIndices(); in computeKnownFPClass()
5840 const Value *Src = Extract->getAggregateOperand(); in computeKnownFPClass()
5841 if (isa<StructType>(Src->getType()) && Indices.size() == 1 && in computeKnownFPClass()
5844 switch (II->getIntrinsicID()) { in computeKnownFPClass()
5849 computeKnownFPClass(II->getArgOperand(0), DemandedElts, in computeKnownFPClass()
5852 const Function *F = cast<Instruction>(Op)->getFunction(); in computeKnownFPClass()
5857 if (F && KnownSrc.isKnownNeverLogicalNegZero(*F, Op->getType())) in computeKnownFPClass()
5866 if (F && KnownSrc.isKnownNeverLogicalPosZero(*F, Op->getType())) in computeKnownFPClass()
5887 // Unreachable blocks may have zero-operand PHI nodes. in computeKnownFPClass()
5888 if (P->getNumIncomingValues() == 0) in computeKnownFPClass()
5893 const unsigned PhiRecursionLimit = MaxAnalysisRecursionDepth - 2; in computeKnownFPClass()
5897 if (isa_and_nonnull<UndefValue>(P->hasConstantValue())) in computeKnownFPClass()
5902 for (const Use &U : P->operands()) { in computeKnownFPClass()
5915 P->getIncomingBlock(U)->getTerminator())); in computeKnownFPClass()
5936 KnownFPClass llvm::computeKnownFPClass(const Value *V, in computeKnownFPClass() argument
5942 ::computeKnownFPClass(V, DemandedElts, InterestedClasses, KnownClasses, Depth, in computeKnownFPClass()
5947 KnownFPClass llvm::computeKnownFPClass(const Value *V, in computeKnownFPClass() argument
5952 ::computeKnownFPClass(V, Known, InterestedClasses, Depth, SQ); in computeKnownFPClass()
5956 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) { in isBytewiseValue() argument
5958 // All byte-wide stores are splatable, even of arbitrary variables. in isBytewiseValue()
5959 if (V->getType()->isIntegerTy(8)) in isBytewiseValue()
5960 return V; in isBytewiseValue()
5962 LLVMContext &Ctx = V->getContext(); in isBytewiseValue()
5966 if (isa<UndefValue>(V)) in isBytewiseValue()
5969 // Return Undef for zero-sized type. in isBytewiseValue()
5970 if (DL.getTypeStoreSize(V->getType()).isZero()) in isBytewiseValue()
5973 Constant *C = dyn_cast<Constant>(V); in isBytewiseValue()
5985 if (C->isNullValue()) in isBytewiseValue()
5988 // Constant floating-point values can be handled as integer values if the in isBytewiseValue()
5992 if (CFP->getType()->isHalfTy()) in isBytewiseValue()
5994 else if (CFP->getType()->isFloatTy()) in isBytewiseValue()
5996 else if (CFP->getType()->isDoubleTy()) in isBytewiseValue()
6005 if (CI->getBitWidth() % 8 == 0) { in isBytewiseValue()
6006 assert(CI->getBitWidth() > 8 && "8 bits should be handled above!"); in isBytewiseValue()
6007 if (!CI->getValue().isSplat(8)) in isBytewiseValue()
6009 return ConstantInt::get(Ctx, CI->getValue().trunc(8)); in isBytewiseValue()
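// Illustrative sketch: i32 0x01010101 splats to the i8 value 1, while
// i16 0x1234 is not a byte splat and fails the isSplat(8) check.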
6014 if (CE->getOpcode() == Instruction::IntToPtr) { in isBytewiseValue()
6015 if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) { in isBytewiseValue()
6016 unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace()); in isBytewiseValue()
6018 CE->getOperand(0), Type::getIntNTy(Ctx, BitWidth), false, DL)) in isBytewiseValue()
6024 auto Merge = [&](Value *LHS, Value *RHS) -> Value * { in isBytewiseValue()
6038 for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I) in isBytewiseValue()
6039 if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL)))) in isBytewiseValue()
6046 for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I) in isBytewiseValue()
6047 if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL)))) in isBytewiseValue()
6071 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) { in BuildSubAggregate()
6075 To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip, in BuildSubAggregate()
6082 PrevTo = Del->getAggregateOperand(); in BuildSubAggregate()
6083 Del->eraseFromParent(); in BuildSubAggregate()
6099 Value *V = FindInsertedValue(From, Idxs); in BuildSubAggregate() local
6101 if (!V) in BuildSubAggregate()
6105 return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp", in BuildSubAggregate()
6123 Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(), in BuildSubAggregate()
6139 llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range, in FindInsertedValue() argument
6141 // Nothing to index? Just return V then (this is useful at the end of our in FindInsertedValue()
6144 return V; in FindInsertedValue()
6145 // We have indices, so V should have an indexable type. in FindInsertedValue()
6146 assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) && in FindInsertedValue()
6148 assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) && in FindInsertedValue()
6151 if (Constant *C = dyn_cast<Constant>(V)) { in FindInsertedValue()
6152 C = C->getAggregateElement(idx_range[0]); in FindInsertedValue()
6157 if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) { in FindInsertedValue()
6161 for (const unsigned *i = I->idx_begin(), *e = I->idx_end(); in FindInsertedValue()
6178 return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx), in FindInsertedValue()
6186 return FindInsertedValue(I->getAggregateOperand(), idx_range, in FindInsertedValue()
6192 return FindInsertedValue(I->getInsertedValueOperand(), in FindInsertedValue()
6196 if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) { in FindInsertedValue()
6202 unsigned size = I->getNumIndices() + idx_range.size(); in FindInsertedValue()
6207 Idxs.append(I->idx_begin(), I->idx_end()); in FindInsertedValue()
6215 return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore); in FindInsertedValue()
6225 if (GEP->getNumOperands() != 3) in isGEPBasedOnPointerToString()
6228 // Make sure the indexed operand is a pointer to an array of \p CharSize integers. in isGEPBasedOnPointerToString()
6230 ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType()); in isGEPBasedOnPointerToString()
6231 if (!AT || !AT->getElementType()->isIntegerTy(CharSize)) in isGEPBasedOnPointerToString()
6236 const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1)); in isGEPBasedOnPointerToString()
6237 if (!FirstIdx || !FirstIdx->isZero()) in isGEPBasedOnPointerToString()
6243 // If V refers to an initialized global constant, set Slice either to
6248 bool llvm::getConstantDataArrayInfo(const Value *V, in getConstantDataArrayInfo() argument
6251 assert(V && "V should not be null."); in getConstantDataArrayInfo()
6256 // Drill down into the pointer expression V, ignoring any intervening in getConstantDataArrayInfo()
6260 dyn_cast<GlobalVariable>(getUnderlyingObject(V)); in getConstantDataArrayInfo()
6261 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer()) in getConstantDataArrayInfo()
6262 // Fail if V is not based on constant global object. in getConstantDataArrayInfo()
6265 const DataLayout &DL = GV->getDataLayout(); in getConstantDataArrayInfo()
6266 APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0); in getConstantDataArrayInfo()
6268 if (GV != V->stripAndAccumulateConstantOffsets(DL, Off, in getConstantDataArrayInfo()
6287 if (GV->getInitializer()->isNullValue()) { in getConstantDataArrayInfo()
6288 Type *GVTy = GV->getValueType(); in getConstantDataArrayInfo()
6295 // transform even undefined library calls into simpler, well-defined in getConstantDataArrayInfo()
6298 Slice.Length = Length < Offset ? 0 : Length - Offset; in getConstantDataArrayInfo()
6302 auto *Init = const_cast<Constant *>(GV->getInitializer()); in getConstantDataArrayInfo()
6304 Type *InitElTy = ArrayInit->getElementType(); in getConstantDataArrayInfo()
6305 if (InitElTy->isIntegerTy(ElementSize)) { in getConstantDataArrayInfo()
6309 ArrayTy = ArrayInit->getType(); in getConstantDataArrayInfo()
6326 ArrayTy = dyn_cast<ArrayType>(Init->getType()); in getConstantDataArrayInfo()
6329 uint64_t NumElts = ArrayTy->getArrayNumElements(); in getConstantDataArrayInfo()
6335 Slice.Length = NumElts - Offset; in getConstantDataArrayInfo()
6339 /// Extract bytes from the initializer of the constant array V, which need
6340 /// not be a nul-terminated string. On success, store the bytes in Str and
6343 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, in getConstantStringInfo() argument
6346 if (!getConstantDataArrayInfo(V, Slice, 8)) in getConstantStringInfo()
6351 // Return a nul-terminated string even for an empty Slice. This is in getConstantStringInfo()
6370 Str = Slice.Array->getAsString(); in getConstantStringInfo()
6377 // some other way that the string is length-bound. in getConstantStringInfo()
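// A minimal caller sketch (hypothetical names; Ptr points into a constant
// global):
//   StringRef S;
//   if (getConstantStringInfo(Ptr, S))
//     use(S); // S holds the bytes up to, not including, the first nul.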
6389 static uint64_t GetStringLengthH(const Value *V, in GetStringLengthH() argument
6393 V = V->stripPointerCasts(); in GetStringLengthH()
6397 if (const PHINode *PN = dyn_cast<PHINode>(V)) { in GetStringLengthH()
6403 for (Value *IncValue : PN->incoming_values()) { in GetStringLengthH()
6405 if (Len == 0) return 0; // Unknown length -> unknown. in GetStringLengthH()
6410 return 0; // Disagree -> unknown. in GetStringLengthH()
6418 // strlen(select(c,x,y)) is known only when strlen(x) == strlen(y). in GetStringLengthH()
6419 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { in GetStringLengthH()
6420 uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize); in GetStringLengthH()
6422 uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize); in GetStringLengthH()
6432 if (!getConstantDataArrayInfo(V, Slice, CharSize)) in GetStringLengthH()
6445 if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0) in GetStringLengthH()
6454 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) { in GetStringLength() argument
6455 if (!V->getType()->isPointerTy()) in GetStringLength()
6459 uint64_t Len = GetStringLengthH(V, PHIs, CharSize); in GetStringLength()
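// Illustrative sketch: for a pointer to the constant "hi\0" this computes 3,
// a length that includes the terminating nul; 0 means the length is unknown.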
6470 if (const Value *RV = Call->getReturnedArgOperand()) in getArgumentAliasingToReturnedPointer()
6475 return Call->getArgOperand(0); in getArgumentAliasingToReturnedPointer()
6481 switch (Call->getIntrinsicID()) { in isIntrinsicReturningPointerAliasingArgumentWithoutCapturing()
6487 // input pointer (and thus preserve null-ness for the purposes of escape in isIntrinsicReturningPointerAliasingArgumentWithoutCapturing()
6502 return !Call->getParent()->getParent()->isPresplitCoroutine(); in isIntrinsicReturningPointerAliasingArgumentWithoutCapturing()
6508 /// \p PN defines a loop-variant pointer to an object. Check if the
6512 // Find the loop-defined value. in isSameUnderlyingObjectInLoop()
6513 Loop *L = LI->getLoopFor(PN->getParent()); in isSameUnderlyingObjectInLoop()
6514 if (PN->getNumIncomingValues() != 2) in isSameUnderlyingObjectInLoop()
6518 auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0)); in isSameUnderlyingObjectInLoop()
6519 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) in isSameUnderlyingObjectInLoop()
6520 PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1)); in isSameUnderlyingObjectInLoop()
6521 if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L) in isSameUnderlyingObjectInLoop()
6530 if (!L->isLoopInvariant(Load->getPointerOperand())) in isSameUnderlyingObjectInLoop()
6535 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) { in getUnderlyingObject() argument
6536 if (!V->getType()->isPointerTy()) in getUnderlyingObject()
6537 return V; in getUnderlyingObject()
6539 if (auto *GEP = dyn_cast<GEPOperator>(V)) { in getUnderlyingObject()
6540 V = GEP->getPointerOperand(); in getUnderlyingObject()
6541 } else if (Operator::getOpcode(V) == Instruction::BitCast || in getUnderlyingObject()
6542 Operator::getOpcode(V) == Instruction::AddrSpaceCast) { in getUnderlyingObject()
6543 Value *NewV = cast<Operator>(V)->getOperand(0); in getUnderlyingObject()
6544 if (!NewV->getType()->isPointerTy()) in getUnderlyingObject()
6545 return V; in getUnderlyingObject()
6546 V = NewV; in getUnderlyingObject()
6547 } else if (auto *GA = dyn_cast<GlobalAlias>(V)) { in getUnderlyingObject()
6548 if (GA->isInterposable()) in getUnderlyingObject()
6549 return V; in getUnderlyingObject()
6550 V = GA->getAliasee(); in getUnderlyingObject()
6552 if (auto *PHI = dyn_cast<PHINode>(V)) { in getUnderlyingObject()
6553 // Look through single-arg phi nodes created by LCSSA. in getUnderlyingObject()
6554 if (PHI->getNumIncomingValues() == 1) { in getUnderlyingObject()
6555 V = PHI->getIncomingValue(0); in getUnderlyingObject()
6558 } else if (auto *Call = dyn_cast<CallBase>(V)) { in getUnderlyingObject()
6569 V = RP; in getUnderlyingObject()
6574 return V; in getUnderlyingObject()
6576 assert(V->getType()->isPointerTy() && "Unexpected operand type!"); in getUnderlyingObject()
6578 return V; in getUnderlyingObject()
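// Illustrative sketch: starting from
//   %g = getelementptr inbounds i8, ptr %a, i64 4
//   %c = addrspacecast ptr %g to ptr addrspace(1)
// this walks back through the cast and the GEP to %a (an alloca, global, or
// argument), giving up after MaxLookup steps.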
6581 void llvm::getUnderlyingObjects(const Value *V, in getUnderlyingObjects() argument
6586 Worklist.push_back(V); in getUnderlyingObjects()
6595 Worklist.push_back(SI->getTrueValue()); in getUnderlyingObjects()
6596 Worklist.push_back(SI->getFalseValue()); in getUnderlyingObjects()
6611 if (!LI || !LI->isLoopHeader(PN->getParent()) || in getUnderlyingObjects()
6613 append_range(Worklist, PN->incoming_values()); in getUnderlyingObjects()
6623 const Value *llvm::getUnderlyingObjectAggressive(const Value *V) { in getUnderlyingObjectAggressive() argument
6628 Worklist.push_back(V); in getUnderlyingObjectAggressive()
6633 const Value *FirstObject = getUnderlyingObject(V); in getUnderlyingObjectAggressive()
6646 Worklist.push_back(SI->getTrueValue()); in getUnderlyingObjectAggressive()
6647 Worklist.push_back(SI->getFalseValue()); in getUnderlyingObjectAggressive()
6652 append_range(Worklist, PN->incoming_values()); in getUnderlyingObjectAggressive()
6667 static const Value *getUnderlyingObjectFromInt(const Value *V) { in getUnderlyingObjectFromInt() argument
6669 if (const Operator *U = dyn_cast<Operator>(V)) { in getUnderlyingObjectFromInt()
6672 if (U->getOpcode() == Instruction::PtrToInt) in getUnderlyingObjectFromInt()
6673 return U->getOperand(0); in getUnderlyingObjectFromInt()
6680 if (U->getOpcode() != Instruction::Add || in getUnderlyingObjectFromInt()
6681 (!isa<ConstantInt>(U->getOperand(1)) && in getUnderlyingObjectFromInt()
6682 Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && in getUnderlyingObjectFromInt()
6683 !isa<PHINode>(U->getOperand(1)))) in getUnderlyingObjectFromInt()
6684 return V; in getUnderlyingObjectFromInt()
6685 V = U->getOperand(0); in getUnderlyingObjectFromInt()
6687 return V; in getUnderlyingObjectFromInt()
6689 assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); in getUnderlyingObjectFromInt()
6696 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V, in getUnderlyingObjectsForCodeGen() argument
6699 SmallVector<const Value *, 4> Working(1, V); in getUnderlyingObjectsForCodeGen()
6701 V = Working.pop_back_val(); in getUnderlyingObjectsForCodeGen()
6704 getUnderlyingObjects(V, Objs); in getUnderlyingObjectsForCodeGen()
6706 for (const Value *V : Objs) { in getUnderlyingObjectsForCodeGen() local
6707 if (!Visited.insert(V).second) in getUnderlyingObjectsForCodeGen()
6709 if (Operator::getOpcode(V) == Instruction::IntToPtr) { in getUnderlyingObjectsForCodeGen()
6711 getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0)); in getUnderlyingObjectsForCodeGen()
6712 if (O->getType()->isPointerTy()) { in getUnderlyingObjectsForCodeGen()
6719 if (!isIdentifiedObject(V)) { in getUnderlyingObjectsForCodeGen()
6723 Objects.push_back(const_cast<Value *>(V)); in getUnderlyingObjectsForCodeGen()
AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
  AllocaInst *Result = nullptr;
  SmallPtrSet<Value *, 4> Visited;
  SmallVector<Value *, 4> Worklist;

  auto AddWork = [&](Value *V) {
    if (Visited.insert(V).second)
      Worklist.push_back(V);
  };

  AddWork(V);
  do {
    V = Worklist.pop_back_val();
    assert(Visited.count(V));
    if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      // Every path must lead to the same alloca.
      if (Result && Result != AI)
        return nullptr;
      Result = AI;
    } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
      AddWork(CI->getOperand(0));
    } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
      for (Value *IncValue : PN->incoming_values())
        AddWork(IncValue);
    } else if (auto *SI = dyn_cast<SelectInst>(V)) {
      AddWork(SI->getTrueValue());
      AddWork(SI->getFalseValue());
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
      if (OffsetZero && !GEP->hasAllZeroIndices())
        return nullptr;
      AddWork(GEP->getPointerOperand());
    } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
      Value *Returned = CB->getReturnedArgOperand();
      if (Returned)
        AddWork(Returned);
      else
        return nullptr;
    } else {
      return nullptr;
    }
  } while (!Worklist.empty());

  return Result;
}
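
// Illustrative use (added): for
//   %a = alloca i64
//   %p = getelementptr i8, ptr %a, i64 0
// findAllocaForValue(%p, /*OffsetZero=*/true) returns %a; a GEP with a
// non-zero index would make the OffsetZero query return nullptr, and a
// value that can reach two different allocas returns nullptr as well.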
static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
    const Value *V, bool AllowLifetime, bool AllowDroppable) {
  for (const User *U : V->users()) {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return false;
    if (AllowLifetime && II->isLifetimeStartOrEnd())
      continue;
    if (AllowDroppable && II->isDroppable())
      continue;
    return false;
  }
  return true;
}

bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
  return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
      V, /* AllowLifetime */ true, /* AllowDroppable */ false);
}

bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
  return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
      V, /* AllowLifetime */ true, /* AllowDroppable */ true);
}
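
// Illustrative example (added): an alloca %a whose only users are
//   call void @llvm.lifetime.start.p0(i64 8, ptr %a)
//   call void @llvm.lifetime.end.p0(i64 8, ptr %a)
// satisfies onlyUsedByLifetimeMarkers(%a); any real load or store of %a
// makes it return false.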
  return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
                                                /* remaining args */ AC, DT,
                                                TLI);
}

// ...
#ifndef NDEBUG
  if (Inst->getOpcode() != Opcode) {
    // Check that the operands are actually compatible with the Opcode
    // override.
    auto hasEqualReturnAndLeadingOperandTypes =
        [](const Instruction *Inst, unsigned NumLeadingOperands) {
          if (Inst->getNumOperands() < NumLeadingOperands)
            return false;
          const Type *ExpectedType = Inst->getType();
          for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
            if (Inst->getOperand(ItOp)->getType() != ExpectedType)
              return false;
          return true;
        };
    // ...
  }
#endif
  switch (Opcode) {
  // ...
  case Instruction::UDiv:
  case Instruction::URem: {
    // x / y is undefined if y == 0.
    const APInt *V;
    if (match(Inst->getOperand(1), m_APInt(V)))
      return *V != 0;
    return false;
  }
  case Instruction::SDiv:
  case Instruction::SRem: {
    // x / y is undefined if y == 0 or x == INT_MIN and y == -1
    const APInt *Numerator, *Denominator;
    if (!match(Inst->getOperand(1), m_APInt(Denominator)))
      return false;
    // We cannot hoist this division if the denominator is 0.
    if (*Denominator == 0)
      return false;
    // It's safe to hoist if the denominator is not 0 or -1.
    if (!Denominator->isAllOnes())
      return true;
    // At this point we know that the denominator is -1. It is safe to hoist as
    // long as we know that the numerator is not INT_MIN.
    if (match(Inst->getOperand(0), m_APInt(Numerator)))
      return !Numerator->isMinSignedValue();
    return false;
  }
  case Instruction::Load: {
    // ... (a load is speculatable only from a pointer that is known
    // dereferenceable and sufficiently aligned at the context instruction) ...
    const DataLayout &DL = LI->getDataLayout();
    return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
                                              LI->getType(), LI->getAlign(),
                                              DL, CtxI, AC, DT, TLI);
  }
  case Instruction::Call: {
    const Function *Callee = CI->getCalledFunction();
    // The called function could have undefined behavior or side-effects, even
    // if marked readnone nounwind.
    return Callee && Callee->isSpeculatable();
  }
  // ...
}
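
// Illustrative example (added): 'sdiv i32 %x, 7' can be speculated because
// the divisor is a constant that is neither 0 nor -1, while
// 'sdiv i32 %x, %y' cannot: %y may be 0, which is immediate UB.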
  // 1) Can't reorder two inf-loop calls, even if readonly
  // 2) Also can't reorder an inf-loop call below an instruction which isn't
  //    safe to speculatively execute. (Inverse of above)
  // ...

ConstantRange
llvm::computeConstantRangeIncludingKnownBits(const WithCache<const Value *> &V,
                                             bool ForSigned,
                                             const SimplifyQuery &SQ) {
  ConstantRange CR1 =
      ConstantRange::fromKnownBits(V.getKnownBits(SQ), ForSigned);
  ConstantRange CR2 = computeConstantRange(V, ForSigned, SQ.IIQ.UseInstrInfo);
  ConstantRange::PreferredRangeType RangeType =
      ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
  return CR1.intersectWith(CR2, RangeType);
}
  // mul nsw of two non-negative numbers is also nuw.
  // ...

  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  // ...

  if (Add && Add->hasNoSignedWrap()) {
  // ...

  // X - (X % ?)
  // The remainder of a value can't have greater magnitude than itself,
  // so the subtraction can't overflow.
  //
  // X - (X -nuw ?)
  // In the minimal case, this would simplify to "?", so there's no subtract
  // at all. But if this analysis is used to peek through casts, for example,
  // then determining no-overflow may allow other transforms.
  // ...

  // X - (X % ?)
  // The remainder of a value can't have greater magnitude than itself,
  // so the subtraction can't overflow.
  //
  // X - (X -nsw ?)
  // In the minimal case, this would simplify to "?", so there's no subtract
  // at all. But if this analysis is used to peek through casts, for example,
  // then determining no-overflow may allow other transforms.
  // ...
  for (const User *U : WO->users()) {
    if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
      assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
      if (EVI->getIndices()[0] == 0)
        Results.push_back(EVI);
      else {
        assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
        for (const auto *U : EVI->users())
          if (const auto *B = dyn_cast<BranchInst>(U)) {
            assert(B->isConditional() && "How else is it using an i1?");
            GuardingBranches.push_back(B);
          }
      }
    }
    // ...
  }

  auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
    BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
    // ...
    // Check if all users of the add are provably no-wrap.
    for (const auto *Result : Results) {
      if (DT.dominates(NoWrapEdge, Result->getParent()))
        continue;
      for (const auto &RU : Result->uses())
        if (!DT.dominates(NoWrapEdge, RU))
          return false;
    }
    return true;
  };
  // ...
  SmallVector<const Constant *, 4> ShiftAmounts;
  if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
    unsigned NumElts = FVTy->getNumElements();
    for (unsigned i = 0; i < NumElts; ++i)
      ShiftAmounts.push_back(C->getAggregateElement(i));
  } else if (isa<ScalableVectorType>(C->getType()))
    return false; // Can't tell how many lanes there are; be conservative.
  else
    ShiftAmounts.push_back(C);

  // Every shift amount must be a constant strictly below the bit width.
  return llvm::all_of(ShiftAmounts, [](const Constant *C) {
    auto *CI = dyn_cast_or_null<ConstantInt>(C);
    return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
  });
}
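
// Illustrative example (added): for 'shl <2 x i8> %x, <i8 3, i8 7>' every
// shift amount is a constant below the bit width (8), so the shift cannot
// produce poison; '<i8 3, i8 8>' would fail the check because 8 is out of
// range.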
      Op->hasPoisonGeneratingAnnotations())
    return true;

  unsigned Opcode = Op->getOpcode();

  // Check whether opcode is a poison/undef-generating operation.
  switch (Opcode) {
  case Instruction::Shl:
  case Instruction::AShr:
  case Instruction::LShr:
    return includesPoison(Kind) && !shiftAmountKnownInRange(Op->getOperand(1));
  // ...
    switch (II->getIntrinsicID()) {
    // ...
      if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
    // ...
             !shiftAmountKnownInRange(II->getArgOperand(1));
    // ...
    return !CB->hasRetAttr(Attribute::NoUndef);
  // ...
  case Instruction::InsertElement:
  case Instruction::ExtractElement: {
    // If index exceeds the length of the vector, it returns poison.
    auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
    unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
    auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
    if (includesPoison(Kind))
      return !Idx ||
             Idx->getValue().uge(VTy->getElementCount().getKnownMinValue());
    return false;
  }
  case Instruction::ShuffleVector: {
    ArrayRef<int> Mask = isa<ConstantExpr>(Op)
                             ? cast<ConstantExpr>(Op)->getShuffleMask()
                             : cast<ShuffleVectorInst>(Op)->getShuffleMask();
    // ...
  }
  // ...
  default: {
    const auto *CE = dyn_cast<ConstantExpr>(Op);
    if (isa<CastInst>(Op) || (CE && CE->isCast()))
      return false;
    // ...
  }
  }
static bool directlyImpliesPoison(const Value *ValAssumedPoison, const Value *V,
                                  unsigned Depth) {
  if (ValAssumedPoison == V)
    return true;

  const unsigned MaxDepth = 2;
  if (Depth >= MaxDepth)
    return false;

  if (const auto *I = dyn_cast<Instruction>(V)) {
    if (any_of(I->operands(), [=](const Use &Op) {
          return propagatesPoison(Op) &&
                 directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
        }))
      return true;

    // V  = extractvalue V0, idx
    // V1 = extractvalue V0, idx1
    // V and V1 both not poison implies V0 is not poison
    const WithOverflowInst *II;
    if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
        (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
         llvm::is_contained(II->args(), ValAssumedPoison)))
      return true;
  }
  return false;
}

static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
                          unsigned Depth) {
  // ...
  if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
    return true;
  // ... (otherwise, recurse through the operands of ValAssumedPoison, as long
  // as ValAssumedPoison itself cannot create poison) ...
    return all_of(I->operands(), [=](const Value *Op) {
      return impliesPoison(Op, V, Depth + 1);
    });
  // ...
}

bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
  return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
}
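
// Illustrative example (added): with %q = add i32 %p, 1,
// impliesPoison(%p, %q) holds because add propagates poison from %p to %q;
// the reverse impliesPoison(%q, %p) does not, since %q being poison says
// nothing about %p.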
static bool programUndefinedIfUndefOrPoison(const Value *V, bool PoisonOnly);

static bool isGuaranteedNotToBeUndefOrPoison(
    const Value *V, AssumptionCache *AC, const Instruction *CtxI,
    const DominatorTree *DT, unsigned Depth, UndefPoisonKind Kind) {
  if (Depth >= MaxAnalysisRecursionDepth)
    return false;

  if (isa<MetadataAsValue>(V))
    return false;

  if (const auto *A = dyn_cast<Argument>(V)) {
    if (A->hasAttribute(Attribute::NoUndef) ||
        A->hasAttribute(Attribute::Dereferenceable) ||
        A->hasAttribute(Attribute::DereferenceableOrNull))
      return true;
  }

  if (auto *C = dyn_cast<Constant>(V)) {
    // ...
    if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
        isa<ConstantPointerNull>(C) || isa<Function>(C))
      return true;

    if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C)) {
      if (includesUndef(Kind) && C->containsUndefElement())
        return false;
      if (includesPoison(Kind) && C->containsPoisonElement())
        return false;
      return !C->containsConstantExpression();
    }
  }

  // Strip pointer casts that keep the representation; an addrspacecast
  // between pointers of the same representation may be stripped as
  // well. We believe that such addrspacecast is equivalent to no-op.
  auto *StrippedV = V->stripPointerCastsSameRepresentation();
  auto OpCheck = [&](const Value *V) {
    return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1, Kind);
  };

  if (auto *Opr = dyn_cast<Operator>(V)) {
    // A freeze instruction can never be undef or poison.
    if (isa<FreezeInst>(V))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(V)) {
      if (CB->hasRetAttr(Attribute::NoUndef) ||
          CB->hasRetAttr(Attribute::Dereferenceable) ||
          CB->hasRetAttr(Attribute::DereferenceableOrNull))
        return true;
    }

    if (const auto *PN = dyn_cast<PHINode>(V)) {
      unsigned Num = PN->getNumIncomingValues();
      bool IsWellDefined = true;
      for (unsigned i = 0; i < Num; ++i) {
        auto *TI = PN->getIncomingBlock(i)->getTerminator();
        if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
                                              DT, Depth + 1, Kind)) {
          IsWellDefined = false;
          break;
        }
      }
      if (IsWellDefined)
        return true;
    } else if (!::canCreateUndefOrPoison(Opr, Kind,
                                         /*ConsiderFlagsAndMetadata*/ true) &&
               all_of(Opr->operands(), OpCheck))
      return true;
  }

  if (auto *I = dyn_cast<LoadInst>(V))
    if (I->hasMetadata(LLVMContext::MD_noundef) ||
        I->hasMetadata(LLVMContext::MD_dereferenceable) ||
        I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
      return true;

  if (programUndefinedIfUndefOrPoison(V, !includesUndef(Kind)))
    return true;
  if (!CtxI || !CtxI->getParent() || !DT)
    return false;

  auto *DNode = DT->getNode(CtxI->getParent());
  if (!DNode)
    return false; // Unreachable block.

  // If V is used as a branch condition before reaching CtxI, V cannot be
  // undef or poison.
  //   br V, BB1, BB2
  // BB1:
  //   CtxI ; V cannot be undef or poison here
  auto *Dominator = DNode->getIDom();
  // Skip the IDom walk for undef queries on non-integer values (compile time).
  if (!includesUndef(Kind) || V->getType()->isIntegerTy())
    while (Dominator) {
      auto *TI = Dominator->getBlock()->getTerminator();

      Value *Cond = nullptr;
      if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
        if (BI->isConditional())
          Cond = BI->getCondition();
      } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
        Cond = SI->getCondition();
      }

      if (Cond) {
        if (Cond == V)
          return true;
        if (!includesUndef(Kind) && isa<Operator>(Cond)) {
          // For poison, we can analyze further.
          auto *Opr = cast<Operator>(Cond);
          if (any_of(Opr->operands(), [V](const Use &U) {
                return V == U && propagatesPoison(U);
              }))
            return true;
        }
      }

      Dominator = Dominator->getIDom();
    }

  if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
    return true;

  return false;
}
bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
                                            const Instruction *CtxI,
                                            const DominatorTree *DT,
                                            unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
                                            UndefPoisonKind::UndefOrPoison);
}

bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
                                     const Instruction *CtxI,
                                     const DominatorTree *DT, unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
                                            UndefPoisonKind::PoisonOnly);
}

bool llvm::isGuaranteedNotToBeUndef(const Value *V, AssumptionCache *AC,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT, unsigned Depth) {
  return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth,
                                            UndefPoisonKind::UndefOnly);
}
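
// Illustrative example (added): a function argument marked 'noundef' is
// guaranteed to be neither undef nor poison, so isGuaranteedNotToBePoison
// and isGuaranteedNotToBeUndef both hold for it; a plain 'add' of unknown
// values guarantees neither.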
    if (mustTriggerUB(I, KnownPoison) && DT->dominates(I, OnPathTo))
      return true;
    // ...
    if (I != Root && !any_of(I->operands(), [&KnownPoison](const Use &U) {
          return propagatesPoison(U) && KnownPoison.count(U.get());
        }))
      continue;
    // ...
      for (const User *User : I->users())
        Worklist.push_back(cast<Instruction>(User));
  }

  // Might be non-UB, or might have a path we couldn't prove must execute on
  // the way to the exiting block.
  return false;
}

OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
                                                 const SimplifyQuery &SQ) {
  return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
                                       Add, SQ);
}

// ...
    switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
// ...
  // An instruction that returns without throwing must transfer control flow
  // to a successor.
  return !I->mayThrow() && I->willReturn();
// ...
  assert(ScanLimit && "scan limit must be non-zero");
  // ...
    if (--ScanLimit == 0)
      return false;
// ...

bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
                                                  const Loop *L) {
  // The loop header is guaranteed to be executed for every iteration.
  if (I->getParent() != L->getHeader()) return false;

  for (const Instruction &LI : *L->getHeader()) {
    // ...
  switch (I->getOpcode()) {
  // ... (propagatesPoison: table of opcodes that propagate a poison operand;
  // intrinsics are handled by a nested switch) ...
    switch (II->getIntrinsicID()) {
    // ...

/// Enumerates all operands of \p I that are guaranteed to not be undef or
/// poison. If the callback \p Handle returns true, stop processing and return
/// true. Otherwise, return false.
template <typename CallableT>
static bool handleGuaranteedWellDefinedOps(const Instruction *I,
                                           const CallableT &Handle) {
  switch (I->getOpcode()) {
  case Instruction::Store:
    if (Handle(cast<StoreInst>(I)->getPointerOperand()))
      return true;
    break;
  case Instruction::Load:
    if (Handle(cast<LoadInst>(I)->getPointerOperand()))
      return true;
    break;
  // Since the dereferenceable attribute implies noundef, atomic operations
  // also implicitly have noundef pointers.
  case Instruction::AtomicCmpXchg:
    if (Handle(cast<AtomicCmpXchgInst>(I)->getPointerOperand()))
      return true;
    break;
  case Instruction::AtomicRMW:
    if (Handle(cast<AtomicRMWInst>(I)->getPointerOperand()))
      return true;
    break;
  case Instruction::Call:
  case Instruction::Invoke: {
    const CallBase *CB = cast<CallBase>(I);
    if (CB->isIndirectCall() && Handle(CB->getCalledOperand()))
      return true;
    for (unsigned i = 0; i < CB->arg_size(); ++i)
      if ((CB->paramHasAttr(i, Attribute::NoUndef) ||
           CB->paramHasAttr(i, Attribute::Dereferenceable) ||
           CB->paramHasAttr(i, Attribute::DereferenceableOrNull)) &&
          Handle(CB->getArgOperand(i)))
        return true;
    break;
  }
  case Instruction::Ret:
    if (I->getFunction()->hasRetAttribute(Attribute::NoUndef) &&
        Handle(I->getOperand(0)))
      return true;
    break;
  case Instruction::Switch:
    if (Handle(cast<SwitchInst>(I)->getCondition()))
      return true;
    break;
  case Instruction::Br: {
    auto *BR = cast<BranchInst>(I);
    if (BR->isConditional() && Handle(BR->getCondition()))
      return true;
    break;
  }
  default:
    break;
  }

  return false;
}
  handleGuaranteedWellDefinedOps(I, [&](const Value *V) {
    Operands.push_back(V);
    return false;
  });
}

/// Enumerates all operands of \p I that are guaranteed to not be poison.
template <typename CallableT>
static bool handleGuaranteedNonPoisonOps(const Instruction *I,
                                         const CallableT &Handle) {
  if (handleGuaranteedWellDefinedOps(I, Handle))
    return true;
  switch (I->getOpcode()) {
  // The divisor must not be poison (division by a poison value is immediate
  // UB), though it may be partially undef.
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
    return Handle(I->getOperand(1));
  default:
    return false;
  }
}

void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
                                     SmallVectorImpl<const Value *> &Operands) {
  handleGuaranteedNonPoisonOps(I, [&](const Value *V) {
    Operands.push_back(V);
    return false;
  });
}

bool llvm::mustTriggerUB(const Instruction *I,
                         const SmallPtrSetImpl<const Value *> &KnownPoison) {
  return handleGuaranteedNonPoisonOps(
      I, [&](const Value *V) { return KnownPoison.count(V); });
}
static bool programUndefinedIfUndefOrPoison(const Value *V, bool PoisonOnly) {
  // We currently only look for uses of values within the same basic block, as
  // that makes it easier to guarantee that the uses will be executed given
  // that V is executed.
  //
  // FIXME: Expand this to consider uses beyond the same basic block. To do
  // this, look out for the distinction between post-dominance and strong
  // post-dominance.
  const BasicBlock *BB = nullptr;
  BasicBlock::const_iterator Begin;
  if (const auto *Inst = dyn_cast<Instruction>(V)) {
    BB = Inst->getParent();
    Begin = Inst->getIterator();
    Begin++;
  } else if (const auto *Arg = dyn_cast<Argument>(V)) {
    if (Arg->getParent()->isDeclaration())
      return false;
    BB = &Arg->getParent()->getEntryBlock();
    Begin = BB->begin();
  } else {
    return false;
  }

  // ...
  BasicBlock::const_iterator End = BB->end();

  if (!PoisonOnly) {
    // Since undef does not propagate eagerly, be conservative here and only
    // scan for instructions that take V as a guaranteed well-defined operand.
    // ...
      if (--ScanLimit == 0)
        break;
      if (handleGuaranteedWellDefinedOps(&I, [V](const Value *WellDefinedOp) {
            return WellDefinedOp == V;
          }))
        return true;
    // ...
  }

  SmallPtrSet<const Value *, 32> YieldsPoison;
  YieldsPoison.insert(V);
  // ... (forward scan: an instruction that must trigger UB given a poison
  // operand in YieldsPoison proves the program undefined; otherwise poison is
  // propagated through uses, following a single-successor chain of blocks) ...
    if (--ScanLimit == 0)
      return false;
  // ...
    BB = BB->getSingleSuccessor();
    // ...
    Begin = BB->getFirstNonPHI()->getIterator();
    End = BB->end();
  // ...
}
static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
  if (FMF.noNaNs())
    return true;

  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isNaN();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isNaN())
        return false;
    }
    return true;
  }

  if (isa<ConstantAggregateZero>(V))
    return true;

  return false;
}

/// Return true if the value represents a known non-zero floating-point
/// constant.
static bool isKnownNonZero(const Value *V) {
  if (auto *C = dyn_cast<ConstantFP>(V))
    return !C->isZero();

  if (auto *C = dyn_cast<ConstantDataVector>(V)) {
    if (!C->getElementType()->isFloatingPointTy())
      return false;
    for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
      if (C->getElementAsAPFloat(I).isZero())
        return false;
    }
    return true;
  }

  return false;
}
/// Given non-min/max outer cmp/select from the clamp pattern this
/// function recognizes if it can be substituted by a "canonical" min/max
/// pattern.
  // Try to match
  //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
  //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
  // and return the description of the outer Max/Min.
  // ...
  const APFloat *FC1;
  if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
    return {SPF_UNKNOWN, SPNB_NA, false};
  // ...

/// Recognize variations of:
///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
  // ...
    // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
    if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
      return {SPF_SMAX, SPNB_NA, false};
    // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
    if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
      return {SPF_SMIN, SPNB_NA, false};
    // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
    if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
      return {SPF_UMAX, SPNB_NA, false};
    // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
    if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
        C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
      return {SPF_UMIN, SPNB_NA, false};
  // ...
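
// Illustrative example (added): with C1 = 10 and C2 = 100,
//   %min = call i32 @llvm.smin.i32(i32 %x, i32 100)
//   %cmp = icmp slt i32 %x, 10
//   %r   = select i1 %cmp, i32 10, i32 %min
// is recognized as the clamp smax(smin(%x, 100), 10).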
  // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
  // ...
  // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
  // ...
  // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
  // ...
  // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
  // ...
/// If the input value is the result of a 'not' op, constant integer, or vector
/// splat of a constant integer, return the bitwise-not source value.
/// TODO: This could be extended to handle non-splat vector integer constants.
static Value *getNotValue(Value *V) {
  Value *NotV;
  if (match(V, m_Not(m_Value(NotV))))
    return NotV;

  const APInt *C;
  if (match(V, m_APInt(C)))
    return ConstantInt::get(V->getType(), ~(*C));

  return nullptr;
}
/// Match non-obvious integer minimum and maximum sequences.
  // ...
  // An unsigned min/max can be written with a signed compare:
  // Is the sign bit set?
  // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
  // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
  if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
    // ...

  // Is the sign bit clear?
  // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
  // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
  if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
    // ...
    if (NeedNSW && !BO->hasNoSignedWrap())
      return false;

    auto *Zero = cast<Constant>(BO->getOperand(0));
    if (!AllowPoison && !Zero->isNullValue())
      return false;
  // ...

  // X = -Y or Y = -X
  return IsNegationOf(X, Y) || IsNegationOf(Y, X);
  // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
  // ...
  // elements because those can not be back-propagated for analysis.
      // ...
      !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
      // ...
      !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
  // ...
  // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
  // minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
  // ...
  // When given one NaN and one non-NaN input:
  //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
  //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
  //     ordered comparison fails), which could be NaN or non-NaN.
  // ...
  // Both operands are known non-NaN.
  // ...
  // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
  // ...
  // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
  // ...
  // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
  // ...
  // Set the return values. If the compare uses the negated value (-X >s 0),
  // ...
  // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
  // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
  // ...
  // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
  // ...
  // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
  // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
  // ...
  // Set the return values. If the compare uses the negated value (-X >s 0),
  // ...
  // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
  // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
  // ...
  // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
  // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
  // ...
  // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
  // may return either -0.0 or 0.0, so fcmp/select pair has stricter
  // semantics than the intrinsics. Be conservative in such a case.
  // ...
  *CastOp = Cast1->getOpcode();
  Type *SrcTy = Cast1->getSrcTy();
  if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
    // If V1 and V2 are both the same cast from the same type, look through V1.
    if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
      return Cast2->getOperand(0);
    return nullptr;
  }
  // ...

  const DataLayout &DL = CmpI->getDataLayout();
  // ...
  if (CmpI->isUnsigned())
    // ... (zext/trunc round trip) ...
  if (CmpI->isSigned())
    // ... (sext/trunc round trip) ...
  // ...
  if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
      CmpConst->getType() == SrcTy) {
    // ...
    // select i1 %cond, x, -x.
    // ...
    unsigned ExtOp = CmpI->isSigned() ? Instruction::SExt : Instruction::ZExt;
    // ...
  }
  // ...
  // Finally, check that casting the constant back reproduces it exactly.
  Constant *CastedBack =
      ConstantFoldCastOperand(*CastOp, CastedTo, C->getType(), DL);
  // ...
SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
                                             Instruction::CastOps *CastOp,
                                             unsigned Depth) {
  SelectInst *SI = dyn_cast<SelectInst>(V);
  if (!SI)
    return {SPF_UNKNOWN, SPNB_NA, false};

  CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
  if (!CmpI)
    return {SPF_UNKNOWN, SPNB_NA, false};

  Value *TrueVal = SI->getTrueValue();
  Value *FalseVal = SI->getFalseValue();
  return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
                                            CastOp, Depth);
}

SelectPatternResult llvm::matchDecomposedSelectPattern(
    CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
    Instruction::CastOps *CastOp, unsigned Depth) {
  CmpInst::Predicate Pred = CmpI->getPredicate();
  Value *CmpLHS = CmpI->getOperand(0);
  Value *CmpRHS = CmpI->getOperand(1);
  FastMathFlags FMF;
  if (isa<FPMathOperator>(CmpI))
    FMF = CmpI->getFastMathFlags();

  // Bail out early.
  if (CmpI->isEquality())
    return {SPF_UNKNOWN, SPNB_NA, false};

  // Deal with type mismatches.
  if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
    if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      // ...
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  cast<CastInst>(TrueVal)->getOperand(0), C,
                                  LHS, RHS, Depth);
    }
    if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
      // If this is a potential fmin/fmax with a cast to integer, then ignore
      // -0.0 because there is no corresponding integer value.
      // ...
      return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
                                  C, cast<CastInst>(FalseVal)->getOperand(0),
                                  LHS, RHS, Depth);
    }
  }
  // ...
}
  // Handle the case of a simple two-predecessor recurrence PHI.
  // There's a lot more that could theoretically be done here, but
  // this is sufficient to catch some interesting cases.
  if (P->getNumIncomingValues() != 2)
    return false;

  for (unsigned i = 0; i != 2; ++i) {
    Value *L = P->getIncomingValue(i);
    Value *R = P->getIncomingValue(!i);
    auto *LU = dyn_cast<BinaryOperator>(L);
    if (!LU)
      continue;
    unsigned Opcode = LU->getOpcode();
    // ... (only a fixed list of binary opcodes is matched;
    // TODO: Expand list -- xor, div, gep, uaddo, etc..) ...
    Value *LL = LU->getOperand(0);
    Value *LR = LU->getOperand(1);
    // Find a recurrence: one of LU's operands must be the phi itself.
    // ...
    BO = LU;
    Start = R;
    Step = L;
    return true;
  }
  return false;
}

bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
                                 Value *&Start, Value *&Step) {
  BinaryOperator *BO = nullptr;
  P = dyn_cast<PHINode>(I->getOperand(0));
  if (!P)
    P = dyn_cast<PHINode>(I->getOperand(1));
  return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
}
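
// Illustrative example (added): for the loop phi
//   %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//   %iv.next = add i32 %iv, 1
// matchSimpleRecurrence fills BO = %iv.next, Start = 0, Step = 1.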
  case CmpInst::ICMP_SLE: {
    // LHS s<= LHS +_{nsw} C if C >= 0
    const APInt *C;
    if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
      return !C->isNegative();

    // LHS s<= smax(LHS, V) for any V
    if (match(RHS, m_SMax(m_Specific(LHS), m_Value())))
      return true;

    // smin(RHS, V) s<= RHS for any V
    if (match(LHS, m_SMin(m_Specific(RHS), m_Value())))
      return true;

    // Match A to (X +_{nsw} CA) and B to (X +_{nsw} CB)
    // ...
      return CLHS->sle(*CRHS);

    return false;
  }

  case CmpInst::ICMP_ULE: {
    // LHS u<= LHS +_{nuw} V for any V
    if (match(RHS, m_c_Add(m_Specific(LHS), m_Value())) &&
        cast<OverflowingBinaryOperator>(RHS)->hasNoUnsignedWrap())
      return true;

    // LHS u<= LHS | V for any V
    // LHS u<= umax(LHS, V) for any V
    // RHS >> V u<= RHS for any V
    // ... (each matched with the corresponding PatternMatch matcher) ...

    // RHS u/ C_ugt_1 u<= RHS
    const APInt *C;
    if (match(LHS, m_UDiv(m_Specific(RHS), m_APInt(C))) && C->ugt(1))
      return true;

    // RHS & V u<= RHS for any V
    // umin(RHS, V) u<= RHS for any V
    // ...

    // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
    // ...
      return CLHS->ule(*CRHS);

    return false;
  }
  }
}
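
// Illustrative example (added): isTruePredicate can prove 'X u<= X +nuw Y'
// and '(X & Y) u<= X' for arbitrary X and Y, so an 'icmp ule' between such
// pairs folds to true with no knowledge of the operands' actual values.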
  Value *L0 = LHS->getOperand(0);
  Value *L1 = LHS->getOperand(1);
  // ...
  CmpInst::Predicate LPred =
      LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();

  // We can have non-canonical operands, so try to normalize any common operand
  // to L0/R0.
  // ...

  // See if we can infer anything if operand-0 matches and we have at least one
  // constant.
  // ...
                     /*CxtI=*/nullptr, /*DT=*/nullptr,
                     MaxAnalysisRecursionDepth - 1);
  // ...
                     /*CxtI=*/nullptr, /*DT=*/nullptr,
                     MaxAnalysisRecursionDepth - 1);
  // ...

  // The LHS must be an 'or', 'and', or a 'select' instruction.
  assert((LHS->getOpcode() == Instruction::And ||
          LHS->getOpcode() == Instruction::Or ||
          LHS->getOpcode() == Instruction::Select) &&
         "Expected LHS to be 'and', 'or', or 'select'.");
  // ...
  // FIXME: Make this non-recursive.
  // ...

  // Both the condition kinds must agree on scalar-vs-vector shape.
  if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
    return std::nullopt;

  assert(LHS->getType()->isIntOrIntVectorTy(1) &&
         "Expected integer type only!");
  // ...
  if ((LHSI->getOpcode() == Instruction::And ||
       LHSI->getOpcode() == Instruction::Or ||
       LHSI->getOpcode() == Instruction::Select))
    // ... (delegate to isImpliedCondAndOr) ...
  // ...
  if (std::optional<bool> Implied = isImpliedCondition(
          LHS, RHSCmp->getPredicate(), RHSCmp->getOperand(0),
          RHSCmp->getOperand(1), DL, LHSIsTrue, Depth))
    return Implied;
  // ...
static std::pair<Value *, bool>
getDomPredecessorCondition(const Instruction *ContextI) {
  if (!ContextI || !ContextI->getParent())
    return {nullptr, false};

  // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (eg, from a SimplifyQuery) instead?
  const BasicBlock *ContextBB = ContextI->getParent();
  const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
  if (!PredBB)
    return {nullptr, false};

  // We need a conditional branch in the predecessor.
  Value *PredCond;
  BasicBlock *TrueBB, *FalseBB;
  if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
    return {nullptr, false};
  // ...
}

std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
                                                  const Instruction *ContextI,
                                                  const DataLayout &DL) {
  assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
  // ...
}
  case Instruction::Add:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
      // If the caller expects a signed compare, then try to use a signed
      // range. Otherwise if both no-wraps are set, use the unsigned range
      // because it is never larger than the signed range. Example:
      // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
      // ...
      if (C->isNegative()) {
        // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
        Lower = APInt::getSignedMinValue(Width);
        Upper = APInt::getSignedMaxValue(Width) + *C + 1;
      } else {
        // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
        Lower = APInt::getSignedMinValue(Width) + *C;
        Upper = APInt::getSignedMaxValue(Width) + 1;
      }
    }
    break;

  case Instruction::And:
    // X & -X is a power of two or zero. So we can cap the value at max power
    // of two.
    // ...
    break;

  case Instruction::AShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
      // ...
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      unsigned ShiftAmount = Width - 1;
      if (!C->isZero() && IIQ.isExact(&BO))
        ShiftAmount = C->countr_zero();
      if (C->isNegative()) {
        // 'ashr C, x' produces [C, C >> (Width-1)]
        Lower = *C;
        Upper = C->ashr(ShiftAmount) + 1;
      } else {
        // 'ashr C, x' produces [C >> (Width-1), C]
        Lower = C->ashr(ShiftAmount);
        Upper = *C + 1;
      }
    }
    break;

  case Instruction::LShr:
    if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'lshr x, C' produces [0, UINT_MAX >> C].
      // ...
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      // 'lshr C, x' produces [C >> (Width-1), C].
      unsigned ShiftAmount = Width - 1;
      if (!C->isZero() && IIQ.isExact(&BO))
        ShiftAmount = C->countr_zero();
      Lower = C->lshr(ShiftAmount);
      Upper = *C + 1;
    }
    break;

  case Instruction::Shl:
    if (match(BO.getOperand(0), m_APInt(C))) {
      // ... ('shl nuw C, x' produces [C, C << CLZ(C)]) ...
      if (C->isNegative()) {
        // 'shl nsw C, x' produces [C << CLO(C)-1, C]
        unsigned ShiftAmount = C->countl_one() - 1;
        Lower = C->shl(ShiftAmount);
        Upper = *C + 1;
      } else {
        // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
        unsigned ShiftAmount = C->countl_zero() - 1;
        Lower = *C;
        Upper = C->shl(ShiftAmount) + 1;
      }
      // ... (without no-wrap flags, the longest run of ones shifted to the
      // high bits caps the value) ...
      Upper = APInt::getHighBitsSet(Width, C->popcount()) + 1;
      // ...
    } else if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
      // 'shl x, C' produces [0, UINT_MAX << C].
      Upper = APInt::getBitsSetFrom(Width, C->getZExtValue()) + 1;
    }
    break;

  case Instruction::SDiv:
    if (match(BO.getOperand(1), m_APInt(C))) {
      if (C->isAllOnes()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
        //    where C != -1 and C != 0 and C != 1
        // ...
      } else if (C->countl_zero() < Width - 1) {
        // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
        //    where C != -1 and C != 0 and C != 1
        // ...
      }
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      if (C->isMinSignedValue()) {
        // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
        // ...
      } else {
        // 'sdiv C, x' produces [-|C|, |C|].
        Upper = C->abs() + 1;
        Lower = (-Upper) + 1;
      }
    }
    break;

  case Instruction::SRem:
    if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
      // ...
      // 'srem x, C' produces (-|C|, |C|).
      Upper = C->abs();
      Lower = (-Upper) + 1;
    } else if (match(BO.getOperand(0), m_APInt(C))) {
      if (C->isNegative()) {
        // 'srem -|C|, x' produces [-|C|, 0].
        Upper = 1;
        Lower = *C;
      }
      // ...
    }
    break;
  // ...
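
// Illustrative example (added): for 'srem i32 %x, 8' the result lies in
// (-8, 8), so setLimitsForBinOp yields Lower = -7 and Upper = 8
// (the upper bound is exclusive), i.e. the range [-7, 7].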
static ConstantRange getRangeForIntrinsic(const IntrinsicInst &II) {
  unsigned Width = II.getType()->getScalarSizeInBits();
  const APInt *C;
  switch (II.getIntrinsicID()) {
  // ...
  case Intrinsic::sadd_sat:
    // ...
      if (C->isNegative())
        // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
        // ...
    break;
  case Intrinsic::usub_sat:
    // ...
    // usub.sat(x, C) produces [0, UINT_MAX - C].
    if (match(II.getOperand(1), m_APInt(C)))
      return ConstantRange::getNonEmpty(APInt::getZero(Width),
                                        APInt::getMaxValue(Width) - *C + 1);
    break;
  case Intrinsic::ssub_sat:
    if (match(II.getOperand(0), m_APInt(C))) {
      if (C->isNegative())
        // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)]:
        return ConstantRange::getNonEmpty(
            APInt::getSignedMinValue(Width),
            *C - APInt::getSignedMinValue(Width) + 1);
      // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
      return ConstantRange::getNonEmpty(*C - APInt::getSignedMaxValue(Width),
                                        APInt::getSignedMaxValue(Width) + 1);
    }
    if (match(II.getOperand(1), m_APInt(C))) {
      if (C->isNegative())
        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]:
        return ConstantRange::getNonEmpty(APInt::getSignedMinValue(Width) - *C,
                                          APInt::getSignedMaxValue(Width) + 1);
      // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
      return ConstantRange::getNonEmpty(
          APInt::getSignedMinValue(Width),
          APInt::getSignedMaxValue(Width) - *C + 1);
    }
    break;
  // ...
  case Intrinsic::abs:
    // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
    // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
    // ...
    break;
  // ...
  }
  return ConstantRange::getFull(Width);
}

static ConstantRange getRangeForSelectPattern(const SelectInst &SI,
                                              const InstrInfoQuery &IIQ) {
  unsigned BitWidth = SI.getType()->getScalarSizeInBits();
  // ...
  // If the negation of the abs has the NSW flag, the result is
  // [0..SIGNED_MAX],
  // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
  // ...
  // The result of -abs(X) is <= 0.
  // ...
}

static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
  // The maximum representable value of a half is 65504.
  unsigned BitWidth = I->getType()->getScalarSizeInBits();
  if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
    return;
  if (isa<FPToSIInst>(I) && BitWidth >= 17) {
    Lower = APInt(BitWidth, -65504);
    Upper = APInt(BitWidth, 65505);
  }
  // ... (for fptoui, only the upper bound applies) ...
}
ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
                                         bool UseInstrInfo, AssumptionCache *AC,
                                         const Instruction *CtxI,
                                         const DominatorTree *DT,
                                         unsigned Depth) {
  assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction");

  if (Depth == MaxAnalysisRecursionDepth)
    return ConstantRange::getFull(V->getType()->getScalarSizeInBits());

  if (auto *C = dyn_cast<Constant>(V))
    return C->toConstantRange();

  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  InstrInfoQuery IIQ(UseInstrInfo);
  ConstantRange CR = ConstantRange::getFull(BitWidth);
  if (auto *BO = dyn_cast<BinaryOperator>(V)) {
    APInt Lower = APInt(BitWidth, 0);
    APInt Upper = APInt(BitWidth, 0);
    setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
    CR = ConstantRange::getNonEmpty(Lower, Upper);
  } else if (auto *II = dyn_cast<IntrinsicInst>(V))
    CR = getRangeForIntrinsic(*II);
  else if (auto *SI = dyn_cast<SelectInst>(V)) {
    ConstantRange CRTrue = computeConstantRange(
        SI->getTrueValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
    ConstantRange CRFalse = computeConstantRange(
        SI->getFalseValue(), ForSigned, UseInstrInfo, AC, CtxI, DT, Depth + 1);
    CR = CRTrue.unionWith(CRFalse);
    CR = CR.intersectWith(getRangeForSelectPattern(*SI, IIQ));
  } else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) {
    APInt Lower = APInt::getSignedMinValue(BitWidth),
          Upper = APInt::getSignedMaxValue(BitWidth);
    setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
    CR = ConstantRange::getNonEmpty(Lower, Upper);
  } else if (const auto *A = dyn_cast<Argument>(V))
    if (std::optional<ConstantRange> Range = A->getRange())
      CR = *Range;

  if (auto *I = dyn_cast<Instruction>(V)) {
    // ... (intersect with !range metadata) ...
    if (const auto *CB = dyn_cast<CallBase>(V))
      if (std::optional<ConstantRange> Range = CB->getRange())
        CR = CR.intersectWith(*Range);
  }

  if (CtxI && AC) {
    // Try to restrict the range based on information from assumptions.
    for (auto &AssumeVH : AC->assumptionsFor(V)) {
      if (!AssumeVH)
        continue;
      CallInst *I = cast<CallInst>(AssumeVH);
      assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
             "Got assumption for the wrong function!");
      assert(I->getIntrinsicID() == Intrinsic::assume &&
             "must be an assume intrinsic");

      if (!isValidAssumeForContext(I, CtxI, DT))
        continue;
      Value *Arg = I->getArgOperand(0);
      ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
      // Currently we just use information from comparisons.
      if (!Cmp || Cmp->getOperand(0) != V)
        continue;
      // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
      ConstantRange RHS =
          computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
                               UseInstrInfo, AC, I, DT, Depth + 1);
      CR = CR.intersectWith(
          ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
    }
  }

  return CR;
}
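
// Illustrative example (added): if the only information about %x is
//   %cmp = icmp ult i32 %x, 100
//   call void @llvm.assume(i1 %cmp)
// then computeConstantRange(%x, ...) with a valid context instruction
// intersects the full range with [0, 100) derived from the assume.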
addValueAffectedByCondition(Value *V,
                            function_ref<void(Value *)> InsertAffected) {
  assert(V != nullptr);
  if (isa<Argument>(V) || isa<GlobalValue>(V)) {
    InsertAffected(V);
  } else if (auto *I = dyn_cast<Instruction>(V)) {
    InsertAffected(V);
    // ... (also peek through unary operators to the source of the condition)
  }
}

void llvm::findValuesAffectedByCondition(
    Value *Cond, bool IsAssume, function_ref<void(Value *)> InsertAffected) {
  auto AddAffected = [&InsertAffected](Value *V) {
    addValueAffectedByCondition(V, InsertAffected);
  };
  // ...
  SmallVector<Value *, 8> Worklist;
  SmallPtrSet<Value *, 8> Visited;
  Worklist.push_back(Cond);
  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    CmpInst::Predicate Pred;
    Value *A, *B, *X;

    if (IsAssume) {
      AddAffected(V);
      if (match(V, m_Not(m_Value(X))))
        AddAffected(X);
    }

    if (match(V, m_LogicalOp(m_Value(A), m_Value(B)))) {
      // assume(A && B) is split to -> assume(A); assume(B);
      // assume(!(A || B)) is split to -> assume(!A); assume(!B);
      // Finally, assume(A || B) / assume(!(A && B)) generally don't provide
      // much information.
      // ...
    } else if (match(V, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
      // ...
      // X & Y u> C    -> X >u C && Y >u C
      // X | Y u< C    -> X u< C && Y u< C
      // X nuw+ Y u< C -> X u< C && Y u< C
      // ...
      // X nuw- Y u> C -> X u> C
      // ...
      // Handle icmp slt/sgt (bitcast X to int), 0/-1, which is supported
      // by computeKnownFPClass().
      // ...
    } else if (match(V, m_Intrinsic<Intrinsic::is_fpclass>(m_Value(A),
                                                           m_Value()))) {
      AddAffected(A);
    }
    // ...
  }
}
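
// Illustrative example (added): for a branch on
//   %c = icmp ugt i32 %a, 8   where %a = and i32 %x, %y
// both %x and %y are reported as affected, because X & Y u> C implies
// X u> C and Y u> C.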