//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  if (isa<ScalableVectorType>(Shuf->getType())) {
    assert(DemandedElts == APInt(1, 1));
    DemandedLHS = DemandedRHS = DemandedElts;
    return true;
  }

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  return llvm::getShuffleDemandedElts(NumElts, Shuf->getShuffleMask(),
                                      DemandedElts, DemandedLHS, DemandedRHS);
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // Since the number of lanes in a scalable vector is unknown at compile time,
  // we track one bit which is implicitly broadcast to all lanes.  This means
  // that all lanes in a scalable vector are considered demanded.
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}
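
// For example (illustrative): a <4 x i32> value is queried with all four
// lanes demanded (DemandedElts == 0b1111), while a <vscale x 4 x i32> value
// is queried through the single broadcast bit:
//
//   APInt FixedElts = APInt::getAllOnes(4); // one bit per fixed lane
//   APInt ScalableElts(1, 1);               // broadcast to all scalable lanes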

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
  {
    Value *M;
    if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(RHS, m_c_And(m_Specific(M), m_Value())))
      return true;
    if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(LHS, m_c_And(m_Specific(M), m_Value())))
      return true;
  }

  // X op (Y & ~X)
  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
      match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
    return true;

  // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
  // for constant Y.
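  // For example (illustrative), with Y = 0b1100 and X = 0b1010:
  //   (X & Y) ^ Y = 0b1000 ^ 0b1100 = 0b0100 == Y & ~X,
  // which by construction shares no set bits with X.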
  Value *Y;
  if (match(RHS,
            m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
      match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
    return true;

  // Peek through extends to find a 'not' of the other side:
  // (ext Y) op ext(~Y)
  // (ext ~Y) op ext(Y)
  if ((match(LHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(RHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))) ||
      (match(RHS, m_ZExtOrSExt(m_Value(Y))) &&
       match(LHS, m_ZExtOrSExt(m_Not(m_Specific(Y))))))
    return true;

  // Look for: (A & B) op ~(A | B)
  {
    Value *A, *B;
    if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
    if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
        match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
  }
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}
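
// A worked example of the inverted-mask case above (illustrative IR):
//
//   %lhs = and i8 %x, 3    ; X & ~M with M = 0b1100
//   %rhs = and i8 %y, 12   ; Y & M
//
// %lhs can only have bits 0-1 set and %rhs only bits 2-3, so no bit position
// is set in both and, e.g., %lhs + %rhs can never produce a carry.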

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here.  We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
                                         unsigned Depth, AssumptionCache *AC,
                                         const Instruction *CxtI,
                                         const DominatorTree *DT) {
  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
  return V->getType()->getScalarSizeInBits() - SignBits + 1;
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
                                   bool NSW, const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}
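
// Worked example (illustrative): if both addends have their low four bits
// known zero, no carry can be produced below bit 4, so the sum's low four
// bits are known zero as well:
//
//   Op0 known: ????0000, Op1 known: ????0000  =>  KnownOut: ????0000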

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  bool SelfMultiply = Op0 == Op1;
  // TODO: SelfMultiply can be poison, but not undef.
  if (SelfMultiply)
    SelfMultiply &=
        isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
  Known = KnownBits::mul(Known, Known2, SelfMultiply);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly.  This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}
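
// Example of the sign reasoning above (illustrative): for a mul nsw with Op1
// known negative and Op0 known non-negative, the product is known negative
// only if Op0 is also known non-zero, since e.g. -3 * 2 = -6 but -3 * 0 = 0.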

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}
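
// Worked example (illustrative): a single !range [32, 36) on an i8 load has
// Min = 0b00100000 and Max = 0b00100011, so Min ^ Max has six leading zeros
// and the top six bits are shared by all in-range values:
//
//   Known.One  = 0b00100000
//   Known.Zero = 0b11011100   // i.e. the value is known to be 001000??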

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
                                   return EphValues.count(U);
                                 })) {
      if (V == E)
        return true;

      if (V == I || (isa<Instruction>(V) &&
                     !cast<Instruction>(V)->mayHaveSideEffects() &&
                     !cast<Instruction>(V)->isTerminator())) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}
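
// For example (illustrative IR), in
//
//   %cmp = icmp ugt i32 %x, 10
//   call void @llvm.assume(i1 %cmp)
//
// the icmp exists only to feed the assume, so it is ephemeral to it; using
// the assume to simplify %cmp itself would erase the very fact being assumed.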

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}
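
// For example (illustrative IR), an assume in the entry block is a valid
// context for any instruction it dominates:
//
//   %nonzero = icmp ne i32 %x, 0
//   call void @llvm.assume(i1 %nonzero)
//   ...
//   %q = udiv i32 %a, %x   ; may rely on "%x != 0" here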

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}
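
// For example, "v u> y" always excludes zero, while for a constant RHS the
// range check distinguishes "v u>= 1" (exact region [1, 0), which wraps and
// excludes zero) from "v u< 8" (region [0, 8), which contains zero and so
// proves nothing).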

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CondGuardInst *I = cast<CondGuardInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      if (isPowerOf2_64(RK.ArgValue))
        Known.Zero.setLowBits(Log2_64(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CondGuardInst *I = cast<CondGuardInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // We are attempting to compute known bits for the operands of an assume.
    // Do not try to use other assumptions for those recursive calls because
    // that can lead to mutual recursion and a compile-time explosion.
    // An example of the mutual recursion: computeKnownBits can call
    // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
    // and so on.
    Query QueryNoAC = Q;
    QueryNoAC.AC = nullptr;

    // Note that ptrtoint may change the bitwidth.
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One  |= RHSKnown.One;
      // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can propagate
        // known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One  |= RHSKnown.One  & MaskKnown.One;
      // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & MaskKnown.One;
        Known.One  |= RHSKnown.Zero & MaskKnown.One;
      // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One  |= RHSKnown.One  & BKnown.Zero;
      // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & BKnown.Zero;
        Known.One  |= RHSKnown.Zero & BKnown.Zero;
      // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate known
        // bits from the RHS to V. For those bits in B that are known to be one,
        // we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One  |= RHSKnown.One  & BKnown.Zero;
        Known.Zero |= RHSKnown.One  & BKnown.One;
        Known.One  |= RHSKnown.Zero & BKnown.One;
      // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One  & BKnown.Zero;
        Known.One  |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One  |= RHSKnown.One  & BKnown.One;
      // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One  |= RHSKnown.One;
      // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One  |= RHSKnown.Zero;
      // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One  |= RHSKnown.One  << C;
      // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One  << C;
        Known.One  |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a power
        // of 2, then one more).
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_NE: {
      // assume (v & b != 0) where b is a power of 2
      const APInt *BPow2;
      if (match(Cmp, m_ICmp(Pred, m_c_And(m_V, m_Power2(BPow2)), m_Zero())) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        Known.One |= BPow2->zextOrTrunc(BitWidth);
      }
    } break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}
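
// Worked example for the masked-compare case above (illustrative): from
// "assume((v & 0xF0) == 0x30)" the mask's known-one bits let the RHS bits
// flow into V, so the analysis learns v = 0b0011????:
//
//   Known.One  |= 0x30 & 0xF0;   // -> 0x30
//   Known.Zero |= ~0x30 & 0xF0;  // -> 0xC0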

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is
/// a pre-allocated temporary with the same bit width as Known and on return
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known bits of the shift value
/// and of the shift amount, computes the implied known bits of the shift
/// operator's result for that shift amount. The results from calling KF are
/// conservatively combined for all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be more clearly correct to use the two temporaries for this
  // calculation, but we reuse the APInts here to prevent unnecessary
  // allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  std::optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero)
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = KnownBits::commonBits(
        Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}
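
// Worked example (illustrative): for "shl i8 %x, %amt" where %amt has bit 0
// known zero and is known to be less than 8, the loop above folds KF over the
// feasible shift amounts {0, 2, 4, 6} and keeps only the bits that all four
// results have in common.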

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known &= Known2;

    // and(x, add(x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
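    // For example (with y = 1): adding an odd y always flips bit 0 of x, so
    // bit 0 differs between x and x + y and their AND clears it, e.g.
    // x = 0b0111, x + 1 = 0b1000, x & (x + 1) = 0.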
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known |= Known2;
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known ^= Known2;
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    [[fallthrough]];
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy() ||
        isa<ScalableVectorType>(I->getType()))
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> i24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
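      //
      // For example (little-endian, illustrative): bitcasting <4 x i8> to
      // <2 x i16> with output element 0 demanded reads source sub-elements
      // {0, 1}; sub-element 0 supplies result bits 0-7 and sub-element 1
      // supplies bits 8-15 (SubScale = 2).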
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
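    // For example, sign-extending an i8 whose sign bit is known zero to i16
    // leaves the top nine bits of the result known zero.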
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
1272     // Trailing zeros of a left-shifted constant never decrease.
1273     const APInt *C;
1274     if (match(I->getOperand(0), m_APInt(C)))
1275       Known.Zero.setLowBits(C->countTrailingZeros());
1276     break;
1277   }
1278   case Instruction::LShr: {
1279     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1280       return KnownBits::lshr(KnownVal, KnownAmt);
1281     };
1282     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1283                                       KF);
1284     // Leading zeros of a right-shifted constant never decrease.
1285     const APInt *C;
1286     if (match(I->getOperand(0), m_APInt(C)))
1287       Known.Zero.setHighBits(C->countLeadingZeros());
1288     break;
1289   }
1290   case Instruction::AShr: {
1291     auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
1292       return KnownBits::ashr(KnownVal, KnownAmt);
1293     };
1294     computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
1295                                       KF);
1296     break;
1297   }
1298   case Instruction::Sub: {
1299     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1300     computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
1301                            DemandedElts, Known, Known2, Depth, Q);
1302     break;
1303   }
1304   case Instruction::Add: {
1305     bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
1306     computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
1307                            DemandedElts, Known, Known2, Depth, Q);
1308     break;
1309   }
1310   case Instruction::SRem:
1311     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1312     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1313     Known = KnownBits::srem(Known, Known2);
1314     break;
1315 
1316   case Instruction::URem:
1317     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1318     computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1319     Known = KnownBits::urem(Known, Known2);
1320     break;
1321   case Instruction::Alloca:
1322     Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
1323     break;
1324   case Instruction::GetElementPtr: {
1325     // Analyze all of the subscripts of this getelementptr instruction
1326     // to determine if we can prove known low zero bits.
1327     computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1328     // Accumulate the constant indices in a separate variable
1329     // to minimize the number of calls to computeForAddSub.
1330     APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);
1331 
1332     gep_type_iterator GTI = gep_type_begin(I);
1333     for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
1334       // Known bits can only shrink here; short-circuit once nothing is known.
1335       if (Known.isUnknown())
1336         break;
1337 
1338       Value *Index = I->getOperand(i);
1339 
1340       // Handle case when index is zero.
1341       Constant *CIndex = dyn_cast<Constant>(Index);
1342       if (CIndex && CIndex->isZeroValue())
1343         continue;
1344 
1345       if (StructType *STy = GTI.getStructTypeOrNull()) {
1346         // Handle struct member offset arithmetic.
1347 
1348         assert(CIndex &&
1349                "Access to structure field must be known at compile time");
1350 
1351         if (CIndex->getType()->isVectorTy())
1352           Index = CIndex->getSplatValue();
1353 
1354         unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
1355         const StructLayout *SL = Q.DL.getStructLayout(STy);
1356         uint64_t Offset = SL->getElementOffset(Idx);
1357         AccConstIndices += Offset;
1358         continue;
1359       }
1360 
1361       // Handle array index arithmetic.
1362       Type *IndexedTy = GTI.getIndexedType();
1363       if (!IndexedTy->isSized()) {
1364         Known.resetAll();
1365         break;
1366       }
1367 
1368       unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
1369       KnownBits IndexBits(IndexBitWidth);
1370       computeKnownBits(Index, IndexBits, Depth + 1, Q);
1371       TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
1372       uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinValue();
1373       KnownBits ScalingFactor(IndexBitWidth);
1374       // Multiply by current sizeof type.
1375       // &A[i] == A + i * sizeof(*A[i]).
1376       if (IndexTypeSize.isScalable()) {
1377         // For scalable types the only thing we know about sizeof is
1378         // that it is a multiple of the minimum size.
1379         ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
1380       } else if (IndexBits.isConstant()) {
1381         APInt IndexConst = IndexBits.getConstant();
1382         APInt ScaleC(IndexBitWidth, TypeSizeInBytes);
1383         IndexConst *= ScaleC;
1384         AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
1385         continue;
1386       } else {
1387         ScalingFactor =
1388             KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
1389       }
1390       IndexBits = KnownBits::mul(IndexBits, ScalingFactor);
1391 
1392       // If the offsets have a different width from the pointer, according
1393       // to the language reference we need to sign-extend or truncate them
1394       // to the width of the pointer.
1395       IndexBits = IndexBits.sextOrTrunc(BitWidth);
1396 
1397       // Note that inbounds does *not* guarantee nsw for the addition, as only
1398       // the offset is signed, while the base address is unsigned.
1399       Known = KnownBits::computeForAddSub(
1400           /*Add=*/true, /*NSW=*/false, Known, IndexBits);
1401     }
1402     if (!Known.isUnknown() && !AccConstIndices.isZero()) {
1403       KnownBits Index = KnownBits::makeConstant(AccConstIndices);
1404       Known = KnownBits::computeForAddSub(
1405           /*Add=*/true, /*NSW=*/false, Known, Index);
1406     }
1407     break;
1408   }
1409   case Instruction::PHI: {
1410     const PHINode *P = cast<PHINode>(I);
1411     BinaryOperator *BO = nullptr;
1412     Value *R = nullptr, *L = nullptr;
1413     if (matchSimpleRecurrence(P, BO, R, L)) {
1414       // Handle the case of a simple two-predecessor recurrence PHI.
1415       // There's a lot more that could theoretically be done here, but
1416       // this is sufficient to catch some interesting cases.
1417       unsigned Opcode = BO->getOpcode();
1418 
1419       // If this is a shift recurrence, we know the bits being shifted in.
1420       // We can combine that with information about the start value of the
1421       // recurrence to conclude facts about the result.
1422       if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
1423            Opcode == Instruction::Shl) &&
1424           BO->getOperand(0) == I) {
1425 
1426         // We have matched a recurrence of the form:
1427         // %iv = [R, %entry], [%iv.next, %backedge]
1428         // %iv.next = shift_op %iv, L
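             // For example, if R is known to have four leading zeros and the
             // shift is lshr, every value this recurrence produces keeps at
             // least those four leading zeros.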
1429 
1430         // Recurse with the phi context to avoid concern about whether facts
1431         // inferred hold at original context instruction.  TODO: It may be
1432         // correct to use the original context.  If warranted, explore and
1433         // add sufficient tests to cover.
1434         Query RecQ = Q;
1435         RecQ.CxtI = P;
1436         computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
1437         switch (Opcode) {
1438         case Instruction::Shl:
1439           // A shl recurrence will only increase the trailing zeros.
1440           Known.Zero.setLowBits(Known2.countMinTrailingZeros());
1441           break;
1442         case Instruction::LShr:
1443           // A lshr recurrence will preserve the leading zeros of the
1444           // start value
1445           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1446           break;
1447         case Instruction::AShr:
1448           // An ashr recurrence will extend the initial sign bit
1449           Known.Zero.setHighBits(Known2.countMinLeadingZeros());
1450           Known.One.setHighBits(Known2.countMinLeadingOnes());
1451           break;
1452         }
1453       }
1454 
1455       // Check for operations that have the property that if
1456       // both their operands have low zero bits, the result
1457       // will have low zero bits.
1458       if (Opcode == Instruction::Add ||
1459           Opcode == Instruction::Sub ||
1460           Opcode == Instruction::And ||
1461           Opcode == Instruction::Or ||
1462           Opcode == Instruction::Mul) {
1463         // Change the context instruction to the "edge" that flows into the
1464         // phi. This is important because that is where the value is actually
1465         // "evaluated" even though it is used later somewhere else. (see also
1466         // D69571).
1467         Query RecQ = Q;
1468 
1469         unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
1470         Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
1471         Instruction *LInst = P->getIncomingBlock(1-OpNum)->getTerminator();
1472 
1473         // Ok, we have a PHI of the form L op= R. Check for low
1474         // zero bits.
1475         RecQ.CxtI = RInst;
1476         computeKnownBits(R, Known2, Depth + 1, RecQ);
1477 
1478         // We need to take the minimum number of known bits
1479         KnownBits Known3(BitWidth);
1480         RecQ.CxtI = LInst;
1481         computeKnownBits(L, Known3, Depth + 1, RecQ);
1482 
1483         Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
1484                                        Known3.countMinTrailingZeros()));
1485 
1486         auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
1487         if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
1488           // If initial value of recurrence is nonnegative, and we are adding
1489           // a nonnegative number with nsw, the result can only be nonnegative
1490           // or poison value regardless of the number of times we execute the
1491           // add in phi recurrence. If initial value is negative and we are
1492           // adding a negative number with nsw, the result can only be
1493           // negative or poison value. Similar arguments apply to sub and mul.
1494           //
1495           // (add non-negative, non-negative) --> non-negative
1496           // (add negative, negative) --> negative
1497           if (Opcode == Instruction::Add) {
1498             if (Known2.isNonNegative() && Known3.isNonNegative())
1499               Known.makeNonNegative();
1500             else if (Known2.isNegative() && Known3.isNegative())
1501               Known.makeNegative();
1502           }
1503 
1504           // (sub nsw non-negative, negative) --> non-negative
1505           // (sub nsw negative, non-negative) --> negative
1506           else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1507             if (Known2.isNonNegative() && Known3.isNegative())
1508               Known.makeNonNegative();
1509             else if (Known2.isNegative() && Known3.isNonNegative())
1510               Known.makeNegative();
1511           }
1512 
1513           // (mul nsw non-negative, non-negative) --> non-negative
1514           else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1515                    Known3.isNonNegative())
1516             Known.makeNonNegative();
1517         }
1518 
1519         break;
1520       }
1521     }
1522 
1523     // Unreachable blocks may have zero-operand PHI nodes.
1524     if (P->getNumIncomingValues() == 0)
1525       break;
1526 
1527     // Otherwise take the intersection of the known bit sets of the
1528     // operands, taking conservative care to avoid excessive recursion.
1529     if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1530       // Skip if every incoming value refers back to the phi itself.
1531       if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1532         break;
1533 
1534       Known.Zero.setAllBits();
1535       Known.One.setAllBits();
1536       for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1537         Value *IncValue = P->getIncomingValue(u);
1538         // Skip direct self references.
1539         if (IncValue == P) continue;
1540 
1541         // Change the context instruction to the "edge" that flows into the
1542         // phi. This is important because that is where the value is actually
1543         // "evaluated" even though it is used later somewhere else. (see also
1544         // D69571).
1545         Query RecQ = Q;
1546         RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1547 
1548         Known2 = KnownBits(BitWidth);
1549 
1550         // Recurse, but cap the recursion to one level, because we don't
1551         // want to waste time spinning around in loops.
1552         computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1553 
1554         // If this failed, see if we can use a conditional branch into the phi
1555         // to help us determine the range of the value.
1556         if (Known2.isUnknown()) {
1557           ICmpInst::Predicate Pred;
1558           const APInt *RHSC;
1559           BasicBlock *TrueSucc, *FalseSucc;
1560           // TODO: Use RHS Value and compute range from its known bits.
1561           if (match(RecQ.CxtI,
1562                     m_Br(m_c_ICmp(Pred, m_Specific(IncValue), m_APInt(RHSC)),
1563                          m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) {
1564             // Check for cases of duplicate successors.
1565             if ((TrueSucc == P->getParent()) != (FalseSucc == P->getParent())) {
1566               // If we're using the false successor, invert the predicate.
1567               if (FalseSucc == P->getParent())
1568                 Pred = CmpInst::getInversePredicate(Pred);
1569 
1570               switch (Pred) {
1571               case CmpInst::Predicate::ICMP_EQ:
1572                 Known2 = KnownBits::makeConstant(*RHSC);
1573                 break;
1574               case CmpInst::Predicate::ICMP_ULE:
1575                 Known2.Zero.setHighBits(RHSC->countLeadingZeros());
1576                 break;
1577               case CmpInst::Predicate::ICMP_ULT:
1578                 Known2.Zero.setHighBits((*RHSC - 1).countLeadingZeros());
1579                 break;
1580               default:
1581                 // TODO - add additional integer predicate handling.
1582                 break;
1583               }
1584             }
1585           }
1586         }
1587 
1588         Known = KnownBits::commonBits(Known, Known2);
1589         // If nothing is known anymore, there's no need to check
1590         // more operands.
1591         if (Known.isUnknown())
1592           break;
1593       }
1594     }
1595     break;
1596   }
1597   case Instruction::Call:
1598   case Instruction::Invoke:
1599     // If range metadata is attached to this call, set known bits from that,
1600     // and then intersect with known bits based on other properties of the
1601     // function.
1602     if (MDNode *MD =
1603             Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1604       computeKnownBitsFromRangeMetadata(*MD, Known);
1605     if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1606       computeKnownBits(RV, Known2, Depth + 1, Q);
1607       Known.Zero |= Known2.Zero;
1608       Known.One |= Known2.One;
1609     }
1610     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1611       switch (II->getIntrinsicID()) {
1612       default: break;
1613       case Intrinsic::abs: {
1614         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1615         bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1616         Known = Known2.abs(IntMinIsPoison);
1617         break;
1618       }
1619       case Intrinsic::bitreverse:
1620         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1621         Known.Zero |= Known2.Zero.reverseBits();
1622         Known.One |= Known2.One.reverseBits();
1623         break;
1624       case Intrinsic::bswap:
1625         computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1626         Known.Zero |= Known2.Zero.byteSwap();
1627         Known.One |= Known2.One.byteSwap();
1628         break;
1629       case Intrinsic::ctlz: {
1630         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1631         // If we have a known 1, its position is our upper bound.
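             // For example, if bit 5 of an i8 operand is known one, there are
             // at most 2 leading zeros, so the result fits in bit_width(2) == 2
             // bits and everything from bit 2 up is known zero.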
1632         unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1633         // If this call is poison for a 0 input, the result is at most BitWidth - 1.
1634         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1635           PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1636         unsigned LowBits = llvm::bit_width(PossibleLZ);
1637         Known.Zero.setBitsFrom(LowBits);
1638         break;
1639       }
1640       case Intrinsic::cttz: {
1641         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1642         // If we have a known 1, its position is our upper bound.
1643         unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1644         // If this call is poison for a 0 input, the result is at most BitWidth - 1.
1645         if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1646           PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1647         unsigned LowBits = llvm::bit_width(PossibleTZ);
1648         Known.Zero.setBitsFrom(LowBits);
1649         break;
1650       }
1651       case Intrinsic::ctpop: {
1652         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1653         // We can bound the space the count needs.  Also, bits known to be zero
1654         // can't contribute to the population.
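             // For example, if only three bits of an i8 operand might be set,
             // the count is at most 3 and fits in bit_width(3) == 2 bits.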
1655         unsigned BitsPossiblySet = Known2.countMaxPopulation();
1656         unsigned LowBits = llvm::bit_width(BitsPossiblySet);
1657         Known.Zero.setBitsFrom(LowBits);
1658         // TODO: we could bound KnownOne using the lower bound on the number
1659         // of bits which might be set provided by popcnt KnownOne2.
1660         break;
1661       }
1662       case Intrinsic::fshr:
1663       case Intrinsic::fshl: {
1664         const APInt *SA;
1665         if (!match(I->getOperand(2), m_APInt(SA)))
1666           break;
1667 
1668         // Normalize to funnel shift left.
1669         uint64_t ShiftAmt = SA->urem(BitWidth);
1670         if (II->getIntrinsicID() == Intrinsic::fshr)
1671           ShiftAmt = BitWidth - ShiftAmt;
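             // For example, on i8, fshr(X, Y, 3) is the same as fshl(X, Y, 5):
             // both compute (X << 5) | (Y >> 3).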
1672 
1673         KnownBits Known3(BitWidth);
1674         computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1675         computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q);
1676 
1677         Known.Zero =
1678             Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt);
1679         Known.One =
1680             Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt);
1681         break;
1682       }
1683       case Intrinsic::uadd_sat:
1684       case Intrinsic::usub_sat: {
1685         bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat;
1686         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1687         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1688 
1689         // Add: Leading ones of either operand are preserved.
1690         // Sub: Leading zeros of LHS and leading ones of RHS are preserved
1691         // as leading zeros in the result.
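             // For example, on i8, uadd.sat with an operand whose top two bits
             // are known one either saturates to 0xFF or yields a result no
             // smaller than that operand, so the top two bits stay known one.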
1692         unsigned LeadingKnown;
1693         if (IsAdd)
1694           LeadingKnown = std::max(Known.countMinLeadingOnes(),
1695                                   Known2.countMinLeadingOnes());
1696         else
1697           LeadingKnown = std::max(Known.countMinLeadingZeros(),
1698                                   Known2.countMinLeadingOnes());
1699 
1700         Known = KnownBits::computeForAddSub(
1701             IsAdd, /* NSW */ false, Known, Known2);
1702 
1703         // We select between the operation result and all-ones/zero
1704         // respectively, so we can preserve known ones/zeros.
1705         if (IsAdd) {
1706           Known.One.setHighBits(LeadingKnown);
1707           Known.Zero.clearAllBits();
1708         } else {
1709           Known.Zero.setHighBits(LeadingKnown);
1710           Known.One.clearAllBits();
1711         }
1712         break;
1713       }
1714       case Intrinsic::umin:
1715         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1716         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1717         Known = KnownBits::umin(Known, Known2);
1718         break;
1719       case Intrinsic::umax:
1720         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1721         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1722         Known = KnownBits::umax(Known, Known2);
1723         break;
1724       case Intrinsic::smin:
1725         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1726         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1727         Known = KnownBits::smin(Known, Known2);
1728         break;
1729       case Intrinsic::smax:
1730         computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1731         computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
1732         Known = KnownBits::smax(Known, Known2);
1733         break;
1734       case Intrinsic::x86_sse42_crc32_64_64:
1735         Known.Zero.setBitsFrom(32);
1736         break;
1737       case Intrinsic::riscv_vsetvli:
1738       case Intrinsic::riscv_vsetvlimax:
1739         // Assume that VL output is positive and would fit in an int32_t.
1740         // TODO: VLEN might be capped at 16 bits in a future V spec update.
1741         if (BitWidth >= 32)
1742           Known.Zero.setBitsFrom(31);
1743         break;
1744       case Intrinsic::vscale: {
1745         if (!II->getParent() || !II->getFunction() ||
1746             !II->getFunction()->hasFnAttribute(Attribute::VScaleRange))
1747           break;
1748 
1749         auto Attr = II->getFunction()->getFnAttribute(Attribute::VScaleRange);
1750         std::optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
1751 
1752         if (!VScaleMax)
1753           break;
1754 
1755         unsigned VScaleMin = Attr.getVScaleRangeMin();
1756 
1757         // If vscale min = max then we know the exact value at compile time
1758         // and hence we know the exact bits.
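             // Setting One to the value and Zero to its complement marks
             // every bit as known.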
1759         if (VScaleMin == VScaleMax) {
1760           Known.One = VScaleMin;
1761           Known.Zero = VScaleMin;
1762           Known.Zero.flipAllBits();
1763           break;
1764         }
1765 
1766         unsigned FirstZeroHighBit = llvm::bit_width(*VScaleMax);
1767         if (FirstZeroHighBit < BitWidth)
1768           Known.Zero.setBitsFrom(FirstZeroHighBit);
1769 
1770         break;
1771       }
1772       }
1773     }
1774     break;
1775   case Instruction::ShuffleVector: {
1776     auto *Shuf = dyn_cast<ShuffleVectorInst>(I);
1777     // FIXME: Do we need to handle ConstantExpr involving shufflevectors?
1778     if (!Shuf) {
1779       Known.resetAll();
1780       return;
1781     }
1782     // For undef elements, we don't know anything about the common state of
1783     // the shuffle result.
1784     APInt DemandedLHS, DemandedRHS;
1785     if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) {
1786       Known.resetAll();
1787       return;
1788     }
1789     Known.One.setAllBits();
1790     Known.Zero.setAllBits();
1791     if (!!DemandedLHS) {
1792       const Value *LHS = Shuf->getOperand(0);
1793       computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q);
1794       // If we don't know any bits, early out.
1795       if (Known.isUnknown())
1796         break;
1797     }
1798     if (!!DemandedRHS) {
1799       const Value *RHS = Shuf->getOperand(1);
1800       computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q);
1801       Known = KnownBits::commonBits(Known, Known2);
1802     }
1803     break;
1804   }
1805   case Instruction::InsertElement: {
1806     if (isa<ScalableVectorType>(I->getType())) {
1807       Known.resetAll();
1808       return;
1809     }
1810     const Value *Vec = I->getOperand(0);
1811     const Value *Elt = I->getOperand(1);
1812     auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2));
1813     // Early out if the index is non-constant or out-of-range.
1814     unsigned NumElts = DemandedElts.getBitWidth();
1815     if (!CIdx || CIdx->getValue().uge(NumElts)) {
1816       Known.resetAll();
1817       return;
1818     }
1819     Known.One.setAllBits();
1820     Known.Zero.setAllBits();
1821     unsigned EltIdx = CIdx->getZExtValue();
1822     // Do we demand the inserted element?
1823     if (DemandedElts[EltIdx]) {
1824       computeKnownBits(Elt, Known, Depth + 1, Q);
1825       // If we don't know any bits, early out.
1826       if (Known.isUnknown())
1827         break;
1828     }
1829     // We don't need the base vector element that has been inserted.
1830     APInt DemandedVecElts = DemandedElts;
1831     DemandedVecElts.clearBit(EltIdx);
1832     if (!!DemandedVecElts) {
1833       computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q);
1834       Known = KnownBits::commonBits(Known, Known2);
1835     }
1836     break;
1837   }
1838   case Instruction::ExtractElement: {
1839     // Look through extract element. If the index is non-constant or
1840     // out-of-range demand all elements, otherwise just the extracted element.
1841     const Value *Vec = I->getOperand(0);
1842     const Value *Idx = I->getOperand(1);
1843     auto *CIdx = dyn_cast<ConstantInt>(Idx);
1844     if (isa<ScalableVectorType>(Vec->getType())) {
1845       // FIXME: there's probably *something* we can do with scalable vectors
1846       Known.resetAll();
1847       break;
1848     }
1849     unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1850     APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1851     if (CIdx && CIdx->getValue().ult(NumElts))
1852       DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1853     computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1854     break;
1855   }
1856   case Instruction::ExtractValue:
1857     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1858       const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1859       if (EVI->getNumIndices() != 1) break;
1860       if (EVI->getIndices()[0] == 0) {
1861         switch (II->getIntrinsicID()) {
1862         default: break;
1863         case Intrinsic::uadd_with_overflow:
1864         case Intrinsic::sadd_with_overflow:
1865           computeKnownBitsAddSub(true, II->getArgOperand(0),
1866                                  II->getArgOperand(1), false, DemandedElts,
1867                                  Known, Known2, Depth, Q);
1868           break;
1869         case Intrinsic::usub_with_overflow:
1870         case Intrinsic::ssub_with_overflow:
1871           computeKnownBitsAddSub(false, II->getArgOperand(0),
1872                                  II->getArgOperand(1), false, DemandedElts,
1873                                  Known, Known2, Depth, Q);
1874           break;
1875         case Intrinsic::umul_with_overflow:
1876         case Intrinsic::smul_with_overflow:
1877           computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1878                               DemandedElts, Known, Known2, Depth, Q);
1879           break;
1880         }
1881       }
1882     }
1883     break;
1884   case Instruction::Freeze:
1885     if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1886                                   Depth + 1))
1887       computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1888     break;
1889   }
1890 }
1891 
1892 /// Determine which bits of V are known to be either zero or one and return
1893 /// them.
1894 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1895                            unsigned Depth, const Query &Q) {
1896   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1897   computeKnownBits(V, DemandedElts, Known, Depth, Q);
1898   return Known;
1899 }
1900 
1901 /// Determine which bits of V are known to be either zero or one and return
1902 /// them.
1903 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1904   KnownBits Known(getBitWidth(V->getType(), Q.DL));
1905   computeKnownBits(V, Known, Depth, Q);
1906   return Known;
1907 }
1908 
1909 /// Determine which bits of V are known to be either zero or one and return
1910 /// them in the Known bit set.
1911 ///
1912 /// NOTE: we cannot consider 'undef' to be "IsZero" here.  The problem is that
1913 /// we cannot optimize based on the assumption that it is zero without changing
1914 /// it to be an explicit zero.  If we don't change it to zero, other code could
1915 /// be optimized based on the contradictory assumption that it is non-zero.
1916 /// Because instcombine aggressively folds operations with undef args anyway,
1917 /// this won't lose us code quality.
1918 ///
1919 /// This function is defined on values with integer type, values with pointer
1920 /// type, and vectors of integers.  In the case where V is a vector, the known
1921 /// zero and known one values are the same width as the vector element, and a
1922 /// bit is set only if it is true for all of the demanded elements in the
1923 /// vector specified by DemandedElts.
1924 void computeKnownBits(const Value *V, const APInt &DemandedElts,
1925                       KnownBits &Known, unsigned Depth, const Query &Q) {
1926   if (!DemandedElts) {
1927     // No demanded elts, better to assume we don't know anything.
1928     Known.resetAll();
1929     return;
1930   }
1931 
1932   assert(V && "No Value?");
1933   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
1934 
1935 #ifndef NDEBUG
1936   Type *Ty = V->getType();
1937   unsigned BitWidth = Known.getBitWidth();
1938 
1939   assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) &&
1940          "Not integer or pointer type!");
1941 
1942   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
1943     assert(
1944         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
1945         "DemandedElt width should equal the fixed vector number of elements");
1946   } else {
1947     assert(DemandedElts == APInt(1, 1) &&
1948            "DemandedElt width should be 1 for scalars or scalable vectors");
1949   }
1950 
1951   Type *ScalarTy = Ty->getScalarType();
1952   if (ScalarTy->isPointerTy()) {
1953     assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) &&
1954            "V and Known should have same BitWidth");
1955   } else {
1956     assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) &&
1957            "V and Known should have same BitWidth");
1958   }
1959 #endif
1960 
1961   const APInt *C;
1962   if (match(V, m_APInt(C))) {
1963     // We know all of the bits for a scalar constant or a splat vector constant!
1964     Known = KnownBits::makeConstant(*C);
1965     return;
1966   }
1967   // Null and aggregate-zero are all-zeros.
1968   if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
1969     Known.setAllZero();
1970     return;
1971   }
1972   // Handle a constant vector by taking the intersection of the known bits of
1973   // each element.
1974   if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) {
1975     assert(!isa<ScalableVectorType>(V->getType()));
1976     // We know that CDV must be a vector of integers. Take the intersection of
1977     // each element.
1978     Known.Zero.setAllBits(); Known.One.setAllBits();
1979     for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) {
1980       if (!DemandedElts[i])
1981         continue;
1982       APInt Elt = CDV->getElementAsAPInt(i);
1983       Known.Zero &= ~Elt;
1984       Known.One &= Elt;
1985     }
1986     return;
1987   }
1988 
1989   if (const auto *CV = dyn_cast<ConstantVector>(V)) {
1990     assert(!isa<ScalableVectorType>(V->getType()));
1991     // We know that CV must be a vector of integers. Take the intersection of
1992     // each element.
1993     Known.Zero.setAllBits(); Known.One.setAllBits();
1994     for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
1995       if (!DemandedElts[i])
1996         continue;
1997       Constant *Element = CV->getAggregateElement(i);
1998       auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
1999       if (!ElementCI) {
2000         Known.resetAll();
2001         return;
2002       }
2003       const APInt &Elt = ElementCI->getValue();
2004       Known.Zero &= ~Elt;
2005       Known.One &= Elt;
2006     }
2007     return;
2008   }
2009 
2010   // Start out not knowing anything.
2011   Known.resetAll();
2012 
2013   // We can't infer anything about undefs.
2014   if (isa<UndefValue>(V))
2015     return;
2016 
2017   // There's no point in looking through other users of ConstantData for
2018   // assumptions.  Confirm that we've handled them all.
2019   assert(!isa<ConstantData>(V) && "Unhandled constant data!");
2020 
2021   // All recursive calls that increase depth must come after this.
2022   if (Depth == MaxAnalysisRecursionDepth)
2023     return;
2024 
2025   // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has
2026   // the bits of its aliasee.
2027   if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2028     if (!GA->isInterposable())
2029       computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2030     return;
2031   }
2032 
2033   if (const Operator *I = dyn_cast<Operator>(V))
2034     computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2035 
2036   // Aligned pointers have trailing zeros - refine Known.Zero set
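       // For example, an align 8 pointer has its low Log2(8) == 3 bits known
       // zero.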
2037   if (isa<PointerType>(V->getType())) {
2038     Align Alignment = V->getPointerAlignment(Q.DL);
2039     Known.Zero.setLowBits(Log2(Alignment));
2040   }
2041 
2042   // computeKnownBitsFromAssume strictly refines Known.
2043   // Therefore, we run it after computeKnownBitsFromOperator.
2044 
2045   // Check whether a nearby assume intrinsic can determine some known bits.
2046   computeKnownBitsFromAssume(V, Known, Depth, Q);
2047 
2048   assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2049 }
2050 
2051 /// Try to detect a recurrence in which the value of the induction variable is
2052 /// always a power of two (or zero).
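     /// For example:
     ///   %p = phi i32 [ 1, %entry ], [ %p.next, %loop ]
     ///   %p.next = shl nuw i32 %p, 1
     /// remains a power of two (or becomes poison) on every iteration.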
2053 static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
2054                                    unsigned Depth, Query &Q) {
2055   BinaryOperator *BO = nullptr;
2056   Value *Start = nullptr, *Step = nullptr;
2057   if (!matchSimpleRecurrence(PN, BO, Start, Step))
2058     return false;
2059 
2060   // Initial value must be a power of two.
2061   for (const Use &U : PN->operands()) {
2062     if (U.get() == Start) {
2063       // Initial value comes from a different BB, need to adjust context
2064       // instruction for analysis.
2065       Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
2066       if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
2067         return false;
2068     }
2069   }
2070 
2071   // Except for Mul, the induction variable must be on the left side of the
2072   // increment expression, otherwise its value can be arbitrary.
2073   if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
2074     return false;
2075 
2076   Q.CxtI = BO->getParent()->getTerminator();
2077   switch (BO->getOpcode()) {
2078   case Instruction::Mul:
2079     // Power of two is closed under multiplication.
2080     return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
2081             Q.IIQ.hasNoSignedWrap(BO)) &&
2082            isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
2083   case Instruction::SDiv:
2084     // Start value must not be signmask for signed division, so simply being a
2085     // power of two is not sufficient, and it has to be a constant.
2086     if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2087       return false;
2088     [[fallthrough]];
2089   case Instruction::UDiv:
2090     // Divisor must be a power of two.
2091     // If OrZero is false, we cannot guarantee the induction variable is
2092     // non-zero after division (same for Shr), unless the division is exact.
2093     return (OrZero || Q.IIQ.isExact(BO)) &&
2094            isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
2095   case Instruction::Shl:
2096     return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
2097   case Instruction::AShr:
2098     if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2099       return false;
2100     [[fallthrough]];
2101   case Instruction::LShr:
2102     return OrZero || Q.IIQ.isExact(BO);
2103   default:
2104     return false;
2105   }
2106 }
2107 
2108 /// Return true if the given value is known to have exactly one
2109 /// bit set when defined. For vectors return true if every element is known to
2110 /// be a power of two when defined. Supports values with integer or pointer
2111 /// types and vectors of integers.
2112 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
2113                             const Query &Q) {
2114   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2115 
2116   // Attempt to match against constants.
2117   if (OrZero && match(V, m_Power2OrZero()))
2118     return true;
2119   if (match(V, m_Power2()))
2120     return true;
2121 
2122   // 1 << X is clearly a power of two if the one is not shifted off the end.  If
2123   // it is shifted off the end then the result is undefined.
2124   if (match(V, m_Shl(m_One(), m_Value())))
2125     return true;
2126 
2127   // (signmask) >>l X is clearly a power of two if the one is not shifted off
2128   // the bottom.  If it is shifted off the bottom then the result is undefined.
2129   if (match(V, m_LShr(m_SignMask(), m_Value())))
2130     return true;
2131 
2132   // The remaining tests are all recursive, so bail out if we hit the limit.
2133   if (Depth++ == MaxAnalysisRecursionDepth)
2134     return false;
2135 
2136   Value *X = nullptr, *Y = nullptr;
2137   // A shift left or a logical shift right of a power of two is a power of two
2138   // or zero.
2139   if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) ||
2140                  match(V, m_LShr(m_Value(X), m_Value()))))
2141     return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q);
2142 
2143   if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V))
2144     return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q);
2145 
2146   if (const SelectInst *SI = dyn_cast<SelectInst>(V))
2147     return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) &&
2148            isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q);
2149 
2150   // Peek through min/max.
2151   if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) {
2152     return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) &&
2153            isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q);
2154   }
2155 
2156   if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) {
2157     // A power of two and'd with anything is a power of two or zero.
2158     if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) ||
2159         isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q))
2160       return true;
2161     // X & (-X) is always a power of two or zero.
2162     if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X))))
2163       return true;
2164     return false;
2165   }
2166 
2167   // Adding a power-of-two or zero to the same power-of-two or zero yields
2168   // either the original power-of-two, a larger power-of-two or zero.
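       // For example, with Y a power of two and X = Y & M for some mask M, X
       // is either 0 or Y, so X + Y is either Y or Y * 2.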
2169   if (match(V, m_Add(m_Value(X), m_Value(Y)))) {
2170     const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V);
2171     if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) ||
2172         Q.IIQ.hasNoSignedWrap(VOBO)) {
2173       if (match(X, m_And(m_Specific(Y), m_Value())) ||
2174           match(X, m_And(m_Value(), m_Specific(Y))))
2175         if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q))
2176           return true;
2177       if (match(Y, m_And(m_Specific(X), m_Value())) ||
2178           match(Y, m_And(m_Value(), m_Specific(X))))
2179         if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q))
2180           return true;
2181 
2182       unsigned BitWidth = V->getType()->getScalarSizeInBits();
2183       KnownBits LHSBits(BitWidth);
2184       computeKnownBits(X, LHSBits, Depth, Q);
2185 
2186       KnownBits RHSBits(BitWidth);
2187       computeKnownBits(Y, RHSBits, Depth, Q);
2188       // If i8 V is a power of two or zero:
2189       //  ZeroBits: 1 1 1 0 1 1 1 1
2190       // ~ZeroBits: 0 0 0 1 0 0 0 0
2191       if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2())
2192         // If OrZero isn't set, we cannot give back a zero result.
2193         // Make sure either the LHS or RHS has a bit set.
2194         if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2195           return true;
2196     }
2197   }
2198 
2199   // A PHI node is a power of two if all incoming values are powers of two, or
2200   // if it is an induction variable whose value is a power of two at each step.
2201   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2202     Query RecQ = Q;
2203 
2204     // Check if it is an induction variable and always power of two.
2205     if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
2206       return true;
2207 
2208     // Recursively check all incoming values. Limit recursion to 2 levels, so
2209     // that search complexity is limited to number of operands^2.
2210     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2211     return llvm::all_of(PN->operands(), [&](const Use &U) {
2212       // Value is a power of 2 if it comes from the PHI node itself, by induction.
2213       if (U.get() == PN)
2214         return true;
2215 
2216       // Change the context instruction to the incoming block where it is
2217       // evaluated.
2218       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2219       return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2220     });
2221   }
2222 
2223   // An exact divide or right shift can only shift off zero bits, so the result
2224   // is a power of two only if the first operand is a power of two and not
2225   // copying a sign bit (sdiv int_min, 2).
2226   if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2227       match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2228     return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2229                                   Depth, Q);
2230   }
2231 
2232   return false;
2233 }
2234 
2235 /// Test whether a GEP's result is known to be non-null.
2236 ///
2237 /// Uses properties inherent in a GEP to try to determine whether it is known
2238 /// to be non-null.
2239 ///
2240 /// Currently this routine does not support vector GEPs.
2241 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2242                               const Query &Q) {
2243   const Function *F = nullptr;
2244   if (const Instruction *I = dyn_cast<Instruction>(GEP))
2245     F = I->getFunction();
2246 
2247   if (!GEP->isInBounds() ||
2248       NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2249     return false;
2250 
2251   // FIXME: Support vector-GEPs.
2252   assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2253 
2254   // If the base pointer is non-null, we cannot walk to a null address with an
2255   // inbounds GEP in address space zero.
2256   if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2257     return true;
2258 
2259   // Walk the GEP operands and see if any operand introduces a non-zero offset.
2260   // If so, then the GEP cannot produce a null pointer, as doing so would
2261   // inherently violate the inbounds contract within address space zero.
2262   for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2263        GTI != GTE; ++GTI) {
2264     // Struct types are easy -- they must always be indexed by a constant.
2265     if (StructType *STy = GTI.getStructTypeOrNull()) {
2266       ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2267       unsigned ElementIdx = OpC->getZExtValue();
2268       const StructLayout *SL = Q.DL.getStructLayout(STy);
2269       uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2270       if (ElementOffset > 0)
2271         return true;
2272       continue;
2273     }
2274 
2275     // If we have a zero-sized type, the index doesn't matter. Keep looping.
2276     if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).isZero())
2277       continue;
2278 
2279     // Fast path the constant operand case both for efficiency and so we don't
2280     // increment Depth when just zipping down an all-constant GEP.
2281     if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2282       if (!OpC->isZero())
2283         return true;
2284       continue;
2285     }
2286 
2287     // We post-increment Depth here because while isKnownNonZero increments it
2288     // as well, when we pop back up that increment won't persist. We don't want
2289     // to recurse 10k times just because we have 10k GEP operands. We don't
2290     // bail completely out because we want to handle constant GEPs regardless
2291     // of depth.
2292     if (Depth++ >= MaxAnalysisRecursionDepth)
2293       continue;
2294 
2295     if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2296       return true;
2297   }
2298 
2299   return false;
2300 }
2301 
2302 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2303                                                   const Instruction *CtxI,
2304                                                   const DominatorTree *DT) {
2305   if (isa<Constant>(V))
2306     return false;
2307 
2308   if (!CtxI || !DT)
2309     return false;
2310 
2311   unsigned NumUsesExplored = 0;
2312   for (const auto *U : V->users()) {
2313     // Avoid massive lists
2314     if (NumUsesExplored >= DomConditionsMaxUses)
2315       break;
2316     NumUsesExplored++;
2317 
2318     // If the value is used as an argument to a call or invoke, then argument
2319     // attributes may provide an answer about null-ness.
2320     if (const auto *CB = dyn_cast<CallBase>(U))
2321       if (auto *CalledFunc = CB->getCalledFunction())
2322         for (const Argument &Arg : CalledFunc->args())
2323           if (CB->getArgOperand(Arg.getArgNo()) == V &&
2324               Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2325               DT->dominates(CB, CtxI))
2326             return true;
2327 
2328     // If the value is used as a load/store, then the pointer must be non null.
2329     if (V == getLoadStorePointerOperand(U)) {
2330       const Instruction *I = cast<Instruction>(U);
2331       if (!NullPointerIsDefined(I->getFunction(),
2332                                 V->getType()->getPointerAddressSpace()) &&
2333           DT->dominates(I, CtxI))
2334         return true;
2335     }
2336 
2337     // Consider only compare instructions uniquely controlling a branch
2338     Value *RHS;
2339     CmpInst::Predicate Pred;
2340     if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2341       continue;
2342 
2343     bool NonNullIfTrue;
2344     if (cmpExcludesZero(Pred, RHS))
2345       NonNullIfTrue = true;
2346     else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2347       NonNullIfTrue = false;
2348     else
2349       continue;
2350 
2351     SmallVector<const User *, 4> WorkList;
2352     SmallPtrSet<const User *, 4> Visited;
2353     for (const auto *CmpU : U->users()) {
2354       assert(WorkList.empty() && "Should be!");
2355       if (Visited.insert(CmpU).second)
2356         WorkList.push_back(CmpU);
2357 
2358       while (!WorkList.empty()) {
2359         auto *Curr = WorkList.pop_back_val();
2360 
2361         // If a user is an AND, add all its users to the work list. We only
2362         // propagate "pred != null" condition through AND because it is only
2363         // correct to assume that all conditions of AND are met in true branch.
2364         // TODO: Support similar logic of OR and EQ predicate?
2365         if (NonNullIfTrue)
2366           if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2367             for (const auto *CurrU : Curr->users())
2368               if (Visited.insert(CurrU).second)
2369                 WorkList.push_back(CurrU);
2370             continue;
2371           }
2372 
2373         if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2374           assert(BI->isConditional() && "uses a comparison!");
2375 
2376           BasicBlock *NonNullSuccessor =
2377               BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2378           BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2379           if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2380             return true;
2381         } else if (NonNullIfTrue && isGuard(Curr) &&
2382                    DT->dominates(cast<Instruction>(Curr), CtxI)) {
2383           return true;
2384         }
2385       }
2386     }
2387   }
2388 
2389   return false;
2390 }
2391 
2392 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2393 /// ensure that the value it's attached to is never Value?
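     /// For example, !range metadata of !{i32 1, i32 256}, i.e. the half-open
     /// interval [1, 256), excludes the value 0.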
2395 static bool rangeMetadataExcludesValue(const MDNode* Ranges, const APInt& Value) {
2396   const unsigned NumRanges = Ranges->getNumOperands() / 2;
2397   assert(NumRanges >= 1);
2398   for (unsigned i = 0; i < NumRanges; ++i) {
2399     ConstantInt *Lower =
2400         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2401     ConstantInt *Upper =
2402         mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2403     ConstantRange Range(Lower->getValue(), Upper->getValue());
2404     if (Range.contains(Value))
2405       return false;
2406   }
2407   return true;
2408 }
2409 
2410 /// Try to detect a recurrence that monotonically increases/decreases from a
2411 /// non-zero starting value. These are common as induction variables.
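     /// For example:
     ///   %i = phi i64 [ 1, %entry ], [ %i.next, %loop ]
     ///   %i.next = add nuw i64 %i, 1
     /// can never return to zero.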
2412 static bool isNonZeroRecurrence(const PHINode *PN) {
2413   BinaryOperator *BO = nullptr;
2414   Value *Start = nullptr, *Step = nullptr;
2415   const APInt *StartC, *StepC;
2416   if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2417       !match(Start, m_APInt(StartC)) || StartC->isZero())
2418     return false;
2419 
2420   switch (BO->getOpcode()) {
2421   case Instruction::Add:
2422     // Starting from non-zero and stepping away from zero can never wrap back
2423     // to zero.
2424     return BO->hasNoUnsignedWrap() ||
2425            (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2426             StartC->isNegative() == StepC->isNegative());
2427   case Instruction::Mul:
2428     return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2429            match(Step, m_APInt(StepC)) && !StepC->isZero();
2430   case Instruction::Shl:
2431     return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2432   case Instruction::AShr:
2433   case Instruction::LShr:
2434     return BO->isExact();
2435   default:
2436     return false;
2437   }
2438 }
2439 
2440 /// Return true if the given value is known to be non-zero when defined. For
2441 /// vectors, return true if every demanded element is known to be non-zero when
2442 /// defined. For pointers, if the context instruction and dominator tree are
2443 /// specified, perform context-sensitive analysis and return true if the
2444 /// pointer couldn't possibly be null at the specified instruction.
2445 /// Supports values with integer or pointer type and vectors of integers.
2446 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2447                     const Query &Q) {
2448 
2449 #ifndef NDEBUG
2450   Type *Ty = V->getType();
2451   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
2452 
2453   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
2454     assert(
2455         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
2456         "DemandedElt width should equal the fixed vector number of elements");
2457   } else {
2458     assert(DemandedElts == APInt(1, 1) &&
2459            "DemandedElt width should be 1 for scalars");
2460   }
2461 #endif
2462 
2463   if (auto *C = dyn_cast<Constant>(V)) {
2464     if (C->isNullValue())
2465       return false;
2466     if (isa<ConstantInt>(C))
2467       // Must be non-zero due to null test above.
2468       return true;
2469 
2470     // For constant vectors, check that all elements are undefined or known
2471     // non-zero to determine that the whole vector is known non-zero.
2472     if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2473       for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2474         if (!DemandedElts[i])
2475           continue;
2476         Constant *Elt = C->getAggregateElement(i);
2477         if (!Elt || Elt->isNullValue())
2478           return false;
2479         if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2480           return false;
2481       }
2482       return true;
2483     }
2484 
2485     // A global variable in address space 0 is non-null unless it is extern weak
2486     // or an absolute symbol reference. Other address spaces may have null as a
2487     // valid address for a global, so we can't assume anything.
2488     if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2489       if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2490           GV->getType()->getAddressSpace() == 0)
2491         return true;
2492     }
2493 
2494     // For constant expressions, fall through to the Operator code below.
2495     if (!isa<ConstantExpr>(V))
2496       return false;
2497   }
2498 
2499   if (auto *I = dyn_cast<Instruction>(V)) {
2500     if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2501       // If the possible ranges don't contain zero, then the value is
2502       // definitely non-zero.
2503       if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2504         const APInt ZeroValue(Ty->getBitWidth(), 0);
2505         if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2506           return true;
2507       }
2508     }
2509   }
2510 
2511   if (!isa<Constant>(V) && isKnownNonZeroFromAssume(V, Q))
2512     return true;
2513 
2514   // Some of the tests below are recursive, so bail out if we hit the limit.
2515   if (Depth++ >= MaxAnalysisRecursionDepth)
2516     return false;
2517 
2518   // Check for pointer simplifications.
2519 
2520   if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2521     // Alloca never returns null, malloc might.
2522     if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2523       return true;
2524 
2525     // A byval or inalloca argument may be null in an address space where
2526     // null is a defined address; a nonnull argument is assumed never null.
2527     if (const Argument *A = dyn_cast<Argument>(V)) {
2528       if (((A->hasPassPointeeByValueCopyAttr() &&
2529             !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2530            A->hasNonNullAttr()))
2531         return true;
2532     }
2533 
2534     // A Load tagged with nonnull metadata is never null.
2535     if (const LoadInst *LI = dyn_cast<LoadInst>(V))
2536       if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull))
2537         return true;
2538 
2539     if (const auto *Call = dyn_cast<CallBase>(V)) {
2540       if (Call->isReturnNonNull())
2541         return true;
2542       if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
2543         return isKnownNonZero(RP, Depth, Q);
2544     }
2545   }
2546 
2547   if (!isa<Constant>(V) &&
2548       isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
2549     return true;
2550 
2551   const Operator *I = dyn_cast<Operator>(V);
2552   if (!I)
2553     return false;
2554 
2555   unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL);
2556   switch (I->getOpcode()) {
2557   case Instruction::GetElementPtr:
2558     if (I->getType()->isPointerTy())
2559       return isGEPKnownNonNull(cast<GEPOperator>(I), Depth, Q);
2560     break;
2561   case Instruction::BitCast:
2562     if (I->getType()->isPointerTy())
2563       return isKnownNonZero(I->getOperand(0), Depth, Q);
2564     break;
2565   case Instruction::IntToPtr:
2566     // Note that we have to take special care to avoid looking through
2567     // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well
2568     // as casts that can alter the value, e.g., AddrSpaceCasts.
2569     if (!isa<ScalableVectorType>(I->getType()) &&
2570         Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
2571             Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
2572       return isKnownNonZero(I->getOperand(0), Depth, Q);
2573     break;
2574   case Instruction::PtrToInt:
2575     // Similar to int2ptr above, we can look through ptr2int here if the cast
2576     // is a no-op or an extend and not a truncate.
2577     if (!isa<ScalableVectorType>(I->getType()) &&
2578         Q.DL.getTypeSizeInBits(I->getOperand(0)->getType()).getFixedValue() <=
2579             Q.DL.getTypeSizeInBits(I->getType()).getFixedValue())
2580       return isKnownNonZero(I->getOperand(0), Depth, Q);
2581     break;
2582   case Instruction::Or:
2583     // X | Y != 0 if X != 0 or Y != 0.
2584     return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) ||
2585            isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q);
2586   case Instruction::SExt:
2587   case Instruction::ZExt:
2588     // ext X != 0 if X != 0.
2589     return isKnownNonZero(I->getOperand(0), Depth, Q);
2590 
2591   case Instruction::Shl: {
2592     // shl nuw can't remove any non-zero bits.
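         // Illustrative: for %r = shl nuw i8 %x, 3, a non-zero %x with a zero
         // %r would require set bits to be shifted out, contradicting nuw.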
2593     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2594     if (Q.IIQ.hasNoUnsignedWrap(BO))
2595       return isKnownNonZero(I->getOperand(0), Depth, Q);
2596 
2597     // shl X, Y != 0 if X is odd.  Note that the value of the shift is undefined
2598     // if the lowest bit is shifted off the end.
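         // E.g. if bit 0 of %x is known set, shl i8 %x, %c keeps that bit at
         // position %c for any in-range shift amount, so the result is
         // non-zero (out-of-range shift amounts are undefined anyway).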
2599     KnownBits Known(BitWidth);
2600     computeKnownBits(I->getOperand(0), DemandedElts, Known, Depth, Q);
2601     if (Known.One[0])
2602       return true;
2603     break;
2604   }
2605   case Instruction::LShr:
2606   case Instruction::AShr: {
2607     // shr exact can only shift out zero bits.
2608     const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V);
2609     if (BO->isExact())
2610       return isKnownNonZero(I->getOperand(0), Depth, Q);
2611 
2612     // shr X, Y != 0 if X is negative.  Note that the value of the shift is not
2613     // defined if the sign bit is shifted off the end.
2614     KnownBits Known =
2615         computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2616     if (Known.isNegative())
2617       return true;
2618 
2619     // If the shifter operand is a constant, and all of the bits shifted
2620     // out are known to be zero, and X is known non-zero then at least one
2621     // non-zero bit must remain.
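         // E.g. lshr i8 %x, 2 with the low two bits of %x known zero: any set
         // bit of a non-zero %x sits at position >= 2 and survives the shift.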
2622     if (ConstantInt *Shift = dyn_cast<ConstantInt>(I->getOperand(1))) {
2623       auto ShiftVal = Shift->getLimitedValue(BitWidth - 1);
2624       // Is there a known one in the portion not shifted out?
2625       if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal)
2626         return true;
2627       // Are all the bits to be shifted out known zero?
2628       if (Known.countMinTrailingZeros() >= ShiftVal)
2629         return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2630     }
2631     break;
2632   }
2633   case Instruction::UDiv:
2634   case Instruction::SDiv:
2635     // div exact can only produce a zero if the dividend is zero.
2636     if (cast<PossiblyExactOperator>(I)->isExact())
2637       return isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q);
2638     break;
2639   case Instruction::Add: {
2640     // X + Y.
2641     KnownBits XKnown =
2642         computeKnownBits(I->getOperand(0), DemandedElts, Depth, Q);
2643     KnownBits YKnown =
2644         computeKnownBits(I->getOperand(1), DemandedElts, Depth, Q);
2645 
2646     // If X and Y are both non-negative (as signed values) then their sum is not
2647     // zero unless both X and Y are zero.
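         // Worked example in i8: with 0 <= X, Y <= 127 the sum is at most 254,
         // so it cannot wrap; X + Y == 0 then requires X == Y == 0.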
2648     if (XKnown.isNonNegative() && YKnown.isNonNegative())
2649       if (isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) ||
2650           isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q))
2651         return true;
2652 
2653     // If X and Y are both negative (as signed values) then their sum is not
2654     // zero unless both X and Y equal INT_MIN.
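         // Worked example in i8: X, Y in [-128, -1] gives X + Y in [-256, -2],
         // and the only value in that range that wraps to 0 is
         // -256 == -128 + -128.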
2655     if (XKnown.isNegative() && YKnown.isNegative()) {
2656       APInt Mask = APInt::getSignedMaxValue(BitWidth);
2657       // The sign bit of X is set.  If some other bit is set then X is not equal
2658       // to INT_MIN.
2659       if (XKnown.One.intersects(Mask))
2660         return true;
2661       // The sign bit of Y is set.  If some other bit is set then Y is not equal
2662       // to INT_MIN.
2663       if (YKnown.One.intersects(Mask))
2664         return true;
2665     }
2666 
2667     // The sum of a non-negative number and a power of two is not zero.
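         // E.g. in i8, X in [0, 127] plus Y == 2^k could only wrap to 0 if
         // X == 256 - 2^k >= 128, contradicting X's non-negativity.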
2668     if (XKnown.isNonNegative() &&
2669         isKnownToBeAPowerOfTwo(I->getOperand(1), /*OrZero*/ false, Depth, Q))
2670       return true;
2671     if (YKnown.isNonNegative() &&
2672         isKnownToBeAPowerOfTwo(I->getOperand(0), /*OrZero*/ false, Depth, Q))
2673       return true;
2674     break;
2675   }
2676   case Instruction::Mul: {
2677     // If X and Y are non-zero then so is X * Y as long as the multiplication
2678     // does not overflow.
2679     const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
2680     if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) &&
2681         isKnownNonZero(I->getOperand(0), DemandedElts, Depth, Q) &&
2682         isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q))
2683       return true;
2684     break;
2685   }
2686   case Instruction::Select:
2687     // (C ? X : Y) != 0 if X != 0 and Y != 0.
2688     if (isKnownNonZero(I->getOperand(1), DemandedElts, Depth, Q) &&
2689         isKnownNonZero(I->getOperand(2), DemandedElts, Depth, Q))
2690       return true;
2691     break;
2692   case Instruction::PHI: {
2693     auto *PN = cast<PHINode>(I);
2694     if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN))
2695       return true;
2696 
2697     // Check if all incoming values are non-zero using recursion.
2698     Query RecQ = Q;
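         // Jumping straight to one below the maximum depth limits the PHI
         // recursion to a single extra level, so cyclic PHI graphs terminate.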
2699     unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2700     return llvm::all_of(PN->operands(), [&](const Use &U) {
2701       if (U.get() == PN)
2702         return true;
2703       RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2704       return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2705     });
2706   }
2707   case Instruction::ExtractElement:
2708     if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2709       const Value *Vec = EEI->getVectorOperand();
2710       const Value *Idx = EEI->getIndexOperand();
2711       auto *CIdx = dyn_cast<ConstantInt>(Idx);
2712       if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2713         unsigned NumElts = VecTy->getNumElements();
2714         APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2715         if (CIdx && CIdx->getValue().ult(NumElts))
2716           DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2717         return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2718       }
2719     }
2720     break;
2721   case Instruction::Freeze:
2722     return isKnownNonZero(I->getOperand(0), Depth, Q) &&
2723            isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
2724                                      Depth);
2725   case Instruction::Call:
2726     if (cast<CallInst>(I)->getIntrinsicID() == Intrinsic::vscale)
2727       return true;
2728     break;
2729   }
2730 
2731   KnownBits Known(BitWidth);
2732   computeKnownBits(V, DemandedElts, Known, Depth, Q);
2733   return Known.One != 0;
2734 }
2735 
2736 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
2737   auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2738   APInt DemandedElts =
2739       FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2740   return isKnownNonZero(V, DemandedElts, Depth, Q);
2741 }
2742 
2743 /// If the pair of operators are the same invertible function, return the
2744 /// operands of the function corresponding to each input. Otherwise, return
2745 /// std::nullopt.  An invertible function is one that is 1-to-1 and maps every
2746 /// input value to exactly one output value.  This is equivalent to saying
2747 /// that Op1 and Op2 are equal exactly when the specified pair of operands
2748 /// are equal (except that Op1 and Op2 may be poison more often).
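     ///
     /// For example, "add %x, %c" and "add %y, %c" form the same invertible
     /// function of %x and %y: the sums are equal exactly when %x == %y.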
2749 static std::optional<std::pair<Value*, Value*>>
2750 getInvertibleOperands(const Operator *Op1,
2751                       const Operator *Op2) {
2752   if (Op1->getOpcode() != Op2->getOpcode())
2753     return std::nullopt;
2754 
2755   auto getOperands = [&](unsigned OpNum) -> auto {
2756     return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2757   };
2758 
2759   switch (Op1->getOpcode()) {
2760   default:
2761     break;
2762   case Instruction::Add:
2763   case Instruction::Sub:
2764     if (Op1->getOperand(0) == Op2->getOperand(0))
2765       return getOperands(1);
2766     if (Op1->getOperand(1) == Op2->getOperand(1))
2767       return getOperands(0);
2768     break;
2769   case Instruction::Mul: {
2770     // invertible if A * B == (A * B) mod 2^N, where A and B are integers
2771     // and N is the bitwidth.  The nsw case is non-obvious, but proven by
2772     // alive2: https://alive2.llvm.org/ce/z/Z6D5qK
2773     auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2774     auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2775     if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2776         (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2777       break;
2778 
2779     // Assume operand order has been canonicalized.
2780     if (Op1->getOperand(1) == Op2->getOperand(1) &&
2781         isa<ConstantInt>(Op1->getOperand(1)) &&
2782         !cast<ConstantInt>(Op1->getOperand(1))->isZero())
2783       return getOperands(0);
2784     break;
2785   }
2786   case Instruction::Shl: {
2787     // Same as multiplies, except that we don't need to check for a non-zero
2788     // multiplier: a shift always multiplies by a non-zero power of two.
2789     auto *OBO1 = cast<OverflowingBinaryOperator>(Op1);
2790     auto *OBO2 = cast<OverflowingBinaryOperator>(Op2);
2791     if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) &&
2792         (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap()))
2793       break;
2794 
2795     if (Op1->getOperand(1) == Op2->getOperand(1))
2796       return getOperands(0);
2797     break;
2798   }
2799   case Instruction::AShr:
2800   case Instruction::LShr: {
2801     auto *PEO1 = cast<PossiblyExactOperator>(Op1);
2802     auto *PEO2 = cast<PossiblyExactOperator>(Op2);
2803     if (!PEO1->isExact() || !PEO2->isExact())
2804       break;
2805 
2806     if (Op1->getOperand(1) == Op2->getOperand(1))
2807       return getOperands(0);
2808     break;
2809   }
2810   case Instruction::SExt:
2811   case Instruction::ZExt:
2812     if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType())
2813       return getOperands(0);
2814     break;
2815   case Instruction::PHI: {
2816     const PHINode *PN1 = cast<PHINode>(Op1);
2817     const PHINode *PN2 = cast<PHINode>(Op2);
2818 
2819     // If PN1 and PN2 are both recurrences, can we prove the entire recurrences
2820     // are a single invertible function of the start values? Note that repeated
2821     // application of an invertible function is also invertible.
2822     BinaryOperator *BO1 = nullptr;
2823     Value *Start1 = nullptr, *Step1 = nullptr;
2824     BinaryOperator *BO2 = nullptr;
2825     Value *Start2 = nullptr, *Step2 = nullptr;
2826     if (PN1->getParent() != PN2->getParent() ||
2827         !matchSimpleRecurrence(PN1, BO1, Start1, Step1) ||
2828         !matchSimpleRecurrence(PN2, BO2, Start2, Step2))
2829       break;
2830 
2831     auto Values = getInvertibleOperands(cast<Operator>(BO1),
2832                                         cast<Operator>(BO2));
2833     if (!Values)
2834        break;
2835 
2836     // We have to be careful of mutually defined recurrences here.  Ex:
2837     // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V
2838     // * X_i = Y_i = X_(i-1) OP Y_(i-1)
2839     // The invertibility of these is complicated, and not worth reasoning
2840     // about (yet?).
2841     if (Values->first != PN1 || Values->second != PN2)
2842       break;
2843 
2844     return std::make_pair(Start1, Start2);
2845   }
2846   }
2847   return std::nullopt;
2848 }
2849 
2850 /// Return true if V2 == V1 + X, where X is known non-zero.
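     /// E.g. for %v2 = add i8 %v1, %x with %x known non-zero: %v2 == %v1 would
     /// force %x == 0, so the two values must differ.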
2851 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2852                            const Query &Q) {
2853   const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2854   if (!BO || BO->getOpcode() != Instruction::Add)
2855     return false;
2856   Value *Op = nullptr;
2857   if (V2 == BO->getOperand(0))
2858     Op = BO->getOperand(1);
2859   else if (V2 == BO->getOperand(1))
2860     Op = BO->getOperand(0);
2861   else
2862     return false;
2863   return isKnownNonZero(Op, Depth + 1, Q);
2864 }
2865 
2866 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2867 /// the multiplication is nuw or nsw.
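     /// E.g. for %v2 = mul nuw i8 %v1, 3: %v2 == %v1 would require the
     /// multiplication to wrap, which nuw forbids for a non-zero %v1.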
2868 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2869                           const Query &Q) {
2870   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2871     const APInt *C;
2872     return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2873            (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2874            !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
2875   }
2876   return false;
2877 }
2878 
2879 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2880 /// the shift is nuw or nsw.
2881 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2882                           const Query &Q) {
2883   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2884     const APInt *C;
2885     return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2886            (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2887            !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
2888   }
2889   return false;
2890 }
2891 
2892 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2893                            unsigned Depth, const Query &Q) {
2894   // Check that the two PHIs are in the same block.
2895   if (PN1->getParent() != PN2->getParent())
2896     return false;
2897 
2898   SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2899   bool UsedFullRecursion = false;
2900   for (const BasicBlock *IncomBB : PN1->blocks()) {
2901     if (!VisitedBBs.insert(IncomBB).second)
2902       continue; // Don't reprocess blocks that we have dealt with already.
2903     const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2904     const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2905     const APInt *C1, *C2;
2906     if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2907       continue;
2908 
2909     // Only one pair of phi operands is allowed for full recursion.
2910     if (UsedFullRecursion)
2911       return false;
2912 
2913     Query RecQ = Q;
2914     RecQ.CxtI = IncomBB->getTerminator();
2915     if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2916       return false;
2917     UsedFullRecursion = true;
2918   }
2919   return true;
2920 }
2921 
2922 /// Return true if it is known that V1 != V2.
2923 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2924                             const Query &Q) {
2925   if (V1 == V2)
2926     return false;
2927   if (V1->getType() != V2->getType())
2928     // We can't look through casts yet.
2929     return false;
2930 
2931   if (Depth >= MaxAnalysisRecursionDepth)
2932     return false;
2933 
2934   // See if we can recurse through (exactly one of) our operands.  This
2935   // requires that our operation be 1-to-1 and map every input value to
2936   // exactly one output value.  Such an operation is invertible.
2937   auto *O1 = dyn_cast<Operator>(V1);
2938   auto *O2 = dyn_cast<Operator>(V2);
2939   if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2940     if (auto Values = getInvertibleOperands(O1, O2))
2941       return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2942 
2943     if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2944       const PHINode *PN2 = cast<PHINode>(V2);
2945       // FIXME: This is missing a generalization to handle the case where one is
2946       // a PHI and another one isn't.
2947       if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2948         return true;
2949     }
2950   }
2951 
2952   if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2953     return true;
2954 
2955   if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2956     return true;
2957 
2958   if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2959     return true;
2960 
2961   if (V1->getType()->isIntOrIntVectorTy()) {
2962     // Are any known bits in V1 contradictory to known bits in V2? If V1
2963     // has a known zero where V2 has a known one, they must not be equal.
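         // E.g. if V1 is known even (bit 0 clear) and V2 is known odd (bit 0
         // set), the two values can never be equal.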
2964     KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2965     KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2966 
2967     if (Known1.Zero.intersects(Known2.One) ||
2968         Known2.Zero.intersects(Known1.One))
2969       return true;
2970   }
2971   return false;
2972 }
2973 
2974 /// Return true if 'V & Mask' is known to be zero, i.e. every bit set in Mask
2975 /// is known to be zero in V.  We use this predicate to simplify operations
2976 /// downstream.
2977 ///
2978 /// This function is defined on values with integer type, values with pointer
2979 /// type, and vectors of integers.  In the case
2980 /// where V is a vector, the mask, known zero, and known one values are the
2981 /// same width as the vector element, and the bit is set only if it is true
2982 /// for all of the elements in the vector.
2983 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2984                        const Query &Q) {
2985   KnownBits Known(Mask.getBitWidth());
2986   computeKnownBits(V, Known, Depth, Q);
2987   return Mask.isSubsetOf(Known.Zero);
2988 }
2989 
2990 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2991 // Returns the input and lower/upper bounds.
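     // E.g. %t = smin i32 %in, 255 followed by smax i32 %t, 0 matches with
     // In == %in, CLow == 0, and CHigh == 255.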
2992 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In,
2993                                 const APInt *&CLow, const APInt *&CHigh) {
2994   assert(isa<Operator>(Select) &&
2995          cast<Operator>(Select)->getOpcode() == Instruction::Select &&
2996          "Input should be a Select!");
2997 
2998   const Value *LHS = nullptr, *RHS = nullptr;
2999   SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor;
3000   if (SPF != SPF_SMAX && SPF != SPF_SMIN)
3001     return false;
3002 
3003   if (!match(RHS, m_APInt(CLow)))
3004     return false;
3005 
3006   const Value *LHS2 = nullptr, *RHS2 = nullptr;
3007   SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor;
3008   if (getInverseMinMaxFlavor(SPF) != SPF2)
3009     return false;
3010 
3011   if (!match(RHS2, m_APInt(CHigh)))
3012     return false;
3013 
3014   if (SPF == SPF_SMIN)
3015     std::swap(CLow, CHigh);
3016 
3017   In = LHS2;
3018   return CLow->sle(*CHigh);
3019 }
3020 
3021 static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II,
3022                                          const APInt *&CLow,
3023                                          const APInt *&CHigh) {
3024   assert((II->getIntrinsicID() == Intrinsic::smin ||
3025           II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax");
3026 
3027   Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3028   auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
3029   if (!InnerII || InnerII->getIntrinsicID() != InverseID ||
3030       !match(II->getArgOperand(1), m_APInt(CLow)) ||
3031       !match(InnerII->getArgOperand(1), m_APInt(CHigh)))
3032     return false;
3033 
3034   if (II->getIntrinsicID() == Intrinsic::smin)
3035     std::swap(CLow, CHigh);
3036   return CLow->sle(*CHigh);
3037 }
3038 
3039 /// For vector constants, loop over the elements and find the constant with the
3040 /// minimum number of sign bits. Return 0 if the value is not a vector constant
3041 /// or if any element was not analyzed; otherwise, return the count for the
3042 /// element with the minimum number of sign bits.
3043 static unsigned computeNumSignBitsVectorConstant(const Value *V,
3044                                                  const APInt &DemandedElts,
3045                                                  unsigned TyBits) {
3046   const auto *CV = dyn_cast<Constant>(V);
3047   if (!CV || !isa<FixedVectorType>(CV->getType()))
3048     return 0;
3049 
3050   unsigned MinSignBits = TyBits;
3051   unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements();
3052   for (unsigned i = 0; i != NumElts; ++i) {
3053     if (!DemandedElts[i])
3054       continue;
3055     // If we find a non-ConstantInt, bail out.
3056     auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i));
3057     if (!Elt)
3058       return 0;
3059 
3060     MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits());
3061   }
3062 
3063   return MinSignBits;
3064 }
3065 
3066 static unsigned ComputeNumSignBitsImpl(const Value *V,
3067                                        const APInt &DemandedElts,
3068                                        unsigned Depth, const Query &Q);
3069 
3070 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
3071                                    unsigned Depth, const Query &Q) {
3072   unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q);
3073   assert(Result > 0 && "At least one sign bit needs to be present!");
3074   return Result;
3075 }
3076 
3077 /// Return the number of times the sign bit of the register is replicated into
3078 /// the other bits. We know that at least 1 bit is always equal to the sign bit
3079 /// (itself), but other cases can give us information. For example, immediately
3080 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each
3081 /// other, so we return 3. For vectors, return the number of sign bits for the
3082 /// vector element with the minimum number of known sign bits of the demanded
3083 /// elements in the vector specified by DemandedElts.
3084 static unsigned ComputeNumSignBitsImpl(const Value *V,
3085                                        const APInt &DemandedElts,
3086                                        unsigned Depth, const Query &Q) {
3087   Type *Ty = V->getType();
3088 #ifndef NDEBUG
3089   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3090 
3091   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3092     assert(
3093         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3094         "DemandedElt width should equal the fixed vector number of elements");
3095   } else {
3096     assert(DemandedElts == APInt(1, 1) &&
3097            "DemandedElt width should be 1 for scalars");
3098   }
3099 #endif
3100 
3101   // We return the minimum number of sign bits that are guaranteed to be present
3102   // in V, so for undef we have to conservatively return 1.  We don't have the
3103   // same behavior for poison though -- that's a FIXME today.
3104 
3105   Type *ScalarTy = Ty->getScalarType();
3106   unsigned TyBits = ScalarTy->isPointerTy() ?
3107     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
3108     Q.DL.getTypeSizeInBits(ScalarTy);
3109 
3110   unsigned Tmp, Tmp2;
3111   unsigned FirstAnswer = 1;
3112 
3113   // Note that ConstantInt is handled by the general computeKnownBits case
3114   // below.
3115 
3116   if (Depth == MaxAnalysisRecursionDepth)
3117     return 1;
3118 
3119   if (auto *U = dyn_cast<Operator>(V)) {
3120     switch (Operator::getOpcode(V)) {
3121     default: break;
3122     case Instruction::SExt:
3123       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3124       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
3125 
3126     case Instruction::SDiv: {
3127       const APInt *Denominator;
3128       // sdiv X, C -> adds log(C) sign bits.
3129       if (match(U->getOperand(1), m_APInt(Denominator))) {
3130 
3131         // Ignore non-positive denominator.
3132         if (!Denominator->isStrictlyPositive())
3133           break;
3134 
3135         // Calculate the incoming numerator bits.
3136         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3137 
3138         // Add floor(log(C)) bits to the numerator bits.
3139         return std::min(TyBits, NumBits + Denominator->logBase2());
3140       }
3141       break;
3142     }
3143 
3144     case Instruction::SRem: {
3145       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3146 
3147       const APInt *Denominator;
3148       // srem X, C -> we know that the result is within [-C+1,C) when C is a
3149       // positive constant.  This let us put a lower bound on the number of sign
3150       // positive constant.  This lets us put a lower bound on the number of sign
3151       if (match(U->getOperand(1), m_APInt(Denominator))) {
3152 
3153         // Ignore non-positive denominator.
3154         if (Denominator->isStrictlyPositive()) {
3155           // Calculate the leading sign bit constraints by examining the
3156           // denominator.  Given that the denominator is positive, there are two
3157           // cases:
3158           //
3159           //  1. The numerator is positive. The result range is [0,C) and
3160           //     [0,C) u< (1 << ceilLogBase2(C)).
3161           //
3162           //  2. The numerator is negative. Then the result range is (-C,0] and
3163           //     integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3164           //
3165           // Thus a lower bound on the number of sign bits is `TyBits -
3166           // ceilLogBase2(C)`.
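               // Worked example: srem i8 %x, 4 yields values in (-4, 4), and
               // every such value has at least 8 - ceilLogBase2(4) == 6 sign
               // bits.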
3167 
3168           unsigned ResBits = TyBits - Denominator->ceilLogBase2();
3169           Tmp = std::max(Tmp, ResBits);
3170         }
3171       }
3172       return Tmp;
3173     }
3174 
3175     case Instruction::AShr: {
3176       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3177       // ashr X, C   -> adds C sign bits.  Vectors too.
3178       const APInt *ShAmt;
3179       if (match(U->getOperand(1), m_APInt(ShAmt))) {
3180         if (ShAmt->uge(TyBits))
3181           break; // Bad shift.
3182         unsigned ShAmtLimited = ShAmt->getZExtValue();
3183         Tmp += ShAmtLimited;
3184         if (Tmp > TyBits) Tmp = TyBits;
3185       }
3186       return Tmp;
3187     }
3188     case Instruction::Shl: {
3189       const APInt *ShAmt;
3190       if (match(U->getOperand(1), m_APInt(ShAmt))) {
3191         // shl destroys sign bits.
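             // E.g. the i8 value 0b00000111 has 5 sign bits, but shifting it
             // left by 2 gives 0b00011100 with only 5 - 2 == 3.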
3192         Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3193         if (ShAmt->uge(TyBits) ||   // Bad shift.
3194             ShAmt->uge(Tmp)) break; // Shifted all sign bits out.
3195         Tmp2 = ShAmt->getZExtValue();
3196         return Tmp - Tmp2;
3197       }
3198       break;
3199     }
3200     case Instruction::And:
3201     case Instruction::Or:
3202     case Instruction::Xor: // NOT is handled here.
3203       // Logical binary ops preserve the number of sign bits at the worst.
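           // The top k bits of each operand are copies of that operand's sign
           // bit, so the top min(k0, k1) bits of the result are copies of the
           // result's sign bit.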
3204       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3205       if (Tmp != 1) {
3206         Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3207         FirstAnswer = std::min(Tmp, Tmp2);
3208         // We computed what we know about the sign bits as our first
3209         // answer. Now proceed to the generic code that uses
3210         // computeKnownBits, and pick whichever answer is better.
3211       }
3212       break;
3213 
3214     case Instruction::Select: {
3215       // If we have a clamp pattern, we know that the number of sign bits will
3216       // be the minimum of the clamp min/max range.
3217       const Value *X;
3218       const APInt *CLow, *CHigh;
3219       if (isSignedMinMaxClamp(U, X, CLow, CHigh))
3220         return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3221 
3222       Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3223       if (Tmp == 1) break;
3224       Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q);
3225       return std::min(Tmp, Tmp2);
3226     }
3227 
3228     case Instruction::Add:
3229       // Add can have at most one carry bit.  Thus we know that the output
3230       // is, at worst, one more bit than the inputs.
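           // E.g. two i8 values with 3 sign bits each lie in [-32, 31]; their
           // sum lies in [-64, 62], which still guarantees 3 - 1 == 2 sign
           // bits.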
3231       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3232       if (Tmp == 1) break;
3233 
3234       // Special case decrementing a value (ADD X, -1):
3235       if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1)))
3236         if (CRHS->isAllOnesValue()) {
3237           KnownBits Known(TyBits);
3238           computeKnownBits(U->getOperand(0), Known, Depth + 1, Q);
3239 
3240           // If the input is known to be 0 or 1, the output is 0/-1, which is
3241           // all sign bits set.
3242           if ((Known.Zero | 1).isAllOnes())
3243             return TyBits;
3244 
3245           // If we are subtracting one from a positive number, there is no carry
3246           // out of the result.
3247           if (Known.isNonNegative())
3248             return Tmp;
3249         }
3250 
3251       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3252       if (Tmp2 == 1) break;
3253       return std::min(Tmp, Tmp2) - 1;
3254 
3255     case Instruction::Sub:
3256       Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3257       if (Tmp2 == 1) break;
3258 
3259       // Handle NEG.
3260       if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0)))
3261         if (CLHS->isNullValue()) {
3262           KnownBits Known(TyBits);
3263           computeKnownBits(U->getOperand(1), Known, Depth + 1, Q);
3264           // If the input is known to be 0 or 1, the output is 0/-1, which is
3265           // all sign bits set.
3266           if ((Known.Zero | 1).isAllOnes())
3267             return TyBits;
3268 
3269           // If the input is known to be positive (the sign bit is known clear),
3270           // the output of the NEG has the same number of sign bits as the
3271           // input.
3272           if (Known.isNonNegative())
3273             return Tmp2;
3274 
3275           // Otherwise, we treat this like a SUB.
3276         }
3277 
3278       // Sub can have at most one carry bit.  Thus we know that the output
3279       // is, at worst, one more bit than the inputs.
3280       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3281       if (Tmp == 1) break;
3282       return std::min(Tmp, Tmp2) - 1;
3283 
3284     case Instruction::Mul: {
3285       // The output of the Mul can be at most twice the valid bits in the
3286       // inputs.
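           // Worked example in i16: operands with 10 sign bits each carry 7
           // valid bits, so the product needs at most 14, leaving at least
           // 16 - 14 + 1 == 3 sign bits.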
3287       unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3288       if (SignBitsOp0 == 1) break;
3289       unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q);
3290       if (SignBitsOp1 == 1) break;
3291       unsigned OutValidBits =
3292           (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1);
3293       return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1;
3294     }
3295 
3296     case Instruction::PHI: {
3297       const PHINode *PN = cast<PHINode>(U);
3298       unsigned NumIncomingValues = PN->getNumIncomingValues();
3299       // Don't analyze large in-degree PHIs.
3300       if (NumIncomingValues > 4) break;
3301       // Unreachable blocks may have zero-operand PHI nodes.
3302       if (NumIncomingValues == 0) break;
3303 
3304       // Take the minimum of all incoming values.  This can't infinitely loop
3305       // because of our depth threshold.
3306       Query RecQ = Q;
3307       Tmp = TyBits;
3308       for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) {
3309         if (Tmp == 1) return Tmp;
3310         RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator();
3311         Tmp = std::min(
3312             Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ));
3313       }
3314       return Tmp;
3315     }
3316 
3317     case Instruction::Trunc: {
3318       // If the input contained enough sign bits that some remain after the
3319       // truncation, then we can make use of that. Otherwise we don't know
3320       // anything.
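           // E.g. for trunc i32 %x to i16 where %x has 20 sign bits, the low
           // 16 bits retain 20 - (32 - 16) == 4 of them.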
3321       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3322       unsigned OperandTyBits = U->getOperand(0)->getType()->getScalarSizeInBits();
3323       if (Tmp > (OperandTyBits - TyBits))
3324         return Tmp - (OperandTyBits - TyBits);
3325 
3326       return 1;
3327     }
3328 
3329     case Instruction::ExtractElement:
3330       // Look through extract element. At the moment we keep this simple and
3331       // skip tracking the specific element. But at least we might find
3332       // information valid for all elements of the vector (for example if the
3333       // vector is sign extended, shifted, etc.).
3334       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3335 
3336     case Instruction::ShuffleVector: {
3337       // Collect the minimum number of sign bits that are shared by every vector
3338       // element referenced by the shuffle.
3339       auto *Shuf = dyn_cast<ShuffleVectorInst>(U);
3340       if (!Shuf) {
3341         // FIXME: Add support for shufflevector constant expressions.
3342         return 1;
3343       }
3344       APInt DemandedLHS, DemandedRHS;
3345       // For undef elements, we don't know anything about the common state of
3346       // the shuffle result.
3347       if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS))
3348         return 1;
3349       Tmp = std::numeric_limits<unsigned>::max();
3350       if (!!DemandedLHS) {
3351         const Value *LHS = Shuf->getOperand(0);
3352         Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q);
3353       }
3354       // If we don't know anything, early out and try computeKnownBits
3355       // fall-back.
3356       if (Tmp == 1)
3357         break;
3358       if (!!DemandedRHS) {
3359         const Value *RHS = Shuf->getOperand(1);
3360         Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q);
3361         Tmp = std::min(Tmp, Tmp2);
3362       }
3363       // If we don't know anything, early out and try computeKnownBits
3364       // fall-back.
3365       if (Tmp == 1)
3366         break;
3367       assert(Tmp <= TyBits && "Failed to determine minimum sign bits");
3368       return Tmp;
3369     }
3370     case Instruction::Call: {
3371       if (const auto *II = dyn_cast<IntrinsicInst>(U)) {
3372         switch (II->getIntrinsicID()) {
3373         default: break;
3374         case Intrinsic::abs:
3375           Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3376           if (Tmp == 1) break;
3377 
3378           // Absolute value reduces number of sign bits by at most 1.
3379           return Tmp - 1;
3380         case Intrinsic::smin:
3381         case Intrinsic::smax: {
3382           const APInt *CLow, *CHigh;
3383           if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh))
3384             return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits());
3385         }
3386         }
3387       }
3388     }
3389     }
3390   }
3391 
3392   // Finally, if we can prove that the top bits of the result are 0's or 1's,
3393   // use this information.
3394 
3395   // If we can examine all elements of a vector constant successfully, we're
3396   // done (we can't do any better than that). If not, keep trying.
3397   if (unsigned VecSignBits =
3398           computeNumSignBitsVectorConstant(V, DemandedElts, TyBits))
3399     return VecSignBits;
3400 
3401   KnownBits Known(TyBits);
3402   computeKnownBits(V, DemandedElts, Known, Depth, Q);
3403 
3404   // If we know that the sign bit is either zero or one, determine the number of
3405   // identical bits in the top of the input value.
3406   return std::max(FirstAnswer, Known.countMinSignBits());
3407 }
3408 
3409 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB,
3410                                             const TargetLibraryInfo *TLI) {
3411   const Function *F = CB.getCalledFunction();
3412   if (!F)
3413     return Intrinsic::not_intrinsic;
3414 
3415   if (F->isIntrinsic())
3416     return F->getIntrinsicID();
3417 
3418   // We are going to infer semantics of a library function based on mapping it
3419   // to an LLVM intrinsic. Check that the library function is available at
3420   // this call site and in this environment.
3421   LibFunc Func;
3422   if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) ||
3423       !CB.onlyReadsMemory())
3424     return Intrinsic::not_intrinsic;
3425 
3426   switch (Func) {
3427   default:
3428     break;
3429   case LibFunc_sin:
3430   case LibFunc_sinf:
3431   case LibFunc_sinl:
3432     return Intrinsic::sin;
3433   case LibFunc_cos:
3434   case LibFunc_cosf:
3435   case LibFunc_cosl:
3436     return Intrinsic::cos;
3437   case LibFunc_exp:
3438   case LibFunc_expf:
3439   case LibFunc_expl:
3440     return Intrinsic::exp;
3441   case LibFunc_exp2:
3442   case LibFunc_exp2f:
3443   case LibFunc_exp2l:
3444     return Intrinsic::exp2;
3445   case LibFunc_log:
3446   case LibFunc_logf:
3447   case LibFunc_logl:
3448     return Intrinsic::log;
3449   case LibFunc_log10:
3450   case LibFunc_log10f:
3451   case LibFunc_log10l:
3452     return Intrinsic::log10;
3453   case LibFunc_log2:
3454   case LibFunc_log2f:
3455   case LibFunc_log2l:
3456     return Intrinsic::log2;
3457   case LibFunc_fabs:
3458   case LibFunc_fabsf:
3459   case LibFunc_fabsl:
3460     return Intrinsic::fabs;
3461   case LibFunc_fmin:
3462   case LibFunc_fminf:
3463   case LibFunc_fminl:
3464     return Intrinsic::minnum;
3465   case LibFunc_fmax:
3466   case LibFunc_fmaxf:
3467   case LibFunc_fmaxl:
3468     return Intrinsic::maxnum;
3469   case LibFunc_copysign:
3470   case LibFunc_copysignf:
3471   case LibFunc_copysignl:
3472     return Intrinsic::copysign;
3473   case LibFunc_floor:
3474   case LibFunc_floorf:
3475   case LibFunc_floorl:
3476     return Intrinsic::floor;
3477   case LibFunc_ceil:
3478   case LibFunc_ceilf:
3479   case LibFunc_ceill:
3480     return Intrinsic::ceil;
3481   case LibFunc_trunc:
3482   case LibFunc_truncf:
3483   case LibFunc_truncl:
3484     return Intrinsic::trunc;
3485   case LibFunc_rint:
3486   case LibFunc_rintf:
3487   case LibFunc_rintl:
3488     return Intrinsic::rint;
3489   case LibFunc_nearbyint:
3490   case LibFunc_nearbyintf:
3491   case LibFunc_nearbyintl:
3492     return Intrinsic::nearbyint;
3493   case LibFunc_round:
3494   case LibFunc_roundf:
3495   case LibFunc_roundl:
3496     return Intrinsic::round;
3497   case LibFunc_roundeven:
3498   case LibFunc_roundevenf:
3499   case LibFunc_roundevenl:
3500     return Intrinsic::roundeven;
3501   case LibFunc_pow:
3502   case LibFunc_powf:
3503   case LibFunc_powl:
3504     return Intrinsic::pow;
3505   case LibFunc_sqrt:
3506   case LibFunc_sqrtf:
3507   case LibFunc_sqrtl:
3508     return Intrinsic::sqrt;
3509   }
3510 
3511   return Intrinsic::not_intrinsic;
3512 }
3513 
3514 /// Return true if we can prove that the specified FP value is never equal to
3515 /// -0.0.
3516 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee
3517 ///       that a value is not -0.0. It only guarantees that -0.0 may be treated
3518 ///       the same as +0.0 in floating-point ops.
3519 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
3520                                 unsigned Depth) {
3521   if (auto *CFP = dyn_cast<ConstantFP>(V))
3522     return !CFP->getValueAPF().isNegZero();
3523 
3524   if (Depth == MaxAnalysisRecursionDepth)
3525     return false;
3526 
3527   auto *Op = dyn_cast<Operator>(V);
3528   if (!Op)
3529     return false;
3530 
3531   // (fadd x, +0.0) is guaranteed to return +0.0, not -0.0.
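       // This holds because -0.0 + +0.0 == +0.0 under the default rounding
       // mode, and for any other x the sum is x itself, which is not -0.0.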
3532   if (match(Op, m_FAdd(m_Value(), m_PosZeroFP())))
3533     return true;
3534 
3535   // sitofp and uitofp turn into +0.0 for zero.
3536   if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op))
3537     return true;
3538 
3539   if (auto *Call = dyn_cast<CallInst>(Op)) {
3540     Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI);
3541     switch (IID) {
3542     default:
3543       break;
3544     // sqrt(-0.0) = -0.0, no other negative results are possible.
3545     case Intrinsic::sqrt:
3546     case Intrinsic::canonicalize:
3547       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3548     case Intrinsic::experimental_constrained_sqrt: {
3549       // NOTE: This rounding mode restriction may be too strict.
3550       const auto *CI = cast<ConstrainedFPIntrinsic>(Call);
3551       if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven)
3552         return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3553       else
3554         return false;
3555     }
3556     // fabs(x) != -0.0
3557     case Intrinsic::fabs:
3558       return true;
3559     // sitofp and uitofp turn into +0.0 for zero.
3560     case Intrinsic::experimental_constrained_sitofp:
3561     case Intrinsic::experimental_constrained_uitofp:
3562       return true;
3563     }
3564   }
3565 
3566   return false;
3567 }
3568 
3569 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3570 /// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
3571 /// bit despite comparing equal.
3572 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3573                                             const TargetLibraryInfo *TLI,
3574                                             bool SignBitOnly,
3575                                             unsigned Depth) {
3576   // TODO: This function does not do the right thing when SignBitOnly is true
3577   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3578   // which flips the sign bits of NaNs.  See
3579   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3580 
3581   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3582     return !CFP->getValueAPF().isNegative() ||
3583            (!SignBitOnly && CFP->getValueAPF().isZero());
3584   }
3585 
3586   // Handle vector of constants.
3587   if (auto *CV = dyn_cast<Constant>(V)) {
3588     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3589       unsigned NumElts = CVFVTy->getNumElements();
3590       for (unsigned i = 0; i != NumElts; ++i) {
3591         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3592         if (!CFP)
3593           return false;
3594         if (CFP->getValueAPF().isNegative() &&
3595             (SignBitOnly || !CFP->getValueAPF().isZero()))
3596           return false;
3597       }
3598 
3599       // All non-negative ConstantFPs.
3600       return true;
3601     }
3602   }
3603 
3604   if (Depth == MaxAnalysisRecursionDepth)
3605     return false;
3606 
3607   const Operator *I = dyn_cast<Operator>(V);
3608   if (!I)
3609     return false;
3610 
3611   switch (I->getOpcode()) {
3612   default:
3613     break;
3614   // Unsigned integers are always nonnegative.
3615   case Instruction::UIToFP:
3616     return true;
3617   case Instruction::FDiv:
3618     // X / X is always exactly 1.0 or a NaN.
3619     if (I->getOperand(0) == I->getOperand(1) &&
3620         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3621       return true;
3622 
3623     // Set SignBitOnly for RHS, because X / -0.0 is -Inf (or NaN).
3624     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3625                                            Depth + 1) &&
3626            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI,
3627                                            /*SignBitOnly*/ true, Depth + 1);
3628   case Instruction::FMul:
3629     // X * X is always non-negative or a NaN.
3630     if (I->getOperand(0) == I->getOperand(1) &&
3631         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3632       return true;
3633 
3634     [[fallthrough]];
3635   case Instruction::FAdd:
3636   case Instruction::FRem:
3637     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3638                                            Depth + 1) &&
3639            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3640                                            Depth + 1);
3641   case Instruction::Select:
3642     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3643                                            Depth + 1) &&
3644            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3645                                            Depth + 1);
3646   case Instruction::FPExt:
3647   case Instruction::FPTrunc:
3648     // Widening/narrowing never change sign.
3649     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3650                                            Depth + 1);
3651   case Instruction::ExtractElement:
3652     // Look through extract element. At the moment we keep this simple and skip
3653     // tracking the specific element. But at least we might find information
3654     // valid for all elements of the vector.
3655     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3656                                            Depth + 1);
3657   case Instruction::Call:
3658     const auto *CI = cast<CallInst>(I);
3659     Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI);
3660     switch (IID) {
3661     default:
3662       break;
3663     case Intrinsic::canonicalize:
3664     case Intrinsic::arithmetic_fence:
3665     case Intrinsic::floor:
3666     case Intrinsic::ceil:
3667     case Intrinsic::trunc:
3668     case Intrinsic::rint:
3669     case Intrinsic::nearbyint:
3670     case Intrinsic::round:
3671     case Intrinsic::roundeven:
3672     case Intrinsic::fptrunc_round:
3673       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, Depth + 1);
3674     case Intrinsic::maxnum: {
3675       Value *V0 = I->getOperand(0), *V1 = I->getOperand(1);
3676       auto isPositiveNum = [&](Value *V) {
3677         if (SignBitOnly) {
3678           // With SignBitOnly, this is tricky because the result of
3679           // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is
3680           // a constant strictly greater than 0.0.
3681           const APFloat *C;
3682           return match(V, m_APFloat(C)) &&
3683                  *C > APFloat::getZero(C->getSemantics());
3684         }
3685 
3686         // -0.0 compares equal to 0.0, so if this operand is at least -0.0,
3687         // maxnum can't be ordered-less-than-zero.
3688         return isKnownNeverNaN(V, TLI) &&
3689                cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1);
3690       };
3691 
3692       // TODO: This could be improved. We could also check that neither operand
3693       //       has its sign bit set (and at least 1 is not-NAN?).
3694       return isPositiveNum(V0) || isPositiveNum(V1);
3695     }
3696 
3697     case Intrinsic::maximum:
3698       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3699                                              Depth + 1) ||
3700              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3701                                              Depth + 1);
3702     case Intrinsic::minnum:
3703     case Intrinsic::minimum:
3704       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3705                                              Depth + 1) &&
3706              cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3707                                              Depth + 1);
3708     case Intrinsic::exp:
3709     case Intrinsic::exp2:
3710     case Intrinsic::fabs:
3711       return true;
3712     case Intrinsic::copysign:
3713       // Only the sign operand matters.
3714       return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, true,
3715                                              Depth + 1);
3716     case Intrinsic::sqrt:
3717       // sqrt(x) is always >= -0 or NaN.  Moreover, sqrt(x) == -0 iff x == -0.
3718       if (!SignBitOnly)
3719         return true;
3720       return CI->hasNoNaNs() && (CI->hasNoSignedZeros() ||
3721                                  CannotBeNegativeZero(CI->getOperand(0), TLI));
3722 
3723     case Intrinsic::powi:
3724       if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) {
3725         // powi(x,n) is non-negative if n is even.
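             // E.g. powi(%x, 2) == %x * %x, which is either non-negative or
             // NaN.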
3726         if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0)
3727           return true;
3728       }
3729       // TODO: This is not correct.  Given that exp is an integer, here are the
3730       // ways that pow can return a negative value:
3731       //
3732       //   pow(x, exp)    --> negative if exp is odd and x is negative.
3733       //   pow(-0, exp)   --> -inf if exp is negative odd.
3734       //   pow(-0, exp)   --> -0 if exp is positive odd.
3735       //   pow(-inf, exp) --> -0 if exp is negative odd.
3736       //   pow(-inf, exp) --> -inf if exp is positive odd.
3737       //
3738       // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN,
3739       // but we must return false if x == -0.  Unfortunately we do not currently
3740       // have a way of expressing this constraint.  See details in
3741       // https://llvm.org/bugs/show_bug.cgi?id=31702.
3742       return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3743                                              Depth + 1);
3744 
3745     case Intrinsic::fma:
3746     case Intrinsic::fmuladd:
3747       // x*x+y is non-negative if y is non-negative.
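           // E.g. fma(%x, %x, %y): %x * %x is non-negative or NaN, so a
           // non-negative %y keeps the sum from being ordered less than zero.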
3748       return I->getOperand(0) == I->getOperand(1) &&
3749              (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
3750              cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3751                                              Depth + 1);
3752     }
3753     break;
3754   }
3755   return false;
3756 }
3757 
3758 bool llvm::CannotBeOrderedLessThanZero(const Value *V,
3759                                        const TargetLibraryInfo *TLI) {
3760   return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
3761 }
3762 
3763 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
3764   return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
3765 }
3766 
3767 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI,
3768                                 unsigned Depth) {
3769   assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type");
3770 
3771   // If we're told that infinities won't happen, assume they won't.
3772   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3773     if (FPMathOp->hasNoInfs())
3774       return true;
3775 
3776   // Handle scalar constants.
3777   if (auto *CFP = dyn_cast<ConstantFP>(V))
3778     return !CFP->isInfinity();
3779 
3780   if (Depth == MaxAnalysisRecursionDepth)
3781     return false;
3782 
3783   if (auto *Inst = dyn_cast<Instruction>(V)) {
3784     switch (Inst->getOpcode()) {
3785     case Instruction::Select: {
3786       return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) &&
3787              isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1);
3788     }
3789     case Instruction::SIToFP:
3790     case Instruction::UIToFP: {
3791       // Get width of largest magnitude integer (remove a bit if signed).
3792       // This still works for a signed minimum value because the largest FP
3793       // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx).
3794       int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits();
3795       if (Inst->getOpcode() == Instruction::SIToFP)
3796         --IntSize;
3797 
3798       // If the exponent of the largest finite FP value can hold the largest
3799       // integer, the result of the cast must be finite.
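           // E.g. sitofp i32 to float: the magnitude is below 2^31 and
           // ilogb(FLT_MAX) == 127 >= 31, so the result is always finite.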
3800       Type *FPTy = Inst->getType()->getScalarType();
3801       return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize;
3802     }
3803     case Instruction::FNeg:
3804     case Instruction::FPExt: {
3805       // Peek through to source op. If it is not infinity, this is not infinity.
3806       return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1);
3807     }
3808     case Instruction::FPTrunc: {
3809       // Need a range check.
3810       return false;
3811     }
3812     default:
3813       break;
3814     }
3815 
3816     if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3817       switch (II->getIntrinsicID()) {
3818       case Intrinsic::sin:
3819       case Intrinsic::cos:
3820         // Bounded results; infinite inputs produce NaN, never infinity.
3821         return true;
3822       case Intrinsic::fabs:
3823       case Intrinsic::sqrt:
3824       case Intrinsic::canonicalize:
3825       case Intrinsic::copysign:
3826       case Intrinsic::arithmetic_fence:
3827       case Intrinsic::trunc:
3828         return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1);
3829       case Intrinsic::floor:
3830       case Intrinsic::ceil:
3831       case Intrinsic::rint:
3832       case Intrinsic::nearbyint:
3833       case Intrinsic::round:
3834       case Intrinsic::roundeven:
3835         // PPC_FP128 is a special case.
3836         if (V->getType()->isMultiUnitFPType())
3837           return false;
3838         return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1);
3839       case Intrinsic::fptrunc_round:
3840         // Requires knowing the value range.
3841         return false;
3842       case Intrinsic::minnum:
3843       case Intrinsic::maxnum:
3844       case Intrinsic::minimum:
3845       case Intrinsic::maximum:
3846         return isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3847                isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3848       case Intrinsic::log:
3849       case Intrinsic::log10:
3850       case Intrinsic::log2:
3851         // log(+inf) -> +inf
3852         // log([+-]0.0) -> -inf
3853         // log(-inf) -> nan
3854         // log(-x) -> nan
3855         // TODO: We lack API to check the == 0 case.
3856         return false;
3857       case Intrinsic::exp:
3858       case Intrinsic::exp2:
3859       case Intrinsic::pow:
3860       case Intrinsic::powi:
3861       case Intrinsic::fma:
3862       case Intrinsic::fmuladd:
3863         // These can return infinities on overflow cases, so it's hard to
3864         // prove anything about them.
3865         return false;
3866       default:
3867         break;
3868       }
3869     }
3870   }
3871 
3872   // Try to handle fixed-width vector constants.
3873   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3874   if (VFVTy && isa<Constant>(V)) {
3875     // For vectors, verify that each element is not infinity.
3876     unsigned NumElts = VFVTy->getNumElements();
3877     for (unsigned i = 0; i != NumElts; ++i) {
3878       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3879       if (!Elt)
3880         return false;
3881       if (isa<UndefValue>(Elt))
3882         continue;
3883       auto *CElt = dyn_cast<ConstantFP>(Elt);
3884       if (!CElt || CElt->isInfinity())
3885         return false;
3886     }
3887     // All elements were confirmed non-infinity or undefined.
3888     return true;
3889   }
3890 
3891   // We were not able to prove that V is never infinity.
3892   return false;
3893 }
3894 
3895 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI,
3896                            unsigned Depth) {
3897   assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type");
3898 
3899   // If we're told that NaNs won't happen, assume they won't.
3900   if (auto *FPMathOp = dyn_cast<FPMathOperator>(V))
3901     if (FPMathOp->hasNoNaNs())
3902       return true;
3903 
3904   // Handle scalar constants.
3905   if (auto *CFP = dyn_cast<ConstantFP>(V))
3906     return !CFP->isNaN();
3907 
3908   if (Depth == MaxAnalysisRecursionDepth)
3909     return false;
3910 
3911   if (auto *Inst = dyn_cast<Instruction>(V)) {
3912     switch (Inst->getOpcode()) {
3913     case Instruction::FAdd:
3914     case Instruction::FSub:
3915       // Adding positive and negative infinity produces NaN.
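      // For example (illustrative): fadd(+inf, -inf) and fsub(+inf, +inf)
      // both yield NaN, so besides both operands being non-NaN, at least
      // one of them must also be known non-infinite.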
3916       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3917              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3918              (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) ||
3919               isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1));
3920 
3921     case Instruction::FMul:
3922       // Zero multiplied by infinity produces NaN.
3923       // FIXME: If neither side can be zero fmul never produces NaN.
3924       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) &&
3925              isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) &&
3926              isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3927              isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1);
3928 
3929     case Instruction::FDiv:
3930     case Instruction::FRem:
3931       // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN.
3932       return false;
3933 
3934     case Instruction::Select: {
3935       return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) &&
3936              isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1);
3937     }
3938     case Instruction::SIToFP:
3939     case Instruction::UIToFP:
3940       return true;
3941     case Instruction::FPTrunc:
3942     case Instruction::FPExt:
3943     case Instruction::FNeg:
3944       return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1);
3945     default:
3946       break;
3947     }
3948   }
3949 
3950   if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
3951     switch (II->getIntrinsicID()) {
3952     case Intrinsic::canonicalize:
3953     case Intrinsic::fabs:
3954     case Intrinsic::copysign:
3955     case Intrinsic::exp:
3956     case Intrinsic::exp2:
3957     case Intrinsic::floor:
3958     case Intrinsic::ceil:
3959     case Intrinsic::trunc:
3960     case Intrinsic::rint:
3961     case Intrinsic::nearbyint:
3962     case Intrinsic::round:
3963     case Intrinsic::roundeven:
3964     case Intrinsic::arithmetic_fence:
3965       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1);
3966     case Intrinsic::sqrt:
3967       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) &&
3968              CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI);
3969     case Intrinsic::minnum:
3970     case Intrinsic::maxnum:
3971       // If either operand is not NaN, the result is not NaN.
3972       return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) ||
3973              isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1);
3974     default:
3975       return false;
3976     }
3977   }
3978 
3979   // Try to handle fixed width vector constants
3980   auto *VFVTy = dyn_cast<FixedVectorType>(V->getType());
3981   if (VFVTy && isa<Constant>(V)) {
3982     // For vectors, verify that each element is not NaN.
3983     unsigned NumElts = VFVTy->getNumElements();
3984     for (unsigned i = 0; i != NumElts; ++i) {
3985       Constant *Elt = cast<Constant>(V)->getAggregateElement(i);
3986       if (!Elt)
3987         return false;
3988       if (isa<UndefValue>(Elt))
3989         continue;
3990       auto *CElt = dyn_cast<ConstantFP>(Elt);
3991       if (!CElt || CElt->isNaN())
3992         return false;
3993     }
3994     // All elements were confirmed not-NaN or undefined.
3995     return true;
3996   }
3997 
3998   // Was not able to prove that V never contains NaN
3999   return false;
4000 }
4001 
4002 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) {
4003 
4004   // All byte-wide stores are splatable, even of arbitrary variables.
4005   if (V->getType()->isIntegerTy(8))
4006     return V;
4007 
4008   LLVMContext &Ctx = V->getContext();
4009 
4010   // Undef doesn't constrain the byte value.
4011   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
4012   if (isa<UndefValue>(V))
4013     return UndefInt8;
4014 
4015   // Return Undef for zero-sized type.
4016   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
4017     return UndefInt8;
4018 
4019   Constant *C = dyn_cast<Constant>(V);
4020   if (!C) {
4021     // Conceptually, we could handle things like:
4022     //   %a = zext i8 %X to i16
4023     //   %b = shl i16 %a, 8
4024     //   %c = or i16 %a, %b
4025     // but until there is an example that actually needs this, it doesn't seem
4026     // worth worrying about.
4027     return nullptr;
4028   }
4029 
4030   // Handle 'null' ConstantAggregateZero etc.
4031   if (C->isNullValue())
4032     return Constant::getNullValue(Type::getInt8Ty(Ctx));
4033 
4034   // Constant floating-point values can be handled as integer values if the
4035   // corresponding integer value is "byteable".  An important case is 0.0.
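  // For example (illustrative): a float constant is bitcast to i32 and
  // handled by the integer logic below, so a bit pattern like 0xAAAAAAAA
  // splats to the byte 0xAA, while 0x80000000 (i.e. -0.0f) is rejected.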
4036   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
4037     Type *Ty = nullptr;
4038     if (CFP->getType()->isHalfTy())
4039       Ty = Type::getInt16Ty(Ctx);
4040     else if (CFP->getType()->isFloatTy())
4041       Ty = Type::getInt32Ty(Ctx);
4042     else if (CFP->getType()->isDoubleTy())
4043       Ty = Type::getInt64Ty(Ctx);
4044     // Don't handle long double formats, which have strange constraints.
4045     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
4046               : nullptr;
4047   }
4048 
4049   // We can handle constant integers whose bit width is a multiple of 8.
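  // For example (illustrative): i32 0xABABABAB is a splat of the byte
  // 0xAB, while i32 0x00ABABAB is not and yields nullptr.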
4050   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
4051     if (CI->getBitWidth() % 8 == 0) {
4052       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
4053       if (!CI->getValue().isSplat(8))
4054         return nullptr;
4055       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
4056     }
4057   }
4058 
4059   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
4060     if (CE->getOpcode() == Instruction::IntToPtr) {
4061       if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
4062         unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
4063         return isBytewiseValue(
4064             ConstantExpr::getIntegerCast(CE->getOperand(0),
4065                                          Type::getIntNTy(Ctx, BitWidth), false),
4066             DL);
4067       }
4068     }
4069   }
4070 
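  // Merge the byte patterns of two sub-values: identical patterns merge to
  // themselves, UndefInt8 merges with anything, a nullptr (failure)
  // propagates, and any other mismatch means V is not bytewise-splatable.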
4071   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
4072     if (LHS == RHS)
4073       return LHS;
4074     if (!LHS || !RHS)
4075       return nullptr;
4076     if (LHS == UndefInt8)
4077       return RHS;
4078     if (RHS == UndefInt8)
4079       return LHS;
4080     return nullptr;
4081   };
4082 
4083   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
4084     Value *Val = UndefInt8;
4085     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
4086       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
4087         return nullptr;
4088     return Val;
4089   }
4090 
4091   if (isa<ConstantAggregate>(C)) {
4092     Value *Val = UndefInt8;
4093     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
4094       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
4095         return nullptr;
4096     return Val;
4097   }
4098 
4099   // Don't try to handle the handful of other constants.
4100   return nullptr;
4101 }
4102 
4103 // This is the recursive version of BuildSubAggregate. It takes a few different
4104 // arguments. Idxs holds the indices within the nested struct From that we are
4105 // looking at now (which is of type IndexedType). IdxSkip is the number of
4106 // indices from Idxs that should be left out when inserting into the resulting
4107 // struct. To is the result struct built so far; new insertvalue instructions
4108 // build on it.
4109 static Value *BuildSubAggregate(Value *From, Value* To, Type *IndexedType,
4110                                 SmallVectorImpl<unsigned> &Idxs,
4111                                 unsigned IdxSkip,
4112                                 Instruction *InsertBefore) {
4113   StructType *STy = dyn_cast<StructType>(IndexedType);
4114   if (STy) {
4115     // Save the original To argument so we can modify it
4116     Value *OrigTo = To;
4117     // General case, the type indexed by Idxs is a struct
4118     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4119       // Process each struct element recursively
4120       Idxs.push_back(i);
4121       Value *PrevTo = To;
4122       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
4123                              InsertBefore);
4124       Idxs.pop_back();
4125       if (!To) {
4126         // Couldn't find any inserted value for this index? Cleanup
4127         while (PrevTo != OrigTo) {
4128           InsertValueInst* Del = cast<InsertValueInst>(PrevTo);
4129           PrevTo = Del->getAggregateOperand();
4130           Del->eraseFromParent();
4131         }
4132         // Stop processing elements
4133         break;
4134       }
4135     }
4136     // If we successfully found a value for each of our subaggregates
4137     if (To)
4138       return To;
4139   }
4140   // Base case: the type indexed by Idxs is not a struct, or not all of
4141   // the struct's elements had a value that was inserted directly. In the latter
4142   // case, perhaps we can't determine each of the subelements individually, but
4143   // we might be able to find the complete struct somewhere.
4144 
4145   // Find the value that is at that particular spot
4146   Value *V = FindInsertedValue(From, Idxs);
4147 
4148   if (!V)
4149     return nullptr;
4150 
4151   // Insert the value in the new (sub) aggregate
4152   return InsertValueInst::Create(To, V, ArrayRef(Idxs).slice(IdxSkip), "tmp",
4153                                  InsertBefore);
4154 }
4155 
4156 // This helper takes a nested struct and extracts a part of it (which is again a
4157 // struct) into a new value. For example, given the struct:
4158 // { a, { b, { c, d }, e } }
4159 // and the indices "1, 1" this returns
4160 // { c, d }.
4161 //
4162 // It does this by inserting an insertvalue for each element in the resulting
4163 // struct, as opposed to just inserting a single struct. This will only work if
4164 // each of the elements of the substruct are known (ie, inserted into From by an
4165 // insertvalue instruction somewhere).
4166 //
4167 // All inserted insertvalue instructions are inserted before InsertBefore
4168 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
4169                                 Instruction *InsertBefore) {
4170   assert(InsertBefore && "Must have someplace to insert!");
4171   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
4172                                                              idx_range);
4173   Value *To = PoisonValue::get(IndexedType);
4174   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
4175   unsigned IdxSkip = Idxs.size();
4176 
4177   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
4178 }
4179 
4180 /// Given an aggregate and a sequence of indices, see if the scalar value
4181 /// indexed is already around as a register, for example if it was inserted
4182 /// directly into the aggregate.
4183 ///
4184 /// If InsertBefore is not null, this function will duplicate (modified)
4185 /// insertvalues when a part of a nested struct is extracted.
4186 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
4187                                Instruction *InsertBefore) {
4188   // Nothing to index? Just return V then (this is useful at the end of our
4189   // recursion).
4190   if (idx_range.empty())
4191     return V;
4192   // We have indices, so V should have an indexable type.
4193   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
4194          "Not looking at a struct or array?");
4195   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
4196          "Invalid indices for type?");
4197 
4198   if (Constant *C = dyn_cast<Constant>(V)) {
4199     C = C->getAggregateElement(idx_range[0]);
4200     if (!C) return nullptr;
4201     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
4202   }
4203 
4204   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
4205     // Loop the indices for the insertvalue instruction in parallel with the
4206     // requested indices
4207     const unsigned *req_idx = idx_range.begin();
4208     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
4209          i != e; ++i, ++req_idx) {
4210       if (req_idx == idx_range.end()) {
4211         // We can't handle this without inserting insertvalues
4212         if (!InsertBefore)
4213           return nullptr;
4214 
4215         // The requested index identifies a part of a nested aggregate. Handle
4216         // this specially. For example,
4217         // %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
4218         // %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
4219         // %C = extractvalue {i32, { i32, i32 } } %B, 1
4220         // This can be changed into
4221         // %A = insertvalue {i32, i32 } undef, i32 10, 0
4222         // %C = insertvalue {i32, i32 } %A, i32 11, 1
4223         // which allows the unused 0,0 element from the nested struct to be
4224         // removed.
4225         return BuildSubAggregate(V, ArrayRef(idx_range.begin(), req_idx),
4226                                  InsertBefore);
4227       }
4228 
4229       // This insertvalue inserts something other than what we are looking
4230       // for. In that case, see if the (aggregate) value it was inserted into
4231       // has the value we are looking for.
4232       if (*req_idx != *i)
4233         return FindInsertedValue(I->getAggregateOperand(), idx_range,
4234                                  InsertBefore);
4235     }
4236     // If we end up here, the indices of the insertvalue match with those
4237     // requested (though possibly only partially). Now we recursively look at
4238     // the inserted value, passing any remaining indices.
4239     return FindInsertedValue(I->getInsertedValueOperand(),
4240                              ArrayRef(req_idx, idx_range.end()), InsertBefore);
4241   }
4242 
4243   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4244     // If we're extracting a value from an aggregate that was extracted from
4245     // something else, we can extract from that something else directly instead.
4246     // However, we will need to chain I's indices with the requested indices.
4247 
4248     // Calculate the number of indices required
4249     unsigned size = I->getNumIndices() + idx_range.size();
4250     // Allocate some space to put the new indices in
4251     SmallVector<unsigned, 5> Idxs;
4252     Idxs.reserve(size);
4253     // Add indices from the extract value instruction
4254     Idxs.append(I->idx_begin(), I->idx_end());
4255 
4256     // Add requested indices
4257     Idxs.append(idx_range.begin(), idx_range.end());
4258 
4259     assert(Idxs.size() == size
4260            && "Number of indices added not correct?");
4261 
4262     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4263   }
4264   // Otherwise, we don't know (such as, extracting from a function return value
4265   // or load instruction)
4266   return nullptr;
4267 }
4268 
4269 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4270                                        unsigned CharSize) {
4271   // Make sure the GEP has exactly three arguments.
4272   if (GEP->getNumOperands() != 3)
4273     return false;
4274 
4275   // Make sure the pointer being indexed points to an array of \p CharSize
4276   // integers.
4277   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4278   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4279     return false;
4280 
4281   // Check to make sure that the first operand of the GEP is an integer and
4282   // has value 0 so that we are sure we're indexing into the initializer.
4283   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4284   if (!FirstIdx || !FirstIdx->isZero())
4285     return false;
4286 
4287   return true;
4288 }
4289 
4290 // If V refers to an initialized global constant, set Slice either to
4291 // its initializer if the size of its elements equals ElementSize, or,
4292 // for ElementSize == 8, to its representation as an array of unsigned
4293 // char. Return true on success.
4294 // Offset is in units of ElementSize-sized elements.
4295 bool llvm::getConstantDataArrayInfo(const Value *V,
4296                                     ConstantDataArraySlice &Slice,
4297                                     unsigned ElementSize, uint64_t Offset) {
4298   assert(V && "V should not be null.");
4299   assert((ElementSize % 8) == 0 &&
4300          "ElementSize expected to be a multiple of the size of a byte.");
4301   unsigned ElementSizeInBytes = ElementSize / 8;
4302 
4303   // Drill down into the pointer expression V, ignoring any intervening
4304   // casts, and determine the identity of the object it references along
4305   // with the cumulative byte offset into it.
4306   const GlobalVariable *GV =
4307     dyn_cast<GlobalVariable>(getUnderlyingObject(V));
4308   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4309     // Fail if V is not based on a constant global object.
4310     return false;
4311 
4312   const DataLayout &DL = GV->getParent()->getDataLayout();
4313   APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
4314 
4315   if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
4316                                                  /*AllowNonInbounds*/ true))
4317     // Fail if a constant offset could not be determined.
4318     return false;
4319 
4320   uint64_t StartIdx = Off.getLimitedValue();
4321   if (StartIdx == UINT64_MAX)
4322     // Fail if the constant offset is excessive.
4323     return false;
4324 
4325   // Off/StartIdx is in the unit of bytes. So we need to convert to number of
4326   // elements. Simply bail out if that isn't possible.
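  // For example (illustrative): with ElementSize == 16 (2 bytes per
  // element), a byte offset of 6 maps to element index 3, while a byte
  // offset of 7 is rejected here.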
4327   if ((StartIdx % ElementSizeInBytes) != 0)
4328     return false;
4329 
4330   Offset += StartIdx / ElementSizeInBytes;
4331   ConstantDataArray *Array = nullptr;
4332   ArrayType *ArrayTy = nullptr;
4333 
4334   if (GV->getInitializer()->isNullValue()) {
4335     Type *GVTy = GV->getValueType();
4336     uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedValue();
4337     uint64_t Length = SizeInBytes / ElementSizeInBytes;
4338 
4339     Slice.Array = nullptr;
4340     Slice.Offset = 0;
4341     // Return an empty Slice for undersized constants to let callers
4342     // transform even undefined library calls into simpler, well-defined
4343     // expressions.  This is preferable to making the calls although it
4344     // prevents sanitizers from detecting such calls.
4345     Slice.Length = Length < Offset ? 0 : Length - Offset;
4346     return true;
4347   }
4348 
4349   auto *Init = const_cast<Constant *>(GV->getInitializer());
4350   if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
4351     Type *InitElTy = ArrayInit->getElementType();
4352     if (InitElTy->isIntegerTy(ElementSize)) {
4353       // If Init is an initializer for an array of the expected type
4354       // and size, use it as is.
4355       Array = ArrayInit;
4356       ArrayTy = ArrayInit->getType();
4357     }
4358   }
4359 
4360   if (!Array) {
4361     if (ElementSize != 8)
4362       // TODO: Handle conversions to larger integral types.
4363       return false;
4364 
4365     // Otherwise extract the portion of the initializer starting
4366     // at Offset as an array of bytes, and reset Offset.
4367     Init = ReadByteArrayFromGlobal(GV, Offset);
4368     if (!Init)
4369       return false;
4370 
4371     Offset = 0;
4372     Array = dyn_cast<ConstantDataArray>(Init);
4373     ArrayTy = dyn_cast<ArrayType>(Init->getType());
4374   }
4375 
4376   uint64_t NumElts = ArrayTy->getArrayNumElements();
4377   if (Offset > NumElts)
4378     return false;
4379 
4380   Slice.Array = Array;
4381   Slice.Offset = Offset;
4382   Slice.Length = NumElts - Offset;
4383   return true;
4384 }
4385 
4386 /// Extract bytes from the initializer of the constant array V, which need
4387 /// not be a nul-terminated string.  On success, store the bytes in Str and
4388 /// return true.  When TrimAtNul is set, Str will contain only the bytes up
4389 /// to but not including the first nul.  Return false on failure.
4390 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str,
4391                                  bool TrimAtNul) {
4392   ConstantDataArraySlice Slice;
4393   if (!getConstantDataArrayInfo(V, Slice, 8))
4394     return false;
4395 
4396   if (Slice.Array == nullptr) {
4397     if (TrimAtNul) {
4398       // Return a nul-terminated string even for an empty Slice.  This is
4399       // safe because all existing SimplifyLibcalls callers require string
4400       // arguments and the behavior of the functions they fold is undefined
4401       // otherwise.  Folding the calls this way is preferable to making
4402       // the undefined library calls, even though it prevents sanitizers
4403       // from reporting such calls.
4404       Str = StringRef();
4405       return true;
4406     }
4407     if (Slice.Length == 1) {
4408       Str = StringRef("", 1);
4409       return true;
4410     }
4411     // We cannot instantiate a StringRef as we do not have an appropriate string
4412     // of 0s at hand.
4413     return false;
4414   }
4415 
4416   // Start out with the entire array in the StringRef.
4417   Str = Slice.Array->getAsString();
4418   // Skip over 'offset' bytes.
4419   Str = Str.substr(Slice.Offset);
4420 
4421   if (TrimAtNul) {
4422     // Trim off the \0 and anything after it.  If the array is not nul
4423     // terminated, we just return the whole remaining string.  The client may
4424     // some other way that the string is length-bound.
4425     Str = Str.substr(0, Str.find('\0'));
4426   }
4427   return true;
4428 }
4429 
4430 // These next two are very similar to the above, but also look through PHI
4431 // nodes.
4432 // TODO: See if we can integrate these two together.
4433 
4434 /// If we can compute the length of the string pointed to by
4435 /// the specified pointer, return 'len+1'.  If we can't, return 0.
4436 static uint64_t GetStringLengthH(const Value *V,
4437                                  SmallPtrSetImpl<const PHINode*> &PHIs,
4438                                  unsigned CharSize) {
4439   // Look through noop bitcast instructions.
4440   V = V->stripPointerCasts();
4441 
4442   // If this is a PHI node, there are two cases: either we have already seen it
4443   // or we haven't.
4444   if (const PHINode *PN = dyn_cast<PHINode>(V)) {
4445     if (!PHIs.insert(PN).second)
4446       return ~0ULL;  // already in the set.
4447 
4448     // If it was new, see if all the input strings are the same length.
4449     uint64_t LenSoFar = ~0ULL;
4450     for (Value *IncValue : PN->incoming_values()) {
4451       uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize);
4452       if (Len == 0) return 0; // Unknown length -> unknown.
4453 
4454       if (Len == ~0ULL) continue;
4455 
4456       if (Len != LenSoFar && LenSoFar != ~0ULL)
4457         return 0;    // Disagree -> unknown.
4458       LenSoFar = Len;
4459     }
4460 
4461     // Success, all agree.
4462     return LenSoFar;
4463   }
4464 
4465   // strlen(select(c,x,y)) is known when strlen(x) and strlen(y) agree.
4466   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4467     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4468     if (Len1 == 0) return 0;
4469     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4470     if (Len2 == 0) return 0;
4471     if (Len1 == ~0ULL) return Len2;
4472     if (Len2 == ~0ULL) return Len1;
4473     if (Len1 != Len2) return 0;
4474     return Len1;
4475   }
4476 
4477   // Otherwise, see if we can read the string.
4478   ConstantDataArraySlice Slice;
4479   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4480     return 0;
4481 
4482   if (Slice.Array == nullptr)
4483     // Zeroinitializer (including an empty one).
4484     return 1;
4485 
4486   // Search for the first nul character.  Return a conservative result even
4487   // when there is no nul.  This is safe since otherwise the string function
4488   // being folded such as strlen is undefined, and can be preferable to
4489   // making the undefined library call.
4490   unsigned NullIndex = 0;
4491   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4492     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4493       break;
4494   }
4495 
4496   return NullIndex + 1;
4497 }
4498 
4499 /// If we can compute the length of the string pointed to by
4500 /// the specified pointer, return 'len+1'.  If we can't, return 0.
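/// For example (illustrative): for a pointer into a global initialized to
/// c"hi\00", this returns 3 (two characters plus the terminating nul).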
4501 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4502   if (!V->getType()->isPointerTy())
4503     return 0;
4504 
4505   SmallPtrSet<const PHINode*, 32> PHIs;
4506   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4507   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so return
4508   // the length of an empty string (1, for the terminating nul).
4509   return Len == ~0ULL ? 1 : Len;
4510 }
4511 
4512 const Value *
4513 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4514                                            bool MustPreserveNullness) {
4515   assert(Call &&
4516          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4517   if (const Value *RV = Call->getReturnedArgOperand())
4518     return RV;
4519   // This can be used only as an aliasing property.
4520   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4521           Call, MustPreserveNullness))
4522     return Call->getArgOperand(0);
4523   return nullptr;
4524 }
4525 
4526 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4527     const CallBase *Call, bool MustPreserveNullness) {
4528   switch (Call->getIntrinsicID()) {
4529   case Intrinsic::launder_invariant_group:
4530   case Intrinsic::strip_invariant_group:
4531   case Intrinsic::aarch64_irg:
4532   case Intrinsic::aarch64_tagp:
4533     return true;
4534   case Intrinsic::ptrmask:
4535     return !MustPreserveNullness;
4536   default:
4537     return false;
4538   }
4539 }
4540 
4541 /// \p PN defines a loop-variant pointer to an object.  Check if the
4542 /// previous iteration of the loop was referring to the same object as \p PN.
4543 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4544                                          const LoopInfo *LI) {
4545   // Find the loop-defined value.
4546   Loop *L = LI->getLoopFor(PN->getParent());
4547   if (PN->getNumIncomingValues() != 2)
4548     return true;
4549 
4550   // Find the value from previous iteration.
4551   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4552   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4553     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4554   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4555     return true;
4556 
4557   // If a new pointer is loaded in the loop, the pointer references a different
4558   // object in every iteration.  E.g.:
4559   //    for (i)
4560   //       int *p = a[i];
4561   //       ...
4562   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4563     if (!L->isLoopInvariant(Load->getPointerOperand()))
4564       return false;
4565   return true;
4566 }
4567 
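// For example (illustrative IR): given
//   %p1 = getelementptr i8, ptr %p0, i64 4
//   %p2 = addrspacecast ptr %p1 to ptr addrspace(1)
// this walks through both steps and returns the underlying %p0, stopping
// after at most MaxLookup steps (0 means no limit).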
4568 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4569   if (!V->getType()->isPointerTy())
4570     return V;
4571   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4572     if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4573       V = GEP->getPointerOperand();
4574     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4575                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4576       V = cast<Operator>(V)->getOperand(0);
4577       if (!V->getType()->isPointerTy())
4578         return V;
4579     } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4580       if (GA->isInterposable())
4581         return V;
4582       V = GA->getAliasee();
4583     } else {
4584       if (auto *PHI = dyn_cast<PHINode>(V)) {
4585         // Look through single-arg phi nodes created by LCSSA.
4586         if (PHI->getNumIncomingValues() == 1) {
4587           V = PHI->getIncomingValue(0);
4588           continue;
4589         }
4590       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4591         // CaptureTracking knows about special capturing properties of some
4592         // intrinsics, like launder.invariant.group, that cannot be expressed
4593         // with attributes, such as returning a pointer that aliases an
4594         // argument. Because some analyses may assume that a nocapture pointer
4595         // is never returned from a special intrinsic (the function would
4596         // otherwise have to carry the 'returned' attribute), it is crucial to
4597         // use this helper, which is kept in sync with CaptureTracking.
4598         // Skipping it may cause miscompilations where two aliasing pointers
4599         // are assumed to be noalias.
4600         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4601           V = RP;
4602           continue;
4603         }
4604       }
4605 
4606       return V;
4607     }
4608     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4609   }
4610   return V;
4611 }
4612 
4613 void llvm::getUnderlyingObjects(const Value *V,
4614                                 SmallVectorImpl<const Value *> &Objects,
4615                                 LoopInfo *LI, unsigned MaxLookup) {
4616   SmallPtrSet<const Value *, 4> Visited;
4617   SmallVector<const Value *, 4> Worklist;
4618   Worklist.push_back(V);
4619   do {
4620     const Value *P = Worklist.pop_back_val();
4621     P = getUnderlyingObject(P, MaxLookup);
4622 
4623     if (!Visited.insert(P).second)
4624       continue;
4625 
4626     if (auto *SI = dyn_cast<SelectInst>(P)) {
4627       Worklist.push_back(SI->getTrueValue());
4628       Worklist.push_back(SI->getFalseValue());
4629       continue;
4630     }
4631 
4632     if (auto *PN = dyn_cast<PHINode>(P)) {
4633       // If this PHI changes the underlying object in every iteration of the
4634       // loop, don't look through it.  Consider:
4635       //   int **A;
4636       //   for (i) {
4637       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4638       //     Curr = A[i];
4639       //     *Prev, *Curr;
4640       //
4641       // Prev is tracking Curr one iteration behind so they refer to different
4642       // underlying objects.
4643       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4644           isSameUnderlyingObjectInLoop(PN, LI))
4645         append_range(Worklist, PN->incoming_values());
4646       continue;
4647     }
4648 
4649     Objects.push_back(P);
4650   } while (!Worklist.empty());
4651 }
4652 
4653 /// This is the function that does the work of looking through basic
4654 /// ptrtoint+arithmetic+inttoptr sequences.
4655 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4656   do {
4657     if (const Operator *U = dyn_cast<Operator>(V)) {
4658       // If we find a ptrtoint, we can transfer control back to the
4659       // regular getUnderlyingObjectFromInt.
4660       if (U->getOpcode() == Instruction::PtrToInt)
4661         return U->getOperand(0);
4662       // If we find an add of a constant, a multiplied value, or a phi, it's
4663       // likely that the other operand will lead us to the base
4664       // object. We don't have to worry about the case where the
4665       // object address is somehow being computed by the multiply,
4666       // because our callers only care when the result is an
4667       // identifiable object.
4668       if (U->getOpcode() != Instruction::Add ||
4669           (!isa<ConstantInt>(U->getOperand(1)) &&
4670            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4671            !isa<PHINode>(U->getOperand(1))))
4672         return V;
4673       V = U->getOperand(0);
4674     } else {
4675       return V;
4676     }
4677     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4678   } while (true);
4679 }
4680 
4681 /// This is a wrapper around getUnderlyingObjects and adds support for basic
4682 /// ptrtoint+arithmetic+inttoptr sequences.
4683 /// It returns false if an unidentified object is found by getUnderlyingObjects.
4684 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4685                                           SmallVectorImpl<Value *> &Objects) {
4686   SmallPtrSet<const Value *, 16> Visited;
4687   SmallVector<const Value *, 4> Working(1, V);
4688   do {
4689     V = Working.pop_back_val();
4690 
4691     SmallVector<const Value *, 4> Objs;
4692     getUnderlyingObjects(V, Objs);
4693 
4694     for (const Value *V : Objs) {
4695       if (!Visited.insert(V).second)
4696         continue;
4697       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4698         const Value *O =
4699           getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4700         if (O->getType()->isPointerTy()) {
4701           Working.push_back(O);
4702           continue;
4703         }
4704       }
4705       // If getUnderlyingObjects fails to find an identifiable object,
4706       // getUnderlyingObjectsForCodeGen also fails for safety.
4707       if (!isIdentifiedObject(V)) {
4708         Objects.clear();
4709         return false;
4710       }
4711       Objects.push_back(const_cast<Value *>(V));
4712     }
4713   } while (!Working.empty());
4714   return true;
4715 }
4716 
4717 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) {
4718   AllocaInst *Result = nullptr;
4719   SmallPtrSet<Value *, 4> Visited;
4720   SmallVector<Value *, 4> Worklist;
4721 
4722   auto AddWork = [&](Value *V) {
4723     if (Visited.insert(V).second)
4724       Worklist.push_back(V);
4725   };
4726 
4727   AddWork(V);
4728   do {
4729     V = Worklist.pop_back_val();
4730     assert(Visited.count(V));
4731 
4732     if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
4733       if (Result && Result != AI)
4734         return nullptr;
4735       Result = AI;
4736     } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
4737       AddWork(CI->getOperand(0));
4738     } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
4739       for (Value *IncValue : PN->incoming_values())
4740         AddWork(IncValue);
4741     } else if (auto *SI = dyn_cast<SelectInst>(V)) {
4742       AddWork(SI->getTrueValue());
4743       AddWork(SI->getFalseValue());
4744     } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) {
4745       if (OffsetZero && !GEP->hasAllZeroIndices())
4746         return nullptr;
4747       AddWork(GEP->getPointerOperand());
4748     } else if (CallBase *CB = dyn_cast<CallBase>(V)) {
4749       Value *Returned = CB->getReturnedArgOperand();
4750       if (Returned)
4751         AddWork(Returned);
4752       else
4753         return nullptr;
4754     } else {
4755       return nullptr;
4756     }
4757   } while (!Worklist.empty());
4758 
4759   return Result;
4760 }
4761 
4762 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4763     const Value *V, bool AllowLifetime, bool AllowDroppable) {
4764   for (const User *U : V->users()) {
4765     const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
4766     if (!II)
4767       return false;
4768 
4769     if (AllowLifetime && II->isLifetimeStartOrEnd())
4770       continue;
4771 
4772     if (AllowDroppable && II->isDroppable())
4773       continue;
4774 
4775     return false;
4776   }
4777   return true;
4778 }
4779 
4780 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
4781   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4782       V, /* AllowLifetime */ true, /* AllowDroppable */ false);
4783 }
4784 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) {
4785   return onlyUsedByLifetimeMarkersOrDroppableInstsHelper(
4786       V, /* AllowLifetime */ true, /* AllowDroppable */ true);
4787 }
4788 
4789 bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
4790   if (!LI.isUnordered())
4791     return true;
4792   const Function &F = *LI.getFunction();
4793   // Speculative load may create a race that did not exist in the source.
4794   return F.hasFnAttribute(Attribute::SanitizeThread) ||
4795     // Speculative load may load data from dirty regions.
4796     F.hasFnAttribute(Attribute::SanitizeAddress) ||
4797     F.hasFnAttribute(Attribute::SanitizeHWAddress);
4798 }
4799 
4800 bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst,
4801                                         const Instruction *CtxI,
4802                                         AssumptionCache *AC,
4803                                         const DominatorTree *DT,
4804                                         const TargetLibraryInfo *TLI) {
4805   return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI,
4806                                                 AC, DT, TLI);
4807 }
4808 
4809 bool llvm::isSafeToSpeculativelyExecuteWithOpcode(
4810     unsigned Opcode, const Instruction *Inst, const Instruction *CtxI,
4811     AssumptionCache *AC, const DominatorTree *DT,
4812     const TargetLibraryInfo *TLI) {
4813 #ifndef NDEBUG
4814   if (Inst->getOpcode() != Opcode) {
4815     // Check that the operands are actually compatible with the Opcode override.
4816     auto hasEqualReturnAndLeadingOperandTypes =
4817         [](const Instruction *Inst, unsigned NumLeadingOperands) {
4818           if (Inst->getNumOperands() < NumLeadingOperands)
4819             return false;
4820           const Type *ExpectedType = Inst->getType();
4821           for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
4822             if (Inst->getOperand(ItOp)->getType() != ExpectedType)
4823               return false;
4824           return true;
4825         };
4826     assert(!Instruction::isBinaryOp(Opcode) ||
4827            hasEqualReturnAndLeadingOperandTypes(Inst, 2));
4828     assert(!Instruction::isUnaryOp(Opcode) ||
4829            hasEqualReturnAndLeadingOperandTypes(Inst, 1));
4830   }
4831 #endif
4832 
4833   switch (Opcode) {
4834   default:
4835     return true;
4836   case Instruction::UDiv:
4837   case Instruction::URem: {
4838     // x / y is undefined if y == 0.
4839     const APInt *V;
4840     if (match(Inst->getOperand(1), m_APInt(V)))
4841       return *V != 0;
4842     return false;
4843   }
4844   case Instruction::SDiv:
4845   case Instruction::SRem: {
4846     // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
4847     const APInt *Numerator, *Denominator;
4848     if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4849       return false;
4850     // We cannot hoist this division if the denominator is 0.
4851     if (*Denominator == 0)
4852       return false;
4853     // It's safe to hoist if the denominator is not 0 or -1.
4854     if (!Denominator->isAllOnes())
4855       return true;
4856     // At this point we know that the denominator is -1.  It is safe to hoist as
4857     // long as we know that the numerator is not INT_MIN.
4858     if (match(Inst->getOperand(0), m_APInt(Numerator)))
4859       return !Numerator->isMinSignedValue();
4860     // The numerator *might* be MinSignedValue.
4861     return false;
4862   }
4863   case Instruction::Load: {
4864     const LoadInst *LI = dyn_cast<LoadInst>(Inst);
4865     if (!LI)
4866       return false;
4867     if (mustSuppressSpeculation(*LI))
4868       return false;
4869     const DataLayout &DL = LI->getModule()->getDataLayout();
4870     return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
4871                                               LI->getType(), LI->getAlign(), DL,
4872                                               CtxI, AC, DT, TLI);
4873   }
4874   case Instruction::Call: {
4875     auto *CI = dyn_cast<const CallInst>(Inst);
4876     if (!CI)
4877       return false;
4878     const Function *Callee = CI->getCalledFunction();
4879 
4880     // The called function could have undefined behavior or side-effects, even
4881     // if marked readnone nounwind.
4882     return Callee && Callee->isSpeculatable();
4883   }
4884   case Instruction::VAArg:
4885   case Instruction::Alloca:
4886   case Instruction::Invoke:
4887   case Instruction::CallBr:
4888   case Instruction::PHI:
4889   case Instruction::Store:
4890   case Instruction::Ret:
4891   case Instruction::Br:
4892   case Instruction::IndirectBr:
4893   case Instruction::Switch:
4894   case Instruction::Unreachable:
4895   case Instruction::Fence:
4896   case Instruction::AtomicRMW:
4897   case Instruction::AtomicCmpXchg:
4898   case Instruction::LandingPad:
4899   case Instruction::Resume:
4900   case Instruction::CatchSwitch:
4901   case Instruction::CatchPad:
4902   case Instruction::CatchRet:
4903   case Instruction::CleanupPad:
4904   case Instruction::CleanupRet:
4905     return false; // Misc instructions which have effects
4906   }
4907 }
4908 
4909 bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
4910   if (I.mayReadOrWriteMemory())
4911     // Memory dependency possible
4912     return true;
4913   if (!isSafeToSpeculativelyExecute(&I))
4914     // Can't move above a maythrow call or infinite loop.  Or if an
4915     // inalloca alloca, above a stacksave call.
4916     return true;
4917   if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4918     // 1) Can't reorder two inf-loop calls, even if readonly
4919     // 2) Also can't reorder an inf-loop call below an instruction which isn't
4920     //    safe to speculatively execute.  (Inverse of above)
4921     return true;
4922   return false;
4923 }
4924 
4925 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4926 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4927   switch (OR) {
4928     case ConstantRange::OverflowResult::MayOverflow:
4929       return OverflowResult::MayOverflow;
4930     case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4931       return OverflowResult::AlwaysOverflowsLow;
4932     case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4933       return OverflowResult::AlwaysOverflowsHigh;
4934     case ConstantRange::OverflowResult::NeverOverflows:
4935       return OverflowResult::NeverOverflows;
4936   }
4937   llvm_unreachable("Unknown OverflowResult");
4938 }
4939 
4940 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4941 static ConstantRange computeConstantRangeIncludingKnownBits(
4942     const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4943     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4944     OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4945   KnownBits Known = computeKnownBits(
4946       V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4947   ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4948   ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4949   ConstantRange::PreferredRangeType RangeType =
4950       ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4951   return CR1.intersectWith(CR2, RangeType);
4952 }
4953 
4954 OverflowResult llvm::computeOverflowForUnsignedMul(
4955     const Value *LHS, const Value *RHS, const DataLayout &DL,
4956     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4957     bool UseInstrInfo) {
4958   KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4959                                         nullptr, UseInstrInfo);
4960   KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4961                                         nullptr, UseInstrInfo);
4962   ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4963   ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4964   return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4965 }
4966 
4967 OverflowResult
4968 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4969                                   const DataLayout &DL, AssumptionCache *AC,
4970                                   const Instruction *CxtI,
4971                                   const DominatorTree *DT, bool UseInstrInfo) {
4972   // Multiplying a value with n significant bits by a value with m significant
4973   // bits yields at most n + m significant bits. If that total does not exceed the
4974   // result bit width (minus 1), there is no overflow.
4975   // This means if we have enough leading sign bits in the operands
4976   // we can guarantee that the result does not overflow.
4977   // Ref: "Hacker's Delight" by Henry Warren
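  // For example (illustrative): i16 operands with 10 and 9 sign bits have
  // at most (16 - 10 + 1) + (16 - 9 + 1) = 15 significant bits in their
  // product, which does not exceed 16 - 1, so the multiply cannot overflow.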
4978   unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4979 
4980   // Note that underestimating the number of sign bits gives a more
4981   // conservative answer.
4982   unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4983                       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4984 
4985   // First handle the easy case: if we have enough sign bits there's
4986   // definitely no overflow.
4987   if (SignBits > BitWidth + 1)
4988     return OverflowResult::NeverOverflows;
4989 
4990   // There are two ambiguous cases where there can be no overflow:
4991   //   SignBits == BitWidth + 1    and
4992   //   SignBits == BitWidth
4993   // The second case is difficult to check, therefore we only handle the
4994   // first case.
4995   if (SignBits == BitWidth + 1) {
4996     // It overflows only when both arguments are negative and the true
4997     // product is exactly the minimum negative number.
4998     // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4999     // For simplicity we just check if at least one side is not negative.
5000     KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
5001                                           nullptr, UseInstrInfo);
5002     KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
5003                                           nullptr, UseInstrInfo);
5004     if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
5005       return OverflowResult::NeverOverflows;
5006   }
5007   return OverflowResult::MayOverflow;
5008 }
5009 
5010 OverflowResult llvm::computeOverflowForUnsignedAdd(
5011     const Value *LHS, const Value *RHS, const DataLayout &DL,
5012     AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
5013     bool UseInstrInfo) {
5014   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5015       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
5016       nullptr, UseInstrInfo);
5017   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5018       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
5019       nullptr, UseInstrInfo);
5020   return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
5021 }
5022 
5023 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
5024                                                   const Value *RHS,
5025                                                   const AddOperator *Add,
5026                                                   const DataLayout &DL,
5027                                                   AssumptionCache *AC,
5028                                                   const Instruction *CxtI,
5029                                                   const DominatorTree *DT) {
5030   if (Add && Add->hasNoSignedWrap()) {
5031     return OverflowResult::NeverOverflows;
5032   }
5033 
5034   // If LHS and RHS each have at least two sign bits, the addition will look
5035   // like
5036   //
5037   // XX..... +
5038   // YY.....
5039   //
5040   // If the carry into the most significant position is 0, X and Y can't both
5041   // be 1 and therefore the carry out of the addition is also 0.
5042   //
5043   // If the carry into the most significant position is 1, X and Y can't both
5044   // be 0 and therefore the carry out of the addition is also 1.
5045   //
5046   // Since the carry into the most significant position is always equal to
5047   // the carry out of the addition, there is no signed overflow.
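  // For example (illustrative): i8 values with at least two sign bits lie
  // in [-64, 63], so their sum lies in [-128, 126] and always fits in i8.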
5048   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
5049       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
5050     return OverflowResult::NeverOverflows;
5051 
5052   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5053       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5054   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5055       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5056   OverflowResult OR =
5057       mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
5058   if (OR != OverflowResult::MayOverflow)
5059     return OR;
5060 
5061   // The remaining code needs Add to be available. Return early if it is not.
5062   if (!Add)
5063     return OverflowResult::MayOverflow;
5064 
5065   // If the sign of Add is the same as at least one of the operands, this add
5066   // CANNOT overflow. If this can be determined from the known bits of the
5067   // operands the above signedAddMayOverflow() check will have already done so.
5068   // The only other way to improve on the known bits is from an assumption, so
5069   // call computeKnownBitsFromAssume() directly.
5070   bool LHSOrRHSKnownNonNegative =
5071       (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative());
5072   bool LHSOrRHSKnownNegative =
5073       (LHSRange.isAllNegative() || RHSRange.isAllNegative());
5074   if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
5075     KnownBits AddKnown(LHSRange.getBitWidth());
5076     computeKnownBitsFromAssume(
5077         Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true));
5078     if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) ||
5079         (AddKnown.isNegative() && LHSOrRHSKnownNegative))
5080       return OverflowResult::NeverOverflows;
5081   }
5082 
5083   return OverflowResult::MayOverflow;
5084 }
5085 
5086 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS,
5087                                                    const Value *RHS,
5088                                                    const DataLayout &DL,
5089                                                    AssumptionCache *AC,
5090                                                    const Instruction *CxtI,
5091                                                    const DominatorTree *DT) {
5092   // X - (X % ?)
5093   // The remainder of a value can't have greater magnitude than itself,
5094   // so the subtraction can't overflow.
5095 
5096   // X - (X -nuw ?)
5097   // In the minimal case, this would simplify to "?", so there's no subtract
5098   // at all. But if this analysis is used to peek through casts, for example,
5099   // then determining no-overflow may allow other transforms.
5100 
5101   // TODO: There are other patterns like this.
5102   //       See simplifyICmpWithBinOpOnLHS() for candidates.
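  // For example (illustrative IR):
  //   %r = urem i32 %x, %y
  //   %s = sub i32 %x, %r    ; cannot wrap, since (%x % %y) <= %x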
5103   if (match(RHS, m_URem(m_Specific(LHS), m_Value())) ||
5104       match(RHS, m_NUWSub(m_Specific(LHS), m_Value())))
5105     if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
5106       return OverflowResult::NeverOverflows;
5107 
5108   // Checking for conditions implied by dominating conditions may be expensive.
5109   // Limit it to usub_with_overflow calls for now.
5110   if (match(CxtI,
5111             m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value())))
5112     if (auto C =
5113             isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) {
5114       if (*C)
5115         return OverflowResult::NeverOverflows;
5116       return OverflowResult::AlwaysOverflowsLow;
5117     }
5118   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5119       LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
5120   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5121       RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT);
5122   return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange));
5123 }
5124 
5125 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS,
5126                                                  const Value *RHS,
5127                                                  const DataLayout &DL,
5128                                                  AssumptionCache *AC,
5129                                                  const Instruction *CxtI,
5130                                                  const DominatorTree *DT) {
5131   // X - (X % ?)
5132   // The remainder of a value can't have greater magnitude than itself,
5133   // so the subtraction can't overflow.
5134 
5135   // X - (X -nsw ?)
5136   // In the minimal case, this would simplify to "?", so there's no subtract
5137   // at all. But if this analysis is used to peek through casts, for example,
5138   // then determining no-overflow may allow other transforms.
5139   if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) ||
5140       match(RHS, m_NSWSub(m_Specific(LHS), m_Value())))
5141     if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT))
5142       return OverflowResult::NeverOverflows;
5143 
5144   // If LHS and RHS each have at least two sign bits, the subtraction
5145   // cannot overflow.
5146   if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
5147       ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
5148     return OverflowResult::NeverOverflows;
5149 
5150   ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5151       LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5152   ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5153       RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5154   return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
5155 }
5156 
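// For example (illustrative IR): in
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %math = extractvalue { i32, i1 } %s, 0
//   %ov = extractvalue { i32, i1 } %s, 1
//   br i1 %ov, label %trap, label %ok
// if every use of %math is dominated by the edge into %ok, the add is
// known not to have wrapped.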
5157 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
5158                                      const DominatorTree &DT) {
5159   SmallVector<const BranchInst *, 2> GuardingBranches;
5160   SmallVector<const ExtractValueInst *, 2> Results;
5161 
5162   for (const User *U : WO->users()) {
5163     if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
5164       assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
5165 
5166       if (EVI->getIndices()[0] == 0)
5167         Results.push_back(EVI);
5168       else {
5169         assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
5170 
5171         for (const auto *U : EVI->users())
5172           if (const auto *B = dyn_cast<BranchInst>(U)) {
5173             assert(B->isConditional() && "How else is it using an i1?");
5174             GuardingBranches.push_back(B);
5175           }
5176       }
5177     } else {
5178       // We are using the aggregate directly in a way we don't want to analyze
5179       // here (storing it to a global, say).
5180       return false;
5181     }
5182   }
5183 
5184   auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
5185     BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
5186     if (!NoWrapEdge.isSingleEdge())
5187       return false;
5188 
5189     // Check if all users of the add are provably no-wrap.
5190     for (const auto *Result : Results) {
5191       // If the extractvalue itself is not executed on overflow, then we don't
5192       // need to check each use separately, since domination is transitive.
5193       if (DT.dominates(NoWrapEdge, Result->getParent()))
5194         continue;
5195 
5196       for (const auto &RU : Result->uses())
5197         if (!DT.dominates(NoWrapEdge, RU))
5198           return false;
5199     }
5200 
5201     return true;
5202   };
5203 
5204   return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
5205 }
5206 
5207 /// Shifts return poison if the shift amount is not less than the bitwidth.
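/// For example (illustrative): (shl i32 %x, 35) is poison, and a vector
/// shift like (lshr <2 x i32> %v, <i32 1, i32 33>) has a poison lane, so
/// only constants with all amounts in range are accepted.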
5208 static bool shiftAmountKnownInRange(const Value *ShiftAmount) {
5209   auto *C = dyn_cast<Constant>(ShiftAmount);
5210   if (!C)
5211     return false;
5212 
5213   // Shifts return poison if the shift amount is not less than the bitwidth.
5214   SmallVector<const Constant *, 4> ShiftAmounts;
5215   if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) {
5216     unsigned NumElts = FVTy->getNumElements();
5217     for (unsigned i = 0; i < NumElts; ++i)
5218       ShiftAmounts.push_back(C->getAggregateElement(i));
5219   } else if (isa<ScalableVectorType>(C->getType()))
5220     return false; // Can't tell, just return false to be safe
5221   else
5222     ShiftAmounts.push_back(C);
5223 
5224   bool Safe = llvm::all_of(ShiftAmounts, [](const Constant *C) {
5225     auto *CI = dyn_cast_or_null<ConstantInt>(C);
5226     return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth());
5227   });
5228 
5229   return Safe;
5230 }
5231 
5232 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
5233                                    bool ConsiderFlagsAndMetadata) {
5234 
5235   if (ConsiderFlagsAndMetadata && Op->hasPoisonGeneratingFlagsOrMetadata())
5236     return true;
5237 
5238   unsigned Opcode = Op->getOpcode();
5239 
5240   // Check whether opcode is a poison/undef-generating operation
5241   switch (Opcode) {
5242   case Instruction::Shl:
5243   case Instruction::AShr:
5244   case Instruction::LShr:
5245     return !shiftAmountKnownInRange(Op->getOperand(1));
5246   case Instruction::FPToSI:
5247   case Instruction::FPToUI:
5248     // fptosi/ui yields poison if the resulting value does not fit in the
5249     // destination type.
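    // (For example, fptosi of 1.0e10 to i16 does not fit and yields poison;
    // an illustrative sketch.)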
5250     return true;
5251   case Instruction::Call:
5252     if (auto *II = dyn_cast<IntrinsicInst>(Op)) {
5253       switch (II->getIntrinsicID()) {
5254       // TODO: Add more intrinsics.
5255       case Intrinsic::ctlz:
5256       case Intrinsic::cttz:
5257       case Intrinsic::abs:
5258         if (cast<ConstantInt>(II->getArgOperand(1))->isNullValue())
5259           return false;
5260         break;
5261       case Intrinsic::ctpop:
5262       case Intrinsic::bswap:
5263       case Intrinsic::bitreverse:
5264       case Intrinsic::fshl:
5265       case Intrinsic::fshr:
5266       case Intrinsic::smax:
5267       case Intrinsic::smin:
5268       case Intrinsic::umax:
5269       case Intrinsic::umin:
5270       case Intrinsic::ptrmask:
5271       case Intrinsic::fptoui_sat:
5272       case Intrinsic::fptosi_sat:
5273       case Intrinsic::sadd_with_overflow:
5274       case Intrinsic::ssub_with_overflow:
5275       case Intrinsic::smul_with_overflow:
5276       case Intrinsic::uadd_with_overflow:
5277       case Intrinsic::usub_with_overflow:
5278       case Intrinsic::umul_with_overflow:
5279       case Intrinsic::sadd_sat:
5280       case Intrinsic::uadd_sat:
5281       case Intrinsic::ssub_sat:
5282       case Intrinsic::usub_sat:
5283         return false;
5284       case Intrinsic::sshl_sat:
5285       case Intrinsic::ushl_sat:
5286         return !shiftAmountKnownInRange(II->getArgOperand(1));
5287       case Intrinsic::fma:
5288       case Intrinsic::fmuladd:
5289       case Intrinsic::sqrt:
5290       case Intrinsic::powi:
5291       case Intrinsic::sin:
5292       case Intrinsic::cos:
5293       case Intrinsic::pow:
5294       case Intrinsic::log:
5295       case Intrinsic::log10:
5296       case Intrinsic::log2:
5297       case Intrinsic::exp:
5298       case Intrinsic::exp2:
5299       case Intrinsic::fabs:
5300       case Intrinsic::copysign:
5301       case Intrinsic::floor:
5302       case Intrinsic::ceil:
5303       case Intrinsic::trunc:
5304       case Intrinsic::rint:
5305       case Intrinsic::nearbyint:
5306       case Intrinsic::round:
5307       case Intrinsic::roundeven:
5308       case Intrinsic::fptrunc_round:
5309       case Intrinsic::canonicalize:
5310       case Intrinsic::arithmetic_fence:
5311       case Intrinsic::minnum:
5312       case Intrinsic::maxnum:
5313       case Intrinsic::minimum:
5314       case Intrinsic::maximum:
5315       case Intrinsic::is_fpclass:
5316         return false;
5317       case Intrinsic::lround:
5318       case Intrinsic::llround:
5319       case Intrinsic::lrint:
5320       case Intrinsic::llrint:
5321         // If the value doesn't fit, an unspecified value is returned (but this
5322         // is not poison).
5323         return false;
5324       }
5325     }
5326     [[fallthrough]];
5327   case Instruction::CallBr:
5328   case Instruction::Invoke: {
5329     const auto *CB = cast<CallBase>(Op);
5330     return !CB->hasRetAttr(Attribute::NoUndef);
5331   }
5332   case Instruction::InsertElement:
5333   case Instruction::ExtractElement: {
5334     // If the index exceeds the length of the vector, the result is poison.
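    // (For example, extractelement <4 x i32> %v, i64 7 is poison; an
    // illustrative sketch.)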
5335     auto *VTy = cast<VectorType>(Op->getOperand(0)->getType());
5336     unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1;
5337     auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp));
5338     if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue()))
5339       return true;
5340     return false;
5341   }
5342   case Instruction::ShuffleVector: {
5343     // shufflevector may return undef if the mask contains an undef element.
5344     if (PoisonOnly)
5345       return false;
5346     ArrayRef<int> Mask = isa<ConstantExpr>(Op)
5347                              ? cast<ConstantExpr>(Op)->getShuffleMask()
5348                              : cast<ShuffleVectorInst>(Op)->getShuffleMask();
5349     return is_contained(Mask, UndefMaskElem);
5350   }
5351   case Instruction::FNeg:
5352   case Instruction::PHI:
5353   case Instruction::Select:
5354   case Instruction::URem:
5355   case Instruction::SRem:
5356   case Instruction::ExtractValue:
5357   case Instruction::InsertValue:
5358   case Instruction::Freeze:
5359   case Instruction::ICmp:
5360   case Instruction::FCmp:
5361   case Instruction::FAdd:
5362   case Instruction::FSub:
5363   case Instruction::FMul:
5364   case Instruction::FDiv:
5365   case Instruction::FRem:
5366     return false;
5367   case Instruction::GetElementPtr:
5368     // inbounds is handled above
5369     // TODO: what about inrange on constexpr?
5370     return false;
5371   default: {
5372     const auto *CE = dyn_cast<ConstantExpr>(Op);
5373     if (isa<CastInst>(Op) || (CE && CE->isCast()))
5374       return false;
5375     else if (Instruction::isBinaryOp(Opcode))
5376       return false;
5377     // Be conservative and return true.
5378     return true;
5379   }
5380   }
5381 }
5382 
5383 bool llvm::canCreateUndefOrPoison(const Operator *Op,
5384                                   bool ConsiderFlagsAndMetadata) {
5385   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false,
5386                                   ConsiderFlagsAndMetadata);
5387 }
5388 
5389 bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata) {
5390   return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true,
5391                                   ConsiderFlagsAndMetadata);
5392 }
5393 
5394 static bool directlyImpliesPoison(const Value *ValAssumedPoison,
5395                                   const Value *V, unsigned Depth) {
5396   if (ValAssumedPoison == V)
5397     return true;
5398 
5399   const unsigned MaxDepth = 2;
5400   if (Depth >= MaxDepth)
5401     return false;
5402 
5403   if (const auto *I = dyn_cast<Instruction>(V)) {
5404     if (any_of(I->operands(), [=](const Use &Op) {
5405           return propagatesPoison(Op) &&
5406                  directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1);
5407         }))
5408       return true;
5409 
5410     // V  = extractvalue V0, idx
5411     // V2 = extractvalue V0, idx2
5412     // V0's elements are either all poison or all non-poison (e.g., add_with_overflow).
5413     const WithOverflowInst *II;
5414     if (match(I, m_ExtractValue(m_WithOverflowInst(II))) &&
5415         (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) ||
5416          llvm::is_contained(II->args(), ValAssumedPoison)))
5417       return true;
5418   }
5419   return false;
5420 }
5421 
5422 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V,
5423                           unsigned Depth) {
5424   if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison))
5425     return true;
5426 
5427   if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0))
5428     return true;
5429 
5430   const unsigned MaxDepth = 2;
5431   if (Depth >= MaxDepth)
5432     return false;
5433 
5434   const auto *I = dyn_cast<Instruction>(ValAssumedPoison);
5435   if (I && !canCreatePoison(cast<Operator>(I))) {
5436     return all_of(I->operands(), [=](const Value *Op) {
5437       return impliesPoison(Op, V, Depth + 1);
5438     });
5439   }
5440   return false;
5441 }
5442 
5443 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) {
5444   return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0);
5445 }
5446 
5447 static bool programUndefinedIfUndefOrPoison(const Value *V,
5448                                             bool PoisonOnly);
5449 
5450 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V,
5451                                              AssumptionCache *AC,
5452                                              const Instruction *CtxI,
5453                                              const DominatorTree *DT,
5454                                              unsigned Depth, bool PoisonOnly) {
5455   if (Depth >= MaxAnalysisRecursionDepth)
5456     return false;
5457 
5458   if (isa<MetadataAsValue>(V))
5459     return false;
5460 
5461   if (const auto *A = dyn_cast<Argument>(V)) {
5462     if (A->hasAttribute(Attribute::NoUndef))
5463       return true;
5464   }
5465 
5466   if (auto *C = dyn_cast<Constant>(V)) {
5467     if (isa<UndefValue>(C))
5468       return PoisonOnly && !isa<PoisonValue>(C);
5469 
5470     if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) ||
5471         isa<ConstantPointerNull>(C) || isa<Function>(C))
5472       return true;
5473 
5474     if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C))
5475       return (PoisonOnly ? !C->containsPoisonElement()
5476                          : !C->containsUndefOrPoisonElement()) &&
5477              !C->containsConstantExpression();
5478   }
5479 
5480   // Strip cast operations from a pointer value.
5481   // Note that stripPointerCastsSameRepresentation can strip off getelementptr
5482   // inbounds with zero offset. To guarantee that the result isn't poison, the
5483   // stripped pointer is checked: it has to point into an allocated object
5484   // or be `null` to ensure that `inbounds` getelementptrs with a zero
5485   // offset cannot produce poison.
5486   // It can also strip off addrspacecasts that do not change the bit
5487   // representation; we consider such an addrspacecast equivalent to a no-op.
5488   auto *StrippedV = V->stripPointerCastsSameRepresentation();
5489   if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5490       isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5491     return true;
5492 
5493   auto OpCheck = [&](const Value *V) {
5494     return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5495                                             PoisonOnly);
5496   };
5497 
5498   if (auto *Opr = dyn_cast<Operator>(V)) {
5499     // If the value is a freeze instruction, then it can never
5500     // be undef or poison.
5501     if (isa<FreezeInst>(V))
5502       return true;
5503 
5504     if (const auto *CB = dyn_cast<CallBase>(V)) {
5505       if (CB->hasRetAttr(Attribute::NoUndef))
5506         return true;
5507     }
5508 
5509     if (const auto *PN = dyn_cast<PHINode>(V)) {
5510       unsigned Num = PN->getNumIncomingValues();
5511       bool IsWellDefined = true;
5512       for (unsigned i = 0; i < Num; ++i) {
5513         auto *TI = PN->getIncomingBlock(i)->getTerminator();
5514         if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5515                                               DT, Depth + 1, PoisonOnly)) {
5516           IsWellDefined = false;
5517           break;
5518         }
5519       }
5520       if (IsWellDefined)
5521         return true;
5522     } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5523       return true;
5524   }
5525 
5526   if (auto *I = dyn_cast<LoadInst>(V))
5527     if (I->hasMetadata(LLVMContext::MD_noundef) ||
5528         I->hasMetadata(LLVMContext::MD_dereferenceable) ||
5529         I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
5530       return true;
5531 
5532   if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5533     return true;
5534 
5535   // CtxI may be null or a cloned instruction.
5536   if (!CtxI || !CtxI->getParent() || !DT)
5537     return false;
5538 
5539   auto *DNode = DT->getNode(CtxI->getParent());
5540   if (!DNode)
5541     // Unreachable block
5542     return false;
5543 
5544   // If V is used as a branch condition before reaching CtxI, V cannot be
5545   // undef or poison.
5546   //   br V, BB1, BB2
5547   // BB1:
5548   //   CtxI ; V cannot be undef or poison here
5549   auto *Dominator = DNode->getIDom();
5550   while (Dominator) {
5551     auto *TI = Dominator->getBlock()->getTerminator();
5552 
5553     Value *Cond = nullptr;
5554     if (auto BI = dyn_cast_or_null<BranchInst>(TI)) {
5555       if (BI->isConditional())
5556         Cond = BI->getCondition();
5557     } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) {
5558       Cond = SI->getCondition();
5559     }
5560 
5561     if (Cond) {
5562       if (Cond == V)
5563         return true;
5564       else if (PoisonOnly && isa<Operator>(Cond)) {
5565         // For poison, we can analyze further
5566         auto *Opr = cast<Operator>(Cond);
5567         if (any_of(Opr->operands(),
5568                    [V](const Use &U) { return V == U && propagatesPoison(U); }))
5569           return true;
5570       }
5571     }
5572 
5573     Dominator = Dominator->getIDom();
5574   }
5575 
5576   if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC))
5577     return true;
5578 
5579   return false;
5580 }
5581 
5582 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC,
5583                                             const Instruction *CtxI,
5584                                             const DominatorTree *DT,
5585                                             unsigned Depth) {
5586   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false);
5587 }
5588 
5589 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC,
5590                                      const Instruction *CtxI,
5591                                      const DominatorTree *DT, unsigned Depth) {
5592   return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true);
5593 }
5594 
5595 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add,
5596                                                  const DataLayout &DL,
5597                                                  AssumptionCache *AC,
5598                                                  const Instruction *CxtI,
5599                                                  const DominatorTree *DT) {
5600   return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1),
5601                                        Add, DL, AC, CxtI, DT);
5602 }
5603 
5604 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS,
5605                                                  const Value *RHS,
5606                                                  const DataLayout &DL,
5607                                                  AssumptionCache *AC,
5608                                                  const Instruction *CxtI,
5609                                                  const DominatorTree *DT) {
5610   return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT);
5611 }
5612 
5613 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) {
5614   // Note: An atomic operation isn't guaranteed to return in a reasonable amount
5615   // of time because it's possible for another thread to interfere with it for an
5616   // arbitrary length of time, but programs aren't allowed to rely on that.
5617 
5618   // If there is no successor, then execution can't transfer to it.
5619   if (isa<ReturnInst>(I))
5620     return false;
5621   if (isa<UnreachableInst>(I))
5622     return false;
5623 
5624   // Note: Do not add new checks here; instead, change Instruction::mayThrow or
5625   // Instruction::willReturn.
5626   //
5627   // FIXME: Move this check into Instruction::willReturn.
5628   if (isa<CatchPadInst>(I)) {
5629     switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) {
5630     default:
5631       // A catchpad may invoke exception object constructors and such, which
5632       // in some languages can be arbitrary code, so be conservative by default.
5633       return false;
5634     case EHPersonality::CoreCLR:
5635       // For CoreCLR, it just involves a type test.
5636       return true;
5637     }
5638   }
5639 
5640   // An instruction that returns without throwing must transfer control flow
5641   // to a successor.
5642   return !I->mayThrow() && I->willReturn();
5643 }
5644 
5645 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5646   // TODO: This is slightly conservative for invoke instructions, since exiting
5647   // via an exception *is* normal control flow for them.
5648   for (const Instruction &I : *BB)
5649     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5650       return false;
5651   return true;
5652 }
5653 
5654 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5655    BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
5656    unsigned ScanLimit) {
5657   return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
5658                                                     ScanLimit);
5659 }
5660 
5661 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5662    iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
5663   assert(ScanLimit && "scan limit must be non-zero");
5664   for (const Instruction &I : Range) {
5665     if (isa<DbgInfoIntrinsic>(I))
5666       continue;
5667     if (--ScanLimit == 0)
5668       return false;
5669     if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5670       return false;
5671   }
5672   return true;
5673 }
5674 
5675 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5676                                                   const Loop *L) {
5677   // The loop header is guaranteed to be executed for every iteration.
5678   //
5679   // FIXME: Relax this constraint to cover all basic blocks that are
5680   // guaranteed to be executed at every iteration.
5681   if (I->getParent() != L->getHeader()) return false;
5682 
5683   for (const Instruction &LI : *L->getHeader()) {
5684     if (&LI == I) return true;
5685     if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5686   }
5687   llvm_unreachable("Instruction not contained in its own parent basic block.");
5688 }
5689 
5690 bool llvm::propagatesPoison(const Use &PoisonOp) {
5691   const Operator *I = cast<Operator>(PoisonOp.getUser());
5692   switch (I->getOpcode()) {
5693   case Instruction::Freeze:
5694   case Instruction::PHI:
5695   case Instruction::Invoke:
5696     return false;
5697   case Instruction::Select:
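    // Only the condition operand propagates poison: a poison true/false arm
    // may be masked off by the condition, so it is not guaranteed to
    // propagate.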
5698     return PoisonOp.getOperandNo() == 0;
5699   case Instruction::Call:
5700     if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5701       switch (II->getIntrinsicID()) {
5702       // TODO: Add more intrinsics.
5703       case Intrinsic::sadd_with_overflow:
5704       case Intrinsic::ssub_with_overflow:
5705       case Intrinsic::smul_with_overflow:
5706       case Intrinsic::uadd_with_overflow:
5707       case Intrinsic::usub_with_overflow:
5708       case Intrinsic::umul_with_overflow:
5709         // If an input is a vector containing a poison element, the
5710         // corresponding lanes of the two output vectors (the calculated
5711         // results and the overflow bits) are poison.
5712         return true;
5713       case Intrinsic::ctpop:
5714         return true;
5715       }
5716     }
5717     return false;
5718   case Instruction::ICmp:
5719   case Instruction::FCmp:
5720   case Instruction::GetElementPtr:
5721     return true;
5722   default:
5723     if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5724       return true;
5725 
5726     // Be conservative and return false.
5727     return false;
5728   }
5729 }
5730 
5731 void llvm::getGuaranteedWellDefinedOps(
5732     const Instruction *I, SmallVectorImpl<const Value *> &Operands) {
5733   switch (I->getOpcode()) {
5734     case Instruction::Store:
5735       Operands.push_back(cast<StoreInst>(I)->getPointerOperand());
5736       break;
5737 
5738     case Instruction::Load:
5739       Operands.push_back(cast<LoadInst>(I)->getPointerOperand());
5740       break;
5741 
5742     // Since the dereferenceable attribute implies noundef, atomic operations
5743     // implicitly have noundef pointers as well.
5744     case Instruction::AtomicCmpXchg:
5745       Operands.push_back(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5746       break;
5747 
5748     case Instruction::AtomicRMW:
5749       Operands.push_back(cast<AtomicRMWInst>(I)->getPointerOperand());
5750       break;
5751 
5752     case Instruction::Call:
5753     case Instruction::Invoke: {
5754       const CallBase *CB = cast<CallBase>(I);
5755       if (CB->isIndirectCall())
5756         Operands.push_back(CB->getCalledOperand());
5757       for (unsigned i = 0; i < CB->arg_size(); ++i) {
5758         if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5759             CB->paramHasAttr(i, Attribute::Dereferenceable))
5760           Operands.push_back(CB->getArgOperand(i));
5761       }
5762       break;
5763     }
5764     case Instruction::Ret:
5765       if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
5766         Operands.push_back(I->getOperand(0));
5767       break;
5768     case Instruction::Switch:
5769       Operands.push_back(cast<SwitchInst>(I)->getCondition());
5770       break;
5771     case Instruction::Br: {
5772       auto *BR = cast<BranchInst>(I);
5773       if (BR->isConditional())
5774         Operands.push_back(BR->getCondition());
5775       break;
5776     }
5777     default:
5778       break;
5779   }
5780 }
5781 
5782 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5783                                      SmallVectorImpl<const Value *> &Operands) {
5784   getGuaranteedWellDefinedOps(I, Operands);
5785   switch (I->getOpcode()) {
5786   // Divisors of these operations are allowed to be partially undef.
5787   case Instruction::UDiv:
5788   case Instruction::SDiv:
5789   case Instruction::URem:
5790   case Instruction::SRem:
5791     Operands.push_back(I->getOperand(1));
5792     break;
5793   default:
5794     break;
5795   }
5796 }
5797 
5798 bool llvm::mustTriggerUB(const Instruction *I,
5799                          const SmallSet<const Value *, 16>& KnownPoison) {
5800   SmallVector<const Value *, 4> NonPoisonOps;
5801   getGuaranteedNonPoisonOps(I, NonPoisonOps);
5802 
5803   for (const auto *V : NonPoisonOps)
5804     if (KnownPoison.count(V))
5805       return true;
5806 
5807   return false;
5808 }
5809 
5810 static bool programUndefinedIfUndefOrPoison(const Value *V,
5811                                             bool PoisonOnly) {
5812   // We currently only look for uses of values within the same basic
5813   // block, as that makes it easier to guarantee that the uses will be
5814   // executed given that Inst is executed.
5815   //
5816   // FIXME: Expand this to consider uses beyond the same basic block. To do
5817   // this, look out for the distinction between post-dominance and strong
5818   // post-dominance.
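  // Illustrative sketch (hypothetical names): if V is %p and a later
  // instruction in the block is `store i32 0, ptr %p`, the store's pointer
  // operand must be well-defined, so executing it with %p undef or poison
  // would be UB.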
5819   const BasicBlock *BB = nullptr;
5820   BasicBlock::const_iterator Begin;
5821   if (const auto *Inst = dyn_cast<Instruction>(V)) {
5822     BB = Inst->getParent();
5823     Begin = Inst->getIterator();
5824     Begin++;
5825   } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5826     BB = &Arg->getParent()->getEntryBlock();
5827     Begin = BB->begin();
5828   } else {
5829     return false;
5830   }
5831 
5832   // Limit number of instructions we look at, to avoid scanning through large
5833   // blocks. The current limit is chosen arbitrarily.
5834   unsigned ScanLimit = 32;
5835   BasicBlock::const_iterator End = BB->end();
5836 
5837   if (!PoisonOnly) {
5838     // Since undef does not propagate eagerly, be conservative and just check
5839     // whether a value is directly passed to an instruction that must take
5840     // well-defined operands.
5841 
5842     for (const auto &I : make_range(Begin, End)) {
5843       if (isa<DbgInfoIntrinsic>(I))
5844         continue;
5845       if (--ScanLimit == 0)
5846         break;
5847 
5848       SmallVector<const Value *, 4> WellDefinedOps;
5849       getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5850       if (is_contained(WellDefinedOps, V))
5851         return true;
5852 
5853       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5854         break;
5855     }
5856     return false;
5857   }
5858 
5859   // Set of instructions that we have proved will yield poison if V
5860   // does.
5861   SmallSet<const Value *, 16> YieldsPoison;
5862   SmallSet<const BasicBlock *, 4> Visited;
5863 
5864   YieldsPoison.insert(V);
5865   Visited.insert(BB);
5866 
5867   while (true) {
5868     for (const auto &I : make_range(Begin, End)) {
5869       if (isa<DbgInfoIntrinsic>(I))
5870         continue;
5871       if (--ScanLimit == 0)
5872         return false;
5873       if (mustTriggerUB(&I, YieldsPoison))
5874         return true;
5875       if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5876         return false;
5877 
5878       // If an operand is poison and propagates it, mark I as yielding poison.
5879       for (const Use &Op : I.operands()) {
5880         if (YieldsPoison.count(Op) && propagatesPoison(Op)) {
5881           YieldsPoison.insert(&I);
5882           break;
5883         }
5884       }
5885     }
5886 
5887     BB = BB->getSingleSuccessor();
5888     if (!BB || !Visited.insert(BB).second)
5889       break;
5890 
5891     Begin = BB->getFirstNonPHI()->getIterator();
5892     End = BB->end();
5893   }
5894   return false;
5895 }
5896 
5897 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5898   return ::programUndefinedIfUndefOrPoison(Inst, false);
5899 }
5900 
5901 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5902   return ::programUndefinedIfUndefOrPoison(Inst, true);
5903 }
5904 
5905 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5906   if (FMF.noNaNs())
5907     return true;
5908 
5909   if (auto *C = dyn_cast<ConstantFP>(V))
5910     return !C->isNaN();
5911 
5912   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5913     if (!C->getElementType()->isFloatingPointTy())
5914       return false;
5915     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5916       if (C->getElementAsAPFloat(I).isNaN())
5917         return false;
5918     }
5919     return true;
5920   }
5921 
5922   if (isa<ConstantAggregateZero>(V))
5923     return true;
5924 
5925   return false;
5926 }
5927 
5928 static bool isKnownNonZero(const Value *V) {
5929   if (auto *C = dyn_cast<ConstantFP>(V))
5930     return !C->isZero();
5931 
5932   if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5933     if (!C->getElementType()->isFloatingPointTy())
5934       return false;
5935     for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5936       if (C->getElementAsAPFloat(I).isZero())
5937         return false;
5938     }
5939     return true;
5940   }
5941 
5942   return false;
5943 }
5944 
5945 /// Match the clamp pattern for float types, without regard for NaNs or signed
5946 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
5947 /// function recognizes whether it can be substituted by a "canonical" min/max
5948 /// pattern.
5949 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred,
5950                                                Value *CmpLHS, Value *CmpRHS,
5951                                                Value *TrueVal, Value *FalseVal,
5952                                                Value *&LHS, Value *&RHS) {
5953   // Try to match
5954   //   X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2))
5955   //   X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2))
5956   // and return a description of the outer Max/Min.
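  // Illustrative IR for the first pattern (a sketch; names are hypothetical):
  //   %lt2  = fcmp olt float %x, 2.0
  //   %fmin = select i1 %lt2, float %x, float 2.0    ; fmin(%x, 2.0)
  //   %cmp  = fcmp olt float %x, 1.0
  //   %sel  = select i1 %cmp, float 1.0, float %fmin ; fmax(1.0, fmin(%x, 2.0))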
5957 
5958   // First, check if select has inverse order:
5959   if (CmpRHS == FalseVal) {
5960     std::swap(TrueVal, FalseVal);
5961     Pred = CmpInst::getInversePredicate(Pred);
5962   }
5963 
5964   // Assume success now. If there's no match, callers should not use these anyway.
5965   LHS = TrueVal;
5966   RHS = FalseVal;
5967 
5968   const APFloat *FC1;
5969   if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite())
5970     return {SPF_UNKNOWN, SPNB_NA, false};
5971 
5972   const APFloat *FC2;
5973   switch (Pred) {
5974   case CmpInst::FCMP_OLT:
5975   case CmpInst::FCMP_OLE:
5976   case CmpInst::FCMP_ULT:
5977   case CmpInst::FCMP_ULE:
5978     if (match(FalseVal,
5979               m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)),
5980                           m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5981         *FC1 < *FC2)
5982       return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false};
5983     break;
5984   case CmpInst::FCMP_OGT:
5985   case CmpInst::FCMP_OGE:
5986   case CmpInst::FCMP_UGT:
5987   case CmpInst::FCMP_UGE:
5988     if (match(FalseVal,
5989               m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)),
5990                           m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) &&
5991         *FC1 > *FC2)
5992       return {SPF_FMINNUM, SPNB_RETURNS_ANY, false};
5993     break;
5994   default:
5995     break;
5996   }
5997 
5998   return {SPF_UNKNOWN, SPNB_NA, false};
5999 }
6000 
6001 /// Recognize variations of:
6002 ///   CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v)))
6003 static SelectPatternResult matchClamp(CmpInst::Predicate Pred,
6004                                       Value *CmpLHS, Value *CmpRHS,
6005                                       Value *TrueVal, Value *FalseVal) {
6006   // Swap the select operands and predicate to match the patterns below.
6007   if (CmpRHS != TrueVal) {
6008     Pred = ICmpInst::getSwappedPredicate(Pred);
6009     std::swap(TrueVal, FalseVal);
6010   }
6011   const APInt *C1;
6012   if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) {
6013     const APInt *C2;
6014     // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1)
6015     if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) &&
6016         C1->slt(*C2) && Pred == CmpInst::ICMP_SLT)
6017       return {SPF_SMAX, SPNB_NA, false};
6018 
6019     // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1)
6020     if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) &&
6021         C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT)
6022       return {SPF_SMIN, SPNB_NA, false};
6023 
6024     // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1)
6025     if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) &&
6026         C1->ult(*C2) && Pred == CmpInst::ICMP_ULT)
6027       return {SPF_UMAX, SPNB_NA, false};
6028 
6029     // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1)
6030     if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) &&
6031         C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT)
6032       return {SPF_UMIN, SPNB_NA, false};
6033   }
6034   return {SPF_UNKNOWN, SPNB_NA, false};
6035 }
6036 
6037 /// Recognize variations of:
6038 ///   a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c))
6039 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred,
6040                                                Value *CmpLHS, Value *CmpRHS,
6041                                                Value *TVal, Value *FVal,
6042                                                unsigned Depth) {
6043   // TODO: Allow FP min/max with nnan/nsz.
6044   assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison");
6045 
6046   Value *A = nullptr, *B = nullptr;
6047   SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1);
6048   if (!SelectPatternResult::isMinOrMax(L.Flavor))
6049     return {SPF_UNKNOWN, SPNB_NA, false};
6050 
6051   Value *C = nullptr, *D = nullptr;
6052   SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1);
6053   if (L.Flavor != R.Flavor)
6054     return {SPF_UNKNOWN, SPNB_NA, false};
6055 
6056   // We have something like: x Pred y ? min(a, b) : min(c, d).
6057   // Try to match the compare to the min/max operations of the select operands.
6058   // First, make sure we have the right compare predicate.
6059   switch (L.Flavor) {
6060   case SPF_SMIN:
6061     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) {
6062       Pred = ICmpInst::getSwappedPredicate(Pred);
6063       std::swap(CmpLHS, CmpRHS);
6064     }
6065     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
6066       break;
6067     return {SPF_UNKNOWN, SPNB_NA, false};
6068   case SPF_SMAX:
6069     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) {
6070       Pred = ICmpInst::getSwappedPredicate(Pred);
6071       std::swap(CmpLHS, CmpRHS);
6072     }
6073     if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
6074       break;
6075     return {SPF_UNKNOWN, SPNB_NA, false};
6076   case SPF_UMIN:
6077     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
6078       Pred = ICmpInst::getSwappedPredicate(Pred);
6079       std::swap(CmpLHS, CmpRHS);
6080     }
6081     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE)
6082       break;
6083     return {SPF_UNKNOWN, SPNB_NA, false};
6084   case SPF_UMAX:
6085     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
6086       Pred = ICmpInst::getSwappedPredicate(Pred);
6087       std::swap(CmpLHS, CmpRHS);
6088     }
6089     if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
6090       break;
6091     return {SPF_UNKNOWN, SPNB_NA, false};
6092   default:
6093     return {SPF_UNKNOWN, SPNB_NA, false};
6094   }
6095 
6096   // If there is a common operand in the already matched min/max and the other
6097   // min/max operands match the compare operands (either directly or inverted),
6098   // then this is min/max of the same flavor.
6099 
6100   // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
6101   // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b))
6102   if (D == B) {
6103     if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
6104                                          match(A, m_Not(m_Specific(CmpRHS)))))
6105       return {L.Flavor, SPNB_NA, false};
6106   }
6107   // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
6108   // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d))
6109   if (C == B) {
6110     if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
6111                                          match(A, m_Not(m_Specific(CmpRHS)))))
6112       return {L.Flavor, SPNB_NA, false};
6113   }
6114   // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
6115   // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a))
6116   if (D == A) {
6117     if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) &&
6118                                          match(B, m_Not(m_Specific(CmpRHS)))))
6119       return {L.Flavor, SPNB_NA, false};
6120   }
6121   // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
6122   // ~d pred ~b ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d))
6123   if (C == A) {
6124     if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) &&
6125                                          match(B, m_Not(m_Specific(CmpRHS)))))
6126       return {L.Flavor, SPNB_NA, false};
6127   }
6128 
6129   return {SPF_UNKNOWN, SPNB_NA, false};
6130 }
6131 
6132 /// If the input value is the result of a 'not' op, constant integer, or vector
6133 /// splat of a constant integer, return the bitwise-not source value.
6134 /// TODO: This could be extended to handle non-splat vector integer constants.
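/// For example (illustrative): given `xor i8 %x, -1` this returns %x; given
/// an i8 constant 5, it returns the i8 constant -6 (~5).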
6135 static Value *getNotValue(Value *V) {
6136   Value *NotV;
6137   if (match(V, m_Not(m_Value(NotV))))
6138     return NotV;
6139 
6140   const APInt *C;
6141   if (match(V, m_APInt(C)))
6142     return ConstantInt::get(V->getType(), ~(*C));
6143 
6144   return nullptr;
6145 }
6146 
6147 /// Match non-obvious integer minimum and maximum sequences.
6148 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred,
6149                                        Value *CmpLHS, Value *CmpRHS,
6150                                        Value *TrueVal, Value *FalseVal,
6151                                        Value *&LHS, Value *&RHS,
6152                                        unsigned Depth) {
6153   // Assume success. If there's no match, callers should not use these anyway.
6154   LHS = TrueVal;
6155   RHS = FalseVal;
6156 
6157   SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal);
6158   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
6159     return SPR;
6160 
6161   SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
6162   if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
6163     return SPR;
6164 
6165   // Look through 'not' ops to find disguised min/max.
6166   // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y)
6167   // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y)
6168   if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) {
6169     switch (Pred) {
6170     case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false};
6171     case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false};
6172     case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false};
6173     case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false};
6174     default: break;
6175     }
6176   }
6177 
6178   // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X)
6179   // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X)
6180   if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) {
6181     switch (Pred) {
6182     case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false};
6183     case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false};
6184     case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false};
6185     case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false};
6186     default: break;
6187     }
6188   }
6189 
6190   if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
6191     return {SPF_UNKNOWN, SPNB_NA, false};
6192 
6193   const APInt *C1;
6194   if (!match(CmpRHS, m_APInt(C1)))
6195     return {SPF_UNKNOWN, SPNB_NA, false};
6196 
6197   // An unsigned min/max can be written with a signed compare.
6198   const APInt *C2;
6199   if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) ||
6200       (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) {
6201     // Is the sign bit set?
6202     // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX
6203     // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN
6204     if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue())
6205       return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
6206 
6207     // Is the sign bit clear?
6208     // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX
6209     // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? X : MINVAL ==> UMIN
6210     if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
6211       return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
6212   }
6213 
6214   return {SPF_UNKNOWN, SPNB_NA, false};
6215 }
6216 
6217 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
6218   assert(X && Y && "Invalid operand");
6219 
6220   // X = sub (0, Y) || X = sub nsw (0, Y)
6221   if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
6222       (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
6223     return true;
6224 
6225   // Y = sub (0, X) || Y = sub nsw (0, X)
6226   if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
6227       (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
6228     return true;
6229 
6230   // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
6231   Value *A, *B;
6232   return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
6233                         match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
6234          (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
6235                        match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
6236 }
6237 
6238 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
6239                                               FastMathFlags FMF,
6240                                               Value *CmpLHS, Value *CmpRHS,
6241                                               Value *TrueVal, Value *FalseVal,
6242                                               Value *&LHS, Value *&RHS,
6243                                               unsigned Depth) {
6244   bool HasMismatchedZeros = false;
6245   if (CmpInst::isFPPredicate(Pred)) {
6246     // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
6247     // 0.0 operand, set the compare's 0.0 operands to that same value for the
6248     // purpose of identifying min/max. Disregard vector constants with undefined
6249     // elements because those cannot be back-propagated for analysis.
6250     Value *OutputZeroVal = nullptr;
6251     if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
6252         !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
6253       OutputZeroVal = TrueVal;
6254     else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
6255              !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
6256       OutputZeroVal = FalseVal;
6257 
6258     if (OutputZeroVal) {
6259       if (match(CmpLHS, m_AnyZeroFP()) && CmpLHS != OutputZeroVal) {
6260         HasMismatchedZeros = true;
6261         CmpLHS = OutputZeroVal;
6262       }
6263       if (match(CmpRHS, m_AnyZeroFP()) && CmpRHS != OutputZeroVal) {
6264         HasMismatchedZeros = true;
6265         CmpRHS = OutputZeroVal;
6266       }
6267     }
6268   }
6269 
6270   LHS = CmpLHS;
6271   RHS = CmpRHS;
6272 
6273   // Min/max operations involving signed zero may give inconsistent results:
6274   //  (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
6275   //  minNum(0.0, -0.0)          // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
6276   // Therefore, we behave conservatively and only proceed if at least one of the
6277   // operands is known not to be zero or if we don't care about signed zero.
6278   switch (Pred) {
6279   default: break;
6280   case CmpInst::FCMP_OGT: case CmpInst::FCMP_OLT:
6281   case CmpInst::FCMP_UGT: case CmpInst::FCMP_ULT:
6282     if (!HasMismatchedZeros)
6283       break;
6284     [[fallthrough]];
6285   case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
6286   case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
6287     if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6288         !isKnownNonZero(CmpRHS))
6289       return {SPF_UNKNOWN, SPNB_NA, false};
6290   }
6291 
6292   SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
6293   bool Ordered = false;
6294 
6295   // When given one NaN and one non-NaN input:
6296   //   - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
6297   //   - A simple C99 (a < b ? a : b) construction will return 'b' (as the
6298   //     ordered comparison fails), which could be NaN or non-NaN.
6299   // so here we discover exactly what NaN behavior is required/accepted.
6300   if (CmpInst::isFPPredicate(Pred)) {
6301     bool LHSSafe = isKnownNonNaN(CmpLHS, FMF);
6302     bool RHSSafe = isKnownNonNaN(CmpRHS, FMF);
6303 
6304     if (LHSSafe && RHSSafe) {
6305       // Both operands are known non-NaN.
6306       NaNBehavior = SPNB_RETURNS_ANY;
6307     } else if (CmpInst::isOrdered(Pred)) {
6308       // An ordered comparison will return false when given a NaN, so it
6309       // returns the RHS.
6310       Ordered = true;
6311       if (LHSSafe)
6312         // LHS is non-NaN, so if RHS is NaN then NaN will be returned.
6313         NaNBehavior = SPNB_RETURNS_NAN;
6314       else if (RHSSafe)
6315         NaNBehavior = SPNB_RETURNS_OTHER;
6316       else
6317         // Completely unsafe.
6318         return {SPF_UNKNOWN, SPNB_NA, false};
6319     } else {
6320       Ordered = false;
6321       // An unordered comparison will return true when given a NaN, so it
6322       // returns the LHS.
6323       if (LHSSafe)
6324         // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned.
6325         NaNBehavior = SPNB_RETURNS_OTHER;
6326       else if (RHSSafe)
6327         NaNBehavior = SPNB_RETURNS_NAN;
6328       else
6329         // Completely unsafe.
6330         return {SPF_UNKNOWN, SPNB_NA, false};
6331     }
6332   }
6333 
6334   if (TrueVal == CmpRHS && FalseVal == CmpLHS) {
6335     std::swap(CmpLHS, CmpRHS);
6336     Pred = CmpInst::getSwappedPredicate(Pred);
6337     if (NaNBehavior == SPNB_RETURNS_NAN)
6338       NaNBehavior = SPNB_RETURNS_OTHER;
6339     else if (NaNBehavior == SPNB_RETURNS_OTHER)
6340       NaNBehavior = SPNB_RETURNS_NAN;
6341     Ordered = !Ordered;
6342   }
6343 
6344   // ([if]cmp X, Y) ? X : Y
6345   if (TrueVal == CmpLHS && FalseVal == CmpRHS) {
6346     switch (Pred) {
6347     default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality.
6348     case ICmpInst::ICMP_UGT:
6349     case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false};
6350     case ICmpInst::ICMP_SGT:
6351     case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false};
6352     case ICmpInst::ICMP_ULT:
6353     case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false};
6354     case ICmpInst::ICMP_SLT:
6355     case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false};
6356     case FCmpInst::FCMP_UGT:
6357     case FCmpInst::FCMP_UGE:
6358     case FCmpInst::FCMP_OGT:
6359     case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered};
6360     case FCmpInst::FCMP_ULT:
6361     case FCmpInst::FCMP_ULE:
6362     case FCmpInst::FCMP_OLT:
6363     case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
6364     }
6365   }
6366 
6367   if (isKnownNegation(TrueVal, FalseVal)) {
6368     // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
6369     // match against either LHS or sext(LHS).
6370     auto MaybeSExtCmpLHS =
6371         m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS)));
6372     auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes());
6373     auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One());
6374     if (match(TrueVal, MaybeSExtCmpLHS)) {
6375       // Set the return values. If the compare uses the negated value (-X >s 0),
6376       // swap the return values because the negated value is always 'RHS'.
6377       LHS = TrueVal;
6378       RHS = FalseVal;
6379       if (match(CmpLHS, m_Neg(m_Specific(FalseVal))))
6380         std::swap(LHS, RHS);
6381 
6382       // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X)
6383       // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X)
6384       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6385         return {SPF_ABS, SPNB_NA, false};
6386 
6387       // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X)
6388       if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne))
6389         return {SPF_ABS, SPNB_NA, false};
6390 
6391       // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X)
6392       // (-X <s 0) ? -X : X or (-X <s 1) ? -X : X --> NABS(X)
6393       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6394         return {SPF_NABS, SPNB_NA, false};
6395     }
6396     else if (match(FalseVal, MaybeSExtCmpLHS)) {
6397       // Set the return values. If the compare uses the negated value (-X >s 0),
6398       // swap the return values because the negated value is always 'RHS'.
6399       LHS = FalseVal;
6400       RHS = TrueVal;
6401       if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
6402         std::swap(LHS, RHS);
6403 
6404       // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
6405       // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
6406       if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6407         return {SPF_NABS, SPNB_NA, false};
6408 
6409       // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
6410       // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
6411       if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6412         return {SPF_ABS, SPNB_NA, false};
6413     }
6414   }
6415 
6416   if (CmpInst::isIntPredicate(Pred))
6417     return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
6418 
6419   // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
6420   // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
6421   // semantics than minNum. Be conservative in such cases.
6422   if (NaNBehavior != SPNB_RETURNS_ANY ||
6423       (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6424        !isKnownNonZero(CmpRHS)))
6425     return {SPF_UNKNOWN, SPNB_NA, false};
6426 
6427   return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
6428 }
6429 
6430 /// Helps to match a select pattern in case of a type mismatch.
6431 ///
6432 /// The function handles the case when the type of the true and false values
6433 /// of a select instruction differs from the type of the cmp instruction
6434 /// operands because of a cast instruction. It checks whether it is legal to
6435 /// move the cast operation after the select. If so, it returns the new second
6436 /// value of the select (under the assumption that the cast is moved):
6437 /// 1. As the operand of the cast instruction, when both values of the select
6438 /// are the same cast instruction.
6439 /// 2. As a restored constant (by applying the reverse cast operation), when
6440 /// the first value of the select is a cast operation and the second value is
6441 /// a constant.
6442 /// NOTE: We return only the new second value because the first value can be
6443 /// accessed as the operand of the cast instruction.
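/// Illustrative example (a sketch; names are hypothetical):
///   %cmp = icmp ult i32 %x, %y
///   %xw  = zext i32 %x to i64
///   %sel = select i1 %cmp, i64 %xw, i64 42
/// With V1 = %xw and V2 = 42, the cast is a zext and the compare is unsigned,
/// so this returns the i32 constant 42 (truncating 42 loses no information),
/// letting the caller match the pattern on the narrow type.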
6444 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
6445                               Instruction::CastOps *CastOp) {
6446   auto *Cast1 = dyn_cast<CastInst>(V1);
6447   if (!Cast1)
6448     return nullptr;
6449 
6450   *CastOp = Cast1->getOpcode();
6451   Type *SrcTy = Cast1->getSrcTy();
6452   if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
6453     // If V1 and V2 are both the same cast from the same type, look through V1.
6454     if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
6455       return Cast2->getOperand(0);
6456     return nullptr;
6457   }
6458 
6459   auto *C = dyn_cast<Constant>(V2);
6460   if (!C)
6461     return nullptr;
6462 
6463   Constant *CastedTo = nullptr;
6464   switch (*CastOp) {
6465   case Instruction::ZExt:
6466     if (CmpI->isUnsigned())
6467       CastedTo = ConstantExpr::getTrunc(C, SrcTy);
6468     break;
6469   case Instruction::SExt:
6470     if (CmpI->isSigned())
6471       CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6472     break;
6473   case Instruction::Trunc:
6474     Constant *CmpConst;
6475     if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6476         CmpConst->getType() == SrcTy) {
6477       // Here we have the following case:
6478       //
6479       //   %cond = cmp iN %x, CmpConst
6480       //   %tr = trunc iN %x to iK
6481       //   %narrowsel = select i1 %cond, iK %tr, iK C
6482       //
6483       // We can always move trunc after select operation:
6484       //
6485       //   %cond = cmp iN %x, CmpConst
6486       //   %widesel = select i1 %cond, iN %x, iN CmpConst
6487       //   %tr = trunc iN %widesel to iK
6488       //
6489       // Note that C could be extended in any way because we don't care about
6490       // the upper bits after truncation. This can't be an abs pattern, because
6491       // that would look like:
6492       //
6493       //   select i1 %cond, x, -x.
6494       //
6495       // So only a min/max pattern can be matched. Such a match requires the
6496       // widened C to equal CmpConst; that is why we set the widened C to
6497       // CmpConst, and the condition trunc(CmpConst) == C is checked below.
6498       CastedTo = CmpConst;
6499     } else {
6500       CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6501     }
6502     break;
6503   case Instruction::FPTrunc:
6504     CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6505     break;
6506   case Instruction::FPExt:
6507     CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6508     break;
6509   case Instruction::FPToUI:
6510     CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6511     break;
6512   case Instruction::FPToSI:
6513     CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6514     break;
6515   case Instruction::UIToFP:
6516     CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6517     break;
6518   case Instruction::SIToFP:
6519     CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6520     break;
6521   default:
6522     break;
6523   }
6524 
6525   if (!CastedTo)
6526     return nullptr;
6527 
6528   // Make sure the cast doesn't lose any information.
6529   Constant *CastedBack =
6530       ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true);
6531   if (CastedBack != C)
6532     return nullptr;
6533 
6534   return CastedTo;
6535 }
6536 
6537 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS,
6538                                              Instruction::CastOps *CastOp,
6539                                              unsigned Depth) {
6540   if (Depth >= MaxAnalysisRecursionDepth)
6541     return {SPF_UNKNOWN, SPNB_NA, false};
6542 
6543   SelectInst *SI = dyn_cast<SelectInst>(V);
6544   if (!SI) return {SPF_UNKNOWN, SPNB_NA, false};
6545 
6546   CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition());
6547   if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false};
6548 
6549   Value *TrueVal = SI->getTrueValue();
6550   Value *FalseVal = SI->getFalseValue();
6551 
6552   return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS,
6553                                             CastOp, Depth);
6554 }
6555 
6556 SelectPatternResult llvm::matchDecomposedSelectPattern(
6557     CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS,
6558     Instruction::CastOps *CastOp, unsigned Depth) {
6559   CmpInst::Predicate Pred = CmpI->getPredicate();
6560   Value *CmpLHS = CmpI->getOperand(0);
6561   Value *CmpRHS = CmpI->getOperand(1);
6562   FastMathFlags FMF;
6563   if (isa<FPMathOperator>(CmpI))
6564     FMF = CmpI->getFastMathFlags();
6565 
6566   // Bail out early.
6567   if (CmpI->isEquality())
6568     return {SPF_UNKNOWN, SPNB_NA, false};
6569 
6570   // Deal with type mismatches.
6571   if (CastOp && CmpLHS->getType() != TrueVal->getType()) {
6572     if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) {
6573       // If this is a potential fmin/fmax with a cast to integer, then ignore
6574       // -0.0 because there is no corresponding integer value.
6575       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6576         FMF.setNoSignedZeros();
6577       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6578                                   cast<CastInst>(TrueVal)->getOperand(0), C,
6579                                   LHS, RHS, Depth);
6580     }
6581     if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) {
6582       // If this is a potential fmin/fmax with a cast to integer, then ignore
6583       // -0.0 because there is no corresponding integer value.
6584       if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI)
6585         FMF.setNoSignedZeros();
6586       return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS,
6587                                   C, cast<CastInst>(FalseVal)->getOperand(0),
6588                                   LHS, RHS, Depth);
6589     }
6590   }
6591   return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal,
6592                               LHS, RHS, Depth);
6593 }
6594 
6595 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) {
6596   if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT;
6597   if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT;
6598   if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT;
6599   if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT;
6600   if (SPF == SPF_FMINNUM)
6601     return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT;
6602   if (SPF == SPF_FMAXNUM)
6603     return Ordered ? FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT;
6604   llvm_unreachable("unhandled!");
6605 }
6606 
6607 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) {
6608   if (SPF == SPF_SMIN) return SPF_SMAX;
6609   if (SPF == SPF_UMIN) return SPF_UMAX;
6610   if (SPF == SPF_SMAX) return SPF_SMIN;
6611   if (SPF == SPF_UMAX) return SPF_UMIN;
6612   llvm_unreachable("unhandled!");
6613 }
6614 
6615 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) {
6616   switch (MinMaxID) {
6617   case Intrinsic::smax: return Intrinsic::smin;
6618   case Intrinsic::smin: return Intrinsic::smax;
6619   case Intrinsic::umax: return Intrinsic::umin;
6620   case Intrinsic::umin: return Intrinsic::umax;
6621   default: llvm_unreachable("Unexpected intrinsic");
6622   }
6623 }
6624 
6625 APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) {
6626   switch (SPF) {
6627   case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth);
6628   case SPF_SMIN: return APInt::getSignedMinValue(BitWidth);
6629   case SPF_UMAX: return APInt::getMaxValue(BitWidth);
6630   case SPF_UMIN: return APInt::getMinValue(BitWidth);
6631   default: llvm_unreachable("Unexpected flavor");
6632   }
6633 }
6634 
6635 std::pair<Intrinsic::ID, bool>
6636 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) {
  // Check whether every value in VL is a select matching the same integer
  // min/max pattern, and if so return the corresponding intrinsic ID.
6639   // TODO: Support floating point min/max.
6640   bool AllCmpSingleUse = true;
6641   SelectPatternResult SelectPattern;
6642   SelectPattern.Flavor = SPF_UNKNOWN;
6643   if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) {
6644         Value *LHS, *RHS;
6645         auto CurrentPattern = matchSelectPattern(I, LHS, RHS);
6646         if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) ||
6647             CurrentPattern.Flavor == SPF_FMINNUM ||
6648             CurrentPattern.Flavor == SPF_FMAXNUM ||
6649             !I->getType()->isIntOrIntVectorTy())
6650           return false;
6651         if (SelectPattern.Flavor != SPF_UNKNOWN &&
6652             SelectPattern.Flavor != CurrentPattern.Flavor)
6653           return false;
6654         SelectPattern = CurrentPattern;
6655         AllCmpSingleUse &=
6656             match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value()));
6657         return true;
6658       })) {
6659     switch (SelectPattern.Flavor) {
6660     case SPF_SMIN:
6661       return {Intrinsic::smin, AllCmpSingleUse};
6662     case SPF_UMIN:
6663       return {Intrinsic::umin, AllCmpSingleUse};
6664     case SPF_SMAX:
6665       return {Intrinsic::smax, AllCmpSingleUse};
6666     case SPF_UMAX:
6667       return {Intrinsic::umax, AllCmpSingleUse};
6668     default:
6669       llvm_unreachable("unexpected select pattern flavor");
6670     }
6671   }
6672   return {Intrinsic::not_intrinsic, false};
6673 }
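
// For illustration (hypothetical IR), a VL of selects such as:
//   %c0 = icmp slt i32 %a0, %b0
//   %s0 = select i1 %c0, i32 %a0, i32 %b0
//   %c1 = icmp slt i32 %a1, %b1
//   %s1 = select i1 %c1, i32 %a1, i32 %b1
// all match SPF_SMIN, so {%s0, %s1} yields {Intrinsic::smin, true}; the bool
// is true because each compare is used only by its select.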
6674 
6675 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO,
6676                                  Value *&Start, Value *&Step) {
6677   // Handle the case of a simple two-predecessor recurrence PHI.
6678   // There's a lot more that could theoretically be done here, but
6679   // this is sufficient to catch some interesting cases.
6680   if (P->getNumIncomingValues() != 2)
6681     return false;
6682 
6683   for (unsigned i = 0; i != 2; ++i) {
6684     Value *L = P->getIncomingValue(i);
6685     Value *R = P->getIncomingValue(!i);
6686     Operator *LU = dyn_cast<Operator>(L);
6687     if (!LU)
6688       continue;
6689     unsigned Opcode = LU->getOpcode();
6690 
6691     switch (Opcode) {
6692     default:
6693       continue;
    // TODO: Expand list -- xor, div, gep, uaddo, etc.
6695     case Instruction::LShr:
6696     case Instruction::AShr:
6697     case Instruction::Shl:
6698     case Instruction::Add:
6699     case Instruction::Sub:
6700     case Instruction::And:
6701     case Instruction::Or:
6702     case Instruction::Mul:
6703     case Instruction::FMul: {
6704       Value *LL = LU->getOperand(0);
6705       Value *LR = LU->getOperand(1);
6706       // Find a recurrence.
6707       if (LL == P)
6708         L = LR;
6709       else if (LR == P)
6710         L = LL;
6711       else
6712         continue; // Check for recurrence with L and R flipped.
6713 
6714       break; // Match!
6715     }
    }
6717 
    // We have matched a recurrence of the form:
    //   %iv = phi [R, %entry], [%iv.next, %backedge]
    //   %iv.next = binop %iv, L
    // OR
    //   %iv = phi [R, %entry], [%iv.next, %backedge]
    //   %iv.next = binop L, %iv
6724     BO = cast<BinaryOperator>(LU);
6725     Start = R;
6726     Step = L;
6727     return true;
6728   }
6729   return false;
6730 }
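
// A concrete instance of the matched form (illustrative):
//   loop:
//     %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
//     %iv.next = add nuw i32 %iv, 1
// matchSimpleRecurrence(%iv, BO, Start, Step) sets BO = %iv.next, Start = 0
// and Step = 1.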
6731 
6732 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6733                                  Value *&Start, Value *&Step) {
6734   BinaryOperator *BO = nullptr;
6735   P = dyn_cast<PHINode>(I->getOperand(0));
6736   if (!P)
6737     P = dyn_cast<PHINode>(I->getOperand(1));
6738   return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6739 }
6740 
6741 /// Return true if "icmp Pred LHS RHS" is always true.
6742 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6743                             const Value *RHS, const DataLayout &DL,
6744                             unsigned Depth) {
6745   if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6746     return true;
6747 
6748   switch (Pred) {
6749   default:
6750     return false;
6751 
6752   case CmpInst::ICMP_SLE: {
6753     const APInt *C;
6754 
6755     // LHS s<= LHS +_{nsw} C   if C >= 0
6756     if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6757       return !C->isNegative();
6758     return false;
6759   }
6760 
6761   case CmpInst::ICMP_ULE: {
6762     const APInt *C;
6763 
6764     // LHS u<= LHS +_{nuw} C   for any C
6765     if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6766       return true;
6767 
6768     // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6769     auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6770                                        const Value *&X,
6771                                        const APInt *&CA, const APInt *&CB) {
6772       if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6773           match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6774         return true;
6775 
6776       // If X & C == 0 then (X | C) == X +_{nuw} C
6777       if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6778           match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6779         KnownBits Known(CA->getBitWidth());
6780         computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6781                          /*CxtI*/ nullptr, /*DT*/ nullptr);
6782         if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6783           return true;
6784       }
6785 
6786       return false;
6787     };
6788 
6789     const Value *X;
6790     const APInt *CLHS, *CRHS;
6791     if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6792       return CLHS->ule(*CRHS);
6793 
6794     return false;
6795   }
6796   }
6797 }
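
// Worked example for the 'or' case above (illustrative): if the low three
// bits of X are known zero, then A = (X | 1) and B = (X | 5) are really
// X +_{nuw} 1 and X +_{nuw} 5, and 1 u<= 5 proves A u<= B.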
6798 
6799 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6800 /// ALHS ARHS" is true.  Otherwise, return std::nullopt.
6801 static std::optional<bool>
6802 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS,
6803                       const Value *ARHS, const Value *BLHS, const Value *BRHS,
6804                       const DataLayout &DL, unsigned Depth) {
6805   switch (Pred) {
6806   default:
6807     return std::nullopt;
6808 
6809   case CmpInst::ICMP_SLT:
6810   case CmpInst::ICMP_SLE:
6811     if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) &&
6812         isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth))
6813       return true;
6814     return std::nullopt;
6815 
6816   case CmpInst::ICMP_ULT:
6817   case CmpInst::ICMP_ULE:
6818     if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) &&
6819         isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth))
6820       return true;
6821     return std::nullopt;
6822   }
6823 }
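
// For example (illustrative): "x s< y" implies "x s< (y +nsw 1)", because
// x s<= x holds trivially and isTruePredicate proves y s<= y +_{nsw} 1 for
// the non-negative constant 1.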
6824 
/// Return true if the operands of two compares (expanded as "L0 pred L1" and
/// "R0 pred R1") match. AreSwappedOps is set to true when the operands match,
/// but are swapped.
static bool areMatchingOperands(const Value *L0, const Value *L1,
                                const Value *R0, const Value *R1,
                                bool &AreSwappedOps) {
6830   bool AreMatchingOps = (L0 == R0 && L1 == R1);
6831   AreSwappedOps = (L0 == R1 && L1 == R0);
6832   return AreMatchingOps || AreSwappedOps;
6833 }
6834 
6835 /// Return true if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is true.
6836 /// Return false if "icmp1 LPred X, Y" implies "icmp2 RPred X, Y" is false.
6837 /// Otherwise, return std::nullopt if we can't infer anything.
6838 static std::optional<bool>
6839 isImpliedCondMatchingOperands(CmpInst::Predicate LPred,
6840                               CmpInst::Predicate RPred, bool AreSwappedOps) {
6841   // Canonicalize the predicate as if the operands were not commuted.
6842   if (AreSwappedOps)
6843     RPred = ICmpInst::getSwappedPredicate(RPred);
6844 
6845   if (CmpInst::isImpliedTrueByMatchingCmp(LPred, RPred))
6846     return true;
6847   if (CmpInst::isImpliedFalseByMatchingCmp(LPred, RPred))
6848     return false;
6849 
6850   return std::nullopt;
6851 }
6852 
6853 /// Return true if "icmp LPred X, LC" implies "icmp RPred X, RC" is true.
6854 /// Return false if "icmp LPred X, LC" implies "icmp RPred X, RC" is false.
6855 /// Otherwise, return std::nullopt if we can't infer anything.
6856 static std::optional<bool> isImpliedCondCommonOperandWithConstants(
6857     CmpInst::Predicate LPred, const APInt &LC, CmpInst::Predicate RPred,
6858     const APInt &RC) {
6859   ConstantRange DomCR = ConstantRange::makeExactICmpRegion(LPred, LC);
6860   ConstantRange CR = ConstantRange::makeExactICmpRegion(RPred, RC);
6861   ConstantRange Intersection = DomCR.intersectWith(CR);
6862   ConstantRange Difference = DomCR.difference(CR);
6863   if (Intersection.isEmptySet())
6864     return false;
6865   if (Difference.isEmptySet())
6866     return true;
6867   return std::nullopt;
6868 }
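
// Worked example (illustrative): LPred = ult with LC = 8 gives
// DomCR = [0, 8); RPred = ult with RC = 16 gives CR = [0, 16). The
// difference [0, 8) \ [0, 16) is empty, so "x u< 8" implies "x u< 16" is
// true. With RPred = ugt and RC = 15 instead, the intersection of [0, 8)
// and [16, UINT_MAX] is empty, so the implication is known false.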
6869 
6870 /// Return true if LHS implies RHS (expanded to its components as "R0 RPred R1")
6871 /// is true.  Return false if LHS implies RHS is false. Otherwise, return
6872 /// std::nullopt if we can't infer anything.
6873 static std::optional<bool> isImpliedCondICmps(const ICmpInst *LHS,
6874                                               CmpInst::Predicate RPred,
6875                                               const Value *R0, const Value *R1,
6876                                               const DataLayout &DL,
6877                                               bool LHSIsTrue, unsigned Depth) {
6878   Value *L0 = LHS->getOperand(0);
6879   Value *L1 = LHS->getOperand(1);
6880 
6881   // The rest of the logic assumes the LHS condition is true.  If that's not the
6882   // case, invert the predicate to make it so.
6883   CmpInst::Predicate LPred =
6884       LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate();
6885 
6886   // Can we infer anything when the two compares have matching operands?
6887   bool AreSwappedOps;
6888   if (areMatchingOperands(L0, L1, R0, R1, AreSwappedOps))
6889     return isImpliedCondMatchingOperands(LPred, RPred, AreSwappedOps);
6890 
6891   // Can we infer anything when the 0-operands match and the 1-operands are
6892   // constants (not necessarily matching)?
6893   const APInt *LC, *RC;
6894   if (L0 == R0 && match(L1, m_APInt(LC)) && match(R1, m_APInt(RC)))
6895     return isImpliedCondCommonOperandWithConstants(LPred, *LC, RPred, *RC);
6896 
6897   if (LPred == RPred)
6898     return isImpliedCondOperands(LPred, L0, L1, R0, R1, DL, Depth);
6899 
6900   return std::nullopt;
6901 }
6902 
6903 /// Return true if LHS implies RHS is true.  Return false if LHS implies RHS is
6904 /// false.  Otherwise, return std::nullopt if we can't infer anything.  We
6905 /// expect the RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select'
6906 /// instruction.
6907 static std::optional<bool>
6908 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6909                    const Value *RHSOp0, const Value *RHSOp1,
6910                    const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6911   // The LHS must be an 'or', 'and', or a 'select' instruction.
6912   assert((LHS->getOpcode() == Instruction::And ||
6913           LHS->getOpcode() == Instruction::Or ||
6914           LHS->getOpcode() == Instruction::Select) &&
6915          "Expected LHS to be 'and', 'or', or 'select'.");
6916 
6917   assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6918 
6919   // If the result of an 'or' is false, then we know both legs of the 'or' are
6920   // false.  Similarly, if the result of an 'and' is true, then we know both
6921   // legs of the 'and' are true.
6922   const Value *ALHS, *ARHS;
6923   if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6924       (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
    // FIXME: Make this non-recursive.
6926     if (std::optional<bool> Implication = isImpliedCondition(
6927             ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6928       return Implication;
6929     if (std::optional<bool> Implication = isImpliedCondition(
6930             ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6931       return Implication;
6932     return std::nullopt;
6933   }
6934   return std::nullopt;
6935 }
6936 
6937 std::optional<bool>
6938 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6939                          const Value *RHSOp0, const Value *RHSOp1,
6940                          const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6941   // Bail out when we hit the limit.
6942   if (Depth == MaxAnalysisRecursionDepth)
6943     return std::nullopt;
6944 
  // Bail out when one condition is scalar and the other is a vector compare;
  // their results are not directly comparable.
6947   if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6948     return std::nullopt;
6949 
6950   assert(LHS->getType()->isIntOrIntVectorTy(1) &&
6951          "Expected integer type only!");
6952 
6953   // Both LHS and RHS are icmps.
6954   const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6955   if (LHSCmp)
6956     return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6957                               Depth);
6958 
  // The LHS should be an 'or', 'and', or a 'select' instruction.  We expect
  // the RHS to be an icmp.
  // FIXME: Add support for and/or/select on the RHS.
6962   if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6963     if ((LHSI->getOpcode() == Instruction::And ||
6964          LHSI->getOpcode() == Instruction::Or ||
6965          LHSI->getOpcode() == Instruction::Select))
6966       return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6967                                 Depth);
6968   }
6969   return std::nullopt;
6970 }
6971 
6972 std::optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6973                                              const DataLayout &DL,
6974                                              bool LHSIsTrue, unsigned Depth) {
6975   // LHS ==> RHS by definition
6976   if (LHS == RHS)
6977     return LHSIsTrue;
6978 
6979   if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS))
6980     return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6981                               RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6982                               LHSIsTrue, Depth);
6983 
6984   if (Depth == MaxAnalysisRecursionDepth)
6985     return std::nullopt;
6986 
6987   // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
6988   // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
6989   const Value *RHS1, *RHS2;
6990   if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
6991     if (std::optional<bool> Imp =
6992             isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
6993       if (*Imp == true)
6994         return true;
6995     if (std::optional<bool> Imp =
6996             isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
6997       if (*Imp == true)
6998         return true;
6999   }
7000   if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
7001     if (std::optional<bool> Imp =
7002             isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
7003       if (*Imp == false)
7004         return false;
7005     if (std::optional<bool> Imp =
7006             isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
7007       if (*Imp == false)
7008         return false;
7009   }
7010 
7011   return std::nullopt;
7012 }
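
// For example (illustrative): LHS = "x u< 4" implies
// RHS = (x u< 8 || y != 0) is true via the first leg, and implies
// RHS = (x u> 10 && y != 0) is false because its first leg is false.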
7013 
// Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
// condition dominating ContextI, or nullptr if no such condition is found.
7016 static std::pair<Value *, bool>
7017 getDomPredecessorCondition(const Instruction *ContextI) {
7018   if (!ContextI || !ContextI->getParent())
7019     return {nullptr, false};
7020 
7021   // TODO: This is a poor/cheap way to determine dominance. Should we use a
  // dominator tree (e.g., from a SimplifyQuery) instead?
7023   const BasicBlock *ContextBB = ContextI->getParent();
7024   const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
7025   if (!PredBB)
7026     return {nullptr, false};
7027 
7028   // We need a conditional branch in the predecessor.
7029   Value *PredCond;
7030   BasicBlock *TrueBB, *FalseBB;
7031   if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
7032     return {nullptr, false};
7033 
  // A conditional branch with identical successors should have been
  // simplified away already; don't bother analyzing its condition.
7035   if (TrueBB == FalseBB)
7036     return {nullptr, false};
7037 
7038   assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
7039          "Predecessor block does not point to successor?");
7040 
7041   // Is this condition implied by the predecessor condition?
7042   return {PredCond, TrueBB == ContextBB};
7043 }
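
// Hypothetical CFG illustrating the match above:
//   pred:
//     %cond = icmp ult i32 %x, 10
//     br i1 %cond, label %ctx, label %other
//   ctx:                                   ; single predecessor: %pred
//     ...ContextI...
// Here the function returns {%cond, /*ConditionIsTrue=*/true}.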
7044 
7045 std::optional<bool> llvm::isImpliedByDomCondition(const Value *Cond,
7046                                                   const Instruction *ContextI,
7047                                                   const DataLayout &DL) {
7048   assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool");
7049   auto PredCond = getDomPredecessorCondition(ContextI);
7050   if (PredCond.first)
7051     return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second);
7052   return std::nullopt;
7053 }
7054 
7055 std::optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred,
7056                                                   const Value *LHS,
7057                                                   const Value *RHS,
7058                                                   const Instruction *ContextI,
7059                                                   const DataLayout &DL) {
7060   auto PredCond = getDomPredecessorCondition(ContextI);
7061   if (PredCond.first)
7062     return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL,
7063                               PredCond.second);
7064   return std::nullopt;
7065 }
7066 
7067 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower,
7068                               APInt &Upper, const InstrInfoQuery &IIQ,
7069                               bool PreferSignedRange) {
7070   unsigned Width = Lower.getBitWidth();
7071   const APInt *C;
7072   switch (BO.getOpcode()) {
7073   case Instruction::Add:
7074     if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
7075       bool HasNSW = IIQ.hasNoSignedWrap(&BO);
7076       bool HasNUW = IIQ.hasNoUnsignedWrap(&BO);
7077 
7078       // If the caller expects a signed compare, then try to use a signed range.
7079       // Otherwise if both no-wraps are set, use the unsigned range because it
7080       // is never larger than the signed range. Example:
7081       // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125].
7082       if (PreferSignedRange && HasNSW && HasNUW)
7083         HasNUW = false;
7084 
7085       if (HasNUW) {
7086         // 'add nuw x, C' produces [C, UINT_MAX].
7087         Lower = *C;
7088       } else if (HasNSW) {
7089         if (C->isNegative()) {
7090           // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C].
7091           Lower = APInt::getSignedMinValue(Width);
7092           Upper = APInt::getSignedMaxValue(Width) + *C + 1;
7093         } else {
7094           // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX].
7095           Lower = APInt::getSignedMinValue(Width) + *C;
7096           Upper = APInt::getSignedMaxValue(Width) + 1;
7097         }
7098       }
7099     }
7100     break;
7101 
7102   case Instruction::And:
7103     if (match(BO.getOperand(1), m_APInt(C)))
7104       // 'and x, C' produces [0, C].
7105       Upper = *C + 1;
7106     break;
7107 
7108   case Instruction::Or:
7109     if (match(BO.getOperand(1), m_APInt(C)))
7110       // 'or x, C' produces [C, UINT_MAX].
7111       Lower = *C;
7112     break;
7113 
7114   case Instruction::AShr:
7115     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
7116       // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C].
7117       Lower = APInt::getSignedMinValue(Width).ashr(*C);
7118       Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1;
7119     } else if (match(BO.getOperand(0), m_APInt(C))) {
7120       unsigned ShiftAmount = Width - 1;
7121       if (!C->isZero() && IIQ.isExact(&BO))
7122         ShiftAmount = C->countTrailingZeros();
7123       if (C->isNegative()) {
7124         // 'ashr C, x' produces [C, C >> (Width-1)]
7125         Lower = *C;
7126         Upper = C->ashr(ShiftAmount) + 1;
7127       } else {
7128         // 'ashr C, x' produces [C >> (Width-1), C]
7129         Lower = C->ashr(ShiftAmount);
7130         Upper = *C + 1;
7131       }
7132     }
7133     break;
7134 
7135   case Instruction::LShr:
7136     if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) {
7137       // 'lshr x, C' produces [0, UINT_MAX >> C].
7138       Upper = APInt::getAllOnes(Width).lshr(*C) + 1;
7139     } else if (match(BO.getOperand(0), m_APInt(C))) {
7140       // 'lshr C, x' produces [C >> (Width-1), C].
7141       unsigned ShiftAmount = Width - 1;
7142       if (!C->isZero() && IIQ.isExact(&BO))
7143         ShiftAmount = C->countTrailingZeros();
7144       Lower = C->lshr(ShiftAmount);
7145       Upper = *C + 1;
7146     }
7147     break;
7148 
7149   case Instruction::Shl:
7150     if (match(BO.getOperand(0), m_APInt(C))) {
7151       if (IIQ.hasNoUnsignedWrap(&BO)) {
7152         // 'shl nuw C, x' produces [C, C << CLZ(C)]
7153         Lower = *C;
7154         Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
7155       } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
7156         if (C->isNegative()) {
7157           // 'shl nsw C, x' produces [C << CLO(C)-1, C]
7158           unsigned ShiftAmount = C->countLeadingOnes() - 1;
7159           Lower = C->shl(ShiftAmount);
7160           Upper = *C + 1;
7161         } else {
7162           // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
7163           unsigned ShiftAmount = C->countLeadingZeros() - 1;
7164           Lower = *C;
7165           Upper = C->shl(ShiftAmount) + 1;
7166         }
7167       }
7168     }
7169     break;
7170 
7171   case Instruction::SDiv:
7172     if (match(BO.getOperand(1), m_APInt(C))) {
7173       APInt IntMin = APInt::getSignedMinValue(Width);
7174       APInt IntMax = APInt::getSignedMaxValue(Width);
7175       if (C->isAllOnes()) {
        // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]; INT_MIN is excluded
        // because "sdiv INT_MIN, -1" overflows.
7178         Lower = IntMin + 1;
7179         Upper = IntMax + 1;
7180       } else if (C->countLeadingZeros() < Width - 1) {
7181         // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
7182         //    where C != -1 and C != 0 and C != 1
7183         Lower = IntMin.sdiv(*C);
7184         Upper = IntMax.sdiv(*C);
7185         if (Lower.sgt(Upper))
7186           std::swap(Lower, Upper);
7187         Upper = Upper + 1;
7188         assert(Upper != Lower && "Upper part of range has wrapped!");
7189       }
7190     } else if (match(BO.getOperand(0), m_APInt(C))) {
7191       if (C->isMinSignedValue()) {
7192         // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
7193         Lower = *C;
7194         Upper = Lower.lshr(1) + 1;
7195       } else {
7196         // 'sdiv C, x' produces [-|C|, |C|].
7197         Upper = C->abs() + 1;
7198         Lower = (-Upper) + 1;
7199       }
7200     }
7201     break;
7202 
7203   case Instruction::UDiv:
7204     if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
7205       // 'udiv x, C' produces [0, UINT_MAX / C].
7206       Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
7207     } else if (match(BO.getOperand(0), m_APInt(C))) {
7208       // 'udiv C, x' produces [0, C].
7209       Upper = *C + 1;
7210     }
7211     break;
7212 
7213   case Instruction::SRem:
7214     if (match(BO.getOperand(1), m_APInt(C))) {
7215       // 'srem x, C' produces (-|C|, |C|).
7216       Upper = C->abs();
7217       Lower = (-Upper) + 1;
7218     }
7219     break;
7220 
7221   case Instruction::URem:
7222     if (match(BO.getOperand(1), m_APInt(C)))
7223       // 'urem x, C' produces [0, C).
7224       Upper = *C;
7225     break;
7226 
7227   default:
7228     break;
7229   }
7230 }
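
// Worked example for 'shl nsw C, x' with negative C (illustrative): for
// i8 C = -4 (0b11111100), CLO(C) = 6, so ShiftAmount = 5 and the range is
// [-4 << 5, -4] = [-128, -4]; shifting any further would flip the sign bit
// and violate nsw.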
7231 
7232 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
7233                                   APInt &Upper) {
7234   unsigned Width = Lower.getBitWidth();
7235   const APInt *C;
7236   switch (II.getIntrinsicID()) {
7237   case Intrinsic::ctpop:
7238   case Intrinsic::ctlz:
7239   case Intrinsic::cttz:
7240     // Maximum of set/clear bits is the bit width.
7241     assert(Lower == 0 && "Expected lower bound to be zero");
7242     Upper = Width + 1;
7243     break;
7244   case Intrinsic::uadd_sat:
7245     // uadd.sat(x, C) produces [C, UINT_MAX].
7246     if (match(II.getOperand(0), m_APInt(C)) ||
7247         match(II.getOperand(1), m_APInt(C)))
7248       Lower = *C;
7249     break;
7250   case Intrinsic::sadd_sat:
7251     if (match(II.getOperand(0), m_APInt(C)) ||
7252         match(II.getOperand(1), m_APInt(C))) {
7253       if (C->isNegative()) {
7254         // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
7255         Lower = APInt::getSignedMinValue(Width);
7256         Upper = APInt::getSignedMaxValue(Width) + *C + 1;
7257       } else {
7258         // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX].
7259         Lower = APInt::getSignedMinValue(Width) + *C;
7260         Upper = APInt::getSignedMaxValue(Width) + 1;
7261       }
7262     }
7263     break;
7264   case Intrinsic::usub_sat:
7265     // usub.sat(C, x) produces [0, C].
7266     if (match(II.getOperand(0), m_APInt(C)))
7267       Upper = *C + 1;
7268     // usub.sat(x, C) produces [0, UINT_MAX - C].
7269     else if (match(II.getOperand(1), m_APInt(C)))
7270       Upper = APInt::getMaxValue(Width) - *C + 1;
7271     break;
7272   case Intrinsic::ssub_sat:
7273     if (match(II.getOperand(0), m_APInt(C))) {
7274       if (C->isNegative()) {
7275         // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)].
7276         Lower = APInt::getSignedMinValue(Width);
7277         Upper = *C - APInt::getSignedMinValue(Width) + 1;
7278       } else {
7279         // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX].
7280         Lower = *C - APInt::getSignedMaxValue(Width);
7281         Upper = APInt::getSignedMaxValue(Width) + 1;
7282       }
7283     } else if (match(II.getOperand(1), m_APInt(C))) {
7284       if (C->isNegative()) {
        // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX].
7286         Lower = APInt::getSignedMinValue(Width) - *C;
7287         Upper = APInt::getSignedMaxValue(Width) + 1;
7288       } else {
7289         // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C].
7290         Lower = APInt::getSignedMinValue(Width);
7291         Upper = APInt::getSignedMaxValue(Width) - *C + 1;
7292       }
7293     }
7294     break;
7295   case Intrinsic::umin:
7296   case Intrinsic::umax:
7297   case Intrinsic::smin:
7298   case Intrinsic::smax:
7299     if (!match(II.getOperand(0), m_APInt(C)) &&
7300         !match(II.getOperand(1), m_APInt(C)))
7301       break;
7302 
7303     switch (II.getIntrinsicID()) {
7304     case Intrinsic::umin:
7305       Upper = *C + 1;
7306       break;
7307     case Intrinsic::umax:
7308       Lower = *C;
7309       break;
7310     case Intrinsic::smin:
7311       Lower = APInt::getSignedMinValue(Width);
7312       Upper = *C + 1;
7313       break;
7314     case Intrinsic::smax:
7315       Lower = *C;
7316       Upper = APInt::getSignedMaxValue(Width) + 1;
7317       break;
7318     default:
7319       llvm_unreachable("Must be min/max intrinsic");
7320     }
7321     break;
7322   case Intrinsic::abs:
7323     // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX],
7324     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
7325     if (match(II.getOperand(1), m_One()))
7326       Upper = APInt::getSignedMaxValue(Width) + 1;
7327     else
7328       Upper = APInt::getSignedMinValue(Width) + 1;
7329     break;
7330   default:
7331     break;
7332   }
7333 }
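
// Worked example (illustrative): for i8 "uadd.sat(x, 200)" the result can
// saturate upward but never drops below the constant, giving [200, 255];
// for i8 "sadd.sat(x, -10)" the range is [SINT_MIN, SINT_MAX - 10], i.e.
// [-128, 117].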
7334 
7335 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower,
7336                                       APInt &Upper, const InstrInfoQuery &IIQ) {
7337   const Value *LHS = nullptr, *RHS = nullptr;
7338   SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS);
7339   if (R.Flavor == SPF_UNKNOWN)
7340     return;
7341 
7342   unsigned BitWidth = SI.getType()->getScalarSizeInBits();
7343 
7344   if (R.Flavor == SelectPatternFlavor::SPF_ABS) {
7345     // If the negation part of the abs (in RHS) has the NSW flag,
7346     // then the result of abs(X) is [0..SIGNED_MAX],
7347     // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN.
7348     Lower = APInt::getZero(BitWidth);
7349     if (match(RHS, m_Neg(m_Specific(LHS))) &&
7350         IIQ.hasNoSignedWrap(cast<Instruction>(RHS)))
7351       Upper = APInt::getSignedMaxValue(BitWidth) + 1;
7352     else
7353       Upper = APInt::getSignedMinValue(BitWidth) + 1;
7354     return;
7355   }
7356 
7357   if (R.Flavor == SelectPatternFlavor::SPF_NABS) {
7358     // The result of -abs(X) is <= 0.
7359     Lower = APInt::getSignedMinValue(BitWidth);
7360     Upper = APInt(BitWidth, 1);
7361     return;
7362   }
7363 
7364   const APInt *C;
7365   if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C)))
7366     return;
7367 
  switch (R.Flavor) {
  case SPF_UMIN:
    Upper = *C + 1;
    break;
  case SPF_UMAX:
    Lower = *C;
    break;
  case SPF_SMIN:
    Lower = APInt::getSignedMinValue(BitWidth);
    Upper = *C + 1;
    break;
  case SPF_SMAX:
    Lower = *C;
    Upper = APInt::getSignedMaxValue(BitWidth) + 1;
    break;
  default:
    break;
  }
7386 }
7387 
7388 static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) {
  // The maximum representable value of a half is 65504. For floats the
  // maximum value is about 3.4e38, which requires roughly 129 bits.
7391   unsigned BitWidth = I->getType()->getScalarSizeInBits();
7392   if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy())
7393     return;
7394   if (isa<FPToSIInst>(I) && BitWidth >= 17) {
    // Use a signed constant so widths above 64 bits sign-extend correctly.
    Lower = APInt(BitWidth, -65504, /*isSigned=*/true);
7396     Upper = APInt(BitWidth, 65505);
7397   }
7398 
7399   if (isa<FPToUIInst>(I) && BitWidth >= 16) {
7400     // For a fptoui the lower limit is left as 0.
7401     Upper = APInt(BitWidth, 65505);
7402   }
7403 }
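
// Worked example (illustrative): for "fptosi half %h to i32" the source can
// be at most +/-65504, so the range is [-65504, 65504] (Upper is exclusive,
// hence 65505). An i16 destination is skipped because -65504 does not fit
// in 16 signed bits, while fptoui needs only 16 bits since [0, 65504] fits
// unsigned.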
7404 
7405 ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned,
7406                                          bool UseInstrInfo, AssumptionCache *AC,
7407                                          const Instruction *CtxI,
7408                                          const DominatorTree *DT,
7409                                          unsigned Depth) {
  assert(V->getType()->isIntOrIntVectorTy() && "Expected an integer value");
7411 
7412   if (Depth == MaxAnalysisRecursionDepth)
7413     return ConstantRange::getFull(V->getType()->getScalarSizeInBits());
7414 
7415   const APInt *C;
7416   if (match(V, m_APInt(C)))
7417     return ConstantRange(*C);
7418 
7419   InstrInfoQuery IIQ(UseInstrInfo);
7420   unsigned BitWidth = V->getType()->getScalarSizeInBits();
7421   APInt Lower = APInt(BitWidth, 0);
7422   APInt Upper = APInt(BitWidth, 0);
7423   if (auto *BO = dyn_cast<BinaryOperator>(V))
7424     setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned);
7425   else if (auto *II = dyn_cast<IntrinsicInst>(V))
7426     setLimitsForIntrinsic(*II, Lower, Upper);
7427   else if (auto *SI = dyn_cast<SelectInst>(V))
7428     setLimitsForSelectPattern(*SI, Lower, Upper, IIQ);
7429   else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V))
7430     setLimitForFPToI(cast<Instruction>(V), Lower, Upper);
7431 
7432   ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper);
7433 
7434   if (auto *I = dyn_cast<Instruction>(V))
7435     if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range))
7436       CR = CR.intersectWith(getConstantRangeFromMetadata(*Range));
7437 
7438   if (CtxI && AC) {
7439     // Try to restrict the range based on information from assumptions.
7440     for (auto &AssumeVH : AC->assumptionsFor(V)) {
7441       if (!AssumeVH)
7442         continue;
7443       IntrinsicInst *I = cast<IntrinsicInst>(AssumeVH);
7444       assert(I->getParent()->getParent() == CtxI->getParent()->getParent() &&
7445              "Got assumption for the wrong function!");
7446 
7447       if (!isValidAssumeForContext(I, CtxI, DT))
7448         continue;
7449       Value *Arg = I->getArgOperand(0);
7450       ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
7451       // Currently we just use information from comparisons.
7452       if (!Cmp || Cmp->getOperand(0) != V)
7453         continue;
7454       // TODO: Set "ForSigned" parameter via Cmp->isSigned()?
7455       ConstantRange RHS =
7456           computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
7457                                UseInstrInfo, AC, I, DT, Depth + 1);
7458       CR = CR.intersectWith(
7459           ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
7460     }
7461   }
7462 
7463   return CR;
7464 }
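
// For example (illustrative): for "%m = and i8 %x, 15" this returns
// [0, 16); an assumption "llvm.assume(icmp ult i8 %m, 10)" that is valid at
// CtxI would further intersect that down to [0, 10).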
7465 
7466 static std::optional<int64_t>
7467 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
7468   // Skip over the first indices.
7469   gep_type_iterator GTI = gep_type_begin(GEP);
7470   for (unsigned i = 1; i != Idx; ++i, ++GTI)
7471     /*skip along*/;
7472 
7473   // Compute the offset implied by the rest of the indices.
7474   int64_t Offset = 0;
7475   for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
7476     ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
7477     if (!OpC)
7478       return std::nullopt;
7479     if (OpC->isZero())
7480       continue; // No offset.
7481 
7482     // Handle struct indices, which add their field offset to the pointer.
7483     if (StructType *STy = GTI.getStructTypeOrNull()) {
7484       Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
7485       continue;
7486     }
7487 
7488     // Otherwise, we have a sequential type like an array or fixed-length
7489     // vector. Multiply the index by the ElementSize.
7490     TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
7491     if (Size.isScalable())
7492       return std::nullopt;
7493     Offset += Size.getFixedValue() * OpC->getSExtValue();
7494   }
7495 
7496   return Offset;
7497 }
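
// Worked example (illustrative): for a GEP whose indices from Idx onward
// are "i32 2, i32 1" into "[4 x { i32, i32 }]", the offset is
// 2 * 8 + 4 = 20 bytes. Any non-constant index or scalable vector type
// makes the result std::nullopt.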
7498 
7499 std::optional<int64_t> llvm::isPointerOffset(const Value *Ptr1,
7500                                              const Value *Ptr2,
7501                                              const DataLayout &DL) {
7502   APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
7503   APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
7504   Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
7505   Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);
7506 
7507   // Handle the trivial case first.
7508   if (Ptr1 == Ptr2)
7509     return Offset2.getSExtValue() - Offset1.getSExtValue();
7510 
7511   const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
7512   const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
7513 
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After those, each may add some constant
  // offset, which determines their offset from each other.  We do not handle
  // any other case at this point.
7519   if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
7520       GEP1->getSourceElementType() != GEP2->getSourceElementType())
7521     return std::nullopt;
7522 
7523   // Skip any common indices and track the GEP types.
7524   unsigned Idx = 1;
7525   for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7526     if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7527       break;
7528 
7529   auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
7530   auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
7531   if (!IOffset1 || !IOffset2)
7532     return std::nullopt;
7533   return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
7534          Offset1.getSExtValue();
7535 }
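
// For example (illustrative): with
//   %p1 = getelementptr i8, ptr %base, i64 4
//   %p2 = getelementptr i8, ptr %base, i64 9
// isPointerOffset(%p1, %p2, DL) returns 9 - 4 = 5; mismatched bases or
// source element types yield std::nullopt.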
7536