//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions.  This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x").  All operands are assumed to have already been
// simplified: this is usually true and assuming it simplifies the logic (if
// they have not been simplified then the results are correct but may be
// suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OverflowInstAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
#include <optional>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *simplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
                               unsigned);
static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>, bool,
                              const SimplifyQuery &, unsigned);
static Value *simplifySelectInst(Value *, Value *, Value *,
                                 const SimplifyQuery &, unsigned);
static Value *simplifyInstructionWithOperands(Instruction *I,
                                              ArrayRef<Value *> NewOps,
                                              const SimplifyQuery &SQ,
                                              unsigned MaxRecurse);

static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
                                     Value *FalseVal) {
  BinaryOperator::BinaryOps BinOpCode;
  if (auto *BO = dyn_cast<BinaryOperator>(Cond))
    BinOpCode = BO->getOpcode();
  else
    return nullptr;

  CmpInst::Predicate ExpectedPred, Pred1, Pred2;
  if (BinOpCode == BinaryOperator::Or) {
    ExpectedPred = ICmpInst::ICMP_NE;
  } else if (BinOpCode == BinaryOperator::And) {
    ExpectedPred = ICmpInst::ICMP_EQ;
  } else
    return nullptr;

  // %A = icmp eq %TV, %FV
  // %B = icmp eq %X, %Y (and one of these is a select operand)
  // %C = and %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %FV

  // %A = icmp ne %TV, %FV
  // %B = icmp ne %X, %Y (and one of these is a select operand)
  // %C = or %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %TV
  Value *X, *Y;
  if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
                                      m_Specific(FalseVal)),
                             m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
      Pred1 != Pred2 || Pred1 != ExpectedPred)
    return nullptr;

  if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
    return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;

  return nullptr;
}

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}
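
// For example (illustrative IR), these two are treated as the same
// comparison, since swapping the predicate together with the operands
// preserves the result:
//   %c1 = icmp slt i32 %a, %b
//   %c2 = icmp sgt i32 %b, %a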

/// Simplify comparison with true or false branch of select:
///  %sel = select i1 %cond, i32 %tv, i32 %fv
///  %cmp = icmp sle i32 %sel, %rhs
/// Compose new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
                                 Value *RHS, Value *Cond,
                                 const SimplifyQuery &Q, unsigned MaxRecurse,
                                 Constant *TrueOrFalse) {
  Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if the composed comparison is equivalent
    // to the select condition (%cond) then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
                                     Value *RHS, Value *Cond,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
                                      Value *RHS, Value *Cond,
                                      const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know comparison with both branches of select can be simplified, but they
/// are not equal. This routine handles some logical simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp".  This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  // Folding select to and/or isn't poison-safe in general; impliesPoison
  // checks whether folding it does not convert a well-defined value into
  // poison.
  if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
    if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
    if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = simplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}
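
// For example (illustrative IR):
//   %sel = select i1 %c, i32 0, i32 1
//   %cmp = icmp eq i32 %sel, 0
// Here TCmp is true and FCmp is false, so "%cmp" simplifies to "%c" via the
// "Cond && TCmp" rule above.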

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke
  // or a callbr, then it obviously dominates all phi nodes.
  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
      !isa<CallBrInst>(I))
    return true;

  return false;
}

/// Try to simplify a binary operator of form "V op OtherOp" where V is
/// "(B0 opex B1)" by distributing 'op' across 'opex' as
/// "(B0 op OtherOp) opex (B1 op OtherOp)".
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
                          Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  auto *B = dyn_cast<BinaryOperator>(V);
  if (!B || B->getOpcode() != OpcodeToExpand)
    return nullptr;
  Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
  Value *L =
      simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!L)
    return nullptr;
  Value *R =
      simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!R)
    return nullptr;

  // Does the expanded pair of binops simplify to the existing binop?
  if ((L == B0 && R == B1) ||
      (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
    ++NumExpand;
    return B;
  }

  // Otherwise, return "L op' R" if it simplifies.
  Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
  if (!S)
    return nullptr;

  ++NumExpand;
  return S;
}
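
// For example, with Opcode == Mul and OpcodeToExpand == Add, this attempts
//   (B0 + B1) * OtherOp --> (B0 * OtherOp) + (B1 * OtherOp)
// and keeps the result only if both halves, and their recombination under
// 'opex', simplify to existing values.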

/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
/// distributing op over op'.
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
                                     Value *R,
                                     Instruction::BinaryOps OpcodeToExpand,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
    return V;
  if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
    return V;
  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does!  Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B)
        return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does!  Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B)
        return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does!  Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A)
        return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does!  Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C)
        return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}
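
// For example, the first transform rewrites "(X ^ Y) ^ Y" as "X ^ (Y ^ Y)":
// "Y ^ Y" simplifies to 0, and "X ^ 0" simplifies to X, so the whole
// expression simplifies to X without creating any new instructions.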

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && Q.isUndefValue(TV))
    return FV;
  if (FV && Q.isUndefValue(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value.  See
      // if the operands match too.  If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
///  %tmp = select i1 %cmp, i32 1, i32 2
///  %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose a new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *threadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Use &Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    Instruction *InTI = PI->getIncomingBlock(Incoming)->getTerminator();
    Value *V = PI == LHS
                   ? simplifyBinOp(Opcode, Incoming, RHS,
                                   Q.getWithInstruction(InTI), MaxRecurse)
                   : simplifyBinOp(Opcode, LHS, Incoming,
                                   Q.getWithInstruction(InTI), MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
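
// For example (illustrative IR):
//   %p = phi i32 [ 1, %bb0 ], [ 3, %bb1 ]
//   %r = and i32 %p, 4
// "and i32 1, 4" and "and i32 3, 4" both fold to 0, so %r can be simplified
// to 0 even though %p itself does not simplify.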

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
static Value *threadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;
  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
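
// For example (illustrative IR):
//   %p = phi i32 [ 0, %bb0 ], [ 1, %bb1 ]
//   %c = icmp ult i32 %p, 2
// Comparing each incoming value against 2 yields true on every edge, so %c
// simplifies to true.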

static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1)) {
      switch (Opcode) {
      default:
        break;
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FRem:
        if (Q.CxtI != nullptr)
          return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
      }
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
    }

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}
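
// For example, "add i32 1, 2" constant-folds to 3, while "add i32 7, %x" is
// canonicalized to "add i32 %x, 7" so that later matchers only need to look
// for a constant on the RHS.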

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X + undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1   since   ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1  ->  -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal.  If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already.  Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time.  Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant.
/// It returns zero if there are no constant offsets applied.
///
/// This is very similar to stripAndAccumulateConstantOffsets(), except it
/// normalizes the offset bitwidth to the stripped pointer type, not the
/// original pointer type.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                            bool AllowNonInbounds = false) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
  V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
  // As that strip may trace through `addrspacecast`, we need to sext or trunc
  // the calculated offset.
  return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
}
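
// For example, given "%p = getelementptr inbounds i8, ptr %base, i64 4",
// this sets V to %base and returns an offset of 4, expressed in the index
// type width of the resulting pointer.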

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
  if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
    Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
  return Res;
}

/// Test if there is a dominating equivalence condition for the
/// two operands. If there is, try to reduce the binary operation
/// between the two operands.
/// Example: Op0 - Op1 --> 0 when Op0 == Op1
static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  // A recursive run cannot gain any benefit, so only try this at the top
  // level of recursion.
  if (MaxRecurse != RecursionLimit)
    return nullptr;

  std::optional<bool> Imp =
      isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
  if (Imp && *Imp) {
    Type *Ty = Op0->getType();
    switch (Opcode) {
    case Instruction::Sub:
    case Instruction::Xor:
    case Instruction::URem:
    case Instruction::SRem:
      return Constant::getNullValue(Ty);

    case Instruction::SDiv:
    case Instruction::UDiv:
      return ConstantInt::get(Ty, 1);

    case Instruction::And:
    case Instruction::Or:
      // Could be either one - choose Op1 since that's more likely a constant.
      return Op1;
    default:
      break;
    }
  }
  return nullptr;
}

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - poison -> poison
  // poison - X -> poison
  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Op0->getType());

  // X - undef -> undef
  // undef - X -> undef
  if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (IsNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (IsNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
      // It does!  Now see if "X + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does!  Now see if "Y + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
      // It does!  Now see if "V - Z" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does!  Now see if "V - Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
      // It does!  Now see if "V + Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
        // It does!  Now see if "trunc V" simplifies.
        if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) && match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal.  If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already.  Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time.  Similarly
  // for threading over phi nodes.

  if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
    // representable). All other cases reduce to 0, so just return 0.
    if (IsNSW)
      return ConstantInt::getNullValue(Op0->getType());

    // Treat "mul i1" as "and i1".
    if (MaxRecurse)
      if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
        return V;
  }

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V =
            threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V =
            threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // (X srem Y) sdiv Y --> 0
    if (match(X, m_SRem(m_Value(), m_Specific(Y))))
      return true;

    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended to
    // 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.

  // Is the unsigned dividend known to be less than a constant divisor?
  // TODO: Convert this (and above) to range analysis
  //      ("computeConstantRangeIncludingKnownBits")?
  const APInt *C;
  if (match(Y, m_APInt(C)) &&
      computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI, Q.DT).getMaxValue().ult(*C))
    return true;

  // Try again for any divisor:
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}
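
// For example, "udiv (and i32 %x, 7), 16" simplifies to 0: the dividend is
// known to be at most 7, which is unsigned-less-than the divisor 16.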

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
                             Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
  bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);

  Type *Ty = Op0->getType();

  // X / undef -> poison
  // X % undef -> poison
  if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Ty);

  // X / 0 -> poison
  // X % 0 -> poison
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return PoisonValue::get(Ty);

  // If any element of a constant divisor fixed-width vector is zero or undef,
  // the behavior is undefined and we can fold the whole op to poison.
  auto *Op1C = dyn_cast<Constant>(Op1);
  auto *VTy = dyn_cast<FixedVectorType>(Ty);
  if (Op1C && VTy) {
    unsigned NumElts = VTy->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt)))
        return PoisonValue::get(Ty);
    }
  }

  // poison / X -> poison
  // poison % X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // undef / X -> 0
  // undef % X -> 0
  if (Q.isUndefValue(Op0))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  // X / 0 -> poison
  // X % 0 -> poison
  // If the divisor is known to be zero, just return poison. This can happen in
  // some cases where it's provable indirectly that the denominator is zero but
  // not trivially simplifiable (i.e., known zero only through a phi node).
  if (Known.isZero())
    return PoisonValue::get(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If the divisor can only be zero or one, we can't have division-by-zero
  // or remainder-by-zero, so assume the divisor is 1.
  //   e.g. 1, zext (i1 X), sdiv X (Y and 1)
  if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  // If X * Y does not overflow, then:
  //   X * Y / Y -> X
  //   X * Y % Y -> 0
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // The multiplication can't overflow if it is defined not to, or if
    // X == A / Y for some A.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
        (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
      return IsDiv ? X : Constant::getNullValue(Op0->getType());
    }
  }

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return IsDiv ? Constant::getNullValue(Op0->getType()) : Op0;

  if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          bool IsExact, const SimplifyQuery &Q,
                          unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // If this is an exact divide by a constant, then the dividend (Op0) must have
  // at least as many trailing zeros as the divisor to divide evenly. If it has
  // fewer trailing zeros, then the result must be poison.
  const APInt *DivC;
  if (IsExact && match(Op1, m_APInt(DivC)) && DivC->countr_zero()) {
    KnownBits KnownOp0 = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
      return PoisonValue::get(Op0->getType());
  }

  return nullptr;
}
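
// For example, "udiv exact (or i32 %x, 1), 2" is poison: the dividend is
// known to be odd, so it cannot have the one trailing zero that an exact
// divide by 2 requires.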

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo &&
      ((Opcode == Instruction::SRem &&
        match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
       (Opcode == Instruction::URem &&
        match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // If the two operands are negations of each other and there is no signed
  // overflow, return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields poison.
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> poison because it may shift by the bitwidth.
  if (Q.isUndefValue(C))
    return true;

  // Shifting by the bitwidth or more is poison. This covers scalars and
  // fixed/scalable vectors with splat constants.
  const APInt *AmountC;
  if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
    return true;

  // Try harder for fixed-length vectors:
  // If all lanes of a vector shift are poison, the whole shift is poison.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0,
                  E = cast<FixedVectorType>(C->getType())->getNumElements();
         I != E; ++I)
      if (!isPoisonShift(C->getAggregateElement(I), Q))
        return false;
    return true;
  }

  return false;
}
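
// For example, "shl i32 %x, 33" always yields poison, as does a shift by a
// <2 x i32> constant amount whose lanes are both 32 or more.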
1314 
1315 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1316 /// If not, this returns null.
1317 static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1318                             Value *Op1, bool IsNSW, const SimplifyQuery &Q,
1319                             unsigned MaxRecurse) {
1320   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1321     return C;
1322 
1323   // poison shift by X -> poison
1324   if (isa<PoisonValue>(Op0))
1325     return Op0;
1326 
1327   // 0 shift by X -> 0
1328   if (match(Op0, m_Zero()))
1329     return Constant::getNullValue(Op0->getType());
1330 
1331   // X shift by 0 -> X
1332   // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1333   // would be poison.
1334   Value *X;
1335   if (match(Op1, m_Zero()) ||
1336       (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1337     return Op0;
1338 
1339   // Fold undefined shifts.
1340   if (isPoisonShift(Op1, Q))
1341     return PoisonValue::get(Op0->getType());
1342 
1343   // If the operation is with the result of a select instruction, check whether
1344   // operating on either branch of the select always yields the same value.
1345   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1346     if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1347       return V;
1348 
1349   // If the operation is with the result of a phi instruction, check whether
1350   // operating on all incoming values of the phi always yields the same value.
1351   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1352     if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1353       return V;
1354 
1355   // If any bits in the shift amount make that value greater than or equal to
1356   // the number of bits in the type, the shift produces poison.
1357   KnownBits KnownAmt = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1358   if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
1359     return PoisonValue::get(Op0->getType());
1360 
1361   // If all valid bits in the shift amount are known zero, the first operand is
1362   // unchanged.
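       // For example (illustrative): an i32 shift amount has 5 valid bits, so
       // an amount whose low 5 bits are known zero is either 0 (a no-op) or
       // >= 32 (poison); returning Op0 is a sound refinement either way.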
1363   unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
1364   if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
1365     return Op0;
1366 
1367   // Check for nsw shl leading to a poison value.
1368   if (IsNSW) {
1369     assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1370     KnownBits KnownVal = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1371     KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
1372 
1373     if (KnownVal.Zero.isSignBitSet())
1374       KnownShl.Zero.setSignBit();
1375     if (KnownVal.One.isSignBitSet())
1376       KnownShl.One.setSignBit();
1377 
1378     if (KnownShl.hasConflict())
1379       return PoisonValue::get(Op0->getType());
1380   }
1381 
1382   return nullptr;
1383 }
1384 
1385 /// Given operands for an LShr or AShr, see if we can fold the result.  If not,
1386 /// this returns null.
1387 static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1388                                  Value *Op1, bool IsExact,
1389                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
1390   if (Value *V =
1391           simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
1392     return V;
1393 
1394   // X >> X -> 0
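       // (Illustrative reasoning: 0 >> 0 == 0; for 0 < X < bitwidth, X u< 2^X
       // so the quotient is 0; amounts >= the bitwidth are poison, which 0
       // refines.)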
1395   if (Op0 == Op1)
1396     return Constant::getNullValue(Op0->getType());
1397 
1398   // undef >> X -> 0
1399   // undef >> X -> undef (if it's exact)
1400   if (Q.isUndefValue(Op0))
1401     return IsExact ? Op0 : Constant::getNullValue(Op0->getType());
1402 
1403   // The low bit cannot be shifted out of an exact shift if it is set.
1404   // TODO: Generalize by counting trailing zeros (see fold for exact division).
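       // For example (illustrative): in "lshr exact i8 %x, %y" with bit 0 of
       // %x known set, any %y > 0 would shift out a set bit and is poison, so
       // the result is %x when %y == 0 and may be refined to %x otherwise.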
1405   if (IsExact) {
1406     KnownBits Op0Known =
1407         computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
1408     if (Op0Known.One[0])
1409       return Op0;
1410   }
1411 
1412   return nullptr;
1413 }
1414 
1415 /// Given operands for a Shl, see if we can fold the result.
1416 /// If not, this returns null.
1417 static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1418                               const SimplifyQuery &Q, unsigned MaxRecurse) {
1419   if (Value *V =
1420           simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1421     return V;
1422 
1423   Type *Ty = Op0->getType();
1424   // undef << X -> 0
1425   // undef << X -> undef (if it's NSW/NUW)
1426   if (Q.isUndefValue(Op0))
1427     return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty);
1428 
1429   // (X >> A) << A -> X
1430   Value *X;
1431   if (Q.IIQ.UseInstrInfo &&
1432       match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1433     return X;
1434 
1435   // shl nuw i8 C, %x  ->  C  iff C has sign bit set.
1436   if (IsNUW && match(Op0, m_Negative()))
1437     return Op0;
1438   // NOTE: could use computeKnownBits() / LazyValueInfo,
1439   // but the cost-benefit analysis suggests it isn't worth it.
1440 
1441   // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
1442   // that the sign-bit does not change, so the only input that does not
1443   // produce poison is 0, and "0 << (bitwidth-1) --> 0".
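       // For example (illustrative): "shl nuw nsw i8 %x, 7" requires bits 1..7
       // of %x to be zero (nuw) and rejects %x == 1 since 1 << 7 overflows
       // signed i8 (nsw), so only %x == 0 is non-poison and the result is 0.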
1444   if (IsNSW && IsNUW &&
1445       match(Op1, m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
1446     return Constant::getNullValue(Ty);
1447 
1448   return nullptr;
1449 }
1450 
1451 Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1452                              const SimplifyQuery &Q) {
1453   return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
1454 }
1455 
1456 /// Given operands for an LShr, see if we can fold the result.
1457 /// If not, this returns null.
1458 static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1459                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1460   if (Value *V = simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
1461                                     MaxRecurse))
1462     return V;
1463 
1464   // (X << A) >> A -> X
1465   Value *X;
1466   if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1467     return X;
1468 
1469   // ((X << A) | Y) >> A -> X  if effective width of Y is not larger than A.
1470   // We can return X as we do in the above case since OR alters no bits in X.
1471   // SimplifyDemandedBits in InstCombine can do more general optimization for
1472   // bit manipulation. This pattern aims to provide opportunities for other
1473   // optimizers by supporting a simple but common case in InstSimplify.
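       // For example (illustrative): with A == 8 on i32 and the shl being nuw,
       // if Y u< 256 then Y only occupies bits that the final lshr discards,
       // so ((X << 8) | Y) >> 8 == X.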
1474   Value *Y;
1475   const APInt *ShRAmt, *ShLAmt;
1476   if (match(Op1, m_APInt(ShRAmt)) &&
1477       match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1478       *ShRAmt == *ShLAmt) {
1479     const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1480     const unsigned EffWidthY = YKnown.countMaxActiveBits();
1481     if (ShRAmt->uge(EffWidthY))
1482       return X;
1483   }
1484 
1485   return nullptr;
1486 }
1487 
1488 Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1489                               const SimplifyQuery &Q) {
1490   return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
1491 }
1492 
1493 /// Given operands for an AShr, see if we can fold the result.
1494 /// If not, this returns null.
1495 static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1496                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1497   if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
1498                                     MaxRecurse))
1499     return V;
1500 
1501   // -1 >>a X --> -1
1502   // (-1 << X) a>> X --> -1
1503   // Do not return Op0 because it may contain undef elements if it's a vector.
1504   if (match(Op0, m_AllOnes()) ||
1505       match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
1506     return Constant::getAllOnesValue(Op0->getType());
1507 
1508   // (X << A) >> A -> X
1509   Value *X;
1510   if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1511     return X;
1512 
1513   // Arithmetic shifting an all-sign-bit value is a no-op.
1514   unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1515   if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1516     return Op0;
1517 
1518   return nullptr;
1519 }
1520 
1521 Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1522                               const SimplifyQuery &Q) {
1523   return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
1524 }
1525 
1526 /// Commuted variants are assumed to be handled by calling this function again
1527 /// with the parameters swapped.
1528 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1529                                          ICmpInst *UnsignedICmp, bool IsAnd,
1530                                          const SimplifyQuery &Q) {
1531   Value *X, *Y;
1532 
1533   ICmpInst::Predicate EqPred;
1534   if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1535       !ICmpInst::isEquality(EqPred))
1536     return nullptr;
1537 
1538   ICmpInst::Predicate UnsignedPred;
1539 
1540   Value *A, *B;
1541   // Y = (A - B);
1542   if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1543     if (match(UnsignedICmp,
1544               m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1545         ICmpInst::isUnsigned(UnsignedPred)) {
1546       // A >=/<= B || (A - B) != 0  <-->  true
1547       if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1548            UnsignedPred == ICmpInst::ICMP_ULE) &&
1549           EqPred == ICmpInst::ICMP_NE && !IsAnd)
1550         return ConstantInt::getTrue(UnsignedICmp->getType());
1551       // A </> B && (A - B) == 0  <-->  false
1552       if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1553            UnsignedPred == ICmpInst::ICMP_UGT) &&
1554           EqPred == ICmpInst::ICMP_EQ && IsAnd)
1555         return ConstantInt::getFalse(UnsignedICmp->getType());
1556 
1557       // A </> B && (A - B) != 0  <-->  A </> B
1558       // A </> B || (A - B) != 0  <-->  (A - B) != 0
1559       if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1560                                           UnsignedPred == ICmpInst::ICMP_UGT))
1561         return IsAnd ? UnsignedICmp : ZeroICmp;
1562 
1563       // A <=/>= B && (A - B) == 0  <-->  (A - B) == 0
1564       // A <=/>= B || (A - B) == 0  <-->  A <=/>= B
1565       if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1566                                           UnsignedPred == ICmpInst::ICMP_UGE))
1567         return IsAnd ? ZeroICmp : UnsignedICmp;
1568     }
1569 
1570     // Given  Y = (A - B)
1571     //   Y >= A && Y != 0  --> Y >= A  iff B != 0
1572     //   Y <  A || Y == 0  --> Y <  A  iff B != 0
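         // (Illustrative reasoning: with B != 0, Y u>= A holds exactly when
         // the subtraction wrapped, i.e. A u< B, and a wrapped A - B is never
         // 0, so the zero test adds nothing.)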
1573     if (match(UnsignedICmp,
1574               m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1575       if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1576           EqPred == ICmpInst::ICMP_NE &&
1577           isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1578         return UnsignedICmp;
1579       if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1580           EqPred == ICmpInst::ICMP_EQ &&
1581           isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1582         return UnsignedICmp;
1583     }
1584   }
1585 
1586   if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1587       ICmpInst::isUnsigned(UnsignedPred))
1588     ;
1589   else if (match(UnsignedICmp,
1590                  m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1591            ICmpInst::isUnsigned(UnsignedPred))
1592     UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1593   else
1594     return nullptr;
1595 
1596   // X > Y && Y == 0  -->  Y == 0  iff X != 0
1597   // X > Y || Y == 0  -->  X > Y   iff X != 0
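       // (When X != 0, "Y == 0" implies "X u> Y", so whichever compare is
       // implied can be dropped on each side.)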
1598   if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1599       isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1600     return IsAnd ? ZeroICmp : UnsignedICmp;
1601 
1602   // X <= Y && Y != 0  -->  X <= Y  iff X != 0
1603   // X <= Y || Y != 0  -->  Y != 0  iff X != 0
1604   if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1605       isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1606     return IsAnd ? UnsignedICmp : ZeroICmp;
1607 
1608   // The transforms below here are expected to be handled more generally with
1609   // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1610   // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1611   // these are candidates for removal.
1612 
1613   // X < Y && Y != 0  -->  X < Y
1614   // X < Y || Y != 0  -->  Y != 0
1615   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1616     return IsAnd ? UnsignedICmp : ZeroICmp;
1617 
1618   // X >= Y && Y == 0  -->  Y == 0
1619   // X >= Y || Y == 0  -->  X >= Y
1620   if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1621     return IsAnd ? ZeroICmp : UnsignedICmp;
1622 
1623   // X < Y && Y == 0  -->  false
1624   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1625       IsAnd)
1626     return getFalse(UnsignedICmp->getType());
1627 
1628   // X >= Y || Y != 0  -->  true
1629   if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1630       !IsAnd)
1631     return getTrue(UnsignedICmp->getType());
1632 
1633   return nullptr;
1634 }
1635 
1636 /// Test if a pair of compares with a shared operand and 2 constants has an
1637 /// empty set intersection, a full set union, or one compare that is a
1638 /// superset of the other.
1639 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1640                                                 bool IsAnd) {
1641   // Look for this pattern: {and/or} ((icmp X, C0), (icmp X, C1)).
1642   if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1643     return nullptr;
1644 
1645   const APInt *C0, *C1;
1646   if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1647       !match(Cmp1->getOperand(1), m_APInt(C1)))
1648     return nullptr;
1649 
1650   auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1651   auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
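       // For example (illustrative): makeExactICmpRegion(ult, 8) yields the
       // range [0, 8), while makeExactICmpRegion(ugt, 15) yields [16, 0), the
       // wrapped representation of {16 .. max}.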
1652 
1653   // For and-of-compares, check if the intersection is empty:
1654   // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1655   if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1656     return getFalse(Cmp0->getType());
1657 
1658   // For or-of-compares, check if the union is full:
1659   // (icmp X, C0) || (icmp X, C1) --> full set --> true
1660   if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1661     return getTrue(Cmp0->getType());
1662 
1663   // Is one range a superset of the other?
1664   // If this is and-of-compares, take the smaller set:
1665   // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1666   // If this is or-of-compares, take the larger set:
1667   // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1668   if (Range0.contains(Range1))
1669     return IsAnd ? Cmp1 : Cmp0;
1670   if (Range1.contains(Range0))
1671     return IsAnd ? Cmp0 : Cmp1;
1672 
1673   return nullptr;
1674 }
1675 
1676 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
1677                                            bool IsAnd) {
1678   ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
1679   if (!match(Cmp0->getOperand(1), m_Zero()) ||
1680       !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
1681     return nullptr;
1682 
1683   if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
1684     return nullptr;
1685 
1686   // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
1687   Value *X = Cmp0->getOperand(0);
1688   Value *Y = Cmp1->getOperand(0);
1689 
1690   // If one of the compares is a masked version of a (not) null check, then
1691   // that compare implies the other, so we eliminate the other. Optionally, look
1692   // through a pointer-to-int cast to match a null check of a pointer type.
1693 
1694   // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
1695   // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
1696   // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
1697   // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
1698   if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
1699       match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
1700     return Cmp1;
1701 
1702   // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
1703   // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
1704   // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
1705   // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
1706   if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
1707       match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
1708     return Cmp0;
1709 
1710   return nullptr;
1711 }
1712 
1713 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1714                                         const InstrInfoQuery &IIQ) {
1715   // (icmp (add V, C0), C1) & (icmp V, C0)
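       // For example (illustrative): with C0 == 1 and C1 == 3 (Delta == 2),
       // "(V + 1) u< 3" restricts V to {-1, 0, 1}, which is incompatible with
       // "V s> 1", so that 'and' folds to false.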
1716   ICmpInst::Predicate Pred0, Pred1;
1717   const APInt *C0, *C1;
1718   Value *V;
1719   if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1720     return nullptr;
1721 
1722   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1723     return nullptr;
1724 
1725   auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1726   if (AddInst->getOperand(1) != Op1->getOperand(1))
1727     return nullptr;
1728 
1729   Type *ITy = Op0->getType();
1730   bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1731   bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1732 
1733   const APInt Delta = *C1 - *C0;
1734   if (C0->isStrictlyPositive()) {
1735     if (Delta == 2) {
1736       if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1737         return getFalse(ITy);
1738       if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1739         return getFalse(ITy);
1740     }
1741     if (Delta == 1) {
1742       if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1743         return getFalse(ITy);
1744       if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1745         return getFalse(ITy);
1746     }
1747   }
1748   if (C0->getBoolValue() && IsNUW) {
1749     if (Delta == 2)
1750       if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1751         return getFalse(ITy);
1752     if (Delta == 1)
1753       if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1754         return getFalse(ITy);
1755   }
1756 
1757   return nullptr;
1758 }
1759 
1760 /// Try to eliminate compares with signed or unsigned min/max constants.
1761 static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1,
1762                                                  bool IsAnd) {
1763   // Canonicalize an equality compare as Cmp0.
1764   if (Cmp1->isEquality())
1765     std::swap(Cmp0, Cmp1);
1766   if (!Cmp0->isEquality())
1767     return nullptr;
1768 
1769   // The non-equality compare must include a common operand (X). Canonicalize
1770   // the common operand as operand 0 (the predicate is swapped if the common
1771   // operand was operand 1).
1772   ICmpInst::Predicate Pred0 = Cmp0->getPredicate();
1773   Value *X = Cmp0->getOperand(0);
1774   ICmpInst::Predicate Pred1;
1775   bool HasNotOp = match(Cmp1, m_c_ICmp(Pred1, m_Not(m_Specific(X)), m_Value()));
1776   if (!HasNotOp && !match(Cmp1, m_c_ICmp(Pred1, m_Specific(X), m_Value())))
1777     return nullptr;
1778   if (ICmpInst::isEquality(Pred1))
1779     return nullptr;
1780 
1781   // The equality compare must be against a constant. Flip bits if we matched
1782   // a bitwise not. Convert a null pointer constant to an integer zero value.
1783   APInt MinMaxC;
1784   const APInt *C;
1785   if (match(Cmp0->getOperand(1), m_APInt(C)))
1786     MinMaxC = HasNotOp ? ~*C : *C;
1787   else if (isa<ConstantPointerNull>(Cmp0->getOperand(1)))
1788     MinMaxC = APInt::getZero(8);
1789   else
1790     return nullptr;
1791 
1792   // DeMorganize if this is 'or': P0 || P1 --> !P0 && !P1.
1793   if (!IsAnd) {
1794     Pred0 = ICmpInst::getInversePredicate(Pred0);
1795     Pred1 = ICmpInst::getInversePredicate(Pred1);
1796   }
1797 
1798   // Normalize to unsigned compare and unsigned min/max value.
1799   // Example for 8-bit: -128 + 128 -> 0; 127 + 128 -> 255
1800   if (ICmpInst::isSigned(Pred1)) {
1801     Pred1 = ICmpInst::getUnsignedPredicate(Pred1);
1802     MinMaxC += APInt::getSignedMinValue(MinMaxC.getBitWidth());
1803   }
1804 
1805   // (X != MAX) && (X < Y) --> X < Y
1806   // (X == MAX) || (X >= Y) --> X >= Y
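       // (X u< Y already implies X != UMAX, so the disequality is redundant;
       // the 'or' form is the same fold after the De Morgan step above.)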
1807   if (MinMaxC.isMaxValue())
1808     if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT)
1809       return Cmp1;
1810 
1811   // (X != MIN) && (X > Y) -->  X > Y
1812   // (X == MIN) || (X <= Y) --> X <= Y
1813   if (MinMaxC.isMinValue())
1814     if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_UGT)
1815       return Cmp1;
1816 
1817   return nullptr;
1818 }
1819 
1820 /// Try to simplify and/or of icmp with ctpop intrinsic.
1821 static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1,
1822                                             bool IsAnd) {
1823   ICmpInst::Predicate Pred0, Pred1;
1824   Value *X;
1825   const APInt *C;
1826   if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1827                           m_APInt(C))) ||
1828       !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
1829     return nullptr;
1830 
1831   // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1832   if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1833     return Cmp1;
1834   // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1835   if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1836     return Cmp1;
1837 
1838   return nullptr;
1839 }
1840 
1841 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1842                                  const SimplifyQuery &Q) {
1843   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1844     return X;
1845   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1846     return X;
1847 
1848   if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1849     return X;
1850 
1851   if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, true))
1852     return X;
1853 
1854   if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
1855     return X;
1856 
1857   if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
1858     return X;
1859   if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
1860     return X;
1861 
1862   if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1863     return X;
1864   if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1865     return X;
1866 
1867   return nullptr;
1868 }
1869 
1870 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1871                                        const InstrInfoQuery &IIQ) {
1872   // (icmp (add V, C0), C1) | (icmp V, C0)
1873   ICmpInst::Predicate Pred0, Pred1;
1874   const APInt *C0, *C1;
1875   Value *V;
1876   if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1877     return nullptr;
1878 
1879   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1880     return nullptr;
1881 
1882   auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1883   if (AddInst->getOperand(1) != Op1->getOperand(1))
1884     return nullptr;
1885 
1886   Type *ITy = Op0->getType();
1887   bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1888   bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1889 
1890   const APInt Delta = *C1 - *C0;
1891   if (C0->isStrictlyPositive()) {
1892     if (Delta == 2) {
1893       if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1894         return getTrue(ITy);
1895       if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1896         return getTrue(ITy);
1897     }
1898     if (Delta == 1) {
1899       if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1900         return getTrue(ITy);
1901       if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1902         return getTrue(ITy);
1903     }
1904   }
1905   if (C0->getBoolValue() && IsNUW) {
1906     if (Delta == 2)
1907       if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1908         return getTrue(ITy);
1909     if (Delta == 1)
1910       if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1911         return getTrue(ITy);
1912   }
1913 
1914   return nullptr;
1915 }
1916 
1917 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1918                                 const SimplifyQuery &Q) {
1919   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1920     return X;
1921   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1922     return X;
1923 
1924   if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1925     return X;
1926 
1927   if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, false))
1928     return X;
1929 
1930   if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
1931     return X;
1932 
1933   if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
1934     return X;
1935   if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
1936     return X;
1937 
1938   if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1939     return X;
1940   if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1941     return X;
1942 
1943   return nullptr;
1944 }
1945 
1946 static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
1947                                    FCmpInst *RHS, bool IsAnd) {
1948   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1949   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1950   if (LHS0->getType() != RHS0->getType())
1951     return nullptr;
1952 
1953   const DataLayout &DL = Q.DL;
1954   const TargetLibraryInfo *TLI = Q.TLI;
1955 
1956   FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1957   if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1958       (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1959     // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1960     // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1961     // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1962     // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1963     // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1964     // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1965     // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1966     // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1967     if (((LHS1 == RHS0 || LHS1 == RHS1) &&
1968          isKnownNeverNaN(LHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
1969         ((LHS0 == RHS0 || LHS0 == RHS1) &&
1970          isKnownNeverNaN(LHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
1971       return RHS;
1972 
1973     // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1974     // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1975     // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1976     // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1977     // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1978     // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1979     // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1980     // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1981     if (((RHS1 == LHS0 || RHS1 == LHS1) &&
1982          isKnownNeverNaN(RHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
1983         ((RHS0 == LHS0 || RHS0 == LHS1) &&
1984          isKnownNeverNaN(RHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
1985       return LHS;
1986   }
1987 
1988   return nullptr;
1989 }
1990 
1991 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
1992                                   Value *Op1, bool IsAnd) {
1993   // Look through casts of the 'and' operands to find compares.
1994   auto *Cast0 = dyn_cast<CastInst>(Op0);
1995   auto *Cast1 = dyn_cast<CastInst>(Op1);
1996   if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1997       Cast0->getSrcTy() == Cast1->getSrcTy()) {
1998     Op0 = Cast0->getOperand(0);
1999     Op1 = Cast1->getOperand(0);
2000   }
2001 
2002   Value *V = nullptr;
2003   auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
2004   auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
2005   if (ICmp0 && ICmp1)
2006     V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
2007               : simplifyOrOfICmps(ICmp0, ICmp1, Q);
2008 
2009   auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
2010   auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
2011   if (FCmp0 && FCmp1)
2012     V = simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);
2013 
2014   if (!V)
2015     return nullptr;
2016   if (!Cast0)
2017     return V;
2018 
2019   // If we looked through casts, we can only handle a constant simplification
2020   // because we are not allowed to create a cast instruction here.
2021   if (auto *C = dyn_cast<Constant>(V))
2022     return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
2023 
2024   return nullptr;
2025 }
2026 
2027 /// Given a bitwise logic op, check if the operands are add/sub with a common
2028 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
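     /// (Illustrative derivation: in two's complement ~a == -a - 1, so
     /// ~(X + ~C) == -(X + ~C) - 1 == -X + (C + 1) - 1 == C - X.)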
2029 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
2030                                     Instruction::BinaryOps Opcode) {
2031   assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
2032   assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
2033   Value *X;
2034   Constant *C1, *C2;
2035   if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
2036        match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
2037       (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
2038        match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
2039     if (ConstantExpr::getNot(C1) == C2) {
2040       // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
2041       // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
2042       // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
2043       Type *Ty = Op0->getType();
2044       return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2045                                         : ConstantInt::getAllOnesValue(Ty);
2046     }
2047   }
2048   return nullptr;
2049 }
2050 
2051 /// Given operands for an And, see if we can fold the result.
2052 /// If not, this returns null.
2053 static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2054                               unsigned MaxRecurse) {
2055   if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2056     return C;
2057 
2058   // X & poison -> poison
2059   if (isa<PoisonValue>(Op1))
2060     return Op1;
2061 
2062   // X & undef -> 0
2063   if (Q.isUndefValue(Op1))
2064     return Constant::getNullValue(Op0->getType());
2065 
2066   // X & X = X
2067   if (Op0 == Op1)
2068     return Op0;
2069 
2070   // X & 0 = 0
2071   if (match(Op1, m_Zero()))
2072     return Constant::getNullValue(Op0->getType());
2073 
2074   // X & -1 = X
2075   if (match(Op1, m_AllOnes()))
2076     return Op0;
2077 
2078   // A & ~A  =  ~A & A  =  0
2079   if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2080     return Constant::getNullValue(Op0->getType());
2081 
2082   // (A | ?) & A = A
2083   if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2084     return Op1;
2085 
2086   // A & (A | ?) = A
2087   if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
2088     return Op0;
2089 
2090   // (X | Y) & (X | ~Y) --> X (commuted 8 ways)
2091   Value *X, *Y;
2092   if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2093       match(Op1, m_c_Or(m_Deferred(X), m_Deferred(Y))))
2094     return X;
2095   if (match(Op1, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2096       match(Op0, m_c_Or(m_Deferred(X), m_Deferred(Y))))
2097     return X;
2098 
2099   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2100     return V;
2101 
2102   // A mask that only clears known zeros of a shifted value is a no-op.
2103   const APInt *Mask;
2104   const APInt *ShAmt;
2105   if (match(Op1, m_APInt(Mask))) {
2106     // If all bits in the inverted and shifted mask are clear:
2107     // and (shl X, ShAmt), Mask --> shl X, ShAmt
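         // For example (illustrative): "and (shl i8 %x, 4), 0xF0" is just the
         // shl, because the shift already cleared the low four bits.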
2108     if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2109         (~(*Mask)).lshr(*ShAmt).isZero())
2110       return Op0;
2111 
2112     // If all bits in the inverted and shifted mask are clear:
2113     // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2114     if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2115         (~(*Mask)).shl(*ShAmt).isZero())
2116       return Op0;
2117   }
2118 
2119   // If we have a multiplication overflow check that is being 'and'ed with a
2120   // check that one of the multipliers is not zero, we can omit the 'and', and
2121   // only keep the overflow check.
2122   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2123     return Op1;
2124   if (isCheckForZeroAndMulWithOverflow(Op1, Op0, true))
2125     return Op0;
2126 
2127   // A & (-A) = A if A is a power of two or zero.
2128   if (match(Op0, m_Neg(m_Specific(Op1))) ||
2129       match(Op1, m_Neg(m_Specific(Op0)))) {
2130     if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2131                                Q.DT))
2132       return Op0;
2133     if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2134                                Q.DT))
2135       return Op1;
2136   }
2137 
2138   // This is a similar pattern used for checking if a value is a power-of-2:
2139   // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2140   // A & (A - 1) --> 0 (if A is a power-of-2 or 0)
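       // For example (illustrative): A == 8 (0b1000) gives A - 1 == 0b0111 and
       // 0b1000 & 0b0111 == 0; A == 0 gives 0 & -1 == 0.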
2141   if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2142       isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2143     return Constant::getNullValue(Op1->getType());
2144   if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) &&
2145       isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2146     return Constant::getNullValue(Op0->getType());
2147 
2148   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2149     return V;
2150 
2151   // Try some generic simplifications for associative operations.
2152   if (Value *V =
2153           simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2154     return V;
2155 
2156   // And distributes over Or.  Try some generic simplifications based on this.
2157   if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2158                                         Instruction::Or, Q, MaxRecurse))
2159     return V;
2160 
2161   // And distributes over Xor.  Try some generic simplifications based on this.
2162   if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2163                                         Instruction::Xor, Q, MaxRecurse))
2164     return V;
2165 
2166   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2167     if (Op0->getType()->isIntOrIntVectorTy(1)) {
2168       // A & (A && B) -> A && B
2169       if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2170         return Op1;
2171       else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2172         return Op0;
2173     }
2174     // If the operation is with the result of a select instruction, check
2175     // whether operating on either branch of the select always yields the same
2176     // value.
2177     if (Value *V =
2178             threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2179       return V;
2180   }
2181 
2182   // If the operation is with the result of a phi instruction, check whether
2183   // operating on all incoming values of the phi always yields the same value.
2184   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2185     if (Value *V =
2186             threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2187       return V;
2188 
2189   // Assuming the effective width of Y is not larger than A, i.e. all bits
2190   // from X and Y are disjoint in (X << A) | Y,
2191   // if the mask of this AND op covers all bits of X or Y, while it covers
2192   // no bits from the other, we can bypass this AND op. E.g.,
2193   // ((X << A) | Y) & Mask -> Y,
2194   //     if Mask = ((1 << effective_width_of(Y)) - 1)
2195   // ((X << A) | Y) & Mask -> X << A,
2196   //     if Mask = ((1 << effective_width_of(X)) - 1) << A
2197   // SimplifyDemandedBits in InstCombine can optimize the general case.
2198   // This pattern aims to help other passes for a common case.
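       // For example (illustrative): for "((X << 16) | Y) & Mask" on i32 with
       // X and Y each known to fit in 16 bits (and the shl nuw), Mask ==
       // 0x0000FFFF extracts Y and Mask == 0xFFFF0000 extracts X << 16.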
2199   Value *XShifted;
2200   if (match(Op1, m_APInt(Mask)) &&
2201       match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2202                                      m_Value(XShifted)),
2203                         m_Value(Y)))) {
2204     const unsigned Width = Op0->getType()->getScalarSizeInBits();
2205     const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2206     const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2207     const unsigned EffWidthY = YKnown.countMaxActiveBits();
2208     if (EffWidthY <= ShftCnt) {
2209       const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2210       const unsigned EffWidthX = XKnown.countMaxActiveBits();
2211       const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2212       const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2213       // If the mask is extracting all bits from X or Y as is, we can skip
2214       // this AND op.
2215       if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2216         return Y;
2217       if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2218         return XShifted;
2219     }
2220   }
2221 
2222   // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2223   // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
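       // (Illustrative: (X | Y) ^ X == Y & ~X and (X | Y) ^ Y == X & ~Y, and
       // those two values are bitwise disjoint, so their 'and' is 0.)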
2224   BinaryOperator *Or;
2225   if (match(Op0, m_c_Xor(m_Value(X),
2226                          m_CombineAnd(m_BinOp(Or),
2227                                       m_c_Or(m_Deferred(X), m_Value(Y))))) &&
2228       match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
2229     return Constant::getNullValue(Op0->getType());
2230 
2231   if (Op0->getType()->isIntOrIntVectorTy(1)) {
2232     if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
2233       // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2234       if (*Implied)
2235         return Op0;
2236       // If Op0 is true implies Op1 is false, then they are not true together.
2237       if (!*Implied)
2238         return ConstantInt::getFalse(Op0->getType());
2239     }
2240     if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
2241       // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2242       if (*Implied)
2243         return Op1;
2244       // If Op1 is true implies Op0 is false, then they are not true together.
2245       if (!*Implied)
2246         return ConstantInt::getFalse(Op1->getType());
2247     }
2248   }
2249 
2250   if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2251     return V;
2252 
2253   return nullptr;
2254 }
2255 
2256 Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2257   return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
2258 }
2259 
2260 // TODO: Many of these folds could use LogicalAnd/LogicalOr.
2261 static Value *simplifyOrLogic(Value *X, Value *Y) {
2262   assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2263   Type *Ty = X->getType();
2264 
2265   // X | ~X --> -1
2266   if (match(Y, m_Not(m_Specific(X))))
2267     return ConstantInt::getAllOnesValue(Ty);
2268 
2269   // X | ~(X & ?) = -1
2270   if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2271     return ConstantInt::getAllOnesValue(Ty);
2272 
2273   // X | (X & ?) --> X
2274   if (match(Y, m_c_And(m_Specific(X), m_Value())))
2275     return X;
2276 
2277   Value *A, *B;
2278 
2279   // (A ^ B) | (A | B) --> A | B
2280   // (A ^ B) | (B | A) --> B | A
2281   if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
2282       match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2283     return Y;
2284 
2285   // ~(A ^ B) | (A | B) --> -1
2286   // ~(A ^ B) | (B | A) --> -1
2287   if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
2288       match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2289     return ConstantInt::getAllOnesValue(Ty);
2290 
2291   // (A & ~B) | (A ^ B) --> A ^ B
2292   // (~B & A) | (A ^ B) --> A ^ B
2293   // (A & ~B) | (B ^ A) --> B ^ A
2294   // (~B & A) | (B ^ A) --> B ^ A
2295   if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2296       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2297     return Y;
2298 
2299   // (~A ^ B) | (A & B) --> ~A ^ B
2300   // (B ^ ~A) | (A & B) --> B ^ ~A
2301   // (~A ^ B) | (B & A) --> ~A ^ B
2302   // (B ^ ~A) | (B & A) --> B ^ ~A
2303   if (match(X, m_c_Xor(m_NotForbidUndef(m_Value(A)), m_Value(B))) &&
2304       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2305     return X;
2306 
2307   // (~A | B) | (A ^ B) --> -1
2308   // (~A | B) | (B ^ A) --> -1
2309   // (B | ~A) | (A ^ B) --> -1
2310   // (B | ~A) | (B ^ A) --> -1
2311   if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2312       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2313     return ConstantInt::getAllOnesValue(Ty);
2314 
2315   // (~A & B) | ~(A | B) --> ~A
2316   // (~A & B) | ~(B | A) --> ~A
2317   // (B & ~A) | ~(A | B) --> ~A
2318   // (B & ~A) | ~(B | A) --> ~A
2319   Value *NotA;
2320   if (match(X,
2321             m_c_And(m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2322                     m_Value(B))) &&
2323       match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2324     return NotA;
2325   // The same is true of Logical And
2326   // TODO: This could share the logic of the version above if there was a
2327   // version of LogicalAnd that allowed more than just i1 types.
2328   if (match(X, m_c_LogicalAnd(
2329                    m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2330                    m_Value(B))) &&
2331       match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
2332     return NotA;
2333 
2334   // ~(A ^ B) | (A & B) --> ~(A ^ B)
2335   // ~(A ^ B) | (B & A) --> ~(A ^ B)
2336   Value *NotAB;
2337   if (match(X, m_CombineAnd(m_NotForbidUndef(m_Xor(m_Value(A), m_Value(B))),
2338                             m_Value(NotAB))) &&
2339       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2340     return NotAB;
2341 
2342   // ~(A & B) | (A ^ B) --> ~(A & B)
2343   // ~(A & B) | (B ^ A) --> ~(A & B)
2344   if (match(X, m_CombineAnd(m_NotForbidUndef(m_And(m_Value(A), m_Value(B))),
2345                             m_Value(NotAB))) &&
2346       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2347     return NotAB;
2348 
2349   return nullptr;
2350 }
2351 
2352 /// Given operands for an Or, see if we can fold the result.
2353 /// If not, this returns null.
2354 static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2355                              unsigned MaxRecurse) {
2356   if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2357     return C;
2358 
2359   // X | poison -> poison
2360   if (isa<PoisonValue>(Op1))
2361     return Op1;
2362 
2363   // X | undef -> -1
2364   // X | -1 = -1
2365   // Do not return Op1 because it may contain undef elements if it's a vector.
2366   if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2367     return Constant::getAllOnesValue(Op0->getType());
2368 
2369   // X | X = X
2370   // X | 0 = X
2371   if (Op0 == Op1 || match(Op1, m_Zero()))
2372     return Op0;
2373 
2374   if (Value *R = simplifyOrLogic(Op0, Op1))
2375     return R;
2376   if (Value *R = simplifyOrLogic(Op1, Op0))
2377     return R;
2378 
2379   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2380     return V;
2381 
2382   // Rotated -1 is still -1:
2383   // (-1 << X) | (-1 >> (C - X)) --> -1
2384   // (-1 >> X) | (-1 << (C - X)) --> -1
2385   // ...with C <= bitwidth (and commuted variants).
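       // For example (illustrative): on i32 with %y == 32 - %x, "-1 << %x"
       // covers bits [%x, 32) and "-1 u>> %y" covers bits [0, %x), so together
       // they yield -1.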
2386   Value *X, *Y;
2387   if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2388        match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2389       (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2390        match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2391     const APInt *C;
2392     if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2393          match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2394         C->ule(X->getType()->getScalarSizeInBits())) {
2395       return ConstantInt::getAllOnesValue(X->getType());
2396     }
2397   }
2398 
2399   // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2400   // are mixing in another shift that is redundant with the funnel shift.
2401 
2402   // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2403   // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2404   if (match(Op0,
2405             m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2406       match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
2407     return Op0;
2408   if (match(Op1,
2409             m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2410       match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
2411     return Op1;
2412 
2413   // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2414   // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2415   if (match(Op0,
2416             m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2417       match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
2418     return Op0;
2419   if (match(Op1,
2420             m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2421       match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
2422     return Op1;
2423 
2424   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2425     return V;
2426 
2427   // If we have a multiplication overflow check that is being 'or'ed with a
2428   // check that one of the multipliers is zero, we can omit the 'or', and
2429   // only keep the overflow check.
2430   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2431     return Op1;
2432   if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2433     return Op0;
2434 
2435   // Try some generic simplifications for associative operations.
2436   if (Value *V =
2437           simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2438     return V;
2439 
2440   // Or distributes over And.  Try some generic simplifications based on this.
2441   if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2442                                         Instruction::And, Q, MaxRecurse))
2443     return V;
2444 
2445   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2446     if (Op0->getType()->isIntOrIntVectorTy(1)) {
2447       // A | (A || B) -> A || B
2448       if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2449         return Op1;
2450       else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2451         return Op0;
2452     }
2453     // If the operation is with the result of a select instruction, check
2454     // whether operating on either branch of the select always yields the same
2455     // value.
2456     if (Value *V =
2457             threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2458       return V;
2459   }
2460 
2461   // (A & C1)|(B & C2)
2462   Value *A, *B;
2463   const APInt *C1, *C2;
2464   if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2465       match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2466     if (*C1 == ~*C2) {
2467       // (A & C1)|(B & C2)
2468       // If we have: ((V + N) & C1) | (V & C2)
2469       // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2470       // replace with V+N.
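           // For example (illustrative): with C1 == 0xF0, C2 == 0x0F and
           // N & 0x0F == 0, adding N cannot change the low nibble of V, so
           // ((V + N) & 0xF0) | (V & 0x0F) reassembles exactly V + N.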
2471       Value *N;
2472       if (C2->isMask() && // C2 == 0+1+
2473           match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2474         // Add commutes, try both ways.
2475         if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2476           return A;
2477       }
2478       // Or commutes, try both ways.
2479       if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2480         // Add commutes, try both ways.
2481         if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2482           return B;
2483       }
2484     }
2485   }
2486 
2487   // If the operation is with the result of a phi instruction, check whether
2488   // operating on all incoming values of the phi always yields the same value.
2489   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2490     if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2491       return V;
2492 
2493   if (Op0->getType()->isIntOrIntVectorTy(1)) {
2494     if (std::optional<bool> Implied =
2495             isImpliedCondition(Op0, Op1, Q.DL, false)) {
2496       // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2497       if (*Implied == false)
2498         return Op0;
2499       // If Op0 is false implies Op1 is true, then at least one is always true.
2500       if (*Implied == true)
2501         return ConstantInt::getTrue(Op0->getType());
2502     }
2503     if (std::optional<bool> Implied =
2504             isImpliedCondition(Op1, Op0, Q.DL, false)) {
2505       // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2506       if (*Implied == false)
2507         return Op1;
2508       // If Op1 is false implies Op0 is true, then at least one is always true.
2509       if (*Implied == true)
2510         return ConstantInt::getTrue(Op1->getType());
2511     }
2512   }
2513 
2514   if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2515     return V;
2516 
2517   return nullptr;
2518 }
2519 
2520 Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2521   return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
2522 }
2523 
2524 /// Given operands for a Xor, see if we can fold the result.
2525 /// If not, this returns null.
2526 static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2527                               unsigned MaxRecurse) {
2528   if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2529     return C;
2530 
2531   // X ^ poison -> poison
2532   if (isa<PoisonValue>(Op1))
2533     return Op1;
2534 
2535   // A ^ undef -> undef
2536   if (Q.isUndefValue(Op1))
2537     return Op1;
2538 
2539   // A ^ 0 = A
2540   if (match(Op1, m_Zero()))
2541     return Op0;
2542 
2543   // A ^ A = 0
2544   if (Op0 == Op1)
2545     return Constant::getNullValue(Op0->getType());
2546 
2547   // A ^ ~A  =  ~A ^ A  =  -1
2548   if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2549     return Constant::getAllOnesValue(Op0->getType());
2550 
2551   auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2552     Value *A, *B;
2553     // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
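         // (Illustrative: where A is set the two terms are 0 and 1; where A is
         // clear both terms equal B and cancel. The xor therefore equals A.)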
2554     if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2555         match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2556       return A;
2557 
2558     // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2559     // The 'not' op must contain a complete -1 operand (no undef elements for
2560     // vector) for the transform to be safe.
2561     Value *NotA;
2562     if (match(X,
2563               m_c_Or(m_CombineAnd(m_NotForbidUndef(m_Value(A)), m_Value(NotA)),
2564                      m_Value(B))) &&
2565         match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2566       return NotA;
2567 
2568     return nullptr;
2569   };
2570   if (Value *R = foldAndOrNot(Op0, Op1))
2571     return R;
2572   if (Value *R = foldAndOrNot(Op1, Op0))
2573     return R;
2574 
2575   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2576     return V;
2577 
2578   // Try some generic simplifications for associative operations.
2579   if (Value *V =
2580           simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2581     return V;
2582 
2583   // Threading Xor over selects and phi nodes is pointless, so don't bother.
2584   // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2585   // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2586   // only if B and C are equal.  If B and C are equal then (since we assume
2587   // that operands have already been simplified) "select(cond, B, C)" should
2588   // have been simplified to the common value of B and C already.  Analyzing
2589   // "A^B" and "A^C" thus gains nothing, but costs compile time.  Similarly
2590   // for threading over phi nodes.
2591 
2592   if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2593     return V;
2594 
2595   return nullptr;
2596 }
2597 
2598 Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2599   return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
2600 }
2601 
2602 static Type *getCompareTy(Value *Op) {
2603   return CmpInst::makeCmpResultType(Op->getType());
2604 }
2605 
2606 /// Rummage around inside V looking for something equivalent to the comparison
2607 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2608 /// Helper function for analyzing max/min idioms.
2609 static Value *extractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2610                                          Value *LHS, Value *RHS) {
2611   SelectInst *SI = dyn_cast<SelectInst>(V);
2612   if (!SI)
2613     return nullptr;
2614   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2615   if (!Cmp)
2616     return nullptr;
2617   Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2618   if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2619     return Cmp;
2620   if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2621       LHS == CmpRHS && RHS == CmpLHS)
2622     return Cmp;
2623   return nullptr;
2624 }
2625 
2626 /// Return true if the underlying object (storage) must be disjoint from
2627 /// storage returned by any noalias return call.
2628 static bool isAllocDisjoint(const Value *V) {
2629   // For allocas, we consider only static ones (dynamic
2630   // allocas might be transformed into calls to malloc that are not simultaneously
2631   // live with the compared-to allocation). For globals, we exclude symbols
2632   // that might be resolved lazily to symbols in another dynamically-loaded
2633   // library (and, thus, could be malloc'ed by the implementation).
2634   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2635     return AI->isStaticAlloca();
2636   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2637     return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2638             GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2639            !GV->isThreadLocal();
2640   if (const Argument *A = dyn_cast<Argument>(V))
2641     return A->hasByValAttr();
2642   return false;
2643 }
2644 
2645 /// Return true if V1 and V2 are each the base of some distinct storage region
2646 /// [V, object_size(V)] which do not overlap.  Note that zero-sized regions
2647 /// *are* possible, and that zero-sized regions do not overlap with any other.
2648 static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
2649   // Global variables always exist, so they always exist during the lifetime
2650   // of each other and all allocas.  Global variables themselves usually have
2651   // non-overlapping storage, but since their addresses are constants, the
2652   // case involving two globals does not reach here and is instead handled in
2653   // constant folding.
2654   //
2655   // Two different allocas usually have different addresses...
2656   //
2657   // However, if there's an @llvm.stackrestore dynamically in between two
2658   // allocas, they may have the same address. It's tempting to reduce the
2659   // scope of the problem by only looking at *static* allocas here. That would
2660   // cover the majority of allocas while significantly reducing the likelihood
2661   // of having an @llvm.stackrestore pop up in the middle. However, it's not
2662   // actually impossible for an @llvm.stackrestore to pop up in the middle of
2663   // an entry block. Also, if we have a block that's not attached to a
2664   // function, we can't tell if it's "static" under the current definition.
2665   // Theoretically, this problem could be fixed by creating a new
2666   // instruction kind specifically for static allocas. Such a new instruction
2667   // could be required to be at the top of the entry block, thus preventing it
2668   // from being subject to a @llvm.stackrestore. Instcombine could even
2669   // convert regular allocas into these special allocas. It'd be nifty.
2670   // However, until then, this problem remains open.
2671   //
2672   // So, we'll assume that two non-empty allocas have different addresses
2673   // for now.
2674   auto isByValArg = [](const Value *V) {
2675     const Argument *A = dyn_cast<Argument>(V);
2676     return A && A->hasByValAttr();
2677   };
2678 
2679   // Byval args are backed by storage that does not overlap with other byval
2680   // args, allocas, or globals.
2681   if (isByValArg(V1))
2682     return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2683   if (isByValArg(V2))
2684     return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2685 
2686   return isa<AllocaInst>(V1) &&
2687          (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2688 }
2689 
2690 // A significant optimization not implemented here is assuming that alloca
2691 // addresses are not equal to incoming argument values. They don't *alias*,
2692 // as we say, but that doesn't mean they aren't equal, so we take a
2693 // conservative approach.
2694 //
2695 // This is inspired in part by C++11 5.10p1:
2696 //   "Two pointers of the same type compare equal if and only if they are both
2697 //    null, both point to the same function, or both represent the same
2698 //    address."
2699 //
2700 // This is pretty permissive.
2701 //
2702 // It's also partly due to C11 6.5.9p6:
2703 //   "Two pointers compare equal if and only if both are null pointers, both are
2704 //    pointers to the same object (including a pointer to an object and a
2705 //    subobject at its beginning) or function, both are pointers to one past the
2706 //    last element of the same array object, or one is a pointer to one past the
2707 //    end of one array object and the other is a pointer to the start of a
2708 //    different array object that happens to immediately follow the first array
2709 //    object in the address space."
2710 //
2711 // C11's version is more restrictive; however, there's no reason why an argument
2712 // couldn't be a one-past-the-end value for a stack object in the caller and be
2713 // equal to the beginning of a stack object in the callee.
2714 //
2715 // If the C and C++ standards are ever made sufficiently restrictive in this
2716 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2717 // this optimization.
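//
// For example (illustrative): given
//   %a = alloca i32
//   %c = icmp eq ptr %a, %arg
// we do not fold %c to false: %arg cannot be *based on* %a, but it could
// still hold the same address (e.g. a one-past-the-end pointer produced by
// the caller).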
2718 static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
2719                                     Value *RHS, const SimplifyQuery &Q) {
2720   assert(LHS->getType() == RHS->getType() && "Must have same types");
2721   const DataLayout &DL = Q.DL;
2722   const TargetLibraryInfo *TLI = Q.TLI;
2723   const DominatorTree *DT = Q.DT;
2724   const Instruction *CxtI = Q.CxtI;
2725   const InstrInfoQuery &IIQ = Q.IIQ;
2726 
2727   // A non-null pointer is not equal to a null pointer.
2728   if (isa<ConstantPointerNull>(RHS) && ICmpInst::isEquality(Pred) &&
2729       llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2730                            IIQ.UseInstrInfo))
2731     return ConstantInt::get(getCompareTy(LHS), !CmpInst::isTrueWhenEqual(Pred));
2732 
2733   // We can only fold certain predicates on pointer comparisons.
2734   switch (Pred) {
2735   default:
2736     return nullptr;
2737 
2738     // Equality comparisons are easy to fold.
2739   case CmpInst::ICMP_EQ:
2740   case CmpInst::ICMP_NE:
2741     break;
2742 
2743     // We can only handle unsigned relational comparisons because 'inbounds' on
2744     // a GEP only protects against unsigned wrapping.
2745   case CmpInst::ICMP_UGT:
2746   case CmpInst::ICMP_UGE:
2747   case CmpInst::ICMP_ULT:
2748   case CmpInst::ICMP_ULE:
2749     // However, we have to switch them to their signed variants to handle
2750     // negative indices from the base pointer.
2751     Pred = ICmpInst::getSignedPredicate(Pred);
2752     break;
2753   }
2754 
2755   // Strip off any constant offsets so that we can reason about them.
2756   // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2757   // here and compare base addresses like AliasAnalysis does, however there are
2758   // numerous hazards. AliasAnalysis and its utilities rely on special rules
2759   // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2760   // doesn't need to guarantee pointer inequality when it says NoAlias.
2761 
2762   // Even if a non-inbounds GEP occurs along the path we can still optimize
2763   // equality comparisons concerning the result.
2764   bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2765   unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2766   APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2767   LHS = LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
2768   RHS = RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);
2769 
2770   // If LHS and RHS are related via constant offsets to the same base
2771   // value, we can replace it with an icmp which just compares the offsets.
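  // For example (illustrative):
  //   %p = getelementptr inbounds i32, ptr %b, i64 1   ; offset 4
  //   %q = getelementptr inbounds i32, ptr %b, i64 2   ; offset 8
  //   icmp ult ptr %p, %q  -->  true  (the offsets compare as 4 <s 8)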
2772   if (LHS == RHS)
2773     return ConstantInt::get(getCompareTy(LHS),
2774                             ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2775 
2776   // Various optimizations for (in)equality comparisons.
2777   if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2778     // Different non-empty allocations that exist at the same time have
2779     // different addresses (if the program can tell). If the offsets are
2780     // within the bounds of their allocations (and not one-past-the-end!
2781     // so we can't use inbounds!), and their allocations aren't the same,
2782     // the pointers are not equal.
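    // For example (illustrative): with %a = alloca [4 x i8] and
    // %b = alloca [4 x i8], "(gep %a, 2) eq (gep %b, 1)" folds to false:
    // the offset distance (1) is within %a's 4-byte size, so the two
    // pointers cannot be equal.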
2783     if (haveNonOverlappingStorage(LHS, RHS)) {
2784       uint64_t LHSSize, RHSSize;
2785       ObjectSizeOpts Opts;
2786       Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2787       auto *F = [](Value *V) -> Function * {
2788         if (auto *I = dyn_cast<Instruction>(V))
2789           return I->getFunction();
2790         if (auto *A = dyn_cast<Argument>(V))
2791           return A->getParent();
2792         return nullptr;
2793       }(LHS);
2794       Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2795       if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2796           getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2797         APInt Dist = LHSOffset - RHSOffset;
2798         if (Dist.isNonNegative() ? Dist.ult(LHSSize) : (-Dist).ult(RHSSize))
2799           return ConstantInt::get(getCompareTy(LHS),
2800                                   !CmpInst::isTrueWhenEqual(Pred));
2801       }
2802     }
2803 
2804     // If one side of the equality comparison must come from a noalias call
2805     // (meaning a system memory allocation function), and the other side must
2806     // come from a pointer that cannot overlap with dynamically-allocated
2807     // memory within the lifetime of the current function (allocas, byval
2808     // arguments, globals), then determine the comparison result here.
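    // For example (illustrative): "icmp eq (ptr returned by malloc), @g"
    // folds to false when @g has internal linkage, since the heap
    // allocation cannot occupy @g's storage.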
2809     SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2810     getUnderlyingObjects(LHS, LHSUObjs);
2811     getUnderlyingObjects(RHS, RHSUObjs);
2812 
2813     // Is the set of underlying objects all noalias calls?
2814     auto IsNAC = [](ArrayRef<const Value *> Objects) {
2815       return all_of(Objects, isNoAliasCall);
2816     };
2817 
2818     // Is the set of underlying objects all things which must be disjoint from
2819     // noalias calls?  We assume that indexing from such disjoint storage
2820     // into the heap is undefined, and thus offsets can be safely ignored.
2821     auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2822       return all_of(Objects, ::isAllocDisjoint);
2823     };
2824 
2825     if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2826         (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2827       return ConstantInt::get(getCompareTy(LHS),
2828                               !CmpInst::isTrueWhenEqual(Pred));
2829 
2830     // Fold comparisons for non-escaping pointer even if the allocation call
2831     // cannot be elided. We cannot fold malloc comparison to null. Also, the
2832     // dynamic allocation call could be either of the operands.  Note that
2833     // the other operand can not be based on the alloc - if it were, then
2834     // the cmp itself would be a capture.
2835     Value *MI = nullptr;
2836     if (isAllocLikeFn(LHS, TLI) &&
2837         llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2838       MI = LHS;
2839     else if (isAllocLikeFn(RHS, TLI) &&
2840              llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2841       MI = RHS;
2842     if (MI) {
2843       // FIXME: This is incorrect, see PR54002. While we can assume that the
2844       // allocation is at an address that makes the comparison false, this
2845       // requires that *all* comparisons to that address be false, which
2846       // InstSimplify cannot guarantee.
2847       struct CustomCaptureTracker : public CaptureTracker {
2848         bool Captured = false;
2849         void tooManyUses() override { Captured = true; }
2850         bool captured(const Use *U) override {
2851           if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2852             // Comparison against a value loaded from a global variable.
2853             // Given that the pointer does not escape, its value cannot be
2854             // guessed and stored separately in a global variable.
2855             unsigned OtherIdx = 1 - U->getOperandNo();
2856             auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2857             if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2858               return false;
2859           }
2860 
2861           Captured = true;
2862           return true;
2863         }
2864       };
2865       CustomCaptureTracker Tracker;
2866       PointerMayBeCaptured(MI, &Tracker);
2867       if (!Tracker.Captured)
2868         return ConstantInt::get(getCompareTy(LHS),
2869                                 CmpInst::isFalseWhenEqual(Pred));
2870     }
2871   }
2872 
2873   // Otherwise, fail.
2874   return nullptr;
2875 }
2876 
2877 /// Fold an icmp when its operands have i1 scalar type.
2878 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2879                                   Value *RHS, const SimplifyQuery &Q) {
2880   Type *ITy = getCompareTy(LHS); // The return type.
2881   Type *OpTy = LHS->getType();   // The operand type.
2882   if (!OpTy->isIntOrIntVectorTy(1))
2883     return nullptr;
2884 
2885   // A boolean compared to true/false can be reduced in 14 out of the 20
2886   // (10 predicates * 2 constants) possible combinations. The other
2887   // 6 cases require a 'not' of the LHS.
2888 
2889   auto ExtractNotLHS = [](Value *V) -> Value * {
2890     Value *X;
2891     if (match(V, m_Not(m_Value(X))))
2892       return X;
2893     return nullptr;
2894   };
2895 
2896   if (match(RHS, m_Zero())) {
2897     switch (Pred) {
2898     case CmpInst::ICMP_NE:  // X !=  0 -> X
2899     case CmpInst::ICMP_UGT: // X >u  0 -> X
2900     case CmpInst::ICMP_SLT: // X <s  0 -> X
2901       return LHS;
2902 
2903     case CmpInst::ICMP_EQ:  // not(X) ==  0 -> X != 0 -> X
2904     case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2905     case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2906       if (Value *X = ExtractNotLHS(LHS))
2907         return X;
2908       break;
2909 
2910     case CmpInst::ICMP_ULT: // X <u  0 -> false
2911     case CmpInst::ICMP_SGT: // X >s  0 -> false
2912       return getFalse(ITy);
2913 
2914     case CmpInst::ICMP_UGE: // X >=u 0 -> true
2915     case CmpInst::ICMP_SLE: // X <=s 0 -> true
2916       return getTrue(ITy);
2917 
2918     default:
2919       break;
2920     }
2921   } else if (match(RHS, m_One())) {
2922     switch (Pred) {
2923     case CmpInst::ICMP_EQ:  // X ==   1 -> X
2924     case CmpInst::ICMP_UGE: // X >=u  1 -> X
2925     case CmpInst::ICMP_SLE: // X <=s -1 -> X
2926       return LHS;
2927 
2928     case CmpInst::ICMP_NE:  // not(X) !=  1 -> X ==   1 -> X
2929     case CmpInst::ICMP_ULT: // not(X) <u  1 -> X >=u  1 -> X
2930     case CmpInst::ICMP_SGT: // not(X) >s -1 -> X <=s -1 -> X
2931       if (Value *X = ExtractNotLHS(LHS))
2932         return X;
2933       break;
2934 
2935     case CmpInst::ICMP_UGT: // X >u   1 -> false
2936     case CmpInst::ICMP_SLT: // X <s  -1 -> false
2937       return getFalse(ITy);
2938 
2939     case CmpInst::ICMP_ULE: // X <=u  1 -> true
2940     case CmpInst::ICMP_SGE: // X >=s -1 -> true
2941       return getTrue(ITy);
2942 
2943     default:
2944       break;
2945     }
2946   }
2947 
2948   switch (Pred) {
2949   default:
2950     break;
2951   case ICmpInst::ICMP_UGE:
2952     if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2953       return getTrue(ITy);
2954     break;
2955   case ICmpInst::ICMP_SGE:
2956     /// For signed comparison, the values for an i1 are 0 and -1
2957     /// respectively. This maps into a truth table of:
2958     /// LHS | RHS | LHS >=s RHS   | LHS implies RHS
2959     ///  0  |  0  |  1 (0 >= 0)   |  1
2960     ///  0  |  1  |  1 (0 >= -1)  |  1
2961     ///  1  |  0  |  0 (-1 >= 0)  |  0
2962     ///  1  |  1  |  1 (-1 >= -1) |  1
2963     if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2964       return getTrue(ITy);
2965     break;
2966   case ICmpInst::ICMP_ULE:
2967     if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2968       return getTrue(ITy);
2969     break;
2970   case ICmpInst::ICMP_SLE:
2971     /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2972     if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2973       return getTrue(ITy);
2974     break;
2975   }
2976 
2977   return nullptr;
2978 }
2979 
2980 /// Try hard to fold icmp with zero RHS because this is a common case.
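/// For example (illustrative): "icmp ult %x, 0" is always false and
/// "icmp uge %x, 0" is always true; the signed cases additionally use
/// known bits of the LHS.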
2981 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2982                                    Value *RHS, const SimplifyQuery &Q) {
2983   if (!match(RHS, m_Zero()))
2984     return nullptr;
2985 
2986   Type *ITy = getCompareTy(LHS); // The return type.
2987   switch (Pred) {
2988   default:
2989     llvm_unreachable("Unknown ICmp predicate!");
2990   case ICmpInst::ICMP_ULT:
2991     return getFalse(ITy);
2992   case ICmpInst::ICMP_UGE:
2993     return getTrue(ITy);
2994   case ICmpInst::ICMP_EQ:
2995   case ICmpInst::ICMP_ULE:
2996     if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2997       return getFalse(ITy);
2998     break;
2999   case ICmpInst::ICMP_NE:
3000   case ICmpInst::ICMP_UGT:
3001     if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
3002       return getTrue(ITy);
3003     break;
3004   case ICmpInst::ICMP_SLT: {
3005     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3006     if (LHSKnown.isNegative())
3007       return getTrue(ITy);
3008     if (LHSKnown.isNonNegative())
3009       return getFalse(ITy);
3010     break;
3011   }
3012   case ICmpInst::ICMP_SLE: {
3013     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3014     if (LHSKnown.isNegative())
3015       return getTrue(ITy);
3016     if (LHSKnown.isNonNegative() &&
3017         isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
3018       return getFalse(ITy);
3019     break;
3020   }
3021   case ICmpInst::ICMP_SGE: {
3022     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3023     if (LHSKnown.isNegative())
3024       return getFalse(ITy);
3025     if (LHSKnown.isNonNegative())
3026       return getTrue(ITy);
3027     break;
3028   }
3029   case ICmpInst::ICMP_SGT: {
3030     KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3031     if (LHSKnown.isNegative())
3032       return getFalse(ITy);
3033     if (LHSKnown.isNonNegative() &&
3034         isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
3035       return getTrue(ITy);
3036     break;
3037   }
3038   }
3039 
3040   return nullptr;
3041 }
3042 
3043 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
3044                                        Value *RHS, const InstrInfoQuery &IIQ) {
3045   Type *ITy = getCompareTy(RHS); // The return type.
3046 
3047   Value *X;
3048   // Sign-bit checks can be optimized to true/false after unsigned
3049   // floating-point casts:
3050   // icmp slt (bitcast (uitofp X)),  0 --> false
3051   // icmp sgt (bitcast (uitofp X)), -1 --> true
3052   if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
3053     if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
3054       return ConstantInt::getFalse(ITy);
3055     if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
3056       return ConstantInt::getTrue(ITy);
3057   }
3058 
3059   const APInt *C;
3060   if (!match(RHS, m_APIntAllowUndef(C)))
3061     return nullptr;
3062 
3063   // Rule out tautological comparisons (e.g., ult 0 or uge 0).
3064   ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
3065   if (RHS_CR.isEmptySet())
3066     return ConstantInt::getFalse(ITy);
3067   if (RHS_CR.isFullSet())
3068     return ConstantInt::getTrue(ITy);
3069 
3070   ConstantRange LHS_CR =
3071       computeConstantRange(LHS, CmpInst::isSigned(Pred), IIQ.UseInstrInfo);
3072   if (!LHS_CR.isFullSet()) {
3073     if (RHS_CR.contains(LHS_CR))
3074       return ConstantInt::getTrue(ITy);
3075     if (RHS_CR.inverse().contains(LHS_CR))
3076       return ConstantInt::getFalse(ITy);
3077   }
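  // For example (illustrative): if LHS is "and i8 %y, 7", its range is
  // [0, 8), so "icmp ult LHS, 16" folds to true and "icmp ugt LHS, 7"
  // folds to false.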
3078 
3079   // (mul nuw/nsw X, MulC) != C --> true  (if C is not a multiple of MulC)
3080   // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
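  // For example (illustrative): "(mul nuw i8 %x, 4) == 6" folds to false,
  // because 6 urem 4 != 0.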
3081   const APInt *MulC;
3082   if (ICmpInst::isEquality(Pred) &&
3083       ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
3084         *MulC != 0 && C->urem(*MulC) != 0) ||
3085        (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
3086         *MulC != 0 && C->srem(*MulC) != 0)))
3087     return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3088 
3089   return nullptr;
3090 }
3091 
3092 static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
3093                                          BinaryOperator *LBO, Value *RHS,
3094                                          const SimplifyQuery &Q,
3095                                          unsigned MaxRecurse) {
3096   Type *ITy = getCompareTy(RHS); // The return type.
3097 
3098   Value *Y = nullptr;
3099   // icmp pred (or X, Y), X
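  // For example (illustrative): "(or i8 %x, %y) ult %x" folds to false;
  // or'ing bits into %x can never decrease its unsigned value.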
3100   if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
3101     if (Pred == ICmpInst::ICMP_ULT)
3102       return getFalse(ITy);
3103     if (Pred == ICmpInst::ICMP_UGE)
3104       return getTrue(ITy);
3105 
3106     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3107       KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3108       KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3109       if (RHSKnown.isNonNegative() && YKnown.isNegative())
3110         return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
3111       if (RHSKnown.isNegative() || YKnown.isNonNegative())
3112         return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
3113     }
3114   }
3115 
3116   // icmp pred (and X, Y), X
3117   if (match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
3118     if (Pred == ICmpInst::ICMP_UGT)
3119       return getFalse(ITy);
3120     if (Pred == ICmpInst::ICMP_ULE)
3121       return getTrue(ITy);
3122   }
3123 
3124   // icmp pred (urem X, Y), Y
3125   if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
3126     switch (Pred) {
3127     default:
3128       break;
3129     case ICmpInst::ICMP_SGT:
3130     case ICmpInst::ICMP_SGE: {
3131       KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3132       if (!Known.isNonNegative())
3133         break;
3134       [[fallthrough]];
3135     }
3136     case ICmpInst::ICMP_EQ:
3137     case ICmpInst::ICMP_UGT:
3138     case ICmpInst::ICMP_UGE:
3139       return getFalse(ITy);
3140     case ICmpInst::ICMP_SLT:
3141     case ICmpInst::ICMP_SLE: {
3142       KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3143       if (!Known.isNonNegative())
3144         break;
3145       [[fallthrough]];
3146     }
3147     case ICmpInst::ICMP_NE:
3148     case ICmpInst::ICMP_ULT:
3149     case ICmpInst::ICMP_ULE:
3150       return getTrue(ITy);
3151     }
3152   }
3153 
3154   // icmp pred (urem X, Y), X
3155   if (match(LBO, m_URem(m_Specific(RHS), m_Value()))) {
3156     if (Pred == ICmpInst::ICMP_ULE)
3157       return getTrue(ITy);
3158     if (Pred == ICmpInst::ICMP_UGT)
3159       return getFalse(ITy);
3160   }
3161 
3162   // x >>u y <=u x --> true.
3163   // x >>u y >u  x --> false.
3164   // x udiv y <=u x --> true.
3165   // x udiv y >u  x --> false.
3166   if (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
3167       match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
3168     // icmp pred (X op Y), X
3169     if (Pred == ICmpInst::ICMP_UGT)
3170       return getFalse(ITy);
3171     if (Pred == ICmpInst::ICMP_ULE)
3172       return getTrue(ITy);
3173   }
3174 
3175   // If x is nonzero:
3176   // x >>u C <u  x --> true  for C != 0.
3177   // x >>u C !=  x --> true  for C != 0.
3178   // x >>u C >=u x --> false for C != 0.
3179   // x >>u C ==  x --> false for C != 0.
3180   // x udiv C <u  x --> true  for C != 1.
3181   // x udiv C !=  x --> true  for C != 1.
3182   // x udiv C >=u x --> false for C != 1.
3183   // x udiv C ==  x --> false for C != 1.
3184   // TODO: allow non-constant shift amount/divisor
3185   const APInt *C;
3186   if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3187       (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3188     if (isKnownNonZero(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) {
3189       switch (Pred) {
3190       default:
3191         break;
3192       case ICmpInst::ICMP_EQ:
3193       case ICmpInst::ICMP_UGE:
3194         return getFalse(ITy);
3195       case ICmpInst::ICMP_NE:
3196       case ICmpInst::ICMP_ULT:
3197         return getTrue(ITy);
3198       case ICmpInst::ICMP_UGT:
3199       case ICmpInst::ICMP_ULE:
3200         // UGT/ULE are handled by the more general case just above
3201         llvm_unreachable("Unexpected UGT/ULE, should have been handled");
3202       }
3203     }
3204   }
3205 
3206   // (x*C1)/C2 <= x for C1 <= C2.
3207   // This holds even if the multiplication overflows: Assume that x != 0 and
3208   // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3209   // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3210   //
3211   // Additionally, either the multiplication or the division might be
3212   // represented as a shift:
3213   // (x*C1)>>C2 <= x for C1 <= 2**C2.
3214   // (x<<C1)/C2 <= x for 2**C1 <= C2.
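  // For example (illustrative): "((mul i8 %x, 3) udiv 5) ugt %x" folds to
  // false, since C1 = 3 <= C2 = 5 bounds the quotient by x.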
3215   const APInt *C1, *C2;
3216   if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3217        C1->ule(*C2)) ||
3218       (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3219        C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3220       (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3221        (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3222     if (Pred == ICmpInst::ICMP_UGT)
3223       return getFalse(ITy);
3224     if (Pred == ICmpInst::ICMP_ULE)
3225       return getTrue(ITy);
3226   }
3227 
3228   // (sub C, X) == X, C is odd  --> false
3229   // (sub C, X) != X, C is odd  --> true
3230   if (match(LBO, m_Sub(m_APIntAllowUndef(C), m_Specific(RHS))) &&
3231       (*C & 1) == 1 && ICmpInst::isEquality(Pred))
3232     return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
3233 
3234   return nullptr;
3235 }
3236 
3237 // If only one of the icmp's operands has NSW flags, try to prove that:
3238 //
3239 //   icmp slt (x + C1), (x +nsw C2)
3240 //
3241 // is equivalent to:
3242 //
3243 //   icmp slt C1, C2
3244 //
3245 // which is true if x + C2 has the NSW flags set and:
3246 // *) C1 < C2 && C1 >= 0, or
3247 // *) C2 < C1 && C1 <= 0.
3248 //
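// For example (illustrative): "icmp slt (add i8 %x, 1), (add nsw i8 %x, 2)"
// folds to true, since C1 = 1 < C2 = 2 and C1 >= 0.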
3249 static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
3250                                     Value *RHS) {
3251   // TODO: only support icmp slt for now.
3252   if (Pred != CmpInst::ICMP_SLT)
3253     return false;
3254 
3255   // Canonicalize nsw add as RHS.
3256   if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3257     std::swap(LHS, RHS);
3258   if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3259     return false;
3260 
3261   Value *X;
3262   const APInt *C1, *C2;
3263   if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
3264       !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
3265     return false;
3266 
3267   return (C1->slt(*C2) && C1->isNonNegative()) ||
3268          (C2->slt(*C1) && C1->isNonPositive());
3269 }
3270 
3271 /// TODO: A large part of this logic is duplicated in InstCombine's
3272 /// foldICmpBinOp(). We should be able to share that and avoid the code
3273 /// duplication.
3274 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
3275                                     Value *RHS, const SimplifyQuery &Q,
3276                                     unsigned MaxRecurse) {
3277   BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3278   BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3279   if (MaxRecurse && (LBO || RBO)) {
3280     // Analyze the case when either LHS or RHS is an add instruction.
3281     Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3282     // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3283     bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3284     if (LBO && LBO->getOpcode() == Instruction::Add) {
3285       A = LBO->getOperand(0);
3286       B = LBO->getOperand(1);
3287       NoLHSWrapProblem =
3288           ICmpInst::isEquality(Pred) ||
3289           (CmpInst::isUnsigned(Pred) &&
3290            Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3291           (CmpInst::isSigned(Pred) &&
3292            Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3293     }
3294     if (RBO && RBO->getOpcode() == Instruction::Add) {
3295       C = RBO->getOperand(0);
3296       D = RBO->getOperand(1);
3297       NoRHSWrapProblem =
3298           ICmpInst::isEquality(Pred) ||
3299           (CmpInst::isUnsigned(Pred) &&
3300            Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3301           (CmpInst::isSigned(Pred) &&
3302            Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3303     }
3304 
3305     // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3306     if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3307       if (Value *V = simplifyICmpInst(Pred, A == RHS ? B : A,
3308                                       Constant::getNullValue(RHS->getType()), Q,
3309                                       MaxRecurse - 1))
3310         return V;
3311 
3312     // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3313     if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3314       if (Value *V =
3315               simplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3316                                C == LHS ? D : C, Q, MaxRecurse - 1))
3317         return V;
3318 
3319     // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3320     bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3321                        trySimplifyICmpWithAdds(Pred, LHS, RHS);
3322     if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3323       // Determine Y and Z in the form icmp (X+Y), (X+Z).
3324       Value *Y, *Z;
3325       if (A == C) {
3326         // C + B == C + D  ->  B == D
3327         Y = B;
3328         Z = D;
3329       } else if (A == D) {
3330         // D + B == C + D  ->  B == C
3331         Y = B;
3332         Z = C;
3333       } else if (B == C) {
3334         // A + C == C + D  ->  A == D
3335         Y = A;
3336         Z = D;
3337       } else {
3338         assert(B == D);
3339         // A + D == C + D  ->  A == C
3340         Y = A;
3341         Z = C;
3342       }
3343       if (Value *V = simplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3344         return V;
3345     }
3346   }
3347 
3348   if (LBO)
3349     if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3350       return V;
3351 
3352   if (RBO)
3353     if (Value *V = simplifyICmpWithBinOpOnLHS(
3354             ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3355       return V;
3356 
3357   // 0 - (zext X) pred C
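  // For example (illustrative): "(sub i32 0, (zext i1 %b to i32)) slt 1"
  // folds to true: the negated zext is never positive, so it is always
  // less than any strictly positive constant.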
3358   if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3359     const APInt *C;
3360     if (match(RHS, m_APInt(C))) {
3361       if (C->isStrictlyPositive()) {
3362         if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3363           return ConstantInt::getTrue(getCompareTy(RHS));
3364         if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3365           return ConstantInt::getFalse(getCompareTy(RHS));
3366       }
3367       if (C->isNonNegative()) {
3368         if (Pred == ICmpInst::ICMP_SLE)
3369           return ConstantInt::getTrue(getCompareTy(RHS));
3370         if (Pred == ICmpInst::ICMP_SGT)
3371           return ConstantInt::getFalse(getCompareTy(RHS));
3372       }
3373     }
3374   }
3375 
3376   //   If C2 is a power-of-2 and C is not:
3377   //   (C2 << X) == C --> false
3378   //   (C2 << X) != C --> true
3379   const APInt *C;
3380   if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3381       match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
3382     // C2 << X can equal zero in some circumstances.
3383     // This simplification might be unsafe if C is zero.
3384     //
3385     // We know it is safe if:
3386     // - The shift is nsw. We can't shift out the one bit.
3387     // - The shift is nuw. We can't shift out the one bit.
3388     // - C2 is one.
3389     // - C isn't zero.
3390     if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3391         Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3392         match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3393       if (Pred == ICmpInst::ICMP_EQ)
3394         return ConstantInt::getFalse(getCompareTy(RHS));
3395       if (Pred == ICmpInst::ICMP_NE)
3396         return ConstantInt::getTrue(getCompareTy(RHS));
3397     }
3398   }
3399 
3400   // TODO: This is overly constrained. LHS can be any power-of-2.
3401   // (1 << X)  >u 0x8000 --> false
3402   // (1 << X) <=u 0x8000 --> true
3403   if (match(LHS, m_Shl(m_One(), m_Value())) && match(RHS, m_SignMask())) {
3404     if (Pred == ICmpInst::ICMP_UGT)
3405       return ConstantInt::getFalse(getCompareTy(RHS));
3406     if (Pred == ICmpInst::ICMP_ULE)
3407       return ConstantInt::getTrue(getCompareTy(RHS));
3408   }
3409 
3410   if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3411     return nullptr;
3412 
3413   if (LBO->getOperand(0) == RBO->getOperand(0)) {
3414     switch (LBO->getOpcode()) {
3415     default:
3416       break;
3417     case Instruction::Shl:
3418       bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3419       bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3420       if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3421           !isKnownNonZero(LBO->getOperand(0), Q.DL))
3422         break;
3423       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
3424                                       RBO->getOperand(1), Q, MaxRecurse - 1))
3425         return V;
3426     }
3427   }
3428 
3429   if (LBO->getOperand(1) == RBO->getOperand(1)) {
3430     switch (LBO->getOpcode()) {
3431     default:
3432       break;
3433     case Instruction::UDiv:
3434     case Instruction::LShr:
3435       if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3436           !Q.IIQ.isExact(RBO))
3437         break;
3438       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3439                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3440         return V;
3441       break;
3442     case Instruction::SDiv:
3443       if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3444           !Q.IIQ.isExact(RBO))
3445         break;
3446       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3447                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3448         return V;
3449       break;
3450     case Instruction::AShr:
3451       if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3452         break;
3453       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3454                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3455         return V;
3456       break;
3457     case Instruction::Shl: {
3458       bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3459       bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3460       if (!NUW && !NSW)
3461         break;
3462       if (!NSW && ICmpInst::isSigned(Pred))
3463         break;
3464       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3465                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3466         return V;
3467       break;
3468     }
3469     }
3470   }
3471   return nullptr;
3472 }
3473 
3474 /// Simplify integer comparisons where at least one operand of the compare
3475 /// matches an integer min/max idiom.
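///
/// For example (illustrative): "icmp sge (smax %a, %b), %a" folds to true
/// and "icmp slt (smax %a, %b), %a" folds to false.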
3476 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3477                                      Value *RHS, const SimplifyQuery &Q,
3478                                      unsigned MaxRecurse) {
3479   Type *ITy = getCompareTy(LHS); // The return type.
3480   Value *A, *B;
3481   CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3482   CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3483 
3484   // Signed variants on "max(a,b)>=a -> true".
3485   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3486     if (A != RHS)
3487       std::swap(A, B);       // smax(A, B) pred A.
3488     EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3489     // We analyze this as smax(A, B) pred A.
3490     P = Pred;
3491   } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3492              (A == LHS || B == LHS)) {
3493     if (A != LHS)
3494       std::swap(A, B);       // A pred smax(A, B).
3495     EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3496     // We analyze this as smax(A, B) swapped-pred A.
3497     P = CmpInst::getSwappedPredicate(Pred);
3498   } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3499              (A == RHS || B == RHS)) {
3500     if (A != RHS)
3501       std::swap(A, B);       // smin(A, B) pred A.
3502     EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3503     // We analyze this as smax(-A, -B) swapped-pred -A.
3504     // Note that we do not need to actually form -A or -B thanks to EqP.
3505     P = CmpInst::getSwappedPredicate(Pred);
3506   } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3507              (A == LHS || B == LHS)) {
3508     if (A != LHS)
3509       std::swap(A, B);       // A pred smin(A, B).
3510     EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3511     // We analyze this as smax(-A, -B) pred -A.
3512     // Note that we do not need to actually form -A or -B thanks to EqP.
3513     P = Pred;
3514   }
3515   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3516     // Cases correspond to "max(A, B) p A".
3517     switch (P) {
3518     default:
3519       break;
3520     case CmpInst::ICMP_EQ:
3521     case CmpInst::ICMP_SLE:
3522       // Equivalent to "A EqP B".  This may be the same as the condition tested
3523       // in the max/min; if so, we can just return that.
3524       if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3525         return V;
3526       if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3527         return V;
3528       // Otherwise, see if "A EqP B" simplifies.
3529       if (MaxRecurse)
3530         if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3531           return V;
3532       break;
3533     case CmpInst::ICMP_NE:
3534     case CmpInst::ICMP_SGT: {
3535       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3536       // Equivalent to "A InvEqP B".  This may be the same as the condition
3537       // tested in the max/min; if so, we can just return that.
3538       if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3539         return V;
3540       if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3541         return V;
3542       // Otherwise, see if "A InvEqP B" simplifies.
3543       if (MaxRecurse)
3544         if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3545           return V;
3546       break;
3547     }
3548     case CmpInst::ICMP_SGE:
3549       // Always true.
3550       return getTrue(ITy);
3551     case CmpInst::ICMP_SLT:
3552       // Always false.
3553       return getFalse(ITy);
3554     }
3555   }
3556 
3557   // Unsigned variants on "max(a,b)>=a -> true".
3558   P = CmpInst::BAD_ICMP_PREDICATE;
3559   if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3560     if (A != RHS)
3561       std::swap(A, B);       // umax(A, B) pred A.
3562     EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3563     // We analyze this as umax(A, B) pred A.
3564     P = Pred;
3565   } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3566              (A == LHS || B == LHS)) {
3567     if (A != LHS)
3568       std::swap(A, B);       // A pred umax(A, B).
3569     EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3570     // We analyze this as umax(A, B) swapped-pred A.
3571     P = CmpInst::getSwappedPredicate(Pred);
3572   } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3573              (A == RHS || B == RHS)) {
3574     if (A != RHS)
3575       std::swap(A, B);       // umin(A, B) pred A.
3576     EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3577     // We analyze this as umax(-A, -B) swapped-pred -A.
3578     // Note that we do not need to actually form -A or -B thanks to EqP.
3579     P = CmpInst::getSwappedPredicate(Pred);
3580   } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3581              (A == LHS || B == LHS)) {
3582     if (A != LHS)
3583       std::swap(A, B);       // A pred umin(A, B).
3584     EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3585     // We analyze this as umax(-A, -B) pred -A.
3586     // Note that we do not need to actually form -A or -B thanks to EqP.
3587     P = Pred;
3588   }
3589   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3590     // Cases correspond to "max(A, B) p A".
3591     switch (P) {
3592     default:
3593       break;
3594     case CmpInst::ICMP_EQ:
3595     case CmpInst::ICMP_ULE:
3596       // Equivalent to "A EqP B".  This may be the same as the condition tested
3597       // in the max/min; if so, we can just return that.
3598       if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3599         return V;
3600       if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3601         return V;
3602       // Otherwise, see if "A EqP B" simplifies.
3603       if (MaxRecurse)
3604         if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3605           return V;
3606       break;
3607     case CmpInst::ICMP_NE:
3608     case CmpInst::ICMP_UGT: {
3609       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3610       // Equivalent to "A InvEqP B".  This may be the same as the condition
3611       // tested in the max/min; if so, we can just return that.
3612       if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3613         return V;
3614       if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3615         return V;
3616       // Otherwise, see if "A InvEqP B" simplifies.
3617       if (MaxRecurse)
3618         if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3619           return V;
3620       break;
3621     }
3622     case CmpInst::ICMP_UGE:
3623       return getTrue(ITy);
3624     case CmpInst::ICMP_ULT:
3625       return getFalse(ITy);
3626     }
3627   }
3628 
3629   // Comparing one min and one max that share a common operand?
3630   // Canonicalize min operand to RHS.
3631   if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3632       match(LHS, m_SMin(m_Value(), m_Value()))) {
3633     std::swap(LHS, RHS);
3634     Pred = ICmpInst::getSwappedPredicate(Pred);
3635   }
3636 
3637   Value *C, *D;
3638   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3639       match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3640       (A == C || A == D || B == C || B == D)) {
3641     // smax(A, B) >=s smin(A, D) --> true
3642     if (Pred == CmpInst::ICMP_SGE)
3643       return getTrue(ITy);
3644     // smax(A, B) <s smin(A, D) --> false
3645     if (Pred == CmpInst::ICMP_SLT)
3646       return getFalse(ITy);
3647   } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3648              match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3649              (A == C || A == D || B == C || B == D)) {
3650     // umax(A, B) >=u umin(A, D) --> true
3651     if (Pred == CmpInst::ICMP_UGE)
3652       return getTrue(ITy);
3653     // umax(A, B) <u umin(A, D) --> false
3654     if (Pred == CmpInst::ICMP_ULT)
3655       return getFalse(ITy);
3656   }
3657 
3658   return nullptr;
3659 }
3660 
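/// Try to fold the comparison using llvm.assume calls that are valid at the
/// context instruction. For example (illustrative): if
/// "call void @llvm.assume(i1 %c)" with %c = "icmp sgt i32 %x, 5" dominates
/// the context, then "icmp sgt i32 %x, 0" folds to true by implied-condition
/// reasoning.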
3661 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3662                                                Value *LHS, Value *RHS,
3663                                                const SimplifyQuery &Q) {
3664   // Gracefully handle instructions that have not been inserted yet.
3665   if (!Q.AC || !Q.CxtI)
3666     return nullptr;
3667 
3668   for (Value *AssumeBaseOp : {LHS, RHS}) {
3669     for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3670       if (!AssumeVH)
3671         continue;
3672 
3673       CallInst *Assume = cast<CallInst>(AssumeVH);
3674       if (std::optional<bool> Imp = isImpliedCondition(
3675               Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3676         if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3677           return ConstantInt::get(getCompareTy(LHS), *Imp);
3678     }
3679   }
3680 
3681   return nullptr;
3682 }
3683 
3684 static Value *simplifyICmpWithIntrinsicOnLHS(CmpInst::Predicate Pred,
3685                                              Value *LHS, Value *RHS) {
3686   auto *II = dyn_cast<IntrinsicInst>(LHS);
3687   if (!II)
3688     return nullptr;
3689 
3690   switch (II->getIntrinsicID()) {
3691   case Intrinsic::uadd_sat:
3692     // uadd.sat(X, Y) uge X, uadd.sat(X, Y) uge Y
3693     if (II->getArgOperand(0) == RHS || II->getArgOperand(1) == RHS) {
3694       if (Pred == ICmpInst::ICMP_UGE)
3695         return ConstantInt::getTrue(getCompareTy(II));
3696       if (Pred == ICmpInst::ICMP_ULT)
3697         return ConstantInt::getFalse(getCompareTy(II));
3698     }
3699     return nullptr;
3700   case Intrinsic::usub_sat:
3701     // usub.sat(X, Y) ule X
3702     if (II->getArgOperand(0) == RHS) {
3703       if (Pred == ICmpInst::ICMP_ULE)
3704         return ConstantInt::getTrue(getCompareTy(II));
3705       if (Pred == ICmpInst::ICMP_UGT)
3706         return ConstantInt::getFalse(getCompareTy(II));
3707     }
3708     return nullptr;
3709   default:
3710     return nullptr;
3711   }
3712 }
3713 
3714 /// Given operands for an ICmpInst, see if we can fold the result.
3715 /// If not, this returns null.
3716 static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3717                                const SimplifyQuery &Q, unsigned MaxRecurse) {
3718   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3719   assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3720 
3721   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3722     if (Constant *CRHS = dyn_cast<Constant>(RHS))
3723       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3724 
3725     // If we have a constant, make sure it is on the RHS.
3726     std::swap(LHS, RHS);
3727     Pred = CmpInst::getSwappedPredicate(Pred);
3728   }
3729   assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3730 
3731   Type *ITy = getCompareTy(LHS); // The return type.
3732 
3733   // icmp poison, X -> poison
3734   if (isa<PoisonValue>(RHS))
3735     return PoisonValue::get(ITy);
3736 
3737   // For EQ and NE, we can always pick a value for the undef to make the
3738   // predicate pass or fail, so we can return undef.
3739   // Matches behavior in llvm::ConstantFoldCompareInstruction.
3740   if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3741     return UndefValue::get(ITy);
3742 
3743   // icmp X, X -> true/false
3744   // icmp X, undef -> true/false because undef could be X.
3745   if (LHS == RHS || Q.isUndefValue(RHS))
3746     return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3747 
3748   if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3749     return V;
3750 
3751   // TODO: Sink/common this with other potentially expensive calls that use
3752   //       ValueTracking? See comment below for isKnownNonEqual().
3753   if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3754     return V;
3755 
3756   if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3757     return V;
3758 
3759   // If both operands have range metadata, use the metadata
3760   // to simplify the comparison.
3761   if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3762     auto RHS_Instr = cast<Instruction>(RHS);
3763     auto LHS_Instr = cast<Instruction>(LHS);
3764 
3765     if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3766         Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3767       auto RHS_CR = getConstantRangeFromMetadata(
3768           *RHS_Instr->getMetadata(LLVMContext::MD_range));
3769       auto LHS_CR = getConstantRangeFromMetadata(
3770           *LHS_Instr->getMetadata(LLVMContext::MD_range));
3771 
3772       if (LHS_CR.icmp(Pred, RHS_CR))
3773         return ConstantInt::getTrue(RHS->getContext());
3774 
3775       if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
3776         return ConstantInt::getFalse(RHS->getContext());
3777     }
3778   }
3779 
3780   // Compare of cast, for example (zext X) != 0 -> X != 0
3781   if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3782     Instruction *LI = cast<CastInst>(LHS);
3783     Value *SrcOp = LI->getOperand(0);
3784     Type *SrcTy = SrcOp->getType();
3785     Type *DstTy = LI->getType();
3786 
3787     // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3788     // if the integer type is the same size as the pointer type.
3789     if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3790         Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3791       if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3792         // Transfer the cast to the constant.
3793         if (Value *V = simplifyICmpInst(Pred, SrcOp,
3794                                         ConstantExpr::getIntToPtr(RHSC, SrcTy),
3795                                         Q, MaxRecurse - 1))
3796           return V;
3797       } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3798         if (RI->getOperand(0)->getType() == SrcTy)
3799           // Compare without the cast.
3800           if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3801                                           MaxRecurse - 1))
3802             return V;
3803       }
3804     }
3805 
3806     if (isa<ZExtInst>(LHS)) {
3807       // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3808       // same type.
3809       if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3810         if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3811           // Compare X and Y.  Note that signed predicates become unsigned.
3812           if (Value *V =
3813                   simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), SrcOp,
3814                                    RI->getOperand(0), Q, MaxRecurse - 1))
3815             return V;
3816       }
3817       // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3818       else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3819         if (SrcOp == RI->getOperand(0)) {
3820           if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3821             return ConstantInt::getTrue(ITy);
3822           if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3823             return ConstantInt::getFalse(ITy);
3824         }
3825       }
3826       // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3827       // too.  If not, then try to deduce the result of the comparison.
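      // For example (illustrative): "icmp ult (zext i8 %x to i32), 256"
      // folds to true: 256 does not survive the trunc/zext round-trip, and
      // a zext'd i8 value is always <u 256.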
3828       else if (match(RHS, m_ImmConstant())) {
3829         Constant *C = dyn_cast<Constant>(RHS);
3830         assert(C != nullptr);
3831 
3832         // Compute the constant that would happen if we truncated to SrcTy then
3833         // reextended to DstTy.
3834         Constant *Trunc = ConstantExpr::getTrunc(C, SrcTy);
3835         Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3836         Constant *AnyEq = ConstantExpr::getICmp(ICmpInst::ICMP_EQ, RExt, C);
3837 
3838         // If the re-extended constant didn't change any of the elements then
3839         // this is effectively also a case of comparing two zero-extended
3840         // values.
3841         if (AnyEq->isAllOnesValue() && MaxRecurse)
3842           if (Value *V = simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3843                                           SrcOp, Trunc, Q, MaxRecurse - 1))
3844             return V;
3845 
3846         // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3847         // there.  Use this to work out the result of the comparison.
3848         if (AnyEq->isNullValue()) {
3849           switch (Pred) {
3850           default:
3851             llvm_unreachable("Unknown ICmp predicate!");
3852           // LHS <u RHS.
3853           case ICmpInst::ICMP_EQ:
3854           case ICmpInst::ICMP_UGT:
3855           case ICmpInst::ICMP_UGE:
3856             return Constant::getNullValue(ITy);
3857 
3858           case ICmpInst::ICMP_NE:
3859           case ICmpInst::ICMP_ULT:
3860           case ICmpInst::ICMP_ULE:
3861             return Constant::getAllOnesValue(ITy);
3862 
3863           // LHS is non-negative.  If RHS is negative then LHS >s RHS.  If RHS
3864           // is non-negative then LHS <s RHS.
3865           case ICmpInst::ICMP_SGT:
3866           case ICmpInst::ICMP_SGE:
3867             return ConstantExpr::getICmp(ICmpInst::ICMP_SLT, C,
3868                                          Constant::getNullValue(C->getType()));
3869           case ICmpInst::ICMP_SLT:
3870           case ICmpInst::ICMP_SLE:
3871             return ConstantExpr::getICmp(ICmpInst::ICMP_SGE, C,
3872                                          Constant::getNullValue(C->getType()));
3873           }
3874         }
3875       }
3876     }
3877 
3878     if (isa<SExtInst>(LHS)) {
3879       // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3880       // same type.
3881       if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3882         if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3883           // Compare X and Y.  Note that the predicate does not change.
3884           if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3885                                           MaxRecurse - 1))
3886             return V;
3887       }
3888       // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3889       else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3890         if (SrcOp == RI->getOperand(0)) {
3891           if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3892             return ConstantInt::getTrue(ITy);
3893           if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3894             return ConstantInt::getFalse(ITy);
3895         }
3896       }
3897       // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3898       // too.  If not, then try to deduce the result of the comparison.
3899       else if (match(RHS, m_ImmConstant())) {
3900         Constant *C = dyn_cast<Constant>(RHS);
3901         assert(C != nullptr);
3902 
3903         // Compute the constant that would happen if we truncated to SrcTy then
3904         // reextended to DstTy.
3905         Constant *Trunc = ConstantExpr::getTrunc(C, SrcTy);
3906         Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3907         Constant *AnyEq = ConstantExpr::getICmp(ICmpInst::ICMP_EQ, RExt, C);
3908 
3909         // If the re-extended constant didn't change then this is effectively
3910         // also a case of comparing two sign-extended values.
3911         if (AnyEq->isAllOnesValue() && MaxRecurse)
3912           if (Value *V =
3913                   simplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse - 1))
3914             return V;
3915 
3916         // Otherwise the upper bits of LHS are all equal, while RHS has varying
3917         // bits there.  Use this to work out the result of the comparison.
3918         if (AnyEq->isNullValue()) {
3919           switch (Pred) {
3920           default:
3921             llvm_unreachable("Unknown ICmp predicate!");
3922           case ICmpInst::ICMP_EQ:
3923             return Constant::getNullValue(ITy);
3924           case ICmpInst::ICMP_NE:
3925             return Constant::getAllOnesValue(ITy);
3926 
3927           // If RHS is non-negative then LHS <s RHS.  If RHS is negative then
3928           // LHS >s RHS.
3929           case ICmpInst::ICMP_SGT:
3930           case ICmpInst::ICMP_SGE:
3931             return ConstantExpr::getICmp(ICmpInst::ICMP_SLT, C,
3932                                          Constant::getNullValue(C->getType()));
3933           case ICmpInst::ICMP_SLT:
3934           case ICmpInst::ICMP_SLE:
3935             return ConstantExpr::getICmp(ICmpInst::ICMP_SGE, C,
3936                                          Constant::getNullValue(C->getType()));
3937 
3938           // If LHS is non-negative then LHS <u RHS.  If LHS is negative then
3939           // LHS >u RHS.
3940           case ICmpInst::ICMP_UGT:
3941           case ICmpInst::ICMP_UGE:
3942             // Comparison is true iff the LHS <s 0.
3943             if (MaxRecurse)
3944               if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3945                                               Constant::getNullValue(SrcTy), Q,
3946                                               MaxRecurse - 1))
3947                 return V;
3948             break;
3949           case ICmpInst::ICMP_ULT:
3950           case ICmpInst::ICMP_ULE:
3951             // Comparison is true iff the LHS >=s 0.
3952             if (MaxRecurse)
3953               if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3954                                               Constant::getNullValue(SrcTy), Q,
3955                                               MaxRecurse - 1))
3956                 return V;
3957             break;
3958           }
3959         }
3960       }
3961     }
3962   }
3963 
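  // Illustrative instance (hypothetical values) of the deduced-result path:
  //   %e = sext i8 %x to i32
  //   %c = icmp eq i32 %e, 1000
  // 1000 truncates to i8 as -24 and re-extends to -24 != 1000, so the upper
  // bits always differ and %c folds to false.
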
3964   // icmp eq|ne X, Y -> false|true if X != Y
3965   // This is potentially expensive, and we have already computed known bits
3966   // for compares with 0 above, so only try this for a non-zero compare.
3967   if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
3968       isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3969     return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3970   }
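  // E.g. "icmp eq i32 %x, %y" with %y = add i32 %x, 1 folds to false, since
  // adding a non-zero constant can never reproduce the original value.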
3971 
3972   if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3973     return V;
3974 
3975   if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3976     return V;
3977 
3978   if (Value *V = simplifyICmpWithIntrinsicOnLHS(Pred, LHS, RHS))
3979     return V;
3980   if (Value *V = simplifyICmpWithIntrinsicOnLHS(
3981           ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
3982     return V;
3983 
3984   if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
3985     return V;
3986 
3987   if (std::optional<bool> Res =
3988           isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
3989     return ConstantInt::getBool(ITy, *Res);
3990 
3991   // Simplify comparisons of related pointers using a powerful, recursive
3992   // GEP-walk when we have target data available.
3993   if (LHS->getType()->isPointerTy())
3994     if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
3995       return C;
3996   if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3997     if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3998       if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
3999           Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
4000               Q.DL.getTypeSizeInBits(CLHS->getType()))
4001         if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
4002                                          CRHS->getPointerOperand(), Q))
4003           return C;
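  // Illustrative case (assuming the i64 ptrtoint is lossless on the target):
  //   %ai = ptrtoint ptr %a to i64
  //   %bi = ptrtoint ptr %b to i64
  //   %c  = icmp eq i64 %ai, %bi
  // reduces to comparing %a with %b, which computePointerICmp can fold when
  // they are provably distinct allocations.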
4004 
4005   // If the comparison is with the result of a select instruction, check whether
4006   // comparing with either branch of the select always yields the same value.
4007   if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4008     if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4009       return V;
4010 
4011   // If the comparison is with the result of a phi instruction, check whether
4012   // doing the compare with each incoming phi value yields a common result.
4013   if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4014     if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4015       return V;
4016 
4017   return nullptr;
4018 }
4019 
4020 Value *llvm::simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4021                               const SimplifyQuery &Q) {
4022   return ::simplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4023 }
4024 
4025 /// Given operands for an FCmpInst, see if we can fold the result.
4026 /// If not, this returns null.
4027 static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4028                                FastMathFlags FMF, const SimplifyQuery &Q,
4029                                unsigned MaxRecurse) {
4030   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
4031   assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4032 
4033   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4034     if (Constant *CRHS = dyn_cast<Constant>(RHS))
4035       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI,
4036                                              Q.CxtI);
4037 
4038     // If we have a constant, make sure it is on the RHS.
4039     std::swap(LHS, RHS);
4040     Pred = CmpInst::getSwappedPredicate(Pred);
4041   }
4042 
4043   // Fold trivial predicates.
4044   Type *RetTy = getCompareTy(LHS);
4045   if (Pred == FCmpInst::FCMP_FALSE)
4046     return getFalse(RetTy);
4047   if (Pred == FCmpInst::FCMP_TRUE)
4048     return getTrue(RetTy);
4049 
4050   // Fold (un)ordered comparison if we can determine there are no NaNs.
4051   if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD)
4052     if (FMF.noNaNs() ||
4053         (isKnownNeverNaN(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
4054          isKnownNeverNaN(RHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT)))
4055       return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
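  // For instance, "fcmp ord float %x, 1.0" folds to true (and the uno form to
  // false) when %x is known never-NaN, e.g. it is the result of a nnan op.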
4056 
4057   // NaN is unordered; NaN is not ordered.
4058   assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
4059          "Comparison must be either ordered or unordered");
4060   if (match(RHS, m_NaN()))
4061     return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4062 
4063   // fcmp pred x, poison and  fcmp pred poison, x
4064   // fold to poison
4065   if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4066     return PoisonValue::get(RetTy);
4067 
4068   // fcmp pred x, undef  and  fcmp pred undef, x
4069   // fold to true if unordered, false if ordered
4070   if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
4071     // Choosing NaN for the undef will always make unordered comparison succeed
4072     // and ordered comparison fail.
4073     return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4074   }
4075 
4076   // fcmp x,x -> true/false.  Not all compares are foldable.
4077   if (LHS == RHS) {
4078     if (CmpInst::isTrueWhenEqual(Pred))
4079       return getTrue(RetTy);
4080     if (CmpInst::isFalseWhenEqual(Pred))
4081       return getFalse(RetTy);
4082   }
4083 
4084   // Handle fcmp with constant RHS.
4085   // TODO: Use match with a specific FP value, so these work with vectors with
4086   // undef lanes.
4087   const APFloat *C;
4088   if (match(RHS, m_APFloat(C))) {
4089     // Check whether the constant is an infinity.
4090     if (C->isInfinity()) {
4091       if (C->isNegative()) {
4092         switch (Pred) {
4093         case FCmpInst::FCMP_OLT:
4094           // No value is ordered and less than negative infinity.
4095           return getFalse(RetTy);
4096         case FCmpInst::FCMP_UGE:
4097           // Every value is either unordered with, or at least, negative infinity.
4098           return getTrue(RetTy);
4099         default:
4100           break;
4101         }
4102       } else {
4103         switch (Pred) {
4104         case FCmpInst::FCMP_OGT:
4105           // No value is ordered and greater than infinity.
4106           return getFalse(RetTy);
4107         case FCmpInst::FCMP_ULE:
4108           // Every value is either unordered with, or at most, infinity.
4109           return getTrue(RetTy);
4110         default:
4111           break;
4112         }
4113       }
4114 
4115       // LHS == Inf
4116       if (Pred == FCmpInst::FCMP_OEQ &&
4117           isKnownNeverInfinity(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT))
4118         return getFalse(RetTy);
4119       // LHS != Inf
4120       if (Pred == FCmpInst::FCMP_UNE &&
4121           isKnownNeverInfinity(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT))
4122         return getTrue(RetTy);
4123       // LHS == Inf || LHS == NaN
4124       if (Pred == FCmpInst::FCMP_UEQ &&
4125           isKnownNeverInfOrNaN(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT))
4126         return getFalse(RetTy);
4127       // LHS != Inf && LHS != NaN
4128       if (Pred == FCmpInst::FCMP_ONE &&
4129           isKnownNeverInfOrNaN(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT))
4130         return getTrue(RetTy);
4131     }
4132     if (C->isNegative() && !C->isNegZero()) {
4133       assert(!C->isNaN() && "Unexpected NaN constant!");
4134       // TODO: We can catch more cases by using a range check rather than
4135       //       relying on CannotBeOrderedLessThanZero.
4136       switch (Pred) {
4137       case FCmpInst::FCMP_UGE:
4138       case FCmpInst::FCMP_UGT:
4139       case FCmpInst::FCMP_UNE:
4140         // (X >= 0) implies (X > C) when (C < 0)
4141         if (cannotBeOrderedLessThanZero(LHS, Q.DL, Q.TLI, 0,
4142                                         Q.AC, Q.CxtI, Q.DT))
4143           return getTrue(RetTy);
4144         break;
4145       case FCmpInst::FCMP_OEQ:
4146       case FCmpInst::FCMP_OLE:
4147       case FCmpInst::FCMP_OLT:
4148         // (X >= 0) implies !(X < C) when (C < 0)
4149         if (cannotBeOrderedLessThanZero(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI,
4150                                         Q.DT))
4151           return getFalse(RetTy);
4152         break;
4153       default:
4154         break;
4155       }
4156     }
4157 
4158     // Check comparison of [minnum/maxnum with constant] with other constant.
4159     const APFloat *C2;
4160     if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
4161          *C2 < *C) ||
4162         (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
4163          *C2 > *C)) {
4164       bool IsMaxNum =
4165           cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4166       // The ordered relationship and minnum/maxnum guarantee that we do not
4167       // have NaN constants, so ordered/unordered preds are handled the same.
4168       switch (Pred) {
4169       case FCmpInst::FCMP_OEQ:
4170       case FCmpInst::FCMP_UEQ:
4171         // minnum(X, LesserC)  == C --> false
4172         // maxnum(X, GreaterC) == C --> false
4173         return getFalse(RetTy);
4174       case FCmpInst::FCMP_ONE:
4175       case FCmpInst::FCMP_UNE:
4176         // minnum(X, LesserC)  != C --> true
4177         // maxnum(X, GreaterC) != C --> true
4178         return getTrue(RetTy);
4179       case FCmpInst::FCMP_OGE:
4180       case FCmpInst::FCMP_UGE:
4181       case FCmpInst::FCMP_OGT:
4182       case FCmpInst::FCMP_UGT:
4183         // minnum(X, LesserC)  >= C --> false
4184         // minnum(X, LesserC)  >  C --> false
4185         // maxnum(X, GreaterC) >= C --> true
4186         // maxnum(X, GreaterC) >  C --> true
4187         return ConstantInt::get(RetTy, IsMaxNum);
4188       case FCmpInst::FCMP_OLE:
4189       case FCmpInst::FCMP_ULE:
4190       case FCmpInst::FCMP_OLT:
4191       case FCmpInst::FCMP_ULT:
4192         // minnum(X, LesserC)  <= C --> true
4193         // minnum(X, LesserC)  <  C --> true
4194         // maxnum(X, GreaterC) <= C --> false
4195         // maxnum(X, GreaterC) <  C --> false
4196         return ConstantInt::get(RetTy, !IsMaxNum);
4197       default:
4198         // TRUE/FALSE/ORD/UNO should be handled before this.
4199         llvm_unreachable("Unexpected fcmp predicate");
4200       }
4201     }
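    // For example, "fcmp ogt float %m, 2.0" with %m = minnum(%x, 1.0) folds
    // to false: %m is never NaN and never exceeds 1.0, so no predicate
    // variant can make the compare true.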
4202   }
4203 
4204   if (match(RHS, m_AnyZeroFP())) {
4205     switch (Pred) {
4206     case FCmpInst::FCMP_OGE:
4207     case FCmpInst::FCMP_ULT: {
4208       FPClassTest Interested = FMF.noNaNs() ? fcNegative : fcNegative | fcNan;
4209       KnownFPClass Known = computeKnownFPClass(LHS, Q.DL, Interested, 0,
4210                                                Q.TLI, Q.AC, Q.CxtI, Q.DT);
4211 
4212       // Positive or zero X >= 0.0 --> true
4213       // Positive or zero X <  0.0 --> false
4214       if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4215           Known.cannotBeOrderedLessThanZero())
4216         return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
4217       break;
4218     }
4219     case FCmpInst::FCMP_UGE:
4220     case FCmpInst::FCMP_OLT:
4221       // Positive or zero or nan X >= 0.0 --> true
4222       // Positive or zero or nan X <  0.0 --> false
4223       if (cannotBeOrderedLessThanZero(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT))
4224         return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
4225       break;
4226     default:
4227       break;
4228     }
4229   }
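  // E.g. "fcmp oge float %a, 0.0" with %a = call float @llvm.fabs.f32(...)
  // folds to true once NaN is excluded (say, via an nnan flag): fabs never
  // produces a value ordered less than zero.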
4230 
4231   // If the comparison is with the result of a select instruction, check whether
4232   // comparing with either branch of the select always yields the same value.
4233   if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4234     if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4235       return V;
4236 
4237   // If the comparison is with the result of a phi instruction, check whether
4238   // doing the compare with each incoming phi value yields a common result.
4239   if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4240     if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4241       return V;
4242 
4243   return nullptr;
4244 }
4245 
4246 Value *llvm::simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4247                               FastMathFlags FMF, const SimplifyQuery &Q) {
4248   return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
4249 }
4250 
4251 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4252                                      const SimplifyQuery &Q,
4253                                      bool AllowRefinement,
4254                                      unsigned MaxRecurse) {
4255   // Trivial replacement.
4256   if (V == Op)
4257     return RepOp;
4258 
4259   if (!MaxRecurse--)
4260     return nullptr;
4261 
4262   // We cannot replace a constant, and shouldn't even try.
4263   if (isa<Constant>(Op))
4264     return nullptr;
4265 
4266   auto *I = dyn_cast<Instruction>(V);
4267   if (!I)
4268     return nullptr;
4269 
4270   // The arguments of a phi node might refer to a value from a previous
4271   // cycle iteration.
4272   if (isa<PHINode>(I))
4273     return nullptr;
4274 
4275   if (Op->getType()->isVectorTy()) {
4276     // For vector types, the simplification must hold per-lane, so forbid
4277     // potentially cross-lane operations like shufflevector.
4278     if (!I->getType()->isVectorTy() || isa<ShuffleVectorInst>(I) ||
4279         isa<CallBase>(I))
4280       return nullptr;
4281   }
4282 
4283   // Replace Op with RepOp in instruction operands.
4284   SmallVector<Value *, 8> NewOps;
4285   bool AnyReplaced = false;
4286   for (Value *InstOp : I->operands()) {
4287     if (Value *NewInstOp = simplifyWithOpReplaced(
4288             InstOp, Op, RepOp, Q, AllowRefinement, MaxRecurse)) {
4289       NewOps.push_back(NewInstOp);
4290       AnyReplaced = InstOp != NewInstOp;
4291     } else {
4292       NewOps.push_back(InstOp);
4293     }
4294   }
4295 
4296   if (!AnyReplaced)
4297     return nullptr;
4298 
4299   if (!AllowRefinement) {
4300     // General InstSimplify functions may refine the result, e.g. by returning
4301     // a constant for a potentially poison value. To avoid this, implement only
4302     // a few non-refining but profitable transforms here.
4303 
4304     if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4305       unsigned Opcode = BO->getOpcode();
4306       // id op x -> x, x op id -> x
4307       if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4308         return NewOps[1];
4309       if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4310                                                       /* RHS */ true))
4311         return NewOps[0];
4312 
4313       // x & x -> x, x | x -> x
4314       if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4315           NewOps[0] == NewOps[1])
4316         return NewOps[0];
4317 
4318       // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4319       // by assumption and this case never wraps, so nowrap flags can be
4320       // ignored.
4321       if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4322           NewOps[0] == RepOp && NewOps[1] == RepOp)
4323         return Constant::getNullValue(I->getType());
4324 
4325       // If we are substituting an absorber constant into a binop and extra
4326       // poison can't leak if we remove the select -- because both operands of
4327       // the binop are based on the same value -- then it may be safe to replace
4328       // the value with the absorber constant. Examples:
4329       // (Op == 0) ? 0 : (Op & -Op)            --> Op & -Op
4330       // (Op == 0) ? 0 : (Op * (binop Op, C))  --> Op * (binop Op, C)
4331       // (Op == -1) ? -1 : (Op | (binop C, Op) --> Op | (binop C, Op)
4332       Constant *Absorber =
4333           ConstantExpr::getBinOpAbsorber(Opcode, I->getType());
4334       if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4335           impliesPoison(BO, Op))
4336         return Absorber;
4337     }
4338 
4339     if (isa<GetElementPtrInst>(I)) {
4340       // getelementptr x, 0 -> x.
4341       // This never returns poison, even if inbounds is set.
4342       if (NewOps.size() == 2 && match(NewOps[1], m_Zero()))
4343         return NewOps[0];
4344     }
4345   } else {
4346     // The simplification queries below may return the original value. Consider:
4347     //   %div = udiv i32 %arg, %arg2
4348     //   %mul = mul nsw i32 %div, %arg2
4349     //   %cmp = icmp eq i32 %mul, %arg
4350     //   %sel = select i1 %cmp, i32 %div, i32 undef
4351     // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4352     // simplifies back to %arg. This can only happen because %mul does not
4353     // dominate %div. To ensure a consistent return value contract, we make sure
4354     // that this case returns nullptr as well.
4355     auto PreventSelfSimplify = [V](Value *Simplified) {
4356       return Simplified != V ? Simplified : nullptr;
4357     };
4358 
4359     return PreventSelfSimplify(
4360         ::simplifyInstructionWithOperands(I, NewOps, Q, MaxRecurse));
4361   }
4362 
4363   // If all operands are constant after substituting Op for RepOp then we can
4364   // constant fold the instruction.
4365   SmallVector<Constant *, 8> ConstOps;
4366   for (Value *NewOp : NewOps) {
4367     if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4368       ConstOps.push_back(ConstOp);
4369     else
4370       return nullptr;
4371   }
4372 
4373   // Consider:
4374   //   %cmp = icmp eq i32 %x, 2147483647
4375   //   %add = add nsw i32 %x, 1
4376   //   %sel = select i1 %cmp, i32 -2147483648, i32 %add
4377   //
4378   // We can't replace %sel with %add unless we strip away the flags (which
4379   // will be done in InstCombine).
4380   // TODO: This may be unsound, because it only catches some forms of
4381   // refinement.
4382   if (!AllowRefinement && canCreatePoison(cast<Operator>(I)))
4383     return nullptr;
4384 
4385   return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
4386 }
4387 
4388 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4389                                     const SimplifyQuery &Q,
4390                                     bool AllowRefinement) {
4391   return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
4392                                   RecursionLimit);
4393 }
4394 
4395 /// Try to simplify a select instruction when its condition operand is an
4396 /// integer comparison where one operand of the compare is a constant.
4397 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4398                                     const APInt *Y, bool TrueWhenUnset) {
4399   const APInt *C;
4400 
4401   // (X & Y) == 0 ? X & ~Y : X  --> X
4402   // (X & Y) != 0 ? X & ~Y : X  --> X & ~Y
4403   if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4404       *Y == ~*C)
4405     return TrueWhenUnset ? FalseVal : TrueVal;
4406 
4407   // (X & Y) == 0 ? X : X & ~Y  --> X & ~Y
4408   // (X & Y) != 0 ? X : X & ~Y  --> X
4409   if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4410       *Y == ~*C)
4411     return TrueWhenUnset ? FalseVal : TrueVal;
4412 
4413   if (Y->isPowerOf2()) {
4414     // (X & Y) == 0 ? X | Y : X  --> X | Y
4415     // (X & Y) != 0 ? X | Y : X  --> X
4416     if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4417         *Y == *C)
4418       return TrueWhenUnset ? TrueVal : FalseVal;
4419 
4420     // (X & Y) == 0 ? X : X | Y  --> X
4421     // (X & Y) != 0 ? X : X | Y  --> X | Y
4422     if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4423         *Y == *C)
4424       return TrueWhenUnset ? TrueVal : FalseVal;
4425   }
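  // Concrete instance with Y = 8 (bit 3): in "(X & 8) == 0 ? X | 8 : X" the
  // true arm sets bit 3 and the false arm already has it set, so both arms
  // equal "X | 8" and the select folds to it.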
4426 
4427   return nullptr;
4428 }
4429 
4430 static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4431                                      ICmpInst::Predicate Pred, Value *TVal,
4432                                      Value *FVal) {
4433   // Canonicalize common cmp+sel operand as CmpLHS.
4434   if (CmpRHS == TVal || CmpRHS == FVal) {
4435     std::swap(CmpLHS, CmpRHS);
4436     Pred = ICmpInst::getSwappedPredicate(Pred);
4437   }
4438 
4439   // Canonicalize common cmp+sel operand as TVal.
4440   if (CmpLHS == FVal) {
4441     std::swap(TVal, FVal);
4442     Pred = ICmpInst::getInversePredicate(Pred);
4443   }
4444 
4445   // A vector select may be shuffling together elements that are equivalent
4446   // based on the max/min/select relationship.
4447   Value *X = CmpLHS, *Y = CmpRHS;
4448   bool PeekedThroughSelectShuffle = false;
4449   auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4450   if (Shuf && Shuf->isSelect()) {
4451     if (Shuf->getOperand(0) == Y)
4452       FVal = Shuf->getOperand(1);
4453     else if (Shuf->getOperand(1) == Y)
4454       FVal = Shuf->getOperand(0);
4455     else
4456       return nullptr;
4457     PeekedThroughSelectShuffle = true;
4458   }
4459 
4460   // (X pred Y) ? X : max/min(X, Y)
4461   auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4462   if (!MMI || TVal != X ||
4463       !match(FVal, m_c_MaxOrMin(m_Specific(X), m_Specific(Y))))
4464     return nullptr;
4465 
4466   // (X >  Y) ? X : max(X, Y) --> max(X, Y)
4467   // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4468   // (X <  Y) ? X : min(X, Y) --> min(X, Y)
4469   // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4470   //
4471   // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4472   // (X > Y) ? X : (Z ? max(X, Y) : Y)
4473   // If Z is true, this reduces as above, and if Z is false:
4474   // (X > Y) ? X : Y --> max(X, Y)
4475   ICmpInst::Predicate MMPred = MMI->getPredicate();
4476   if (MMPred == CmpInst::getStrictPredicate(Pred))
4477     return MMI;
4478 
4479   // Other transforms are not valid with a shuffle.
4480   if (PeekedThroughSelectShuffle)
4481     return nullptr;
4482 
4483   // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4484   if (Pred == CmpInst::ICMP_EQ)
4485     return MMI;
4486 
4487   // (X != Y) ? X : max/min(X, Y) --> X
4488   if (Pred == CmpInst::ICMP_NE)
4489     return X;
4490 
4491   // (X <  Y) ? X : max(X, Y) --> X
4492   // (X <= Y) ? X : max(X, Y) --> X
4493   // (X >  Y) ? X : min(X, Y) --> X
4494   // (X >= Y) ? X : min(X, Y) --> X
4495   ICmpInst::Predicate InvPred = CmpInst::getInversePredicate(Pred);
4496   if (MMPred == CmpInst::getStrictPredicate(InvPred))
4497     return X;
4498 
4499   return nullptr;
4500 }
4501 
4502 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
4503 /// eq/ne.
4504 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4505                                            ICmpInst::Predicate Pred,
4506                                            Value *TrueVal, Value *FalseVal) {
4507   Value *X;
4508   APInt Mask;
4509   if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
4510     return nullptr;
4511 
4512   return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
4513                                Pred == ICmpInst::ICMP_EQ);
4514 }
4515 
4516 /// Try to simplify a select instruction when its condition operand is an
4517 /// integer equality comparison.
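/// E.g. for "select (icmp eq i32 %x, 0), i32 %y, i32 (or i32 %x, %y)",
/// substituting 0 for %x makes the false arm simplify to %y, matching the
/// true arm, so the select folds to the false arm (an illustrative case).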
4518 static Value *simplifySelectWithICmpEq(Value *CmpLHS, Value *CmpRHS,
4519                                        Value *TrueVal, Value *FalseVal,
4520                                        const SimplifyQuery &Q,
4521                                        unsigned MaxRecurse) {
4522   if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4523                              /* AllowRefinement */ false,
4524                              MaxRecurse) == TrueVal)
4525     return FalseVal;
4526   if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4527                              /* AllowRefinement */ true,
4528                              MaxRecurse) == FalseVal)
4529     return FalseVal;
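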
4530 
4531   return nullptr;
4532 }
4533 
4534 /// Try to simplify a select instruction when its condition operand is an
4535 /// integer comparison.
4536 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4537                                          Value *FalseVal,
4538                                          const SimplifyQuery &Q,
4539                                          unsigned MaxRecurse) {
4540   ICmpInst::Predicate Pred;
4541   Value *CmpLHS, *CmpRHS;
4542   if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4543     return nullptr;
4544 
4545   if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4546     return V;
4547 
4548   // Canonicalize ne to eq predicate.
4549   if (Pred == ICmpInst::ICMP_NE) {
4550     Pred = ICmpInst::ICMP_EQ;
4551     std::swap(TrueVal, FalseVal);
4552   }
4553 
4554   // Check for integer min/max with a limit constant:
4555   // X > MIN_INT ? X : MIN_INT --> X
4556   // X < MAX_INT ? X : MAX_INT --> X
4557   if (TrueVal->getType()->isIntOrIntVectorTy()) {
4558     Value *X, *Y;
4559     SelectPatternFlavor SPF =
4560         matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4561                                      X, Y)
4562             .Flavor;
4563     if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4564       APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4565                                     X->getType()->getScalarSizeInBits());
4566       if (match(Y, m_SpecificInt(LimitC)))
4567         return X;
4568     }
4569   }
4570 
4571   if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4572     Value *X;
4573     const APInt *Y;
4574     if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4575       if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4576                                            /*TrueWhenUnset=*/true))
4577         return V;
4578 
4579     // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4580     Value *ShAmt;
4581     auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4582                              m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4583     // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4584     // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4585     if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4586       return X;
4587 
4588     // Test for a zero-shift-guard-op around rotates. These are used to
4589     // avoid UB from oversized shifts in raw IR rotate patterns, but the
4590     // intrinsics do not have that problem.
4591     // We do not allow this transform for the general funnel shift case because
4592     // that would not preserve the poison safety of the original code.
4593     auto isRotate =
4594         m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4595                     m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4596     // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4597     // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4598     if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4599         Pred == ICmpInst::ICMP_EQ)
4600       return FalseVal;
4601 
4602     // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4603     // X == 0 ? -abs(X) : abs(X) --> abs(X)
4604     if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4605         match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4606       return FalseVal;
4607     if (match(TrueVal,
4608               m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4609         match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4610       return FalseVal;
4611   }
4612 
4613   // Check for other compares that behave like bit test.
4614   if (Value *V =
4615           simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4616     return V;
4617 
4618   // If we have a scalar equality comparison, then we know the value in one of
4619   // the arms of the select. See if substituting this value into the arm and
4620   // simplifying the result yields the same value as the other arm.
4621   if (Pred == ICmpInst::ICMP_EQ) {
4622     if (Value *V = simplifySelectWithICmpEq(CmpLHS, CmpRHS, TrueVal, FalseVal,
4623                                             Q, MaxRecurse))
4624       return V;
4625     if (Value *V = simplifySelectWithICmpEq(CmpRHS, CmpLHS, TrueVal, FalseVal,
4626                                             Q, MaxRecurse))
4627       return V;
4628 
4629     Value *X;
4630     Value *Y;
4631     // ((X | Y) == 0) ? X : 0 --> 0 (commuted 2 ways)
4632     if (match(CmpLHS, m_Or(m_Value(X), m_Value(Y))) &&
4633         match(CmpRHS, m_Zero())) {
4634       // (X | Y) == 0 implies X == 0 and Y == 0.
4635       if (Value *V = simplifySelectWithICmpEq(X, CmpRHS, TrueVal, FalseVal, Q,
4636                                               MaxRecurse))
4637         return V;
4638       if (Value *V = simplifySelectWithICmpEq(Y, CmpRHS, TrueVal, FalseVal, Q,
4639                                               MaxRecurse))
4640         return V;
4641     }
4642 
4643     // ((X & Y) == -1) ? X : -1 --> -1 (commuted 2 ways)
4644     if (match(CmpLHS, m_And(m_Value(X), m_Value(Y))) &&
4645         match(CmpRHS, m_AllOnes())) {
4646       // (X & Y) == -1 implies X == -1 and Y == -1.
4647       if (Value *V = simplifySelectWithICmpEq(X, CmpRHS, TrueVal, FalseVal, Q,
4648                                               MaxRecurse))
4649         return V;
4650       if (Value *V = simplifySelectWithICmpEq(Y, CmpRHS, TrueVal, FalseVal, Q,
4651                                               MaxRecurse))
4652         return V;
4653     }
4654   }
4655 
4656   return nullptr;
4657 }
4658 
4659 /// Try to simplify a select instruction when its condition operand is a
4660 /// floating-point comparison.
4661 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4662                                      const SimplifyQuery &Q) {
4663   FCmpInst::Predicate Pred;
4664   if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4665       !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4666     return nullptr;
4667 
4668   // This transform is safe if we do not have (do not care about) -0.0 or if
4669   // at least one operand is known to not be -0.0. Otherwise, the select can
4670   // change the sign of a zero operand.
4671   bool HasNoSignedZeros =
4672       Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros();
4673   const APFloat *C;
4674   if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
4675       (match(F, m_APFloat(C)) && C->isNonZero())) {
4676     // (T == F) ? T : F --> F
4677     // (F == T) ? T : F --> F
4678     if (Pred == FCmpInst::FCMP_OEQ)
4679       return F;
4680 
4681     // (T != F) ? T : F --> T
4682     // (F != T) ? T : F --> T
4683     if (Pred == FCmpInst::FCMP_UNE)
4684       return T;
4685   }
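  // Without the guard above this would be unsound: in
  // "select (fcmp oeq float %x, 0.0), float %x, float 0.0" the condition is
  // true for %x == -0.0, yet folding to the false arm would yield +0.0.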
4686 
4687   return nullptr;
4688 }
4689 
4690 /// Given operands for a SelectInst, see if we can fold the result.
4691 /// If not, this returns null.
4692 static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4693                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
4694   if (auto *CondC = dyn_cast<Constant>(Cond)) {
4695     if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4696       if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4697         if (Constant *C = ConstantFoldSelectInstruction(CondC, TrueC, FalseC))
4698           return C;
4699 
4700     // select poison, X, Y -> poison
4701     if (isa<PoisonValue>(CondC))
4702       return PoisonValue::get(TrueVal->getType());
4703 
4704     // select undef, X, Y -> X or Y
4705     if (Q.isUndefValue(CondC))
4706       return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4707 
4708     // select true,  X, Y --> X
4709     // select false, X, Y --> Y
4710     // For vectors, allow undef/poison elements in the condition to match the
4711     // defined elements, so we can eliminate the select.
4712     if (match(CondC, m_One()))
4713       return TrueVal;
4714     if (match(CondC, m_Zero()))
4715       return FalseVal;
4716   }
4717 
4718   assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4719          "Select must have bool or bool vector condition");
4720   assert(TrueVal->getType() == FalseVal->getType() &&
4721          "Select must have same types for true/false ops");
4722 
4723   if (Cond->getType() == TrueVal->getType()) {
4724     // select i1 Cond, i1 true, i1 false --> i1 Cond
4725     if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4726       return Cond;
4727 
4728     // (X && Y) ? X : Y --> Y (commuted 2 ways)
4729     if (match(Cond, m_c_LogicalAnd(m_Specific(TrueVal), m_Specific(FalseVal))))
4730       return FalseVal;
4731 
4732     // (X || Y) ? X : Y --> X (commuted 2 ways)
4733     if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Specific(FalseVal))))
4734       return TrueVal;
4735 
4736     // (X || Y) ? false : X --> false (commuted 2 ways)
4737     if (match(Cond, m_c_LogicalOr(m_Specific(FalseVal), m_Value())) &&
4738         match(TrueVal, m_ZeroInt()))
4739       return ConstantInt::getFalse(Cond->getType());
4740 
4741     // Match patterns that end in logical-and.
4742     if (match(FalseVal, m_ZeroInt())) {
4743       // !(X || Y) && X --> false (commuted 2 ways)
4744       if (match(Cond, m_Not(m_c_LogicalOr(m_Specific(TrueVal), m_Value()))))
4745         return ConstantInt::getFalse(Cond->getType());
4746       // X && !(X || Y) --> false (commuted 2 ways)
4747       if (match(TrueVal, m_Not(m_c_LogicalOr(m_Specific(Cond), m_Value()))))
4748         return ConstantInt::getFalse(Cond->getType());
4749 
4750       // (X || Y) && Y --> Y (commuted 2 ways)
4751       if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Value())))
4752         return TrueVal;
4753       // Y && (X || Y) --> Y (commuted 2 ways)
4754       if (match(TrueVal, m_c_LogicalOr(m_Specific(Cond), m_Value())))
4755         return Cond;
4756 
4757       // (X || Y) && (X || !Y) --> X (commuted 8 ways)
4758       Value *X, *Y;
4759       if (match(Cond, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4760           match(TrueVal, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4761         return X;
4762       if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4763           match(Cond, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4764         return X;
4765     }
4766 
4767     // Match patterns that end in logical-or.
4768     if (match(TrueVal, m_One())) {
4769       // !(X && Y) || X --> true (commuted 2 ways)
4770       if (match(Cond, m_Not(m_c_LogicalAnd(m_Specific(FalseVal), m_Value()))))
4771         return ConstantInt::getTrue(Cond->getType());
4772       // X || !(X && Y) --> true (commuted 2 ways)
4773       if (match(FalseVal, m_Not(m_c_LogicalAnd(m_Specific(Cond), m_Value()))))
4774         return ConstantInt::getTrue(Cond->getType());
4775 
4776       // (X && Y) || Y --> Y (commuted 2 ways)
4777       if (match(Cond, m_c_LogicalAnd(m_Specific(FalseVal), m_Value())))
4778         return FalseVal;
4779       // Y || (X && Y) --> Y (commuted 2 ways)
4780       if (match(FalseVal, m_c_LogicalAnd(m_Specific(Cond), m_Value())))
4781         return Cond;
4782     }
4783   }
4784 
4785   // select ?, X, X -> X
4786   if (TrueVal == FalseVal)
4787     return TrueVal;
4788 
4789   if (Cond == TrueVal) {
4790     // select i1 X, i1 X, i1 false --> X (logical-and)
4791     if (match(FalseVal, m_ZeroInt()))
4792       return Cond;
4793     // select i1 X, i1 X, i1 true --> true
4794     if (match(FalseVal, m_One()))
4795       return ConstantInt::getTrue(Cond->getType());
4796   }
4797   if (Cond == FalseVal) {
4798     // select i1 X, i1 true, i1 X --> X (logical-or)
4799     if (match(TrueVal, m_One()))
4800       return Cond;
4801     // select i1 X, i1 false, i1 X --> false
4802     if (match(TrueVal, m_ZeroInt()))
4803       return ConstantInt::getFalse(Cond->getType());
4804   }
4805 
4806   // If the true or false value is poison, we can fold to the other value.
4807   // If the true or false value is undef, we can fold to the other value as
4808   // long as the other value isn't poison.
4809   // select ?, poison, X -> X
4810   // select ?, undef,  X -> X
4811   if (isa<PoisonValue>(TrueVal) ||
4812       (Q.isUndefValue(TrueVal) &&
4813        isGuaranteedNotToBePoison(FalseVal, Q.AC, Q.CxtI, Q.DT)))
4814     return FalseVal;
4815   // select ?, X, poison -> X
4816   // select ?, X, undef  -> X
4817   if (isa<PoisonValue>(FalseVal) ||
4818       (Q.isUndefValue(FalseVal) &&
4819        isGuaranteedNotToBePoison(TrueVal, Q.AC, Q.CxtI, Q.DT)))
4820     return TrueVal;
4821 
4822   // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4823   Constant *TrueC, *FalseC;
4824   if (isa<FixedVectorType>(TrueVal->getType()) &&
4825       match(TrueVal, m_Constant(TrueC)) &&
4826       match(FalseVal, m_Constant(FalseC))) {
4827     unsigned NumElts =
4828         cast<FixedVectorType>(TrueC->getType())->getNumElements();
4829     SmallVector<Constant *, 16> NewC;
4830     for (unsigned i = 0; i != NumElts; ++i) {
4831       // Bail out on incomplete vector constants.
4832       Constant *TEltC = TrueC->getAggregateElement(i);
4833       Constant *FEltC = FalseC->getAggregateElement(i);
4834       if (!TEltC || !FEltC)
4835         break;
4836 
4837       // If the elements match (undef or not), that value is the result. If only
4838       // one element is undef, choose the defined element as the safe result.
4839       if (TEltC == FEltC)
4840         NewC.push_back(TEltC);
4841       else if (isa<PoisonValue>(TEltC) ||
4842                (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
4843         NewC.push_back(FEltC);
4844       else if (isa<PoisonValue>(FEltC) ||
4845                (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
4846         NewC.push_back(TEltC);
4847       else
4848         break;
4849     }
4850     if (NewC.size() == NumElts)
4851       return ConstantVector::get(NewC);
4852   }
4853 
4854   if (Value *V =
4855           simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4856     return V;
4857 
4858   if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
4859     return V;
4860 
4861   if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
4862     return V;
4863 
4864   std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4865   if (Imp)
4866     return *Imp ? TrueVal : FalseVal;
4867 
4868   return nullptr;
4869 }
4870 
4871 Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4872                                 const SimplifyQuery &Q) {
4873   return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4874 }
4875 
4876 /// Given operands for a GetElementPtrInst, see if we can fold the result.
4877 /// If not, this returns null.
4878 static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
4879                               ArrayRef<Value *> Indices, bool InBounds,
4880                               const SimplifyQuery &Q, unsigned) {
4881   // The address space of the GEP pointer operand.
4882   unsigned AS =
4883       cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
4884 
4885   // getelementptr P -> P.
4886   if (Indices.empty())
4887     return Ptr;
4888 
4889   // Compute the (pointer) type returned by the GEP instruction.
4890   Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
4891   Type *GEPTy = PointerType::get(LastType, AS);
4892   if (VectorType *VT = dyn_cast<VectorType>(Ptr->getType()))
4893     GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4894   else {
4895     for (Value *Op : Indices) {
4896       // If one of the operands is a vector, the result type is a vector of
4897       // pointers. All vector operands must have the same number of elements.
4898       if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4899         GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4900         break;
4901       }
4902     }
4903   }
4904 
4905   // All-zero GEP is a no-op, unless it performs a vector splat.
4906   if (Ptr->getType() == GEPTy &&
4907       all_of(Indices, [](const auto *V) { return match(V, m_Zero()); }))
4908     return Ptr;
4909 
4910   // getelementptr poison, idx -> poison
4911   // getelementptr baseptr, poison -> poison
4912   if (isa<PoisonValue>(Ptr) ||
4913       any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
4914     return PoisonValue::get(GEPTy);
4915 
4916   // getelementptr undef, idx -> undef
4917   if (Q.isUndefValue(Ptr))
4918     return UndefValue::get(GEPTy);
4919 
4920   bool IsScalableVec =
4921       isa<ScalableVectorType>(SrcTy) || any_of(Indices, [](const Value *V) {
4922         return isa<ScalableVectorType>(V->getType());
4923       });
4924 
4925   if (Indices.size() == 1) {
4926     // getelementptr P, 0 -> P.
4927     if (match(Indices[0], m_Zero()) && Ptr->getType() == GEPTy)
4928       return Ptr;
4929 
4930     Type *Ty = SrcTy;
4931     if (!IsScalableVec && Ty->isSized()) {
4932       Value *P;
4933       uint64_t C;
4934       uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
4935       // getelementptr P, N -> P if P points to a type of zero size.
4936       if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
4937         return Ptr;
4938 
4939       // The following transforms are only safe if the ptrtoint cast
4940       // doesn't truncate the pointers.
4941       if (Indices[0]->getType()->getScalarSizeInBits() ==
4942           Q.DL.getPointerSizeInBits(AS)) {
4943         auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
4944           return P->getType() == GEPTy &&
4945                  getUnderlyingObject(P) == getUnderlyingObject(Ptr);
4946         };
4947         // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
4948         if (TyAllocSize == 1 &&
4949             match(Indices[0],
4950                   m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
4951             CanSimplify())
4952           return P;
4953 
4954         // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
4955         // size 1 << C.
4956         if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
4957                                            m_PtrToInt(m_Specific(Ptr))),
4958                                      m_ConstantInt(C))) &&
4959             TyAllocSize == 1ULL << C && CanSimplify())
4960           return P;
4961 
4962         // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
4963         // size C.
4964         if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
4965                                            m_PtrToInt(m_Specific(Ptr))),
4966                                      m_SpecificInt(TyAllocSize))) &&
4967             CanSimplify())
4968           return P;
4969       }
4970     }
4971   }
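  // A sketch of the pointer-difference fold above, in IR (illustrative names):
  //   %pi = ptrtoint ptr %p to i64
  //   %vi = ptrtoint ptr %v to i64
  //   %d  = sub i64 %pi, %vi
  //   %g  = getelementptr i8, ptr %v, i64 %d  ; folds to %p (same object)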
4972 
4973   if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
4974       all_of(Indices.drop_back(1),
4975              [](Value *Idx) { return match(Idx, m_Zero()); })) {
4976     unsigned IdxWidth =
4977         Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
4978     if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
4979       APInt BasePtrOffset(IdxWidth, 0);
4980       Value *StrippedBasePtr =
4981           Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
4982 
4983       // Avoid creating inttoptr of zero here: while LLVM's treatment of
4984       // inttoptr is generally conservative, this particular case is folded to
4985       // a null pointer, which will have incorrect provenance.
4986 
4987       // gep (gep V, C), (sub 0, V) -> C
4988       if (match(Indices.back(),
4989                 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
4990           !BasePtrOffset.isZero()) {
4991         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
4992         return ConstantExpr::getIntToPtr(CI, GEPTy);
4993       }
4994       // gep (gep V, C), (xor V, -1) -> C-1
4995       if (match(Indices.back(),
4996                 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
4997           !BasePtrOffset.isOne()) {
4998         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
4999         return ConstantExpr::getIntToPtr(CI, GEPTy);
5000       }
5001     }
5002   }
5003 
5004   // Check to see if this is constant foldable.
5005   if (!isa<Constant>(Ptr) ||
5006       !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
5007     return nullptr;
5008 
5009   if (!ConstantExpr::isSupportedGetElementPtr(SrcTy))
5010     return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), InBounds,
5011                                      std::nullopt, Indices);
5012 
5013   auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices,
5014                                             InBounds);
5015   return ConstantFoldConstant(CE, Q.DL);
5016 }
5017 
5018 Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
5019                              bool InBounds, const SimplifyQuery &Q) {
5020   return ::simplifyGEPInst(SrcTy, Ptr, Indices, InBounds, Q, RecursionLimit);
5021 }
5022 
5023 /// Given operands for an InsertValueInst, see if we can fold the result.
5024 /// If not, this returns null.
5025 static Value *simplifyInsertValueInst(Value *Agg, Value *Val,
5026                                       ArrayRef<unsigned> Idxs,
5027                                       const SimplifyQuery &Q, unsigned) {
5028   if (Constant *CAgg = dyn_cast<Constant>(Agg))
5029     if (Constant *CVal = dyn_cast<Constant>(Val))
5030       return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
5031 
5032   // insertvalue x, poison, n -> x
5033   // insertvalue x, undef, n -> x if x cannot be poison
5034   if (isa<PoisonValue>(Val) ||
5035       (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Agg)))
5036     return Agg;
5037 
5038   // insertvalue x, (extractvalue y, n), n
5039   if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
5040     if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5041         EV->getIndices() == Idxs) {
5042       // insertvalue poison, (extractvalue y, n), n -> y
5043       // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5044       if (isa<PoisonValue>(Agg) ||
5045           (Q.isUndefValue(Agg) &&
5046            isGuaranteedNotToBePoison(EV->getAggregateOperand())))
5047         return EV->getAggregateOperand();
5048 
5049       // insertvalue y, (extractvalue y, n), n -> y
5050       if (Agg == EV->getAggregateOperand())
5051         return Agg;
5052     }
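  // E.g. inserting (extractvalue %agg, 0) back into %agg at index 0 rewrites
  // the field with its own value, so the whole thing folds to %agg.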
5053 
5054   return nullptr;
5055 }
5056 
5057 Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val,
5058                                      ArrayRef<unsigned> Idxs,
5059                                      const SimplifyQuery &Q) {
5060   return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5061 }
5062 
5063 Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
5064                                        const SimplifyQuery &Q) {
5065   // Try to constant fold.
5066   auto *VecC = dyn_cast<Constant>(Vec);
5067   auto *ValC = dyn_cast<Constant>(Val);
5068   auto *IdxC = dyn_cast<Constant>(Idx);
5069   if (VecC && ValC && IdxC)
5070     return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
5071 
5072   // For fixed-length vectors, fold into poison if the index is out of bounds.
5073   if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5074     if (isa<FixedVectorType>(Vec->getType()) &&
5075         CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5076       return PoisonValue::get(Vec->getType());
5077   }
5078 
5079   // If the index is undef, it might be out of bounds (see the case above).
5080   if (Q.isUndefValue(Idx))
5081     return PoisonValue::get(Vec->getType());
5082 
5083   // If the scalar is poison, or it is undef and there is no risk of
5084   // propagating poison from the vector value, simplify to the vector value.
5085   if (isa<PoisonValue>(Val) ||
5086       (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
5087     return Vec;
5088 
5089   // If we are extracting a value from a vector, then inserting it into the same
5090   // place, that's the input vector:
5091   // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5092   if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
5093     return Vec;
5094 
5095   return nullptr;
5096 }
5097 
5098 /// Given operands for an ExtractValueInst, see if we can fold the result.
5099 /// If not, this returns null.
5100 static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5101                                        const SimplifyQuery &, unsigned) {
5102   if (auto *CAgg = dyn_cast<Constant>(Agg))
5103     return ConstantFoldExtractValueInstruction(CAgg, Idxs);
5104 
5105   // extractvalue (insertvalue y, elt, n), n -> elt
5106   unsigned NumIdxs = Idxs.size();
5107   for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5108        IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5109     ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5110     unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5111     unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5112     if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5113         Idxs.slice(0, NumCommonIdxs)) {
5114       if (NumIdxs == NumInsertValueIdxs)
5115         return IVI->getInsertedValueOperand();
5116       break;
5117     }
5118   }
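  // E.g. "extractvalue (insertvalue %a, %v, 0), 0" yields %v on the first
  // hop; an insertvalue at an unrelated index is skipped, while an
  // overlapping-but-shorter index prefix ends the search.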
5119 
5120   return nullptr;
5121 }
5122 
5123 Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5124                                       const SimplifyQuery &Q) {
5125   return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
5126 }
5127 
5128 /// Given operands for an ExtractElementInst, see if we can fold the result.
5129 /// If not, this returns null.
5130 static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
5131                                          const SimplifyQuery &Q, unsigned) {
5132   auto *VecVTy = cast<VectorType>(Vec->getType());
5133   if (auto *CVec = dyn_cast<Constant>(Vec)) {
5134     if (auto *CIdx = dyn_cast<Constant>(Idx))
5135       return ConstantExpr::getExtractElement(CVec, CIdx);
5136 
5137     if (Q.isUndefValue(Vec))
5138       return UndefValue::get(VecVTy->getElementType());
5139   }
5140 
5141   // An undef extract index can be arbitrarily chosen to be an out-of-range
5142   // index value, which would result in the instruction being poison.
5143   if (Q.isUndefValue(Idx))
5144     return PoisonValue::get(VecVTy->getElementType());
5145 
5146   // If extracting a specified index from the vector, see if we can recursively
5147   // find a previously computed scalar that was inserted into the vector.
5148   if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5149     // For fixed-length vectors, fold into poison if the index is out of bounds.
5150     unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5151     if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5152       return PoisonValue::get(VecVTy->getElementType());
5153     // Handle case where an element is extracted from a splat.
5154     if (IdxC->getValue().ult(MinNumElts))
5155       if (auto *Splat = getSplatValue(Vec))
5156         return Splat;
5157     if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
5158       return Elt;
5159   } else {
5160     // extractelt (insertelt y, elt, n), n -> elt
5161     // If the possibly-variable indices are trivially known to be equal
5162     // (because they are the same operand) then use the value that was
5163     // inserted directly.
5164     auto *IE = dyn_cast<InsertElementInst>(Vec);
5165     if (IE && IE->getOperand(2) == Idx)
5166       return IE->getOperand(1);
5167 
5168     // The index is not relevant if our vector is a splat.
5169     if (Value *Splat = getSplatValue(Vec))
5170       return Splat;
5171   }
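  // E.g. "extractelement (insertelement poison, i32 %v, i64 %i), i64 %i"
  // folds to %v even with a variable index, per the matching-operand case.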
5172   return nullptr;
5173 }
5174 
5175 Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
5176                                         const SimplifyQuery &Q) {
5177   return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5178 }
5179 
5180 /// See if we can fold the given phi. If not, returns null.
5181 static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
5182                               const SimplifyQuery &Q) {
5183   // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
5184   //          here, because the PHI we might simplify to was not def-reachable
5185   //          from the original PHI!
5186 
5187   // If all of the PHI's incoming values are the same then replace the PHI node
5188   // with the common value.
5189   Value *CommonValue = nullptr;
5190   bool HasUndefInput = false;
5191   for (Value *Incoming : IncomingValues) {
5192     // If the incoming value is the phi node itself, it can safely be skipped.
5193     if (Incoming == PN)
5194       continue;
5195     if (Q.isUndefValue(Incoming)) {
5196       // Remember that we saw an undef value, but otherwise ignore them.
5197       HasUndefInput = true;
5198       continue;
5199     }
5200     if (CommonValue && Incoming != CommonValue)
5201       return nullptr; // Not the same, bail out.
5202     CommonValue = Incoming;
5203   }
5204 
5205   // If CommonValue is null then all of the incoming values were either undef or
5206   // equal to the phi node itself.
5207   if (!CommonValue)
5208     return UndefValue::get(PN->getType());
5209 
5210   if (HasUndefInput) {
5211     // If we have a PHI node like phi(X, undef, X), where X is defined by some
5212     // instruction, we cannot return X as the result of the PHI node unless it
5213     // dominates the PHI block.
5214     return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
5215   }
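  // E.g. "phi i32 [ %x, %bb0 ], [ undef, %bb1 ], [ %x, %bb2 ]" folds to %x,
  // but only if %x dominates the phi's block, per the dominance check above.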
5216 
5217   return CommonValue;
5218 }
5219 
5220 static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5221                                const SimplifyQuery &Q, unsigned MaxRecurse) {
5222   if (auto *C = dyn_cast<Constant>(Op))
5223     return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
5224 
5225   if (auto *CI = dyn_cast<CastInst>(Op)) {
5226     auto *Src = CI->getOperand(0);
5227     Type *SrcTy = Src->getType();
5228     Type *MidTy = CI->getType();
5229     Type *DstTy = Ty;
5230     if (Src->getType() == Ty) {
5231       auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
5232       auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5233       Type *SrcIntPtrTy =
5234           SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
5235       Type *MidIntPtrTy =
5236           MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
5237       Type *DstIntPtrTy =
5238           DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
5239       if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5240                                          SrcIntPtrTy, MidIntPtrTy,
5241                                          DstIntPtrTy) == Instruction::BitCast)
5242         return Src;
5243     }
5244   }
5245 
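  // E.g. "trunc (zext i8 %x to i32) to i8": the zext/trunc round trip is
  // eliminable as an i8-to-i8 bitcast, so it folds back to %x.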
5246   // bitcast x -> x
5247   if (CastOpc == Instruction::BitCast)
5248     if (Op->getType() == Ty)
5249       return Op;
5250 
5251   return nullptr;
5252 }
5253 
5254 Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5255                               const SimplifyQuery &Q) {
5256   return ::simplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
5257 }
5258 
5259 /// For the given destination element of a shuffle, peek through shuffles to
5260 /// match a root vector source operand that contains that element in the same
5261 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
5262 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5263                                    int MaskVal, Value *RootVec,
5264                                    unsigned MaxRecurse) {
5265   if (!MaxRecurse--)
5266     return nullptr;
5267 
5268   // Bail out if any mask value is undefined. That kind of shuffle may be
5269   // simplified further based on demanded bits or other folds.
5270   if (MaskVal == -1)
5271     return nullptr;
5272 
5273   // The mask value chooses which source operand we need to look at next.
5274   int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5275   int RootElt = MaskVal;
5276   Value *SourceOp = Op0;
5277   if (MaskVal >= InVecNumElts) {
5278     RootElt = MaskVal - InVecNumElts;
5279     SourceOp = Op1;
5280   }
5281 
5282   // If the source operand is a shuffle itself, look through it to find the
5283   // matching root vector.
5284   if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5285     return foldIdentityShuffles(
5286         DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5287         SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5288   }
5289 
5290   // TODO: Look through bitcasts? What if the bitcast changes the vector element
5291   // size?
5292 
5293   // The source operand is not a shuffle. Initialize the root vector value for
5294   // this shuffle if that has not been done yet.
5295   if (!RootVec)
5296     RootVec = SourceOp;
5297 
5298   // Give up as soon as a source operand does not match the existing root value.
5299   if (RootVec != SourceOp)
5300     return nullptr;
5301 
5302   // The element must be coming from the same lane in the source vector
5303   // (although it may have crossed lanes in intermediate shuffles).
5304   if (RootElt != DestElt)
5305     return nullptr;
5306 
5307   return RootVec;
5308 }
5309 
5310 static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5311                                         ArrayRef<int> Mask, Type *RetTy,
5312                                         const SimplifyQuery &Q,
5313                                         unsigned MaxRecurse) {
5314   if (all_of(Mask, [](int Elem) { return Elem == PoisonMaskElem; }))
5315     return PoisonValue::get(RetTy);
5316 
5317   auto *InVecTy = cast<VectorType>(Op0->getType());
5318   unsigned MaskNumElts = Mask.size();
5319   ElementCount InVecEltCount = InVecTy->getElementCount();
5320 
5321   bool Scalable = InVecEltCount.isScalable();
5322 
5323   SmallVector<int, 32> Indices;
5324   Indices.assign(Mask.begin(), Mask.end());
5325 
5326   // Canonicalization: If the mask does not select any elements from an input
5327   // vector, replace that input vector with poison.
5328   if (!Scalable) {
5329     bool MaskSelects0 = false, MaskSelects1 = false;
5330     unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5331     for (unsigned i = 0; i != MaskNumElts; ++i) {
5332       if (Indices[i] == -1)
5333         continue;
5334       if ((unsigned)Indices[i] < InVecNumElts)
5335         MaskSelects0 = true;
5336       else
5337         MaskSelects1 = true;
5338     }
5339     if (!MaskSelects0)
5340       Op0 = PoisonValue::get(InVecTy);
5341     if (!MaskSelects1)
5342       Op1 = PoisonValue::get(InVecTy);
5343   }
5344 
5345   auto *Op0Const = dyn_cast<Constant>(Op0);
5346   auto *Op1Const = dyn_cast<Constant>(Op1);
5347 
5348   // If all operands are constant, constant fold the shuffle. This
5349   // transformation depends on the value of the mask, which is not known at
5350   // compile time for scalable vectors.
5351   if (Op0Const && Op1Const)
5352     return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5353 
5354   // Canonicalization: if only one input vector is constant, it shall be the
5355   // second one. This transformation depends on the value of the mask, which
5356   // is not known at compile time for scalable vectors.
5357   if (!Scalable && Op0Const && !Op1Const) {
5358     std::swap(Op0, Op1);
5359     ShuffleVectorInst::commuteShuffleMask(Indices,
5360                                           InVecEltCount.getKnownMinValue());
5361   }
5362 
5363   // A splat of an inserted scalar constant becomes a vector constant:
5364   // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5365   // NOTE: We may have commuted above, so analyze the updated Indices, not the
5366   //       original mask constant.
5367   // NOTE: This transformation depends on the value of the mask which is not
5368   // known at compile time for scalable vectors
5369   Constant *C;
5370   ConstantInt *IndexC;
5371   if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
5372                                           m_ConstantInt(IndexC)))) {
5373     // Match a splat shuffle mask of the insert index allowing undef elements.
5374     int InsertIndex = IndexC->getZExtValue();
5375     if (all_of(Indices, [InsertIndex](int MaskElt) {
5376           return MaskElt == InsertIndex || MaskElt == -1;
5377         })) {
5378       assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5379 
5380       // Shuffle mask poisons become poison constant result elements.
5381       SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5382       for (unsigned i = 0; i != MaskNumElts; ++i)
5383         if (Indices[i] == -1)
5384           VecC[i] = PoisonValue::get(C->getType());
5385       return ConstantVector::get(VecC);
5386     }
5387   }
5388 
5389   // A shuffle of a splat is always the splat itself. Legal if the shuffle's
5390   // value type is the same as the input vectors' type.
5391   if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5392     if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
5393         all_equal(OpShuf->getShuffleMask()))
5394       return Op0;
5395 
5396   // All remaining transformations depend on the value of the mask, which is
5397   // not known at compile time for scalable vectors.
5398   if (Scalable)
5399     return nullptr;
5400 
5401   // Don't fold a shuffle with undef mask elements. This may get folded in a
5402   // better way using demanded bits or other analysis.
5403   // TODO: Should we allow this?
5404   if (is_contained(Indices, -1))
5405     return nullptr;
5406 
5407   // Check if every element of this shuffle can be mapped back to the
5408   // corresponding element of a single root vector. If so, we don't need this
5409   // shuffle. This handles simple identity shuffles as well as chains of
5410   // shuffles that may widen/narrow and/or move elements across lanes and back.
5411   Value *RootVec = nullptr;
5412   for (unsigned i = 0; i != MaskNumElts; ++i) {
5413     // Note that recursion is limited for each vector element, so if any element
5414     // exceeds the limit, this will fail to simplify.
5415     RootVec =
5416         foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5417 
5418     // We can't replace a widening/narrowing shuffle with one of its operands.
5419     if (!RootVec || RootVec->getType() != RetTy)
5420       return nullptr;
5421   }
5422   return RootVec;
5423 }
5424 
5425 /// Given operands for a ShuffleVectorInst, fold the result or return null.
5426 Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5427                                        ArrayRef<int> Mask, Type *RetTy,
5428                                        const SimplifyQuery &Q) {
5429   return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
5430 }
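
// An illustrative (hypothetical) IR example of the identity-shuffle fold: a
// chain that moves an element out of its lane and back is folded to the root
// vector by foldIdentityShuffles.
//   %s1 = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
//   %s2 = shufflevector <4 x i32> %s1, <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 2, i32 3>
//   ; %s2 simplifies to %v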
5431 
5432 static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
5433                               const SimplifyQuery &Q) {
5434   if (auto *C = dyn_cast<Constant>(Op))
5435     return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
5436   return nullptr;
5437 }
5438 
5439 /// Given the operand for an FNeg, see if we can fold the result.  If not, this
5440 /// returns null.
5441 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
5442                                const SimplifyQuery &Q, unsigned MaxRecurse) {
5443   if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
5444     return C;
5445 
5446   Value *X;
5447   // fneg (fneg X) ==> X
5448   if (match(Op, m_FNeg(m_Value(X))))
5449     return X;
5450 
5451   return nullptr;
5452 }
5453 
5454 Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF,
5455                               const SimplifyQuery &Q) {
5456   return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
5457 }
5458 
5459 /// Try to propagate existing NaN values when possible. If not, replace the
5460 /// constant or elements in the constant with a canonical NaN.
5461 static Constant *propagateNaN(Constant *In) {
5462   Type *Ty = In->getType();
5463   if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5464     unsigned NumElts = VecTy->getNumElements();
5465     SmallVector<Constant *, 32> NewC(NumElts);
5466     for (unsigned i = 0; i != NumElts; ++i) {
5467       Constant *EltC = In->getAggregateElement(i);
5468       // Poison elements propagate. NaN propagates except signaling is quieted.
5469       // Replace unknown or undef elements with canonical NaN.
5470       if (EltC && isa<PoisonValue>(EltC))
5471         NewC[i] = EltC;
5472       else if (EltC && EltC->isNaN())
5473         NewC[i] = ConstantFP::get(
5474             EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5475       else
5476         NewC[i] = ConstantFP::getNaN(VecTy->getElementType());
5477     }
5478     return ConstantVector::get(NewC);
5479   }
5480 
5481   // If it is not a fixed vector, but not a simple NaN either, return a
5482   // canonical NaN.
5483   if (!In->isNaN())
5484     return ConstantFP::getNaN(Ty);
5485 
5486   // If we know this is a NaN and it's a scalable vector, we must have a splat
5487   // on our hands. Grab that before splatting a QNaN constant.
5488   if (isa<ScalableVectorType>(Ty)) {
5489     auto *Splat = In->getSplatValue();
5490     assert(Splat && Splat->isNaN() &&
5491            "Found a scalable-vector NaN but not a splat");
5492     In = Splat;
5493   }
5494 
5495   // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
5496   // preserve the sign/payload.
5497   return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5498 }
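
// A worked example of the quieting above (assuming IEEE-754 binary64):
// quieting sets the most significant significand bit, so the signaling NaN
// constant 0x7FF4000000000000 becomes the quiet NaN 0x7FFC000000000000,
// preserving the sign bit and the rest of the payload.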
5499 
5500 /// Perform folds that are common to any floating-point operation. This covers
5501 /// transforms based on poison/undef/NaN, where the operation itself makes no
5502 /// difference to the result.
5503 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
5504                               const SimplifyQuery &Q,
5505                               fp::ExceptionBehavior ExBehavior,
5506                               RoundingMode Rounding) {
5507   // Poison is independent of anything else. It always propagates from an
5508   // operand to a math result.
5509   if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
5510     return PoisonValue::get(Ops[0]->getType());
5511 
5512   for (Value *V : Ops) {
5513     bool IsNan = match(V, m_NaN());
5514     bool IsInf = match(V, m_Inf());
5515     bool IsUndef = Q.isUndefValue(V);
5516 
5517     // If this operation has 'nnan' or 'ninf' and at least one disallowed
5518     // operand (an undef operand can be chosen to be NaN/Inf), then the result
5519     // of this operation is poison.
5520     if (FMF.noNaNs() && (IsNan || IsUndef))
5521       return PoisonValue::get(V->getType());
5522     if (FMF.noInfs() && (IsInf || IsUndef))
5523       return PoisonValue::get(V->getType());
5524 
5525     if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5526       // Undef does not propagate because undef means that all bits can take on
5527       // any value. If this is undef * NaN for example, then the result values
5528       // (at least the exponent bits) are limited. Assume the undef is a
5529       // canonical NaN and propagate that.
5530       if (IsUndef)
5531         return ConstantFP::getNaN(V->getType());
5532       if (IsNan)
5533         return propagateNaN(cast<Constant>(V));
5534     } else if (ExBehavior != fp::ebStrict) {
5535       if (IsNan)
5536         return propagateNaN(cast<Constant>(V));
5537     }
5538   }
5539   return nullptr;
5540 }
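
// Illustrative (hypothetical) IR examples of the common FP folds above:
//   fadd float %x, poison              --> poison
//   fadd nnan float %x, undef          --> poison (undef may be chosen as NaN)
//   fadd float %x, 0x7FF8000000000000  --> 0x7FF8000000000000 (QNaN propagates)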
5541 
5542 /// Given operands for an FAdd, see if we can fold the result.  If not, this
5543 /// returns null.
5544 static Value *
5545 simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5546                  const SimplifyQuery &Q, unsigned MaxRecurse,
5547                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5548                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5549   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5550     if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5551       return C;
5552 
5553   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5554     return C;
5555 
5556   // fadd X, -0 ==> X
5557   // With strict/constrained FP, we have these possible edge cases that do
5558   // not simplify to Op0:
5559   // fadd SNaN, -0.0 --> QNaN
5560   // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5561   if (canIgnoreSNaN(ExBehavior, FMF) &&
5562       (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5563        FMF.noSignedZeros()))
5564     if (match(Op1, m_NegZeroFP()))
5565       return Op0;
5566 
5567   // fadd X, 0 ==> X, when we know X is not -0
5568   if (canIgnoreSNaN(ExBehavior, FMF))
5569     if (match(Op1, m_PosZeroFP()) &&
5570         (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
5571       return Op0;
5572 
5573   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5574     return nullptr;
5575 
5576   if (FMF.noNaNs()) {
5577     // With nnan: X + {+/-}Inf --> {+/-}Inf
5578     if (match(Op1, m_Inf()))
5579       return Op1;
5580 
5581     // With nnan: -X + X --> 0.0 (and commuted variant)
5582     // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5583     // Negative zeros are allowed because we always end up with positive zero:
5584     // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5585     // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5586     // X =  0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5587     // X =  0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5588     if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5589         match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5590       return ConstantFP::getZero(Op0->getType());
5591 
5592     if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5593         match(Op1, m_FNeg(m_Specific(Op0))))
5594       return ConstantFP::getZero(Op0->getType());
5595   }
5596 
5597   // (X - Y) + Y --> X
5598   // Y + (X - Y) --> X
5599   Value *X;
5600   if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5601       (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5602        match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5603     return X;
5604 
5605   return nullptr;
5606 }
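
// An illustrative (hypothetical) IR example of the last fold above, which
// requires both the reassoc and nsz flags:
//   %d = fsub reassoc nsz float %x, %y
//   %r = fadd reassoc nsz float %d, %y   ; simplifies to %x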
5607 
5608 /// Given operands for an FSub, see if we can fold the result.  If not, this
5609 /// returns null.
5610 static Value *
5611 simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5612                  const SimplifyQuery &Q, unsigned MaxRecurse,
5613                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5614                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5615   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5616     if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5617       return C;
5618 
5619   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5620     return C;
5621 
5622   // fsub X, +0 ==> X
5623   if (canIgnoreSNaN(ExBehavior, FMF) &&
5624       (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5625        FMF.noSignedZeros()))
5626     if (match(Op1, m_PosZeroFP()))
5627       return Op0;
5628 
5629   // fsub X, -0 ==> X, when we know X is not -0
5630   if (canIgnoreSNaN(ExBehavior, FMF))
5631     if (match(Op1, m_NegZeroFP()) &&
5632         (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
5633       return Op0;
5634 
5635   // fsub -0.0, (fsub -0.0, X) ==> X
5636   // fsub -0.0, (fneg X) ==> X
5637   Value *X;
5638   if (canIgnoreSNaN(ExBehavior, FMF))
5639     if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
5640       return X;
5641 
5642   // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5643   // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5644   if (canIgnoreSNaN(ExBehavior, FMF))
5645     if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
5646         (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
5647          match(Op1, m_FNeg(m_Value(X)))))
5648       return X;
5649 
5650   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5651     return nullptr;
5652 
5653   if (FMF.noNaNs()) {
5654     // fsub nnan x, x ==> 0.0
5655     if (Op0 == Op1)
5656       return Constant::getNullValue(Op0->getType());
5657 
5658     // With nnan: {+/-}Inf - X --> {+/-}Inf
5659     if (match(Op0, m_Inf()))
5660       return Op0;
5661 
5662     // With nnan: X - {+/-}Inf --> {-/+}Inf
5663     if (match(Op1, m_Inf()))
5664       return foldConstant(Instruction::FNeg, Op1, Q);
5665   }
5666 
5667   // Y - (Y - X) --> X
5668   // (X + Y) - Y --> X
5669   if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5670       (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
5671        match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
5672     return X;
5673 
5674   return nullptr;
5675 }
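
// An illustrative (hypothetical) IR example of the nnan fold above:
//   fsub nnan float %x, %x   --> 0.0
// Without nnan this would be unsound: if %x is NaN or +/-infinity, then
// x - x is NaN, not zero.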
5676 
5677 static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5678                               const SimplifyQuery &Q, unsigned MaxRecurse,
5679                               fp::ExceptionBehavior ExBehavior,
5680                               RoundingMode Rounding) {
5681   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5682     return C;
5683 
5684   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5685     return nullptr;
5686 
5687   // Canonicalize special constants as operand 1.
5688   if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
5689     std::swap(Op0, Op1);
5690 
5691   // X * 1.0 --> X
5692   if (match(Op1, m_FPOne()))
5693     return Op0;
5694 
5695   if (match(Op1, m_AnyZeroFP())) {
5696     // X * 0.0 --> 0.0 (with nnan and nsz)
5697     if (FMF.noNaNs() && FMF.noSignedZeros())
5698       return ConstantFP::getZero(Op0->getType());
5699 
5700     // +normal number * (-)0.0 --> (-)0.0
5701     if (isKnownNeverInfOrNaN(Op0, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
5702         // TODO: Check SignBit from computeKnownFPClass when it's more complete.
5703         SignBitMustBeZero(Op0, Q.DL, Q.TLI))
5704       return Op1;
5705   }
5706 
5707   // sqrt(X) * sqrt(X) --> X, if we can:
5708   // 1. Remove the intermediate rounding (reassociate).
5709   // 2. Ignore non-zero negative numbers because sqrt would produce NaN.
5710   // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
5711   Value *X;
5712   if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
5713       FMF.noNaNs() && FMF.noSignedZeros())
5714     return X;
5715 
5716   return nullptr;
5717 }
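
// An illustrative (hypothetical) IR example of the sqrt fold above:
//   %s = call reassoc nnan nsz float @llvm.sqrt.f32(float %x)
//   %r = fmul reassoc nnan nsz float %s, %s   ; simplifies to %x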
5718 
5719 /// Given the operands for an FMul, see if we can fold the result
5720 static Value *
5721 simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5722                  const SimplifyQuery &Q, unsigned MaxRecurse,
5723                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5724                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5725   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5726     if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
5727       return C;
5728 
5729   // Now apply simplifications that do not require rounding.
5730   return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5731 }
5732 
5733 Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5734                               const SimplifyQuery &Q,
5735                               fp::ExceptionBehavior ExBehavior,
5736                               RoundingMode Rounding) {
5737   return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5738                             Rounding);
5739 }
5740 
5741 Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5742                               const SimplifyQuery &Q,
5743                               fp::ExceptionBehavior ExBehavior,
5744                               RoundingMode Rounding) {
5745   return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5746                             Rounding);
5747 }
5748 
5749 Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5750                               const SimplifyQuery &Q,
5751                               fp::ExceptionBehavior ExBehavior,
5752                               RoundingMode Rounding) {
5753   return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5754                             Rounding);
5755 }
5756 
5757 Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5758                              const SimplifyQuery &Q,
5759                              fp::ExceptionBehavior ExBehavior,
5760                              RoundingMode Rounding) {
5761   return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5762                            Rounding);
5763 }
5764 
5765 static Value *
5766 simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5767                  const SimplifyQuery &Q, unsigned,
5768                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5769                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5770   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5771     if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5772       return C;
5773 
5774   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5775     return C;
5776 
5777   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5778     return nullptr;
5779 
5780   // X / 1.0 -> X
5781   if (match(Op1, m_FPOne()))
5782     return Op0;
5783 
5784   // 0 / X -> 0
5785   // Requires that NaNs are ignored (X could be zero) and signed zeros are
5786   // ignored (X could be positive or negative, so the output sign is unknown).
5787   if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5788     return ConstantFP::getZero(Op0->getType());
5789 
5790   if (FMF.noNaNs()) {
5791     // X / X -> 1.0 is legal when NaNs are ignored.
5792     // We can ignore infinities because INF/INF is NaN.
5793     if (Op0 == Op1)
5794       return ConstantFP::get(Op0->getType(), 1.0);
5795 
5796     // (X * Y) / Y --> X if we can reassociate to the above form.
5797     Value *X;
5798     if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
5799       return X;
5800 
5801     // -X /  X -> -1.0 and
5802     //  X / -X -> -1.0 are legal when NaNs are ignored.
5803     // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5804     if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
5805         match(Op1, m_FNegNSZ(m_Specific(Op0))))
5806       return ConstantFP::get(Op0->getType(), -1.0);
5807 
5808     // nnan ninf X / [-]0.0 -> poison
5809     if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
5810       return PoisonValue::get(Op1->getType());
5811   }
5812 
5813   return nullptr;
5814 }
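
// Illustrative (hypothetical) IR examples of the nnan fdiv folds above:
//   fdiv nnan float %x, %x   --> 1.0
//   %n = fsub float 0.0, %x
//   fdiv nnan float %n, %x   --> -1.0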
5815 
5816 Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5817                               const SimplifyQuery &Q,
5818                               fp::ExceptionBehavior ExBehavior,
5819                               RoundingMode Rounding) {
5820   return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5821                             Rounding);
5822 }
5823 
5824 static Value *
5825 simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5826                  const SimplifyQuery &Q, unsigned,
5827                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5828                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5829   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5830     if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5831       return C;
5832 
5833   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5834     return C;
5835 
5836   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5837     return nullptr;
5838 
5839   // Unlike fdiv, the result of frem always matches the sign of the dividend.
5840   // The constant match may include undef elements in a vector, so return a full
5841   // zero constant as the result.
5842   if (FMF.noNaNs()) {
5843     // +0 % X -> 0
5844     if (match(Op0, m_PosZeroFP()))
5845       return ConstantFP::getZero(Op0->getType());
5846     // -0 % X -> -0
5847     if (match(Op0, m_NegZeroFP()))
5848       return ConstantFP::getNegativeZero(Op0->getType());
5849   }
5850 
5851   return nullptr;
5852 }
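
// Illustrative (hypothetical) IR examples of the frem folds above:
//   frem nnan float 0.0, %x    --> 0.0
//   frem nnan float -0.0, %x   --> -0.0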
5853 
5854 Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5855                               const SimplifyQuery &Q,
5856                               fp::ExceptionBehavior ExBehavior,
5857                               RoundingMode Rounding) {
5858   return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5859                             Rounding);
5860 }
5861 
5862 //=== Helper functions for higher up the class hierarchy.
5863 
5864 /// Given the operand for a UnaryOperator, see if we can fold the result.
5865 /// If not, this returns null.
5866 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
5867                            unsigned MaxRecurse) {
5868   switch (Opcode) {
5869   case Instruction::FNeg:
5870     return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
5871   default:
5872     llvm_unreachable("Unexpected opcode");
5873   }
5874 }
5875 
5876 /// Given the operand for a UnaryOperator, see if we can fold the result.
5877 /// If not, this returns null.
5878 /// Try to use FastMathFlags when folding the result.
5879 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
5880                              const FastMathFlags &FMF, const SimplifyQuery &Q,
5881                              unsigned MaxRecurse) {
5882   switch (Opcode) {
5883   case Instruction::FNeg:
5884     return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
5885   default:
5886     return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
5887   }
5888 }
5889 
5890 Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
5891   return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
5892 }
5893 
5894 Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
5895                           const SimplifyQuery &Q) {
5896   return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
5897 }
5898 
5899 /// Given operands for a BinaryOperator, see if we can fold the result.
5900 /// If not, this returns null.
5901 static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5902                             const SimplifyQuery &Q, unsigned MaxRecurse) {
5903   switch (Opcode) {
5904   case Instruction::Add:
5905     return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5906                            MaxRecurse);
5907   case Instruction::Sub:
5908     return simplifySubInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5909                            MaxRecurse);
5910   case Instruction::Mul:
5911     return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5912                            MaxRecurse);
5913   case Instruction::SDiv:
5914     return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5915   case Instruction::UDiv:
5916     return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5917   case Instruction::SRem:
5918     return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
5919   case Instruction::URem:
5920     return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
5921   case Instruction::Shl:
5922     return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5923                            MaxRecurse);
5924   case Instruction::LShr:
5925     return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5926   case Instruction::AShr:
5927     return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5928   case Instruction::And:
5929     return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
5930   case Instruction::Or:
5931     return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
5932   case Instruction::Xor:
5933     return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
5934   case Instruction::FAdd:
5935     return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5936   case Instruction::FSub:
5937     return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5938   case Instruction::FMul:
5939     return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5940   case Instruction::FDiv:
5941     return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5942   case Instruction::FRem:
5943     return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5944   default:
5945     llvm_unreachable("Unexpected opcode");
5946   }
5947 }
5948 
5949 /// Given operands for a BinaryOperator, see if we can fold the result.
5950 /// If not, this returns null.
5951 /// Try to use FastMathFlags when folding the result.
5952 static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5953                             const FastMathFlags &FMF, const SimplifyQuery &Q,
5954                             unsigned MaxRecurse) {
5955   switch (Opcode) {
5956   case Instruction::FAdd:
5957     return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
5958   case Instruction::FSub:
5959     return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
5960   case Instruction::FMul:
5961     return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
5962   case Instruction::FDiv:
5963     return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
5964   default:
5965     return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
5966   }
5967 }
5968 
5969 Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5970                            const SimplifyQuery &Q) {
5971   return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
5972 }
5973 
5974 Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5975                            FastMathFlags FMF, const SimplifyQuery &Q) {
5976   return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
5977 }
5978 
5979 /// Given operands for a CmpInst, see if we can fold the result.
5980 static Value *simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5981                               const SimplifyQuery &Q, unsigned MaxRecurse) {
5982   if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
5983     return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
5984   return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5985 }
5986 
5987 Value *llvm::simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5988                              const SimplifyQuery &Q) {
5989   return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
5990 }
5991 
5992 static bool isIdempotent(Intrinsic::ID ID) {
5993   switch (ID) {
5994   default:
5995     return false;
5996 
5997   // Unary idempotent: f(f(x)) = f(x)
5998   case Intrinsic::fabs:
5999   case Intrinsic::floor:
6000   case Intrinsic::ceil:
6001   case Intrinsic::trunc:
6002   case Intrinsic::rint:
6003   case Intrinsic::nearbyint:
6004   case Intrinsic::round:
6005   case Intrinsic::roundeven:
6006   case Intrinsic::canonicalize:
6007   case Intrinsic::arithmetic_fence:
6008     return true;
6009   }
6010 }
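
// An illustrative (hypothetical) IR example: for an idempotent intrinsic,
// the outer call simplifies to the inner one.
//   %a = call float @llvm.fabs.f32(float %x)
//   %b = call float @llvm.fabs.f32(float %a)   ; simplifies to %a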
6011 
6012 /// Return true if the intrinsic rounds a floating-point value to an integral
6013 /// floating-point value (not an integer type).
6014 static bool removesFPFraction(Intrinsic::ID ID) {
6015   switch (ID) {
6016   default:
6017     return false;
6018 
6019   case Intrinsic::floor:
6020   case Intrinsic::ceil:
6021   case Intrinsic::trunc:
6022   case Intrinsic::rint:
6023   case Intrinsic::nearbyint:
6024   case Intrinsic::round:
6025   case Intrinsic::roundeven:
6026     return true;
6027   }
6028 }
6029 
6030 static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
6031                                    const DataLayout &DL) {
6032   GlobalValue *PtrSym;
6033   APInt PtrOffset;
6034   if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
6035     return nullptr;
6036 
6037   Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
6038   Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
6039   Type *Int32PtrTy = Int32Ty->getPointerTo();
6040   Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
6041 
6042   auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6043   if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
6044     return nullptr;
6045 
6046   uint64_t OffsetInt = OffsetConstInt->getSExtValue();
6047   if (OffsetInt % 4 != 0)
6048     return nullptr;
6049 
6050   Constant *C = ConstantExpr::getGetElementPtr(
6051       Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
6052       ConstantInt::get(Int64Ty, OffsetInt / 4));
6053   Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
6054   if (!Loaded)
6055     return nullptr;
6056 
6057   auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6058   if (!LoadedCE)
6059     return nullptr;
6060 
6061   if (LoadedCE->getOpcode() == Instruction::Trunc) {
6062     LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6063     if (!LoadedCE)
6064       return nullptr;
6065   }
6066 
6067   if (LoadedCE->getOpcode() != Instruction::Sub)
6068     return nullptr;
6069 
6070   auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6071   if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6072     return nullptr;
6073   auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6074 
6075   Constant *LoadedRHS = LoadedCE->getOperand(1);
6076   GlobalValue *LoadedRHSSym;
6077   APInt LoadedRHSOffset;
6078   if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
6079                                   DL) ||
6080       PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6081     return nullptr;
6082 
6083   return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
6084 }
6085 
6086 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
6087                                      const SimplifyQuery &Q) {
6088   // Idempotent functions return the same result when called repeatedly.
6089   Intrinsic::ID IID = F->getIntrinsicID();
6090   if (isIdempotent(IID))
6091     if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6092       if (II->getIntrinsicID() == IID)
6093         return II;
6094 
6095   if (removesFPFraction(IID)) {
6096     // Converting from int or calling a rounding function always results in a
6097     // finite integral number or infinity. For those inputs, rounding functions
6098     // always return the same value, so the second rounding is eliminated. Ex:
6099     // floor (sitofp x) -> sitofp x
6100     // round (ceil x) -> ceil x
6101     auto *II = dyn_cast<IntrinsicInst>(Op0);
6102     if ((II && removesFPFraction(II->getIntrinsicID())) ||
6103         match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
6104       return Op0;
6105   }
6106 
6107   Value *X;
6108   switch (IID) {
6109   case Intrinsic::fabs:
6110     if (SignBitMustBeZero(Op0, Q.DL, Q.TLI))
6111       return Op0;
6112     break;
6113   case Intrinsic::bswap:
6114     // bswap(bswap(x)) -> x
6115     if (match(Op0, m_BSwap(m_Value(X))))
6116       return X;
6117     break;
6118   case Intrinsic::bitreverse:
6119     // bitreverse(bitreverse(x)) -> x
6120     if (match(Op0, m_BitReverse(m_Value(X))))
6121       return X;
6122     break;
6123   case Intrinsic::ctpop: {
6124     // ctpop(X) -> 1 iff X is a non-zero power of 2.
6125     if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
6126                                Q.DT))
6127       return ConstantInt::get(Op0->getType(), 1);
6128     // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
6129     // ctpop(and X, 1) --> and X, 1
6130     unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6131     if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
6132                           Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
6133       return Op0;
6134     break;
6135   }
6136   case Intrinsic::exp:
6137     // exp(log(x)) -> x
6138     if (Q.CxtI->hasAllowReassoc() &&
6139         match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
6140       return X;
6141     break;
6142   case Intrinsic::exp2:
6143     // exp2(log2(x)) -> x
6144     if (Q.CxtI->hasAllowReassoc() &&
6145         match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
6146       return X;
6147     break;
6148   case Intrinsic::log:
6149     // log(exp(x)) -> x
6150     if (Q.CxtI->hasAllowReassoc() &&
6151         match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
6152       return X;
6153     break;
6154   case Intrinsic::log2:
6155     // log2(exp2(x)) -> x
6156     if (Q.CxtI->hasAllowReassoc() &&
6157         (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
6158          match(Op0,
6159                m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
6160       return X;
6161     break;
6162   case Intrinsic::log10:
6163     // log10(pow(10.0, x)) -> x
6164     if (Q.CxtI->hasAllowReassoc() &&
6165         match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X))))
6166       return X;
6167     break;
6168   case Intrinsic::experimental_vector_reverse:
6169     // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
6170     if (match(Op0, m_VecReverse(m_Value(X))))
6171       return X;
6172     // experimental.vector.reverse(splat(X)) -> splat(X)
6173     if (isSplatValue(Op0))
6174       return Op0;
6175     break;
6176   case Intrinsic::frexp: {
6177     // Frexp is idempotent with the added complication of the struct return.
6178     if (match(Op0, m_ExtractValue<0>(m_Value(X)))) {
6179       if (match(X, m_Intrinsic<Intrinsic::frexp>(m_Value())))
6180         return X;
6181     }
6182 
6183     break;
6184   }
6185   default:
6186     break;
6187   }
6188 
6189   return nullptr;
6190 }
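
// An illustrative (hypothetical) IR example of the ctpop fold above: when
// every bit except the lowest is known zero, the low bit is the pop count.
//   %m = and i32 %x, 1
//   %c = call i32 @llvm.ctpop.i32(i32 %m)   ; simplifies to %m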
6191 
6192 /// Given a min/max intrinsic, see if it can be removed based on having an
6193 /// operand that is another min/max intrinsic with shared operand(s). The caller
6194 /// is expected to swap the operand arguments to handle commutation.
6195 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
6196   Value *X, *Y;
6197   if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
6198     return nullptr;
6199 
6200   auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6201   if (!MM0)
6202     return nullptr;
6203   Intrinsic::ID IID0 = MM0->getIntrinsicID();
6204 
6205   if (Op1 == X || Op1 == Y ||
6206       match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
6207     // max (max X, Y), X --> max X, Y
6208     if (IID0 == IID)
6209       return MM0;
6210     // max (min X, Y), X --> X
6211     if (IID0 == getInverseMinMaxIntrinsic(IID))
6212       return Op1;
6213   }
6214   return nullptr;
6215 }
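
// Illustrative (hypothetical) IR examples of the shared-operand folds above:
//   %m = call i8 @llvm.umax.i8(i8 %x, i8 %y)
//   call i8 @llvm.umax.i8(i8 %m, i8 %x)   ; simplifies to %m
//   call i8 @llvm.umin.i8(i8 %m, i8 %x)   ; simplifies to %x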
6216 
6217 /// Given a min/max intrinsic, see if it can be removed based on having an
6218 /// operand that is another min/max intrinsic with shared operand(s). The caller
6219 /// is expected to swap the operand arguments to handle commutation.
6220 static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
6221                                          Value *Op1) {
6222   assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6223           IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6224          "Unsupported intrinsic");
6225 
6226   auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6227   // If Op0 is not the same intrinsic as IID, do not process it.
6228   // This differs from the integer min/max handling: we do not process cases
6229   // like max(min(X,Y),min(X,Y)) => min(X,Y), but GVN can handle them.
6230   if (!M0 || M0->getIntrinsicID() != IID)
6231     return nullptr;
6232   Value *X0 = M0->getOperand(0);
6233   Value *Y0 = M0->getOperand(1);
6234   // Simple case, m(m(X,Y), X) => m(X, Y)
6235   //              m(m(X,Y), Y) => m(X, Y)
6236   // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
6237   // For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN.
6238   // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
6239   // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, X) == X.
6240   if (X0 == Op1 || Y0 == Op1)
6241     return M0;
6242 
6243   auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6244   if (!M1)
6245     return nullptr;
6246   Value *X1 = M1->getOperand(0);
6247   Value *Y1 = M1->getOperand(1);
6248   Intrinsic::ID IID1 = M1->getIntrinsicID();
6249   // We have the case m(m(X,Y), m'(X,Y)), where m' is commutative.
6250   // If m' is m or the inversion of m, then m(m(X,Y), m'(X,Y)) == m(X,Y).
6251   // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
6252   // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
6253   // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
6254   // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
6255   if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6256     if (IID1 == IID || getInverseMinMaxIntrinsic(IID1) == IID)
6257       return M0;
6258 
6259   return nullptr;
6260 }
6261 
6262 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
6263                                       const SimplifyQuery &Q) {
6264   Intrinsic::ID IID = F->getIntrinsicID();
6265   Type *ReturnType = F->getReturnType();
6266   unsigned BitWidth = ReturnType->getScalarSizeInBits();
6267   switch (IID) {
6268   case Intrinsic::abs:
6269     // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6270     // It is always OK to pick the earlier abs. We'll just lose nsw if it's
6271     // only on the outer abs.
6272     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
6273       return Op0;
6274     break;
6275 
6276   case Intrinsic::cttz: {
6277     Value *X;
6278     if (match(Op0, m_Shl(m_One(), m_Value(X))))
6279       return X;
6280     break;
6281   }
6282   case Intrinsic::ctlz: {
6283     Value *X;
6284     if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
6285       return X;
6286     if (match(Op0, m_AShr(m_Negative(), m_Value())))
6287       return Constant::getNullValue(ReturnType);
6288     break;
6289   }
6290   case Intrinsic::smax:
6291   case Intrinsic::smin:
6292   case Intrinsic::umax:
6293   case Intrinsic::umin: {
6294     // If the arguments are the same, this is a no-op.
6295     if (Op0 == Op1)
6296       return Op0;
6297 
6298     // Canonicalize immediate constant operand as Op1.
6299     if (match(Op0, m_ImmConstant()))
6300       std::swap(Op0, Op1);
6301 
6302     // Assume undef is the limit value.
6303     if (Q.isUndefValue(Op1))
6304       return ConstantInt::get(
6305           ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
6306 
6307     const APInt *C;
6308     if (match(Op1, m_APIntAllowUndef(C))) {
6309       // Clamp to limit value. For example:
6310       // umax(i8 %x, i8 255) --> 255
6311       if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
6312         return ConstantInt::get(ReturnType, *C);
6313 
6314       // If the constant op is the opposite of the limit value, the other must
6315       // be larger/smaller or equal. For example:
6316       // umin(i8 %x, i8 255) --> %x
6317       if (*C == MinMaxIntrinsic::getSaturationPoint(
6318                     getInverseMinMaxIntrinsic(IID), BitWidth))
6319         return Op0;
6320 
6321       // Remove nested call if constant operands allow it. Example:
6322       // max (max X, 7), 5 -> max X, 7
6323       auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6324       if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6325         // TODO: loosen undef/splat restrictions for vector constants.
6326         Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6327         const APInt *InnerC;
6328         if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
6329             ICmpInst::compare(*InnerC, *C,
6330                               ICmpInst::getNonStrictPredicate(
6331                                   MinMaxIntrinsic::getPredicate(IID))))
6332           return Op0;
6333       }
6334     }
6335 
6336     if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
6337       return V;
6338     if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6339       return V;
6340 
6341     ICmpInst::Predicate Pred =
6342         ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
6343     if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
6344       return Op0;
6345     if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6346       return Op1;
6347 
6348     break;
6349   }
6350   case Intrinsic::usub_with_overflow:
6351   case Intrinsic::ssub_with_overflow:
6352     // X - X -> { 0, false }
6353     // X - undef -> { 0, false }
6354     // undef - X -> { 0, false }
6355     if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6356       return Constant::getNullValue(ReturnType);
6357     break;
6358   case Intrinsic::uadd_with_overflow:
6359   case Intrinsic::sadd_with_overflow:
6360     // X + undef -> { -1, false }
6361     // undef + X -> { -1, false }
6362     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
6363       return ConstantStruct::get(
6364           cast<StructType>(ReturnType),
6365           {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
6366            Constant::getNullValue(ReturnType->getStructElementType(1))});
6367     }
6368     break;
6369   case Intrinsic::umul_with_overflow:
6370   case Intrinsic::smul_with_overflow:
6371     // 0 * X -> { 0, false }
6372     // X * 0 -> { 0, false }
6373     if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
6374       return Constant::getNullValue(ReturnType);
6375     // undef * X -> { 0, false }
6376     // X * undef -> { 0, false }
6377     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6378       return Constant::getNullValue(ReturnType);
6379     break;
6380   case Intrinsic::uadd_sat:
6381     // sat(MAX + X) -> MAX
6382     // sat(X + MAX) -> MAX
6383     if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
6384       return Constant::getAllOnesValue(ReturnType);
6385     [[fallthrough]];
6386   case Intrinsic::sadd_sat:
6387     // sat(X + undef) -> -1
6388     // sat(undef + X) -> -1
6389     // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
6390     // For signed: Assume undef is ~X, in which case X + ~X = -1.
6391     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6392       return Constant::getAllOnesValue(ReturnType);
6393 
6394     // X + 0 -> X
6395     if (match(Op1, m_Zero()))
6396       return Op0;
6397     // 0 + X -> X
6398     if (match(Op0, m_Zero()))
6399       return Op1;
6400     break;
6401   case Intrinsic::usub_sat:
6402     // sat(0 - X) -> 0, sat(X - MAX) -> 0
6403     if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
6404       return Constant::getNullValue(ReturnType);
6405     [[fallthrough]];
6406   case Intrinsic::ssub_sat:
6407     // X - X -> 0, X - undef -> 0, undef - X -> 0
6408     if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6409       return Constant::getNullValue(ReturnType);
6410     // X - 0 -> X
6411     if (match(Op1, m_Zero()))
6412       return Op0;
6413     break;
6414   case Intrinsic::load_relative:
6415     if (auto *C0 = dyn_cast<Constant>(Op0))
6416       if (auto *C1 = dyn_cast<Constant>(Op1))
6417         return simplifyRelativeLoad(C0, C1, Q.DL);
6418     break;
6419   case Intrinsic::powi:
6420     if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6421       // powi(x, 0) -> 1.0
6422       if (Power->isZero())
6423         return ConstantFP::get(Op0->getType(), 1.0);
6424       // powi(x, 1) -> x
6425       if (Power->isOne())
6426         return Op0;
6427     }
6428     break;
6429   case Intrinsic::copysign:
6430     // copysign X, X --> X
6431     if (Op0 == Op1)
6432       return Op0;
6433     // copysign -X, X --> X
6434     // copysign X, -X --> -X
6435     if (match(Op0, m_FNeg(m_Specific(Op1))) ||
6436         match(Op1, m_FNeg(m_Specific(Op0))))
6437       return Op1;
6438     break;
6439   case Intrinsic::is_fpclass: {
6440     if (isa<PoisonValue>(Op0))
6441       return PoisonValue::get(ReturnType);
6442 
6443     uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6444     // If all tests are made, it doesn't matter what the value is.
6445     if ((Mask & fcAllFlags) == fcAllFlags)
6446       return ConstantInt::get(ReturnType, true);
6447     if ((Mask & fcAllFlags) == 0)
6448       return ConstantInt::get(ReturnType, false);
6449     if (Q.isUndefValue(Op0))
6450       return UndefValue::get(ReturnType);
6451     break;
6452   }
6453   case Intrinsic::maxnum:
6454   case Intrinsic::minnum:
6455   case Intrinsic::maximum:
6456   case Intrinsic::minimum: {
6457     // If the arguments are the same, this is a no-op.
6458     if (Op0 == Op1)
6459       return Op0;
6460 
6461     // Canonicalize constant operand as Op1.
6462     if (isa<Constant>(Op0))
6463       std::swap(Op0, Op1);
6464 
6465     // If an argument is undef, return the other argument.
6466     if (Q.isUndefValue(Op1))
6467       return Op0;
6468 
6469     bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6470     bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6471 
6472     // minnum(X, nan) -> X
6473     // maxnum(X, nan) -> X
6474     // minimum(X, nan) -> nan
6475     // maximum(X, nan) -> nan
6476     if (match(Op1, m_NaN()))
6477       return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
6478 
6479     // In the following folds, inf can be replaced with the largest finite
6480     // float, if the ninf flag is set.
6481     const APFloat *C;
6482     if (match(Op1, m_APFloat(C)) &&
6483         (C->isInfinity() || (Q.CxtI->hasNoInfs() && C->isLargest()))) {
6484       // minnum(X, -inf) -> -inf
6485       // maxnum(X, +inf) -> +inf
6486       // minimum(X, -inf) -> -inf if nnan
6487       // maximum(X, +inf) -> +inf if nnan
6488       if (C->isNegative() == IsMin && (!PropagateNaN || Q.CxtI->hasNoNaNs()))
6489         return ConstantFP::get(ReturnType, *C);
6490 
6491       // minnum(X, +inf) -> X if nnan
6492       // maxnum(X, -inf) -> X if nnan
6493       // minimum(X, +inf) -> X
6494       // maximum(X, -inf) -> X
6495       if (C->isNegative() != IsMin && (PropagateNaN || Q.CxtI->hasNoNaNs()))
6496         return Op0;
6497     }
6498 
6499     // Min/max of the same operation with common operand:
6500     // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
6501     if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1))
6502       return V;
6503     if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
6504       return V;
6505 
6506     break;
6507   }
6508   case Intrinsic::vector_extract: {
6509     Type *ReturnType = F->getReturnType();
6510 
6511     // (extract_vector (insert_vector _, X, 0), 0) -> X
6512     unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6513     Value *X = nullptr;
6514     if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
6515                                                          m_Zero())) &&
6516         IdxN == 0 && X->getType() == ReturnType)
6517       return X;
6518 
6519     break;
6520   }
6521   default:
6522     break;
6523   }
6524 
6525   return nullptr;
6526 }
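
// Illustrative (hypothetical) IR examples of the saturating and min/max
// folds above:
//   call i8 @llvm.uadd.sat.i8(i8 %x, i8 -1)   --> -1 (saturates at the maximum)
//   call i8 @llvm.usub.sat.i8(i8 %x, i8 0)    --> %x
//   call i8 @llvm.umin.i8(i8 %x, i8 -1)       --> %x (i8 -1 is UMAX, so %x <= it)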
6527 
6528 static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
6529                                 ArrayRef<Value *> Args,
6530                                 const SimplifyQuery &Q) {
6531   // Operand bundles should not be in Args.
6532   assert(Call->arg_size() == Args.size());
6533   unsigned NumOperands = Args.size();
6534   Function *F = cast<Function>(Callee);
6535   Intrinsic::ID IID = F->getIntrinsicID();
6536 
6537   // Most of the intrinsics with no operands have some kind of side effect.
6538   // Don't simplify.
6539   if (!NumOperands) {
6540     switch (IID) {
6541     case Intrinsic::vscale: {
6542       auto Attr = Call->getFunction()->getFnAttribute(Attribute::VScaleRange);
6543       if (!Attr.isValid())
6544         return nullptr;
6545       unsigned VScaleMin = Attr.getVScaleRangeMin();
6546       std::optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
6547       if (VScaleMax && VScaleMin == VScaleMax)
6548         return ConstantInt::get(F->getReturnType(), VScaleMin);
6549       return nullptr;
6550     }
6551     default:
6552       return nullptr;
6553     }
6554   }
6555 
6556   if (NumOperands == 1)
6557     return simplifyUnaryIntrinsic(F, Args[0], Q);
6558 
6559   if (NumOperands == 2)
6560     return simplifyBinaryIntrinsic(F, Args[0], Args[1], Q);
6561 
6562   // Handle intrinsics with 3 or more arguments.
6563   switch (IID) {
6564   case Intrinsic::masked_load:
6565   case Intrinsic::masked_gather: {
6566     Value *MaskArg = Args[2];
6567     Value *PassthruArg = Args[3];
6568     // If the mask is all zeros or undef, the "passthru" argument is the result.
6569     if (maskIsAllZeroOrUndef(MaskArg))
6570       return PassthruArg;
6571     return nullptr;
6572   }
6573   case Intrinsic::fshl:
6574   case Intrinsic::fshr: {
6575     Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6576 
6577     // If both operands are undef, the result is undef.
6578     if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
6579       return UndefValue::get(F->getReturnType());
6580 
6581     // If shift amount is undef, assume it is zero.
6582     if (Q.isUndefValue(ShAmtArg))
6583       return Args[IID == Intrinsic::fshl ? 0 : 1];
6584 
6585     const APInt *ShAmtC;
6586     if (match(ShAmtArg, m_APInt(ShAmtC))) {
6587       // If there's effectively no shift, return the 1st arg or 2nd arg.
6588       APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
6589       if (ShAmtC->urem(BitWidth).isZero())
6590         return Args[IID == Intrinsic::fshl ? 0 : 1];
6591     }
6592 
6593     // Rotating zero by anything is zero.
6594     if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
6595       return ConstantInt::getNullValue(F->getReturnType());
6596 
6597     // Rotating -1 by anything is -1.
6598     if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
6599       return ConstantInt::getAllOnesValue(F->getReturnType());
6600 
6601     return nullptr;
6602   }
6603   case Intrinsic::experimental_constrained_fma: {
6604     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6605     if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
6606                                 *FPI->getRoundingMode()))
6607       return V;
6608     return nullptr;
6609   }
6610   case Intrinsic::fma:
6611   case Intrinsic::fmuladd: {
6612     if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
6613                                 RoundingMode::NearestTiesToEven))
6614       return V;
6615     return nullptr;
6616   }
6617   case Intrinsic::smul_fix:
6618   case Intrinsic::smul_fix_sat: {
6619     Value *Op0 = Args[0];
6620     Value *Op1 = Args[1];
6621     Value *Op2 = Args[2];
6622     Type *ReturnType = F->getReturnType();
6623 
6624     // Canonicalize constant operand as Op1 (ConstantFolding handles the case
6625     // when both Op0 and Op1 are constant so we do not care about that special
6626     // case here).
6627     if (isa<Constant>(Op0))
6628       std::swap(Op0, Op1);
6629 
6630     // X * 0 -> 0
6631     if (match(Op1, m_Zero()))
6632       return Constant::getNullValue(ReturnType);
6633 
6634     // X * undef -> 0
6635     if (Q.isUndefValue(Op1))
6636       return Constant::getNullValue(ReturnType);
6637 
6638     // X * (1 << Scale) -> X
6639     APInt ScaledOne =
6640         APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
6641                             cast<ConstantInt>(Op2)->getZExtValue());
6642     if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
6643       return Op0;
6644 
6645     return nullptr;
6646   }
6647   case Intrinsic::vector_insert: {
6648     Value *Vec = Args[0];
6649     Value *SubVec = Args[1];
6650     Value *Idx = Args[2];
6651     Type *ReturnType = F->getReturnType();
6652 
6653     // (insert_vector Y, (extract_vector X, 0), 0) -> X
6654     // where: Y is X, or Y is undef
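    // E.g. (illustrative IR):
    //   %sub = call <2 x i64> @llvm.vector.extract.v2i64.v4i64(<4 x i64> %x, i64 0)
    //   %v = call <4 x i64> @llvm.vector.insert.v4i64.v2i64(<4 x i64> %x, <2 x i64> %sub, i64 0)
    // folds %v to %x.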
6655     unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6656     Value *X = nullptr;
6657     if (match(SubVec,
6658               m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
6659         (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
6660         X->getType() == ReturnType)
6661       return X;
6662 
6663     return nullptr;
6664   }
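  // The constrained FP intrinsics below reuse the ordinary FP simplifications,
  // passing along the call's exception behavior and rounding mode so that
  // folds which would be invalid under a strict FP environment are suppressed.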
6665   case Intrinsic::experimental_constrained_fadd: {
6666     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6667     return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6668                             *FPI->getExceptionBehavior(),
6669                             *FPI->getRoundingMode());
6670   }
6671   case Intrinsic::experimental_constrained_fsub: {
6672     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6673     return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6674                             *FPI->getExceptionBehavior(),
6675                             *FPI->getRoundingMode());
6676   }
6677   case Intrinsic::experimental_constrained_fmul: {
6678     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6679     return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6680                             *FPI->getExceptionBehavior(),
6681                             *FPI->getRoundingMode());
6682   }
6683   case Intrinsic::experimental_constrained_fdiv: {
6684     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6685     return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6686                             *FPI->getExceptionBehavior(),
6687                             *FPI->getRoundingMode());
6688   }
6689   case Intrinsic::experimental_constrained_frem: {
6690     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6691     return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6692                             *FPI->getExceptionBehavior(),
6693                             *FPI->getRoundingMode());
6694   }
6695   default:
6696     return nullptr;
6697   }
6698 }
6699 
6700 static Value *tryConstantFoldCall(CallBase *Call, Value *Callee,
6701                                   ArrayRef<Value *> Args,
6702                                   const SimplifyQuery &Q) {
6703   auto *F = dyn_cast<Function>(Callee);
6704   if (!F || !canConstantFoldCallTo(Call, F))
6705     return nullptr;
6706 
6707   SmallVector<Constant *, 4> ConstantArgs;
6708   ConstantArgs.reserve(Args.size());
6709   for (Value *Arg : Args) {
6710     Constant *C = dyn_cast<Constant>(Arg);
6711     if (!C) {
6712       if (isa<MetadataAsValue>(Arg))
6713         continue;
6714       return nullptr;
6715     }
6716     ConstantArgs.push_back(C);
6717   }
6718 
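  // At this point every argument is constant; e.g. an all-constant
  // "call double @llvm.sqrt.f64(double 4.0)" would fold here to "double 2.0".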
6719   return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
6720 }
6721 
6722 Value *llvm::simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
6723                           const SimplifyQuery &Q) {
6724   // Args should not contain operand bundle operands.
6725   assert(Call->arg_size() == Args.size());
6726 
6727   // musttail calls can only be simplified if they are also DCEd.
6728   // As we can't guarantee this here, don't simplify them.
6729   if (Call->isMustTailCall())
6730     return nullptr;
6731 
6732   // call undef -> poison
6733   // call null -> poison
6734   if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6735     return PoisonValue::get(Call->getType());
6736 
6737   if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q))
6738     return V;
6739 
6740   auto *F = dyn_cast<Function>(Callee);
6741   if (F && F->isIntrinsic())
6742     if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q))
6743       return Ret;
6744 
6745   return nullptr;
6746 }
6747 
6748 Value *llvm::simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q) {
6749   assert(isa<ConstrainedFPIntrinsic>(Call));
6750   SmallVector<Value *, 4> Args(Call->args());
6751   if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
6752     return V;
6753   if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
6754     return Ret;
6755   return nullptr;
6756 }
6757 
6758 /// Given operands for a Freeze, see if we can fold the result.
6759 static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6760   // Use a utility function defined in ValueTracking.
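  // E.g. for "define i32 @f(i32 noundef %x)", "freeze i32 %x" folds to %x,
  // because a noundef argument is guaranteed to be neither undef nor poison.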
6761   if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
6762     return Op0;
6763   // We have room for improvement.
6764   return nullptr;
6765 }
6766 
6767 Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6768   return ::simplifyFreezeInst(Op0, Q);
6769 }
6770 
6771 Value *llvm::simplifyLoadInst(LoadInst *LI, Value *PtrOp,
6772                               const SimplifyQuery &Q) {
6773   if (LI->isVolatile())
6774     return nullptr;
6775 
6776   if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
6777     return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
6778 
6779   // We can only fold the load if it is from a constant global with definitive
6780   // initializer. Skip expensive logic if this is not the case.
6781   auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
6782   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
6783     return nullptr;
6784 
6785   // If GlobalVariable's initializer is uniform, then return the constant
6786   // regardless of its offset.
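  // E.g. with "@g = constant <4 x i32> zeroinitializer", a load of any type
  // and at any offset within @g folds to zero.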
6787   if (Constant *C =
6788           ConstantFoldLoadFromUniformValue(GV->getInitializer(), LI->getType()))
6789     return C;
6790 
6791   // Try to convert operand into a constant by stripping offsets while looking
6792   // through invariant.group intrinsics.
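  // E.g. a pointer built from "@llvm.launder.invariant.group.p0(ptr @g)" plus
  // constant GEP offsets can still be resolved back to @g here.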
6793   APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
6794   PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
6795       Q.DL, Offset, /* AllowNonInbounds */ true,
6796       /* AllowInvariantGroup */ true);
6797   if (PtrOp == GV) {
6798     // Index size may have changed due to address space casts.
6799     Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
6800     return ConstantFoldLoadFromConstPtr(GV, LI->getType(), Offset, Q.DL);
6801   }
6802 
6803   return nullptr;
6804 }
6805 
6806 /// See if we can compute a simplified version of this instruction.
6807 /// If not, this returns null.
6809 static Value *simplifyInstructionWithOperands(Instruction *I,
6810                                               ArrayRef<Value *> NewOps,
6811                                               const SimplifyQuery &SQ,
6812                                               unsigned MaxRecurse) {
6813   assert(I->getFunction() && "instruction should be inserted in a function");
6814   const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
6815 
6816   switch (I->getOpcode()) {
6817   default:
6818     if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
6819       SmallVector<Constant *, 8> NewConstOps(NewOps.size());
6820       transform(NewOps, NewConstOps.begin(),
6821                 [](Value *V) { return cast<Constant>(V); });
6822       return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
6823     }
6824     return nullptr;
6825   case Instruction::FNeg:
6826     return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
6827   case Instruction::FAdd:
6828     return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
6829                             MaxRecurse);
6830   case Instruction::Add:
6831     return simplifyAddInst(
6832         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6833         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
6834   case Instruction::FSub:
6835     return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
6836                             MaxRecurse);
6837   case Instruction::Sub:
6838     return simplifySubInst(
6839         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6840         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
6841   case Instruction::FMul:
6842     return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
6843                             MaxRecurse);
6844   case Instruction::Mul:
6845     return simplifyMulInst(
6846         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6847         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
6848   case Instruction::SDiv:
6849     return simplifySDivInst(NewOps[0], NewOps[1],
6850                             Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
6851                             MaxRecurse);
6852   case Instruction::UDiv:
6853     return simplifyUDivInst(NewOps[0], NewOps[1],
6854                             Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
6855                             MaxRecurse);
6856   case Instruction::FDiv:
6857     return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
6858                             MaxRecurse);
6859   case Instruction::SRem:
6860     return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
6861   case Instruction::URem:
6862     return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
6863   case Instruction::FRem:
6864     return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
6865                             MaxRecurse);
6866   case Instruction::Shl:
6867     return simplifyShlInst(
6868         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6869         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
6870   case Instruction::LShr:
6871     return simplifyLShrInst(NewOps[0], NewOps[1],
6872                             Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
6873                             MaxRecurse);
6874   case Instruction::AShr:
6875     return simplifyAShrInst(NewOps[0], NewOps[1],
6876                             Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
6877                             MaxRecurse);
6878   case Instruction::And:
6879     return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
6880   case Instruction::Or:
6881     return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
6882   case Instruction::Xor:
6883     return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
6884   case Instruction::ICmp:
6885     return simplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
6886                             NewOps[1], Q, MaxRecurse);
6887   case Instruction::FCmp:
6888     return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
6889                             NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
6890   case Instruction::Select:
6891     return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
6893   case Instruction::GetElementPtr: {
6894     auto *GEPI = cast<GetElementPtrInst>(I);
6895     return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
6896                            ArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q,
6897                            MaxRecurse);
6898   }
6899   case Instruction::InsertValue: {
6900     InsertValueInst *IV = cast<InsertValueInst>(I);
6901     return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
6902                                    MaxRecurse);
6903   }
6904   case Instruction::InsertElement:
6905     return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
6906   case Instruction::ExtractValue: {
6907     auto *EVI = cast<ExtractValueInst>(I);
6908     return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
6909                                     MaxRecurse);
6910   }
6911   case Instruction::ExtractElement:
6912     return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
6913   case Instruction::ShuffleVector: {
6914     auto *SVI = cast<ShuffleVectorInst>(I);
6915     return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
6916                                      SVI->getShuffleMask(), SVI->getType(), Q,
6917                                      MaxRecurse);
6918   }
6919   case Instruction::PHI:
6920     return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
6921   case Instruction::Call:
6922     return simplifyCall(
6923         cast<CallInst>(I), NewOps.back(),
6924         NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
6925   case Instruction::Freeze:
6926     return llvm::simplifyFreezeInst(NewOps[0], Q);
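  // The .def include below expands HANDLE_CAST_INST into a case label for
  // every cast opcode (Trunc, ZExt, SExt, ..., AddrSpaceCast), all of which
  // share the simplifyCastInst path.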
6927 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
6928 #include "llvm/IR/Instruction.def"
6929 #undef HANDLE_CAST_INST
6930     return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
6931                             MaxRecurse);
6932   case Instruction::Alloca:
6933     // No simplifications for Alloca and it can't be constant folded.
6934     return nullptr;
6935   case Instruction::Load:
6936     return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
6937   }
6938 }
6939 
6940 Value *llvm::simplifyInstructionWithOperands(Instruction *I,
6941                                              ArrayRef<Value *> NewOps,
6942                                              const SimplifyQuery &SQ) {
6943   assert(NewOps.size() == I->getNumOperands() &&
6944          "Number of operands should match the instruction!");
6945   return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
6946 }
6947 
6948 Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) {
6949   SmallVector<Value *, 8> Ops(I->operands());
6950   Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, RecursionLimit);
6951 
6952   // If called on unreachable code, the instruction may simplify to itself.
6953   // Make life easier for users by detecting that case here, and returning a
6954   // safe value instead.
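  // E.g. in a dead block "%x = or i32 %x, %x" simplifies to %x, i.e. to the
  // instruction itself, which would be useless to callers.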
6955   return Result == I ? UndefValue::get(I->getType()) : Result;
6956 }
6957 
6958 /// Implementation of recursive simplification through an instruction's
6959 /// uses.
6960 ///
6961 /// This is the common implementation of the recursive simplification routines.
6962 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
6963 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
6964 /// instructions to process and attempt to simplify it using
6965 /// InstructionSimplify. Recursively visited users which could not be
6966 /// simplified themselves are added to the optional UnsimplifiedUsers set
6967 /// for further processing by the caller.
6968 ///
6969 /// This routine returns 'true' only when *it* simplifies something. The passed
6970 /// in simplified value does not count toward this.
6971 static bool replaceAndRecursivelySimplifyImpl(
6972     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
6973     const DominatorTree *DT, AssumptionCache *AC,
6974     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
6975   bool Simplified = false;
6976   SmallSetVector<Instruction *, 8> Worklist;
6977   const DataLayout &DL = I->getModule()->getDataLayout();
6978 
6979   // If we have an explicit value to collapse to, do that round of the
6980   // simplification loop by hand initially.
6981   if (SimpleV) {
6982     for (User *U : I->users())
6983       if (U != I)
6984         Worklist.insert(cast<Instruction>(U));
6985 
6986     // Replace the instruction with its simplified value.
6987     I->replaceAllUsesWith(SimpleV);
6988 
6989     if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
6990       I->eraseFromParent();
6991   } else {
6992     Worklist.insert(I);
6993   }
6994 
6995   // Note that we must test the size on each iteration; the worklist can grow.
6996   for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
6997     I = Worklist[Idx];
6998 
6999     // See if this instruction simplifies.
7000     SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
7001     if (!SimpleV) {
7002       if (UnsimplifiedUsers)
7003         UnsimplifiedUsers->insert(I);
7004       continue;
7005     }
7006 
7007     Simplified = true;
7008 
7009     // Stash away all the uses of the old instruction so we can check them for
7010     // recursive simplifications after a RAUW. This is cheaper than checking all
7011     // uses of the replacement value on the recursive step in most cases.
7012     for (User *U : I->users())
7013       Worklist.insert(cast<Instruction>(U));
7014 
7015     // Replace the instruction with its simplified value.
7016     I->replaceAllUsesWith(SimpleV);
7017 
7018     if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7019       I->eraseFromParent();
7020   }
7021   return Simplified;
7022 }
7023 
7024 bool llvm::replaceAndRecursivelySimplify(
7025     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7026     const DominatorTree *DT, AssumptionCache *AC,
7027     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
7028   assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
7029   assert(SimpleV && "Must provide a simplified value.");
7030   return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
7031                                            UnsimplifiedUsers);
7032 }
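
// Illustrative use (caller-side, names hypothetical): once a caller has proven
// that instruction I always computes the value V, running
//   replaceAndRecursivelySimplify(I, V, TLI, DT, AC);
// replaces I's uses with V, erases I when that is safe, and then retries
// simplification on each user that the replacement may have exposed.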
7033 
7034 namespace llvm {
7035 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
7036   auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
7037   auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
7038   auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7039   auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
7040   auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
7041   auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
7042   return {F.getParent()->getDataLayout(), TLI, DT, AC};
7043 }
7044 
7045 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
7046                                          const DataLayout &DL) {
7047   return {DL, &AR.TLI, &AR.DT, &AR.AC};
7048 }
7049 
7050 template <class T, class... TArgs>
7051 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
7052                                          Function &F) {
7053   auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7054   auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7055   auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7056   return {F.getParent()->getDataLayout(), TLI, DT, AC};
7057 }
7058 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
7059                                                   Function &);
7060 } // namespace llvm
7061 
7062 void InstSimplifyFolder::anchor() {}
7063