xref: /freebsd/contrib/llvm-project/llvm/lib/Analysis/InstructionSimplify.cpp (revision 1db9f3b21e39176dd5b67cf8ac378633b172463e)
1 //===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements routines for folding instructions into simpler forms
10 // that do not require creating new instructions.  This does constant folding
11 // ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
12 // returning a constant ("and i32 %x, 0" -> "0") or an already existing value
13 // ("and i32 %x, %x" -> "%x").  All operands are assumed to have already been
14 // simplified: this is usually true, and assuming so simplifies the logic (if
15 // they have not been simplified then results are correct but may be suboptimal).
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "llvm/Analysis/InstructionSimplify.h"
20 
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumptionCache.h"
26 #include "llvm/Analysis/CaptureTracking.h"
27 #include "llvm/Analysis/CmpInstAnalysis.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/InstSimplifyFolder.h"
30 #include "llvm/Analysis/LoopAnalysisManager.h"
31 #include "llvm/Analysis/MemoryBuiltins.h"
32 #include "llvm/Analysis/OverflowInstAnalysis.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/Analysis/VectorUtils.h"
35 #include "llvm/IR/ConstantRange.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/Operator.h"
41 #include "llvm/IR/PatternMatch.h"
42 #include "llvm/Support/KnownBits.h"
43 #include <algorithm>
44 #include <optional>
45 using namespace llvm;
46 using namespace llvm::PatternMatch;
47 
48 #define DEBUG_TYPE "instsimplify"
49 
50 enum { RecursionLimit = 3 };
51 
52 STATISTIC(NumExpand, "Number of expansions");
53 STATISTIC(NumReassoc, "Number of reassociations");
54 
55 static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
56                               unsigned);
57 static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
58 static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
59                              const SimplifyQuery &, unsigned);
60 static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
61                             unsigned);
62 static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
63                             const SimplifyQuery &, unsigned);
64 static Value *simplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
65                               unsigned);
66 static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
67                                const SimplifyQuery &Q, unsigned MaxRecurse);
68 static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
69 static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
70                               unsigned);
71 static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
72                                unsigned);
73 static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>, bool,
74                               const SimplifyQuery &, unsigned);
75 static Value *simplifySelectInst(Value *, Value *, Value *,
76                                  const SimplifyQuery &, unsigned);
77 static Value *simplifyInstructionWithOperands(Instruction *I,
78                                               ArrayRef<Value *> NewOps,
79                                               const SimplifyQuery &SQ,
80                                               unsigned MaxRecurse);
81 
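   /// Try to fold a select whose condition is an and/or of two icmps where one
   /// icmp compares the select's own true and false values:
   ///   select ((TV == FV) & (X == Y)), TV, FV --> FV
   ///   select ((TV != FV) | (X != Y)), TV, FV --> TV
   /// (X or Y must itself be TV or FV; the expanded patterns appear below.)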
82 static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
83                                      Value *FalseVal) {
84   BinaryOperator::BinaryOps BinOpCode;
85   if (auto *BO = dyn_cast<BinaryOperator>(Cond))
86     BinOpCode = BO->getOpcode();
87   else
88     return nullptr;
89 
90   CmpInst::Predicate ExpectedPred, Pred1, Pred2;
91   if (BinOpCode == BinaryOperator::Or) {
92     ExpectedPred = ICmpInst::ICMP_NE;
93   } else if (BinOpCode == BinaryOperator::And) {
94     ExpectedPred = ICmpInst::ICMP_EQ;
95   } else
96     return nullptr;
97 
98   // %A = icmp eq %TV, %FV
99   // %B = icmp eq %X, %Y (and one of these is a select operand)
100   // %C = and %A, %B
101   // %D = select %C, %TV, %FV
102   // -->
103   // %FV
104 
105   // %A = icmp ne %TV, %FV
106   // %B = icmp ne %X, %Y (and one of these is a select operand)
107   // %C = or %A, %B
108   // %D = select %C, %TV, %FV
109   // -->
110   // %TV
111   Value *X, *Y;
112   if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
113                                       m_Specific(FalseVal)),
114                              m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
115       Pred1 != Pred2 || Pred1 != ExpectedPred)
116     return nullptr;
117 
118   if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
119     return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;
120 
121   return nullptr;
122 }
123 
124 /// For a boolean type or a vector of boolean type, return false or a vector
125 /// with every element false.
126 static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }
127 
128 /// For a boolean type or a vector of boolean type, return true or a vector
129 /// with every element true.
130 static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }
131 
132 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
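    /// The match also succeeds with the operands swapped and the predicate
    /// reversed, e.g. "icmp sgt %a, %b" is treated the same as "icmp slt %b, %a".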
133 static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
134                           Value *RHS) {
135   CmpInst *Cmp = dyn_cast<CmpInst>(V);
136   if (!Cmp)
137     return false;
138   CmpInst::Predicate CPred = Cmp->getPredicate();
139   Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
140   if (CPred == Pred && CLHS == LHS && CRHS == RHS)
141     return true;
142   return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
143          CRHS == LHS;
144 }
145 
146 /// Simplify comparison with true or false branch of select:
147 ///  %sel = select i1 %cond, i32 %tv, i32 %fv
148 ///  %cmp = icmp sle i32 %sel, %rhs
149 /// Compose new comparison by substituting %sel with either %tv or %fv
150 /// and see if it simplifies.
151 static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
152                                  Value *RHS, Value *Cond,
153                                  const SimplifyQuery &Q, unsigned MaxRecurse,
154                                  Constant *TrueOrFalse) {
155   Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
156   if (SimplifiedCmp == Cond) {
157     // %cmp simplified to the select condition (%cond).
158     return TrueOrFalse;
159   } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
160     // It didn't simplify. However, if composed comparison is equivalent
161     // to the select condition (%cond) then we can replace it.
162     return TrueOrFalse;
163   }
164   return SimplifiedCmp;
165 }
166 
167 /// Simplify comparison with true branch of select
168 static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
169                                      Value *RHS, Value *Cond,
170                                      const SimplifyQuery &Q,
171                                      unsigned MaxRecurse) {
172   return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
173                             getTrue(Cond->getType()));
174 }
175 
176 /// Simplify comparison with false branch of select
177 static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
178                                       Value *RHS, Value *Cond,
179                                       const SimplifyQuery &Q,
180                                       unsigned MaxRecurse) {
181   return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
182                             getFalse(Cond->getType()));
183 }
184 
185 /// We know comparison with both branches of select can be simplified, but they
186 /// are not equal. This routine handles some logical simplifications.
187 static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
188                                                Value *Cond,
189                                                const SimplifyQuery &Q,
190                                                unsigned MaxRecurse) {
191   // If the false value simplified to false, then the result of the compare
192   // is equal to "Cond && TCmp".  This also catches the case when the false
193   // value simplified to false and the true value to true, returning "Cond".
194   // Folding select to and/or isn't poison-safe in general; impliesPoison
195   // checks whether folding it does not convert a well-defined value into
196   // poison.
197   if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
198     if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
199       return V;
200   // If the true value simplified to true, then the result of the compare
201   // is equal to "Cond || FCmp".
202   if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
203     if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
204       return V;
205   // Finally, if the false value simplified to true and the true value to
206   // false, then the result of the compare is equal to "!Cond".
207   if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
208     if (Value *V = simplifyXorInst(
209             Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
210       return V;
211   return nullptr;
212 }
213 
214 /// Does the given value dominate the specified phi node?
215 static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
216   Instruction *I = dyn_cast<Instruction>(V);
217   if (!I)
218     // Arguments and constants dominate all instructions.
219     return true;
220 
221   // If we have a DominatorTree then do a precise test.
222   if (DT)
223     return DT->dominates(I, P);
224 
225   // Otherwise, if the instruction is in the entry block and is not an invoke,
226   // then it obviously dominates all phi nodes.
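      // (An invoke or callbr only makes its result available on an edge to a
      // successor block, so being in the entry block is not sufficient for those.)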
227   if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
228       !isa<CallBrInst>(I))
229     return true;
230 
231   return false;
232 }
233 
234 /// Try to simplify a binary operator of form "V op OtherOp" where V is
235 /// "(B0 opex B1)" by distributing 'op' across 'opex' as
236 /// "(B0 op OtherOp) opex (B1 op OtherOp)".
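    /// For example, with 'op' = Mul and 'opex' = Add, "(B0 + B1) * OtherOp" is
    /// expanded to "(B0 * OtherOp) + (B1 * OtherOp)"; the expansion is used only
    /// if both halves simplify and their recombination simplifies as well.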
237 static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
238                           Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
239                           const SimplifyQuery &Q, unsigned MaxRecurse) {
240   auto *B = dyn_cast<BinaryOperator>(V);
241   if (!B || B->getOpcode() != OpcodeToExpand)
242     return nullptr;
243   Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
244   Value *L =
245       simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
246   if (!L)
247     return nullptr;
248   Value *R =
249       simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
250   if (!R)
251     return nullptr;
252 
253   // Does the expanded pair of binops simplify to the existing binop?
254   if ((L == B0 && R == B1) ||
255       (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
256     ++NumExpand;
257     return B;
258   }
259 
260   // Otherwise, return "L op' R" if it simplifies.
261   Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
262   if (!S)
263     return nullptr;
264 
265   ++NumExpand;
266   return S;
267 }
268 
269 /// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
270 /// distributing op over op'.
271 static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
272                                      Value *R,
273                                      Instruction::BinaryOps OpcodeToExpand,
274                                      const SimplifyQuery &Q,
275                                      unsigned MaxRecurse) {
276   // Recursion is always used, so bail out at once if we already hit the limit.
277   if (!MaxRecurse--)
278     return nullptr;
279 
280   if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
281     return V;
282   if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
283     return V;
284   return nullptr;
285 }
286 
287 /// Generic simplifications for associative binary operations.
288 /// Returns the simpler value, or null if none was found.
289 static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
290                                        Value *LHS, Value *RHS,
291                                        const SimplifyQuery &Q,
292                                        unsigned MaxRecurse) {
293   assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
294 
295   // Recursion is always used, so bail out at once if we already hit the limit.
296   if (!MaxRecurse--)
297     return nullptr;
298 
299   BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
300   BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
301 
302   // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
303   if (Op0 && Op0->getOpcode() == Opcode) {
304     Value *A = Op0->getOperand(0);
305     Value *B = Op0->getOperand(1);
306     Value *C = RHS;
307 
308     // Does "B op C" simplify?
309     if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
310       // It does!  Return "A op V" if it simplifies or is already available.
311       // If V equals B then "A op V" is just the LHS.
312       if (V == B)
313         return LHS;
314       // Otherwise return "A op V" if it simplifies.
315       if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
316         ++NumReassoc;
317         return W;
318       }
319     }
320   }
321 
322   // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
323   if (Op1 && Op1->getOpcode() == Opcode) {
324     Value *A = LHS;
325     Value *B = Op1->getOperand(0);
326     Value *C = Op1->getOperand(1);
327 
328     // Does "A op B" simplify?
329     if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
330       // It does!  Return "V op C" if it simplifies or is already available.
331       // If V equals B then "V op C" is just the RHS.
332       if (V == B)
333         return RHS;
334       // Otherwise return "V op C" if it simplifies.
335       if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
336         ++NumReassoc;
337         return W;
338       }
339     }
340   }
341 
342   // The remaining transforms require commutativity as well as associativity.
343   if (!Instruction::isCommutative(Opcode))
344     return nullptr;
345 
346   // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
347   if (Op0 && Op0->getOpcode() == Opcode) {
348     Value *A = Op0->getOperand(0);
349     Value *B = Op0->getOperand(1);
350     Value *C = RHS;
351 
352     // Does "C op A" simplify?
353     if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
354       // It does!  Return "V op B" if it simplifies or is already available.
355       // If V equals A then "V op B" is just the LHS.
356       if (V == A)
357         return LHS;
358       // Otherwise return "V op B" if it simplifies.
359       if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
360         ++NumReassoc;
361         return W;
362       }
363     }
364   }
365 
366   // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
367   if (Op1 && Op1->getOpcode() == Opcode) {
368     Value *A = LHS;
369     Value *B = Op1->getOperand(0);
370     Value *C = Op1->getOperand(1);
371 
372     // Does "C op A" simplify?
373     if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
374       // It does!  Return "B op V" if it simplifies or is already available.
375       // If V equals C then "B op V" is just the RHS.
376       if (V == C)
377         return RHS;
378       // Otherwise return "B op V" if it simplifies.
379       if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
380         ++NumReassoc;
381         return W;
382       }
383     }
384   }
385 
386   return nullptr;
387 }
388 
389 /// In the case of a binary operation with a select instruction as an operand,
390 /// try to simplify the binop by seeing whether evaluating it on both branches
391 /// of the select results in the same value. Returns the common value if so,
392 /// otherwise returns null.
393 static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
394                                     Value *RHS, const SimplifyQuery &Q,
395                                     unsigned MaxRecurse) {
396   // Recursion is always used, so bail out at once if we already hit the limit.
397   if (!MaxRecurse--)
398     return nullptr;
399 
400   SelectInst *SI;
401   if (isa<SelectInst>(LHS)) {
402     SI = cast<SelectInst>(LHS);
403   } else {
404     assert(isa<SelectInst>(RHS) && "No select instruction operand!");
405     SI = cast<SelectInst>(RHS);
406   }
407 
408   // Evaluate the BinOp on the true and false branches of the select.
409   Value *TV;
410   Value *FV;
411   if (SI == LHS) {
412     TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
413     FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
414   } else {
415     TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
416     FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
417   }
418 
419   // If they simplified to the same value, then return the common value.
420   // If they both failed to simplify then return null.
421   if (TV == FV)
422     return TV;
423 
424   // If one branch simplified to undef, return the other one.
425   if (TV && Q.isUndefValue(TV))
426     return FV;
427   if (FV && Q.isUndefValue(FV))
428     return TV;
429 
430   // If applying the operation did not change the true and false select values,
431   // then the result of the binop is the select itself.
432   if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
433     return SI;
434 
435   // If one branch simplified and the other did not, and the simplified
436   // value is equal to the unsimplified one, return the simplified value.
437   // For example, select (cond, X, X & Z) & Z -> X & Z.
438   if ((FV && !TV) || (TV && !FV)) {
439     // Check that the simplified value has the form "X op Y" where "op" is the
440     // same as the original operation.
441     Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
442     if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
443       // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
444       // We already know that "op" is the same as for the simplified value.  See
445       // if the operands match too.  If so, return the simplified value.
446       Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
447       Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
448       Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
449       if (Simplified->getOperand(0) == UnsimplifiedLHS &&
450           Simplified->getOperand(1) == UnsimplifiedRHS)
451         return Simplified;
452       if (Simplified->isCommutative() &&
453           Simplified->getOperand(1) == UnsimplifiedLHS &&
454           Simplified->getOperand(0) == UnsimplifiedRHS)
455         return Simplified;
456     }
457   }
458 
459   return nullptr;
460 }
461 
462 /// In the case of a comparison with a select instruction, try to simplify the
463 /// comparison by seeing whether both branches of the select result in the same
464 /// value. Returns the common value if so, otherwise returns null.
465 /// For example, if we have:
466 ///  %tmp = select i1 %cmp, i32 1, i32 2
467 ///  %cmp1 = icmp sle i32 %tmp, 3
468 /// We can simplify %cmp1 to true, because both branches of select are
469 /// less than 3. We compose new comparison by substituting %tmp with both
470 /// branches of select and see if it can be simplified.
471 static Value *threadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
472                                   Value *RHS, const SimplifyQuery &Q,
473                                   unsigned MaxRecurse) {
474   // Recursion is always used, so bail out at once if we already hit the limit.
475   if (!MaxRecurse--)
476     return nullptr;
477 
478   // Make sure the select is on the LHS.
479   if (!isa<SelectInst>(LHS)) {
480     std::swap(LHS, RHS);
481     Pred = CmpInst::getSwappedPredicate(Pred);
482   }
483   assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
484   SelectInst *SI = cast<SelectInst>(LHS);
485   Value *Cond = SI->getCondition();
486   Value *TV = SI->getTrueValue();
487   Value *FV = SI->getFalseValue();
488 
489   // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
490   // Does "cmp TV, RHS" simplify?
491   Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
492   if (!TCmp)
493     return nullptr;
494 
495   // Does "cmp FV, RHS" simplify?
496   Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
497   if (!FCmp)
498     return nullptr;
499 
500   // If both sides simplified to the same value, then use it as the result of
501   // the original comparison.
502   if (TCmp == FCmp)
503     return TCmp;
504 
505   // The remaining cases only make sense if the select condition has the same
506   // type as the result of the comparison, so bail out if this is not so.
507   if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
508     return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);
509 
510   return nullptr;
511 }
512 
513 /// In the case of a binary operation with an operand that is a PHI instruction,
514 /// try to simplify the binop by seeing whether evaluating it on the incoming
515 /// phi values yields the same result for every value. If so returns the common
516 /// value, otherwise returns null.
517 static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
518                                  Value *RHS, const SimplifyQuery &Q,
519                                  unsigned MaxRecurse) {
520   // Recursion is always used, so bail out at once if we already hit the limit.
521   if (!MaxRecurse--)
522     return nullptr;
523 
524   PHINode *PI;
525   if (isa<PHINode>(LHS)) {
526     PI = cast<PHINode>(LHS);
527     // Bail out if RHS and the phi may be mutually interdependent due to a loop.
528     if (!valueDominatesPHI(RHS, PI, Q.DT))
529       return nullptr;
530   } else {
531     assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
532     PI = cast<PHINode>(RHS);
533     // Bail out if LHS and the phi may be mutually interdependent due to a loop.
534     if (!valueDominatesPHI(LHS, PI, Q.DT))
535       return nullptr;
536   }
537 
538   // Evaluate the BinOp on the incoming phi values.
539   Value *CommonValue = nullptr;
540   for (Use &Incoming : PI->incoming_values()) {
541     // If the incoming value is the phi node itself, it can safely be skipped.
542     if (Incoming == PI)
543       continue;
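        // Change the context instruction to the "edge" that flows into the phi.
        // This is important because that is where the incoming value is actually
        // "evaluated" even though it is used later somewhere else.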
544     Instruction *InTI = PI->getIncomingBlock(Incoming)->getTerminator();
545     Value *V = PI == LHS
546                    ? simplifyBinOp(Opcode, Incoming, RHS,
547                                    Q.getWithInstruction(InTI), MaxRecurse)
548                    : simplifyBinOp(Opcode, LHS, Incoming,
549                                    Q.getWithInstruction(InTI), MaxRecurse);
550     // If the operation failed to simplify, or simplified to a value different
551     // from the previous one, then give up.
552     if (!V || (CommonValue && V != CommonValue))
553       return nullptr;
554     CommonValue = V;
555   }
556 
557   return CommonValue;
558 }
559 
560 /// In the case of a comparison with a PHI instruction, try to simplify the
561 /// comparison by seeing whether comparing with all of the incoming phi values
562 /// yields the same result every time. If so returns the common result,
563 /// otherwise returns null.
564 static Value *threadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
565                                const SimplifyQuery &Q, unsigned MaxRecurse) {
566   // Recursion is always used, so bail out at once if we already hit the limit.
567   if (!MaxRecurse--)
568     return nullptr;
569 
570   // Make sure the phi is on the LHS.
571   if (!isa<PHINode>(LHS)) {
572     std::swap(LHS, RHS);
573     Pred = CmpInst::getSwappedPredicate(Pred);
574   }
575   assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
576   PHINode *PI = cast<PHINode>(LHS);
577 
578   // Bail out if RHS and the phi may be mutually interdependent due to a loop.
579   if (!valueDominatesPHI(RHS, PI, Q.DT))
580     return nullptr;
581 
582   // Evaluate the comparison on the incoming phi values.
583   Value *CommonValue = nullptr;
584   for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
585     Value *Incoming = PI->getIncomingValue(u);
586     Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
587     // If the incoming value is the phi node itself, it can safely be skipped.
588     if (Incoming == PI)
589       continue;
590     // Change the context instruction to the "edge" that flows into the phi.
591     // This is important because that is where incoming is actually "evaluated"
592     // even though it is used later somewhere else.
593     Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
594                                MaxRecurse);
595     // If the operation failed to simplify, or simplified to a value different
596     // from the previous one, then give up.
597     if (!V || (CommonValue && V != CommonValue))
598       return nullptr;
599     CommonValue = V;
600   }
601 
602   return CommonValue;
603 }
604 
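    /// If both operands are constants, constant-fold the binary operation;
    /// otherwise, if only the LHS is constant and the operation is commutative,
    /// canonicalize the constant over to the RHS. Returns the folded constant or
    /// null.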
605 static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
606                                        Value *&Op0, Value *&Op1,
607                                        const SimplifyQuery &Q) {
608   if (auto *CLHS = dyn_cast<Constant>(Op0)) {
609     if (auto *CRHS = dyn_cast<Constant>(Op1)) {
610       switch (Opcode) {
611       default:
612         break;
613       case Instruction::FAdd:
614       case Instruction::FSub:
615       case Instruction::FMul:
616       case Instruction::FDiv:
617       case Instruction::FRem:
618         if (Q.CxtI != nullptr)
619           return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
620       }
621       return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
622     }
623 
624     // Canonicalize the constant to the RHS if this is a commutative operation.
625     if (Instruction::isCommutative(Opcode))
626       std::swap(Op0, Op1);
627   }
628   return nullptr;
629 }
630 
631 /// Given operands for an Add, see if we can fold the result.
632 /// If not, this returns null.
633 static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
634                               const SimplifyQuery &Q, unsigned MaxRecurse) {
635   if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
636     return C;
637 
638   // X + poison -> poison
639   if (isa<PoisonValue>(Op1))
640     return Op1;
641 
642   // X + undef -> undef
643   if (Q.isUndefValue(Op1))
644     return Op1;
645 
646   // X + 0 -> X
647   if (match(Op1, m_Zero()))
648     return Op0;
649 
650   // If the two operands are negations of each other, return 0.
651   if (isKnownNegation(Op0, Op1))
652     return Constant::getNullValue(Op0->getType());
653 
654   // X + (Y - X) -> Y
655   // (Y - X) + X -> Y
656   // Eg: X + -X -> 0
657   Value *Y = nullptr;
658   if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
659       match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
660     return Y;
661 
662   // X + ~X -> -1   since   ~X = -X-1
663   Type *Ty = Op0->getType();
664   if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
665     return Constant::getAllOnesValue(Ty);
666 
667   // add nsw/nuw (xor Y, signmask), signmask --> Y
668   // The no-wrapping add guarantees that the top bit will be set by the add.
669   // Therefore, the xor must be clearing the already set sign bit of Y.
670   if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
671       match(Op0, m_Xor(m_Value(Y), m_SignMask())))
672     return Y;
673 
674   // add nuw %x, -1  ->  -1, because %x can only be 0.
675   if (IsNUW && match(Op1, m_AllOnes()))
676     return Op1; // Which is -1.
677 
678   // i1 add -> xor (addition modulo 2).
679   if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
680     if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
681       return V;
682 
683   // Try some generic simplifications for associative operations.
684   if (Value *V =
685           simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
686     return V;
687 
688   // Threading Add over selects and phi nodes is pointless, so don't bother.
689   // Threading over the select in "A + select(cond, B, C)" means evaluating
690   // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
691   // only if B and C are equal.  If B and C are equal then (since we assume
692   // that operands have already been simplified) "select(cond, B, C)" should
693   // have been simplified to the common value of B and C already.  Analysing
694   // "A+B" and "A+C" thus gains nothing, but costs compile time.  Similarly
695   // for threading over phi nodes.
696 
697   return nullptr;
698 }
699 
700 Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
701                              const SimplifyQuery &Query) {
702   return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
703 }
704 
705 /// Compute the base pointer and cumulative constant offsets for V.
706 ///
707 /// This strips all constant offsets off of V, leaving it the base pointer, and
708 /// accumulates the total constant offset applied in the returned constant.
709 /// It returns zero if there are no constant offsets applied.
710 ///
711 /// This is very similar to stripAndAccumulateConstantOffsets(), except it
712 /// normalizes the offset bitwidth to the stripped pointer type, not the
713 /// original pointer type.
714 static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
715                                             bool AllowNonInbounds = false) {
716   assert(V->getType()->isPtrOrPtrVectorTy());
717 
718   APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
719   V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
720   // As the stripping may trace through `addrspacecast`, we need to sext or
721   // trunc the calculated offset.
722   return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
723 }
724 
725 /// Compute the constant difference between two pointer values.
726 /// If the difference is not a constant, returns null.
727 static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
728                                           Value *RHS) {
729   APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
730   APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
731 
732   // If LHS and RHS are not related via constant offsets to the same base
733   // value, there is nothing we can do here.
734   if (LHS != RHS)
735     return nullptr;
736 
737   // Otherwise, the difference of LHS - RHS can be computed as:
738   //    LHS - RHS
739   //  = (LHSOffset + Base) - (RHSOffset + Base)
740   //  = LHSOffset - RHSOffset
741   Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
742   if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
743     Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
744   return Res;
745 }
746 
747 /// Test if there is a dominating equivalence condition for the
748 /// two operands. If there is, try to reduce the binary operation
749 /// between the two operands.
750 /// Example: Op0 - Op1 --> 0 when Op0 == Op1
751 static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
752                               const SimplifyQuery &Q, unsigned MaxRecurse) {
753   // A recursive run cannot get any additional benefit.
754   if (MaxRecurse != RecursionLimit)
755     return nullptr;
756 
757   std::optional<bool> Imp =
758       isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
759   if (Imp && *Imp) {
760     Type *Ty = Op0->getType();
761     switch (Opcode) {
762     case Instruction::Sub:
763     case Instruction::Xor:
764     case Instruction::URem:
765     case Instruction::SRem:
766       return Constant::getNullValue(Ty);
767 
768     case Instruction::SDiv:
769     case Instruction::UDiv:
770       return ConstantInt::get(Ty, 1);
771 
772     case Instruction::And:
773     case Instruction::Or:
774       // Could be either one - choose Op1 since that's more likely a constant.
775       return Op1;
776     default:
777       break;
778     }
779   }
780   return nullptr;
781 }
782 
783 /// Given operands for a Sub, see if we can fold the result.
784 /// If not, this returns null.
785 static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
786                               const SimplifyQuery &Q, unsigned MaxRecurse) {
787   if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
788     return C;
789 
790   // X - poison -> poison
791   // poison - X -> poison
792   if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
793     return PoisonValue::get(Op0->getType());
794 
795   // X - undef -> undef
796   // undef - X -> undef
797   if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
798     return UndefValue::get(Op0->getType());
799 
800   // X - 0 -> X
801   if (match(Op1, m_Zero()))
802     return Op0;
803 
804   // X - X -> 0
805   if (Op0 == Op1)
806     return Constant::getNullValue(Op0->getType());
807 
808   // Is this a negation?
809   if (match(Op0, m_Zero())) {
810     // 0 - X -> 0 if the sub is NUW.
811     if (IsNUW)
812       return Constant::getNullValue(Op0->getType());
813 
814     KnownBits Known = computeKnownBits(Op1, /* Depth */ 0, Q);
815     if (Known.Zero.isMaxSignedValue()) {
816       // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
817       // Op1 must be 0 because negating the minimum signed value is undefined.
818       if (IsNSW)
819         return Constant::getNullValue(Op0->getType());
820 
821       // 0 - X -> X if X is 0 or the minimum signed value.
822       return Op1;
823     }
824   }
825 
826   // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
827   // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
828   Value *X = nullptr, *Y = nullptr, *Z = Op1;
829   if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
830     // See if "V === Y - Z" simplifies.
831     if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
832       // It does!  Now see if "X + V" simplifies.
833       if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
834         // It does, we successfully reassociated!
835         ++NumReassoc;
836         return W;
837       }
838     // See if "V === X - Z" simplifies.
839     if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
840       // It does!  Now see if "Y + V" simplifies.
841       if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
842         // It does, we successfully reassociated!
843         ++NumReassoc;
844         return W;
845       }
846   }
847 
848   // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
849   // For example, X - (X + 1) -> -1
850   X = Op0;
851   if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
852     // See if "V === X - Y" simplifies.
853     if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
854       // It does!  Now see if "V - Z" simplifies.
855       if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
856         // It does, we successfully reassociated!
857         ++NumReassoc;
858         return W;
859       }
860     // See if "V === X - Z" simplifies.
861     if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
862       // It does!  Now see if "V - Y" simplifies.
863       if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
864         // It does, we successfully reassociated!
865         ++NumReassoc;
866         return W;
867       }
868   }
869 
870   // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
871   // For example, X - (X - Y) -> Y.
872   Z = Op0;
873   if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
874     // See if "V === Z - X" simplifies.
875     if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
876       // It does!  Now see if "V + Y" simplifies.
877       if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
878         // It does, we successfully reassociated!
879         ++NumReassoc;
880         return W;
881       }
882 
883   // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
884   if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
885       match(Op1, m_Trunc(m_Value(Y))))
886     if (X->getType() == Y->getType())
887       // See if "V === X - Y" simplifies.
888       if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
889         // It does!  Now see if "trunc V" simplifies.
890         if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
891                                         Q, MaxRecurse - 1))
892           // It does, return the simplified "trunc V".
893           return W;
894 
895   // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
896   if (match(Op0, m_PtrToInt(m_Value(X))) && match(Op1, m_PtrToInt(m_Value(Y))))
897     if (Constant *Result = computePointerDifference(Q.DL, X, Y))
898       return ConstantFoldIntegerCast(Result, Op0->getType(), /*IsSigned*/ true,
899                                      Q.DL);
900 
901   // i1 sub -> xor.
902   if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
903     if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
904       return V;
905 
906   // Threading Sub over selects and phi nodes is pointless, so don't bother.
907   // Threading over the select in "A - select(cond, B, C)" means evaluating
908   // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
909   // only if B and C are equal.  If B and C are equal then (since we assume
910   // that operands have already been simplified) "select(cond, B, C)" should
911   // have been simplified to the common value of B and C already.  Analysing
912   // "A-B" and "A-C" thus gains nothing, but costs compile time.  Similarly
913   // for threading over phi nodes.
914 
915   if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
916     return V;
917 
918   return nullptr;
919 }
920 
921 Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
922                              const SimplifyQuery &Q) {
923   return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
924 }
925 
926 /// Given operands for a Mul, see if we can fold the result.
927 /// If not, this returns null.
928 static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
929                               const SimplifyQuery &Q, unsigned MaxRecurse) {
930   if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
931     return C;
932 
933   // X * poison -> poison
934   if (isa<PoisonValue>(Op1))
935     return Op1;
936 
937   // X * undef -> 0
938   // X * 0 -> 0
939   if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
940     return Constant::getNullValue(Op0->getType());
941 
942   // X * 1 -> X
943   if (match(Op1, m_One()))
944     return Op0;
945 
946   // (X / Y) * Y -> X if the division is exact.
947   Value *X = nullptr;
948   if (Q.IIQ.UseInstrInfo &&
949       (match(Op0,
950              m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
951        match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
952     return X;
953 
954   if (Op0->getType()->isIntOrIntVectorTy(1)) {
955     // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
956     // representable). All other cases reduce to 0, so just return 0.
957     if (IsNSW)
958       return ConstantInt::getNullValue(Op0->getType());
959 
960     // Treat "mul i1" as "and i1".
961     if (MaxRecurse)
962       if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
963         return V;
964   }
965 
966   // Try some generic simplifications for associative operations.
967   if (Value *V =
968           simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
969     return V;
970 
971   // Mul distributes over Add. Try some generic simplifications based on this.
972   if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
973                                         Instruction::Add, Q, MaxRecurse))
974     return V;
975 
976   // If the operation is with the result of a select instruction, check whether
977   // operating on either branch of the select always yields the same value.
978   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
979     if (Value *V =
980             threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
981       return V;
982 
983   // If the operation is with the result of a phi instruction, check whether
984   // operating on all incoming values of the phi always yields the same value.
985   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
986     if (Value *V =
987             threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
988       return V;
989 
990   return nullptr;
991 }
992 
993 Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
994                              const SimplifyQuery &Q) {
995   return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
996 }
997 
998 /// Given a predicate and two operands, return true if the comparison is true.
999 /// This is a helper for div/rem simplification where we return some other value
1000 /// when we can prove a relationship between the operands.
1001 static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
1002                        const SimplifyQuery &Q, unsigned MaxRecurse) {
1003   Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
1004   Constant *C = dyn_cast_or_null<Constant>(V);
1005   return (C && C->isAllOnesValue());
1006 }
1007 
1008 /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
1009 /// to simplify X % Y to X.
1010 static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
1011                       unsigned MaxRecurse, bool IsSigned) {
1012   // Recursion is always used, so bail out at once if we already hit the limit.
1013   if (!MaxRecurse--)
1014     return false;
1015 
1016   if (IsSigned) {
1017     // (X srem Y) sdiv Y --> 0
1018     if (match(X, m_SRem(m_Value(), m_Specific(Y))))
1019       return true;
1020 
1021     // |X| / |Y| --> 0
1022     //
1023     // We require that 1 operand is a simple constant. That could be extended to
1024     // 2 variables if we computed the sign bit for each.
1025     //
1026     // Make sure that a constant is not the minimum signed value because taking
1027     // the abs() of that is undefined.
1028     Type *Ty = X->getType();
1029     const APInt *C;
1030     if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
1031       // Is the variable divisor magnitude always greater than the constant
1032       // dividend magnitude?
1033       // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
1034       Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
1035       Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1036       if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
1037           isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
1038         return true;
1039     }
1040     if (match(Y, m_APInt(C))) {
1041       // Special-case: we can't take the abs() of a minimum signed value. If
1042       // that's the divisor, then all we have to do is prove that the dividend
1043       // is also not the minimum signed value.
1044       if (C->isMinSignedValue())
1045         return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);
1046 
1047       // Is the variable dividend magnitude always less than the constant
1048       // divisor magnitude?
1049       // |X| < |C| --> X > -abs(C) and X < abs(C)
1050       Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1051       Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1052       if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
1053           isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
1054         return true;
1055     }
1056     return false;
1057   }
1058 
1059   // IsSigned == false.
1060 
1061   // Is the unsigned dividend known to be less than a constant divisor?
1062   // TODO: Convert this (and above) to range analysis
1063   //      ("computeConstantRangeIncludingKnownBits")?
1064   const APInt *C;
1065   if (match(Y, m_APInt(C)) &&
1066       computeKnownBits(X, /* Depth */ 0, Q).getMaxValue().ult(*C))
1067     return true;
1068 
1069   // Try again for any divisor:
1070   // Is the dividend unsigned less than the divisor?
1071   return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1072 }
1073 
1074 /// Check for common or similar folds of integer division or integer remainder.
1075 /// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
1076 static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
1077                              Value *Op1, const SimplifyQuery &Q,
1078                              unsigned MaxRecurse) {
1079   bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
1080   bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
1081 
1082   Type *Ty = Op0->getType();
1083 
1084   // X / undef -> poison
1085   // X % undef -> poison
1086   if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
1087     return PoisonValue::get(Ty);
1088 
1089   // X / 0 -> poison
1090   // X % 0 -> poison
1091   // We don't need to preserve faults!
1092   if (match(Op1, m_Zero()))
1093     return PoisonValue::get(Ty);
1094 
1095   // If any element of a constant divisor fixed-width vector is zero or undef,
1096   // the behavior is undefined and we can fold the whole op to poison.
1097   auto *Op1C = dyn_cast<Constant>(Op1);
1098   auto *VTy = dyn_cast<FixedVectorType>(Ty);
1099   if (Op1C && VTy) {
1100     unsigned NumElts = VTy->getNumElements();
1101     for (unsigned i = 0; i != NumElts; ++i) {
1102       Constant *Elt = Op1C->getAggregateElement(i);
1103       if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt)))
1104         return PoisonValue::get(Ty);
1105     }
1106   }
1107 
1108   // poison / X -> poison
1109   // poison % X -> poison
1110   if (isa<PoisonValue>(Op0))
1111     return Op0;
1112 
1113   // undef / X -> 0
1114   // undef % X -> 0
1115   if (Q.isUndefValue(Op0))
1116     return Constant::getNullValue(Ty);
1117 
1118   // 0 / X -> 0
1119   // 0 % X -> 0
1120   if (match(Op0, m_Zero()))
1121     return Constant::getNullValue(Op0->getType());
1122 
1123   // X / X -> 1
1124   // X % X -> 0
1125   if (Op0 == Op1)
1126     return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
1127 
1128   KnownBits Known = computeKnownBits(Op1, /* Depth */ 0, Q);
1129   // X / 0 -> poison
1130   // X % 0 -> poison
1131   // If the divisor is known to be zero, just return poison. This can happen in
1132   // some cases where it's indirectly provable that the denominator is zero but
1133   // not trivially simplifiable (i.e. known zero through a phi node).
1134   if (Known.isZero())
1135     return PoisonValue::get(Ty);
1136 
1137   // X / 1 -> X
1138   // X % 1 -> 0
1139   // If the divisor can only be zero or one, we can't have division-by-zero
1140   // or remainder-by-zero, so assume the divisor is 1.
1141   //   e.g. 1, zext (i1 X), sdiv X (Y and 1)
1142   if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
1143     return IsDiv ? Op0 : Constant::getNullValue(Ty);
1144 
1145   // If X * Y does not overflow, then:
1146   //   X * Y / Y -> X
1147   //   X * Y % Y -> 0
1148   Value *X;
1149   if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
1150     auto *Mul = cast<OverflowingBinaryOperator>(Op0);
1151     // The multiplication can't overflow if it is defined not to, or if
1152     // X == A / Y for some A.
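         // (If X is A / Y for some A, then X * Y is A rounded toward zero to a
         // multiple of Y, so it cannot exceed A in magnitude and cannot wrap.)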
1153     if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
1154         (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
1155         (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
1156         (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
1157       return IsDiv ? X : Constant::getNullValue(Op0->getType());
1158     }
1159   }
1160 
1161   if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1162     return IsDiv ? Constant::getNullValue(Op0->getType()) : Op0;
1163 
1164   if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
1165     return V;
1166 
1167   // If the operation is with the result of a select instruction, check whether
1168   // operating on either branch of the select always yields the same value.
1169   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1170     if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1171       return V;
1172 
1173   // If the operation is with the result of a phi instruction, check whether
1174   // operating on all incoming values of the phi always yields the same value.
1175   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1176     if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1177       return V;
1178 
1179   return nullptr;
1180 }
1181 
1182 /// These are simplifications common to SDiv and UDiv.
1183 static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1184                           bool IsExact, const SimplifyQuery &Q,
1185                           unsigned MaxRecurse) {
1186   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1187     return C;
1188 
1189   if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
1190     return V;
1191 
1192   const APInt *DivC;
1193   if (IsExact && match(Op1, m_APInt(DivC))) {
1194     // If this is an exact divide by a constant, then the dividend (Op0) must
1195     // have at least as many trailing zeros as the divisor to divide evenly. If
1196   // it has fewer trailing zeros, then the result must be poison.
1197     if (DivC->countr_zero()) {
1198       KnownBits KnownOp0 = computeKnownBits(Op0, /* Depth */ 0, Q);
1199       if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
1200         return PoisonValue::get(Op0->getType());
1201     }
1202 
1203     // udiv exact (mul nsw X, C), C --> X
1204     // sdiv exact (mul nuw X, C), C --> X
1205     // where C is not a power of 2.
1206     Value *X;
1207     if (!DivC->isPowerOf2() &&
1208         (Opcode == Instruction::UDiv
1209              ? match(Op0, m_NSWMul(m_Value(X), m_Specific(Op1)))
1210              : match(Op0, m_NUWMul(m_Value(X), m_Specific(Op1)))))
1211       return X;
1212   }
1213 
1214   return nullptr;
1215 }
1216 
1217 /// These are simplifications common to SRem and URem.
1218 static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1219                           const SimplifyQuery &Q, unsigned MaxRecurse) {
1220   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1221     return C;
1222 
1223   if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
1224     return V;
1225 
1226   // (X << Y) % X -> 0
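       // (A no-wrap shl of X is X * (1 << Y), an exact multiple of X, so the
       // remainder is zero.)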
1227   if (Q.IIQ.UseInstrInfo &&
1228       ((Opcode == Instruction::SRem &&
1229         match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
1230        (Opcode == Instruction::URem &&
1231         match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
1232     return Constant::getNullValue(Op0->getType());
1233 
1234   return nullptr;
1235 }
1236 
1237 /// Given operands for an SDiv, see if we can fold the result.
1238 /// If not, this returns null.
1239 static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
1240                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1241   // If two operands are negated and no signed overflow, return -1.
1242   if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
1243     return Constant::getAllOnesValue(Op0->getType());
1244 
1245   return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1246 }
1247 
1248 Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
1249                               const SimplifyQuery &Q) {
1250   return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
1251 }
1252 
1253 /// Given operands for a UDiv, see if we can fold the result.
1254 /// If not, this returns null.
1255 static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
1256                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1257   return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1258 }
1259 
1260 Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
1261                               const SimplifyQuery &Q) {
1262   return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
1263 }
1264 
1265 /// Given operands for an SRem, see if we can fold the result.
1266 /// If not, this returns null.
1267 static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1268                                unsigned MaxRecurse) {
1269   // If the divisor is 0, the result is undefined, so assume the divisor is -1.
1270   // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
1271   Value *X;
1272   if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1273     return ConstantInt::getNullValue(Op0->getType());
1274 
1275   // If the two operands are negated, return 0.
1276   if (isKnownNegation(Op0, Op1))
1277     return ConstantInt::getNullValue(Op0->getType());
1278 
1279   return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1280 }
1281 
1282 Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1283   return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
1284 }
1285 
1286 /// Given operands for a URem, see if we can fold the result.
1287 /// If not, this returns null.
1288 static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1289                                unsigned MaxRecurse) {
1290   return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1291 }
1292 
1293 Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1294   return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
1295 }
1296 
1297 /// Returns true if a shift by \c Amount always yields poison.
1298 static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
1299   Constant *C = dyn_cast<Constant>(Amount);
1300   if (!C)
1301     return false;
1302 
1303   // X shift by undef -> poison because it may shift by the bitwidth.
1304   if (Q.isUndefValue(C))
1305     return true;
1306 
1307   // Shifting by the bitwidth or more is poison. This covers scalars and
1308   // fixed/scalable vectors with splat constants.
1309   const APInt *AmountC;
1310   if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
1311     return true;
1312 
1313   // Try harder for fixed-length vectors:
1314   // If all lanes of a vector shift are poison, the whole shift is poison.
1315   if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1316     for (unsigned I = 0,
1317                   E = cast<FixedVectorType>(C->getType())->getNumElements();
1318          I != E; ++I)
1319       if (!isPoisonShift(C->getAggregateElement(I), Q))
1320         return false;
1321     return true;
1322   }
1323 
1324   return false;
1325 }
1326 
1327 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1328 /// If not, this returns null.
1329 static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1330                             Value *Op1, bool IsNSW, const SimplifyQuery &Q,
1331                             unsigned MaxRecurse) {
1332   if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1333     return C;
1334 
1335   // poison shift by X -> poison
1336   if (isa<PoisonValue>(Op0))
1337     return Op0;
1338 
1339   // 0 shift by X -> 0
1340   if (match(Op0, m_Zero()))
1341     return Constant::getNullValue(Op0->getType());
1342 
1343   // X shift by 0 -> X
1344   // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1345   // would be poison.
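     // e.g. ashr i32 %x, (sext i1 %b to i32) --> %x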
1346   Value *X;
1347   if (match(Op1, m_Zero()) ||
1348       (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1349     return Op0;
1350 
1351   // Fold undefined shifts.
1352   if (isPoisonShift(Op1, Q))
1353     return PoisonValue::get(Op0->getType());
1354 
1355   // If the operation is with the result of a select instruction, check whether
1356   // operating on either branch of the select always yields the same value.
1357   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1358     if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1359       return V;
1360 
1361   // If the operation is with the result of a phi instruction, check whether
1362   // operating on all incoming values of the phi always yields the same value.
1363   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1364     if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1365       return V;
1366 
1367   // If the known bits of the shift amount force it to be greater than or equal
1368   // to the number of bits in the type, the shift produces poison.
1369   KnownBits KnownAmt = computeKnownBits(Op1, /* Depth */ 0, Q);
1370   if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
1371     return PoisonValue::get(Op0->getType());
1372 
1373   // If all valid bits in the shift amount are known zero, the amount is either
1374   // 0 or at least the bit width (poison), so the first operand is unchanged.
1375   unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
1376   if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
1377     return Op0;
1378 
1379   // Check for nsw shl leading to a poison value.
1380   if (IsNSW) {
1381     assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1382     KnownBits KnownVal = computeKnownBits(Op0, /* Depth */ 0, Q);
1383     KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
1384 
1385     if (KnownVal.Zero.isSignBitSet())
1386       KnownShl.Zero.setSignBit();
1387     if (KnownVal.One.isSignBitSet())
1388       KnownShl.One.setSignBit();
1389 
1390     if (KnownShl.hasConflict())
1391       return PoisonValue::get(Op0->getType());
1392   }
1393 
1394   return nullptr;
1395 }
1396 
1397 /// Given operands for an LShr or AShr, see if we can fold the result.  If not,
1398 /// this returns null.
1399 static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1400                                  Value *Op1, bool IsExact,
1401                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
1402   if (Value *V =
1403           simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
1404     return V;
1405 
1406   // X >> X -> 0
1407   if (Op0 == Op1)
1408     return Constant::getNullValue(Op0->getType());
1409 
1410   // undef >> X -> 0
1411   // undef >> X -> undef (if it's exact)
1412   if (Q.isUndefValue(Op0))
1413     return IsExact ? Op0 : Constant::getNullValue(Op0->getType());
1414 
1415   // If the low bit is set, an exact shift cannot shift it out, so the shift
       // amount must be 0 and the value is unchanged.
1416   // TODO: Generalize by counting trailing zeros (see fold for exact division).
1417   if (IsExact) {
1418     KnownBits Op0Known = computeKnownBits(Op0, /* Depth */ 0, Q);
1419     if (Op0Known.One[0])
1420       return Op0;
1421   }
1422 
1423   return nullptr;
1424 }
1425 
1426 /// Given operands for an Shl, see if we can fold the result.
1427 /// If not, this returns null.
1428 static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1429                               const SimplifyQuery &Q, unsigned MaxRecurse) {
1430   if (Value *V =
1431           simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1432     return V;
1433 
1434   Type *Ty = Op0->getType();
1435   // undef << X -> 0
1436   // undef << X -> undef (if it's NSW/NUW)
1437   if (Q.isUndefValue(Op0))
1438     return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty);
1439 
1440   // (X >> A) << A -> X, when the right-shift is exact (no bits were lost).
1441   Value *X;
1442   if (Q.IIQ.UseInstrInfo &&
1443       match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1444     return X;
1445 
1446   // shl nuw i8 C, %x  ->  C  iff C has sign bit set.
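     // With nuw, any nonzero shift amount would shift the set sign bit out and
     // produce poison, so the only well-defined result is C itself.
     // e.g. shl nuw i8 -128, %x --> -128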
1447   if (IsNUW && match(Op0, m_Negative()))
1448     return Op0;
1449   // NOTE: could use computeKnownBits() / LazyValueInfo,
1450   // but the cost-benefit analysis suggests it isn't worth it.
1451 
1452   // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
1453   // that the sign-bit does not change, so the only input that does not
1454   // produce poison is 0, and "0 << (bitwidth-1) --> 0".
1455   if (IsNSW && IsNUW &&
1456       match(Op1, m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
1457     return Constant::getNullValue(Ty);
1458 
1459   return nullptr;
1460 }
1461 
1462 Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1463                              const SimplifyQuery &Q) {
1464   return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
1465 }
1466 
1467 /// Given operands for an LShr, see if we can fold the result.
1468 /// If not, this returns null.
1469 static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1470                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1471   if (Value *V = simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
1472                                     MaxRecurse))
1473     return V;
1474 
1475   // (X << A) >> A -> X, when the left-shift is nuw (no bits were shifted out).
1476   Value *X;
1477   if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1478     return X;
1479 
1480   // ((X << A) | Y) >> A -> X  if effective width of Y is not larger than A.
1481   // We can return X as we do in the above case since OR alters no bits in X.
1482   // SimplifyDemandedBits in InstCombine can do more general optimization for
1483   // bit manipulation. This pattern aims to provide opportunities for other
1484   // optimizers by supporting a simple but common case in InstSimplify.
1485   Value *Y;
1486   const APInt *ShRAmt, *ShLAmt;
1487   if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(ShRAmt)) &&
1488       match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1489       *ShRAmt == *ShLAmt) {
1490     const KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
1491     const unsigned EffWidthY = YKnown.countMaxActiveBits();
1492     if (ShRAmt->uge(EffWidthY))
1493       return X;
1494   }
1495 
1496   return nullptr;
1497 }
1498 
1499 Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1500                               const SimplifyQuery &Q) {
1501   return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
1502 }
1503 
1504 /// Given operands for an AShr, see if we can fold the result.
1505 /// If not, this returns null.
1506 static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1507                                const SimplifyQuery &Q, unsigned MaxRecurse) {
1508   if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
1509                                     MaxRecurse))
1510     return V;
1511 
1512   // -1 a>> X --> -1
1513   // (-1 << X) a>> X --> -1
1514   // Do not return Op0 because it may contain undef elements if it's a vector.
1515   if (match(Op0, m_AllOnes()) ||
1516       match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
1517     return Constant::getAllOnesValue(Op0->getType());
1518 
1519   // (X << A) >> A -> X, when the left-shift is nsw (the ashr undoes it exactly).
1520   Value *X;
1521   if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1522     return X;
1523 
1524   // Arithmetic shifting an all-sign-bit value is a no-op.
1525   unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1526   if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1527     return Op0;
1528 
1529   return nullptr;
1530 }
1531 
1532 Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1533                               const SimplifyQuery &Q) {
1534   return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
1535 }
1536 
1537 /// Commuted variants are assumed to be handled by calling this function again
1538 /// with the parameters swapped.
1539 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1540                                          ICmpInst *UnsignedICmp, bool IsAnd,
1541                                          const SimplifyQuery &Q) {
1542   Value *X, *Y;
1543 
1544   ICmpInst::Predicate EqPred;
1545   if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1546       !ICmpInst::isEquality(EqPred))
1547     return nullptr;
1548 
1549   ICmpInst::Predicate UnsignedPred;
1550 
1551   Value *A, *B;
1552   // Y = (A - B);
1553   if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1554     if (match(UnsignedICmp,
1555               m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1556         ICmpInst::isUnsigned(UnsignedPred)) {
1557       // A >=/<= B || (A - B) != 0  <-->  true
1558       if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1559            UnsignedPred == ICmpInst::ICMP_ULE) &&
1560           EqPred == ICmpInst::ICMP_NE && !IsAnd)
1561         return ConstantInt::getTrue(UnsignedICmp->getType());
1562       // A </> B && (A - B) == 0  <-->  false
1563       if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1564            UnsignedPred == ICmpInst::ICMP_UGT) &&
1565           EqPred == ICmpInst::ICMP_EQ && IsAnd)
1566         return ConstantInt::getFalse(UnsignedICmp->getType());
1567 
1568       // A </> B && (A - B) != 0  <-->  A </> B
1569       // A </> B || (A - B) != 0  <-->  (A - B) != 0
1570       if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1571                                           UnsignedPred == ICmpInst::ICMP_UGT))
1572         return IsAnd ? UnsignedICmp : ZeroICmp;
1573 
1574       // A <=/>= B && (A - B) == 0  <-->  (A - B) == 0
1575       // A <=/>= B || (A - B) == 0  <-->  A <=/>= B
1576       if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1577                                           UnsignedPred == ICmpInst::ICMP_UGE))
1578         return IsAnd ? ZeroICmp : UnsignedICmp;
1579     }
1580 
1581     // Given  Y = (A - B)
1582     //   Y >= A && Y != 0  --> Y >= A  iff B != 0
1583     //   Y <  A || Y == 0  --> Y <  A  iff B != 0
1584     if (match(UnsignedICmp,
1585               m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1586       if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1587           EqPred == ICmpInst::ICMP_NE &&
1588           isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1589         return UnsignedICmp;
1590       if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1591           EqPred == ICmpInst::ICMP_EQ &&
1592           isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1593         return UnsignedICmp;
1594     }
1595   }
1596 
1597   if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1598       ICmpInst::isUnsigned(UnsignedPred))
1599     ;
1600   else if (match(UnsignedICmp,
1601                  m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1602            ICmpInst::isUnsigned(UnsignedPred))
1603     UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1604   else
1605     return nullptr;
1606 
1607   // X > Y && Y == 0  -->  Y == 0  iff X != 0
1608   // X > Y || Y == 0  -->  X > Y   iff X != 0
1609   if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1610       isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1611     return IsAnd ? ZeroICmp : UnsignedICmp;
1612 
1613   // X <= Y && Y != 0  -->  X <= Y  iff X != 0
1614   // X <= Y || Y != 0  -->  Y != 0  iff X != 0
1615   if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1616       isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1617     return IsAnd ? UnsignedICmp : ZeroICmp;
1618 
1619   // The transforms below here are expected to be handled more generally with
1620   // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1621   // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1622   // these are candidates for removal.
1623 
1624   // X < Y && Y != 0  -->  X < Y
1625   // X < Y || Y != 0  -->  Y != 0
1626   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1627     return IsAnd ? UnsignedICmp : ZeroICmp;
1628 
1629   // X >= Y && Y == 0  -->  Y == 0
1630   // X >= Y || Y == 0  -->  X >= Y
1631   if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1632     return IsAnd ? ZeroICmp : UnsignedICmp;
1633 
1634   // X < Y && Y == 0  -->  false
1635   if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1636       IsAnd)
1637     return getFalse(UnsignedICmp->getType());
1638 
1639   // X >= Y || Y != 0  -->  true
1640   if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1641       !IsAnd)
1642     return getTrue(UnsignedICmp->getType());
1643 
1644   return nullptr;
1645 }
1646 
1647 /// Test if a pair of compares with a shared operand and 2 constants has an
1648 /// empty set intersection, full set union, or if one compare is a superset of
1649 /// the other.
1650 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1651                                                 bool IsAnd) {
1652   // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1).
1653   if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1654     return nullptr;
1655 
1656   const APInt *C0, *C1;
1657   if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1658       !match(Cmp1->getOperand(1), m_APInt(C1)))
1659     return nullptr;
1660 
1661   auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1662   auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1663 
1664   // For and-of-compares, check if the intersection is empty:
1665   // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1666   if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1667     return getFalse(Cmp0->getType());
1668 
1669   // For or-of-compares, check if the union is full:
1670   // (icmp X, C0) || (icmp X, C1) --> full set --> true
1671   if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1672     return getTrue(Cmp0->getType());
1673 
1674   // Is one range a superset of the other?
1675   // If this is and-of-compares, take the smaller set:
1676   // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1677   // If this is or-of-compares, take the larger set:
1678   // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1679   if (Range0.contains(Range1))
1680     return IsAnd ? Cmp1 : Cmp0;
1681   if (Range1.contains(Range0))
1682     return IsAnd ? Cmp0 : Cmp1;
1683 
1684   return nullptr;
1685 }
1686 
1687 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1688                                         const InstrInfoQuery &IIQ) {
1689   // (icmp (add V, C0), C1) & (icmp V, C0)
1690   ICmpInst::Predicate Pred0, Pred1;
1691   const APInt *C0, *C1;
1692   Value *V;
1693   if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1694     return nullptr;
1695 
1696   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1697     return nullptr;
1698 
1699   auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1700   if (AddInst->getOperand(1) != Op1->getOperand(1))
1701     return nullptr;
1702 
1703   Type *ITy = Op0->getType();
1704   bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1705   bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1706 
1707   const APInt Delta = *C1 - *C0;
1708   if (C0->isStrictlyPositive()) {
1709     if (Delta == 2) {
1710       if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1711         return getFalse(ITy);
1712       if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1713         return getFalse(ITy);
1714     }
1715     if (Delta == 1) {
1716       if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1717         return getFalse(ITy);
1718       if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1719         return getFalse(ITy);
1720     }
1721   }
1722   if (C0->getBoolValue() && IsNUW) {
1723     if (Delta == 2)
1724       if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1725         return getFalse(ITy);
1726     if (Delta == 1)
1727       if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1728         return getFalse(ITy);
1729   }
1730 
1731   return nullptr;
1732 }
1733 
1734 /// Try to simplify and/or of icmp with ctpop intrinsic.
1735 static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1,
1736                                             bool IsAnd) {
1737   ICmpInst::Predicate Pred0, Pred1;
1738   Value *X;
1739   const APInt *C;
1740   if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1741                           m_APInt(C))) ||
1742       !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
1743     return nullptr;
1744 
1745   // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1746   if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1747     return Cmp1;
1748   // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1749   if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1750     return Cmp1;
1751 
1752   return nullptr;
1753 }
1754 
1755 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1756                                  const SimplifyQuery &Q) {
1757   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1758     return X;
1759   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1760     return X;
1761 
1762   if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1763     return X;
1764 
1765   if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
1766     return X;
1767   if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
1768     return X;
1769 
1770   if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1771     return X;
1772   if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1773     return X;
1774 
1775   return nullptr;
1776 }
1777 
1778 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1779                                        const InstrInfoQuery &IIQ) {
1780   // (icmp (add V, C0), C1) | (icmp V, C0)
1781   ICmpInst::Predicate Pred0, Pred1;
1782   const APInt *C0, *C1;
1783   Value *V;
1784   if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1785     return nullptr;
1786 
1787   if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1788     return nullptr;
1789 
1790   auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1791   if (AddInst->getOperand(1) != Op1->getOperand(1))
1792     return nullptr;
1793 
1794   Type *ITy = Op0->getType();
1795   bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1796   bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1797 
1798   const APInt Delta = *C1 - *C0;
1799   if (C0->isStrictlyPositive()) {
1800     if (Delta == 2) {
1801       if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1802         return getTrue(ITy);
1803       if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1804         return getTrue(ITy);
1805     }
1806     if (Delta == 1) {
1807       if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1808         return getTrue(ITy);
1809       if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1810         return getTrue(ITy);
1811     }
1812   }
1813   if (C0->getBoolValue() && IsNUW) {
1814     if (Delta == 2)
1815       if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1816         return getTrue(ITy);
1817     if (Delta == 1)
1818       if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1819         return getTrue(ITy);
1820   }
1821 
1822   return nullptr;
1823 }
1824 
1825 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1826                                 const SimplifyQuery &Q) {
1827   if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1828     return X;
1829   if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1830     return X;
1831 
1832   if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1833     return X;
1834 
1835   if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
1836     return X;
1837   if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
1838     return X;
1839 
1840   if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1841     return X;
1842   if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1843     return X;
1844 
1845   return nullptr;
1846 }
1847 
1848 static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
1849                                    FCmpInst *RHS, bool IsAnd) {
1850   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1851   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1852   if (LHS0->getType() != RHS0->getType())
1853     return nullptr;
1854 
1855   const DataLayout &DL = Q.DL;
1856   const TargetLibraryInfo *TLI = Q.TLI;
1857 
1858   FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1859   if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1860       (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1861     // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1862     // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1863     // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1864     // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1865     // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1866     // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1867     // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1868     // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1869     if (((LHS1 == RHS0 || LHS1 == RHS1) &&
1870          isKnownNeverNaN(LHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
1871         ((LHS0 == RHS0 || LHS0 == RHS1) &&
1872          isKnownNeverNaN(LHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
1873       return RHS;
1874 
1875     // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1876     // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1877     // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1878     // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1879     // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1880     // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1881     // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1882     // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1883     if (((RHS1 == LHS0 || RHS1 == LHS1) &&
1884          isKnownNeverNaN(RHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
1885         ((RHS0 == LHS0 || RHS0 == LHS1) &&
1886          isKnownNeverNaN(RHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
1887       return LHS;
1888   }
1889 
1890   return nullptr;
1891 }
1892 
1893 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
1894                                   Value *Op1, bool IsAnd) {
1895   // Look through casts of the 'and'/'or' operands to find compares.
1896   auto *Cast0 = dyn_cast<CastInst>(Op0);
1897   auto *Cast1 = dyn_cast<CastInst>(Op1);
1898   if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1899       Cast0->getSrcTy() == Cast1->getSrcTy()) {
1900     Op0 = Cast0->getOperand(0);
1901     Op1 = Cast1->getOperand(0);
1902   }
1903 
1904   Value *V = nullptr;
1905   auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1906   auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1907   if (ICmp0 && ICmp1)
1908     V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1909               : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1910 
1911   auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1912   auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1913   if (FCmp0 && FCmp1)
1914     V = simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);
1915 
1916   if (!V)
1917     return nullptr;
1918   if (!Cast0)
1919     return V;
1920 
1921   // If we looked through casts, we can only handle a constant simplification
1922   // because we are not allowed to create a cast instruction here.
1923   if (auto *C = dyn_cast<Constant>(V))
1924     return ConstantFoldCastOperand(Cast0->getOpcode(), C, Cast0->getType(),
1925                                    Q.DL);
1926 
1927   return nullptr;
1928 }
1929 
1930 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
1931                                      const SimplifyQuery &Q,
1932                                      bool AllowRefinement,
1933                                      SmallVectorImpl<Instruction *> *DropFlags,
1934                                      unsigned MaxRecurse);
1935 
1936 static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
1937                                       const SimplifyQuery &Q,
1938                                       unsigned MaxRecurse) {
1939   assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1940          "Must be and/or");
1941   ICmpInst::Predicate Pred;
1942   Value *A, *B;
1943   if (!match(Op0, m_ICmp(Pred, m_Value(A), m_Value(B))) ||
1944       !ICmpInst::isEquality(Pred))
1945     return nullptr;
1946 
1947   auto Simplify = [&](Value *Res) -> Value * {
1948     Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, Res->getType());
1949 
1950     // and (icmp eq a, b), x implies (a==b) inside x.
1951     // or (icmp ne a, b), x implies (a==b) inside x.
1952     // If x simplifies to true/false, we can simplify the and/or.
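         // e.g. and (icmp eq %a, %b), (icmp ule %a, %b) --> icmp eq %a, %b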
1953     if (Pred ==
1954         (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1955       if (Res == Absorber)
1956         return Absorber;
1957       if (Res == ConstantExpr::getBinOpIdentity(Opcode, Res->getType()))
1958         return Op0;
1959       return nullptr;
1960     }
1961 
1962     // If we have and (icmp ne a, b), x and for a==b we can simplify x to false,
1963     // then we can drop the icmp, as x will already be false in the case where
1964     // the icmp is false. Similar for or and true.
1965     if (Res == Absorber)
1966       return Op1;
1967     return nullptr;
1968   };
1969 
1970   if (Value *Res =
1971           simplifyWithOpReplaced(Op1, A, B, Q, /* AllowRefinement */ true,
1972                                  /* DropFlags */ nullptr, MaxRecurse))
1973     return Simplify(Res);
1974   if (Value *Res =
1975           simplifyWithOpReplaced(Op1, B, A, Q, /* AllowRefinement */ true,
1976                                  /* DropFlags */ nullptr, MaxRecurse))
1977     return Simplify(Res);
1978 
1979   return nullptr;
1980 }
1981 
1982 /// Given a bitwise logic op, check if the operands are add/sub with a common
1983 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
1984 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1985                                     Instruction::BinaryOps Opcode) {
1986   assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
1987   assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1988   Value *X;
1989   Constant *C1, *C2;
1990   if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
1991        match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
1992       (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
1993        match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
1994     if (ConstantExpr::getNot(C1) == C2) {
1995       // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
1996       // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
1997       // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
1998       Type *Ty = Op0->getType();
1999       return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2000                                         : ConstantInt::getAllOnesValue(Ty);
2001     }
2002   }
2003   return nullptr;
2004 }
2005 
2006 // Commutative patterns for and that will be tried with both operand orders.
2007 static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
2008                                      const SimplifyQuery &Q,
2009                                      unsigned MaxRecurse) {
2010   // ~A & A =  0
2011   if (match(Op0, m_Not(m_Specific(Op1))))
2012     return Constant::getNullValue(Op0->getType());
2013 
2014   // (A | ?) & A = A
2015   if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2016     return Op1;
2017 
2018   // (X | ~Y) & (X | Y) --> X
2019   Value *X, *Y;
2020   if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2021       match(Op1, m_c_Or(m_Deferred(X), m_Deferred(Y))))
2022     return X;
2023 
2024   // If we have a multiplication overflow check that is being 'and'ed with a
2025   // check that one of the multipliers is not zero, we can omit the 'and', and
2026   // only keep the overflow check.
2027   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2028     return Op1;
2029 
2030   // -A & A = A if A is a power of two or zero.
2031   if (match(Op0, m_Neg(m_Specific(Op1))) &&
2032       isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2033     return Op1;
2034 
2035   // This is similar to the pattern for checking if a value is a power-of-2:
2036   // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2037   if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2038       isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2039     return Constant::getNullValue(Op1->getType());
2040 
2041   // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
2042   // M <= N.
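     // e.g. (x << 4) & ((x << 2) - 1) --> 0 when x is a power of 2 (or 0)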
2043   const APInt *Shift1, *Shift2;
2044   if (match(Op0, m_Shl(m_Value(X), m_APInt(Shift1))) &&
2045       match(Op1, m_Add(m_Shl(m_Specific(X), m_APInt(Shift2)), m_AllOnes())) &&
2046       isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, /*Depth*/ 0, Q.AC,
2047                              Q.CxtI) &&
2048       Shift1->uge(*Shift2))
2049     return Constant::getNullValue(Op0->getType());
2050 
2051   if (Value *V =
2052           simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2053     return V;
2054 
2055   return nullptr;
2056 }
2057 
2058 /// Given operands for an And, see if we can fold the result.
2059 /// If not, this returns null.
2060 static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2061                               unsigned MaxRecurse) {
2062   if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2063     return C;
2064 
2065   // X & poison -> poison
2066   if (isa<PoisonValue>(Op1))
2067     return Op1;
2068 
2069   // X & undef -> 0
2070   if (Q.isUndefValue(Op1))
2071     return Constant::getNullValue(Op0->getType());
2072 
2073   // X & X = X
2074   if (Op0 == Op1)
2075     return Op0;
2076 
2077   // X & 0 = 0
2078   if (match(Op1, m_Zero()))
2079     return Constant::getNullValue(Op0->getType());
2080 
2081   // X & -1 = X
2082   if (match(Op1, m_AllOnes()))
2083     return Op0;
2084 
2085   if (Value *Res = simplifyAndCommutative(Op0, Op1, Q, MaxRecurse))
2086     return Res;
2087   if (Value *Res = simplifyAndCommutative(Op1, Op0, Q, MaxRecurse))
2088     return Res;
2089 
2090   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2091     return V;
2092 
2093   // A mask that only clears known zeros of a shifted value is a no-op.
2094   const APInt *Mask;
2095   const APInt *ShAmt;
2096   Value *X, *Y;
2097   if (match(Op1, m_APInt(Mask))) {
2098     // If all bits in the inverted and shifted mask are clear:
2099     // and (shl X, ShAmt), Mask --> shl X, ShAmt
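         // e.g. and (shl i8 X, 4), 0xF0 --> shl i8 X, 4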
2100     if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2101         (~(*Mask)).lshr(*ShAmt).isZero())
2102       return Op0;
2103 
2104     // If all bits in the inverted and shifted mask are clear:
2105     // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2106     if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2107         (~(*Mask)).shl(*ShAmt).isZero())
2108       return Op0;
2109   }
2110 
2111   // and 2^x-1, 2^C --> 0 where x <= C.
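     // e.g. (%p - 1) & 16 --> 0 when %p is known to be a power of 2 no larger
     // than 16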
2112   const APInt *PowerC;
2113   Value *Shift;
2114   if (match(Op1, m_Power2(PowerC)) &&
2115       match(Op0, m_Add(m_Value(Shift), m_AllOnes())) &&
2116       isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
2117                              Q.DT)) {
2118     KnownBits Known = computeKnownBits(Shift, /* Depth */ 0, Q);
2119     // Use getActiveBits() to make use of the additional power of two knowledge
2120     if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
2121       return ConstantInt::getNullValue(Op1->getType());
2122   }
2123 
2124   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2125     return V;
2126 
2127   // Try some generic simplifications for associative operations.
2128   if (Value *V =
2129           simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2130     return V;
2131 
2132   // And distributes over Or.  Try some generic simplifications based on this.
2133   if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2134                                         Instruction::Or, Q, MaxRecurse))
2135     return V;
2136 
2137   // And distributes over Xor.  Try some generic simplifications based on this.
2138   if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2139                                         Instruction::Xor, Q, MaxRecurse))
2140     return V;
2141 
2142   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2143     if (Op0->getType()->isIntOrIntVectorTy(1)) {
2144       // A & (A && B) -> A && B
2145       if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2146         return Op1;
2147       else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2148         return Op0;
2149     }
2150     // If the operation is with the result of a select instruction, check
2151     // whether operating on either branch of the select always yields the same
2152     // value.
2153     if (Value *V =
2154             threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2155       return V;
2156   }
2157 
2158   // If the operation is with the result of a phi instruction, check whether
2159   // operating on all incoming values of the phi always yields the same value.
2160   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2161     if (Value *V =
2162             threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2163       return V;
2164 
2165   // Assuming the effective width of Y is not larger than A, i.e. all bits
2166   // from X and Y are disjoint in (X << A) | Y,
2167   // if the mask of this AND op covers all bits of X or Y, while it covers
2168   // no bits from the other, we can bypass this AND op. E.g.,
2169   // ((X << A) | Y) & Mask -> Y,
2170   //     if Mask = ((1 << effective_width_of(Y)) - 1)
2171   // ((X << A) | Y) & Mask -> X << A,
2172   //     if Mask = ((1 << effective_width_of(X)) - 1) << A
2173   // SimplifyDemandedBits in InstCombine can optimize the general case.
2174   // This pattern aims to help other passes for a common case.
2175   Value *XShifted;
2176   if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(Mask)) &&
2177       match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2178                                      m_Value(XShifted)),
2179                         m_Value(Y)))) {
2180     const unsigned Width = Op0->getType()->getScalarSizeInBits();
2181     const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2182     const KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
2183     const unsigned EffWidthY = YKnown.countMaxActiveBits();
2184     if (EffWidthY <= ShftCnt) {
2185       const KnownBits XKnown = computeKnownBits(X, /* Depth */ 0, Q);
2186       const unsigned EffWidthX = XKnown.countMaxActiveBits();
2187       const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2188       const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2189       // If the mask is extracting all bits from X or Y as is, we can skip
2190       // this AND op.
2191       if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2192         return Y;
2193       if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2194         return XShifted;
2195     }
2196   }
2197 
2198   // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2199   // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
2200   BinaryOperator *Or;
2201   if (match(Op0, m_c_Xor(m_Value(X),
2202                          m_CombineAnd(m_BinOp(Or),
2203                                       m_c_Or(m_Deferred(X), m_Value(Y))))) &&
2204       match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
2205     return Constant::getNullValue(Op0->getType());
2206 
2207   const APInt *C1;
2208   Value *A;
2209   // (A ^ C) & (A ^ ~C) -> 0
2210   if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2211       match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2212     return Constant::getNullValue(Op0->getType());
2213 
2214   if (Op0->getType()->isIntOrIntVectorTy(1)) {
2215     if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
2216       // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2217       if (*Implied == true)
2218         return Op0;
2219       // If Op0 is true implies Op1 is false, then they are not true together.
2220       if (*Implied == false)
2221         return ConstantInt::getFalse(Op0->getType());
2222     }
2223     if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
2224       // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2225       if (*Implied)
2226         return Op1;
2227       // If Op1 is true implies Op0 is false, then they are not true together.
2228       if (!*Implied)
2229         return ConstantInt::getFalse(Op1->getType());
2230     }
2231   }
2232 
2233   if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2234     return V;
2235 
2236   return nullptr;
2237 }
2238 
2239 Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2240   return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
2241 }
2242 
2243 // TODO: Many of these folds could use LogicalAnd/LogicalOr.
2244 static Value *simplifyOrLogic(Value *X, Value *Y) {
2245   assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2246   Type *Ty = X->getType();
2247 
2248   // X | ~X --> -1
2249   if (match(Y, m_Not(m_Specific(X))))
2250     return ConstantInt::getAllOnesValue(Ty);
2251 
2252   // X | ~(X & ?) = -1
2253   if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2254     return ConstantInt::getAllOnesValue(Ty);
2255 
2256   // X | (X & ?) --> X
2257   if (match(Y, m_c_And(m_Specific(X), m_Value())))
2258     return X;
2259 
2260   Value *A, *B;
2261 
2262   // (A ^ B) | (A | B) --> A | B
2263   // (A ^ B) | (B | A) --> B | A
2264   if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
2265       match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2266     return Y;
2267 
2268   // ~(A ^ B) | (A | B) --> -1
2269   // ~(A ^ B) | (B | A) --> -1
2270   if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
2271       match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2272     return ConstantInt::getAllOnesValue(Ty);
2273 
2274   // (A & ~B) | (A ^ B) --> A ^ B
2275   // (~B & A) | (A ^ B) --> A ^ B
2276   // (A & ~B) | (B ^ A) --> B ^ A
2277   // (~B & A) | (B ^ A) --> B ^ A
2278   if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2279       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2280     return Y;
2281 
2282   // (~A ^ B) | (A & B) --> ~A ^ B
2283   // (B ^ ~A) | (A & B) --> B ^ ~A
2284   // (~A ^ B) | (B & A) --> ~A ^ B
2285   // (B ^ ~A) | (B & A) --> B ^ ~A
2286   if (match(X, m_c_Xor(m_NotForbidUndef(m_Value(A)), m_Value(B))) &&
2287       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2288     return X;
2289 
2290   // (~A | B) | (A ^ B) --> -1
2291   // (~A | B) | (B ^ A) --> -1
2292   // (B | ~A) | (A ^ B) --> -1
2293   // (B | ~A) | (B ^ A) --> -1
2294   if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2295       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2296     return ConstantInt::getAllOnesValue(Ty);
2297 
2298   // (~A & B) | ~(A | B) --> ~A
2299   // (~A & B) | ~(B | A) --> ~A
2300   // (B & ~A) | ~(A | B) --> ~A
2301   // (B & ~A) | ~(B | A) --> ~A
2302   Value *NotA;
2303   if (match(X,
2304             m_c_And(m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2305                     m_Value(B))) &&
2306       match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2307     return NotA;
2308   // The same is true of the logical (select-based) and/or forms.
2309   // TODO: This could share the logic of the version above if there was a
2310   // version of LogicalAnd that allowed more than just i1 types.
2311   if (match(X, m_c_LogicalAnd(
2312                    m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2313                    m_Value(B))) &&
2314       match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
2315     return NotA;
2316 
2317   // ~(A ^ B) | (A & B) --> ~(A ^ B)
2318   // ~(A ^ B) | (B & A) --> ~(A ^ B)
2319   Value *NotAB;
2320   if (match(X, m_CombineAnd(m_NotForbidUndef(m_Xor(m_Value(A), m_Value(B))),
2321                             m_Value(NotAB))) &&
2322       match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2323     return NotAB;
2324 
2325   // ~(A & B) | (A ^ B) --> ~(A & B)
2326   // ~(A & B) | (B ^ A) --> ~(A & B)
2327   if (match(X, m_CombineAnd(m_NotForbidUndef(m_And(m_Value(A), m_Value(B))),
2328                             m_Value(NotAB))) &&
2329       match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2330     return NotAB;
2331 
2332   return nullptr;
2333 }
2334 
2335 /// Given operands for an Or, see if we can fold the result.
2336 /// If not, this returns null.
2337 static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2338                              unsigned MaxRecurse) {
2339   if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2340     return C;
2341 
2342   // X | poison -> poison
2343   if (isa<PoisonValue>(Op1))
2344     return Op1;
2345 
2346   // X | undef -> -1
2347   // X | -1 = -1
2348   // Do not return Op1 because it may contain undef elements if it's a vector.
2349   if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2350     return Constant::getAllOnesValue(Op0->getType());
2351 
2352   // X | X = X
2353   // X | 0 = X
2354   if (Op0 == Op1 || match(Op1, m_Zero()))
2355     return Op0;
2356 
2357   if (Value *R = simplifyOrLogic(Op0, Op1))
2358     return R;
2359   if (Value *R = simplifyOrLogic(Op1, Op0))
2360     return R;
2361 
2362   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2363     return V;
2364 
2365   // Rotated -1 is still -1:
2366   // (-1 << X) | (-1 >> (C - X)) --> -1
2367   // (-1 >> X) | (-1 << (C - X)) --> -1
2368   // ...with C <= bitwidth (and commuted variants).
2369   Value *X, *Y;
2370   if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2371        match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2372       (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2373        match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2374     const APInt *C;
2375     if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2376          match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2377         C->ule(X->getType()->getScalarSizeInBits())) {
2378       return ConstantInt::getAllOnesValue(X->getType());
2379     }
2380   }
2381 
2382   // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2383   // are mixing in another shift that is redundant with the funnel shift.
2384 
2385   // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2386   // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2387   if (match(Op0,
2388             m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2389       match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
2390     return Op0;
2391   if (match(Op1,
2392             m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2393       match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
2394     return Op1;
2395 
2396   // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2397   // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2398   if (match(Op0,
2399             m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2400       match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
2401     return Op0;
2402   if (match(Op1,
2403             m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2404       match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
2405     return Op1;
2406 
2407   if (Value *V =
2408           simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2409     return V;
2410   if (Value *V =
2411           simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
2412     return V;
2413 
2414   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2415     return V;
2416 
2417   // If we have a multiplication overflow check that is being 'and'ed with a
2418   // check that one of the multipliers is not zero, we can omit the 'and', and
2419   // only keep the overflow check.
2420   if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2421     return Op1;
2422   if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2423     return Op0;
2424 
2425   // Try some generic simplifications for associative operations.
2426   if (Value *V =
2427           simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2428     return V;
2429 
2430   // Or distributes over And.  Try some generic simplifications based on this.
2431   if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2432                                         Instruction::And, Q, MaxRecurse))
2433     return V;
2434 
2435   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2436     if (Op0->getType()->isIntOrIntVectorTy(1)) {
2437       // A | (A || B) -> A || B
2438       if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2439         return Op1;
2440       else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2441         return Op0;
2442     }
2443     // If the operation is with the result of a select instruction, check
2444     // whether operating on either branch of the select always yields the same
2445     // value.
2446     if (Value *V =
2447             threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2448       return V;
2449   }
2450 
2451   // (A & C1)|(B & C2)
2452   Value *A, *B;
2453   const APInt *C1, *C2;
2454   if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2455       match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2456     if (*C1 == ~*C2) {
2457       // (A & C1)|(B & C2)
2458       // If we have: ((V + N) & C1) | (V & C2)
2459       // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2460       // replace with V+N.
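           // e.g. ((V + 16) & 0xF0) | (V & 0x0F) --> V + 16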
2461       Value *N;
2462       if (C2->isMask() && // C2 == 0+1+
2463           match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2464         // Add commutes, try both ways.
2465         if (MaskedValueIsZero(N, *C2, Q))
2466           return A;
2467       }
2468       // Or commutes, try both ways.
2469       if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2470         // Add commutes, try both ways.
2471         if (MaskedValueIsZero(N, *C1, Q))
2472           return B;
2473       }
2474     }
2475   }
2476 
2477   // If the operation is with the result of a phi instruction, check whether
2478   // operating on all incoming values of the phi always yields the same value.
2479   if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2480     if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2481       return V;
2482 
2483   // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
2484   if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2485       match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2486     return Constant::getAllOnesValue(Op0->getType());
2487 
2488   if (Op0->getType()->isIntOrIntVectorTy(1)) {
2489     if (std::optional<bool> Implied =
2490             isImpliedCondition(Op0, Op1, Q.DL, false)) {
2491       // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2492       if (*Implied == false)
2493         return Op0;
2494       // If Op0 is false implies Op1 is true, then at least one is always true.
2495       if (*Implied == true)
2496         return ConstantInt::getTrue(Op0->getType());
2497     }
2498     if (std::optional<bool> Implied =
2499             isImpliedCondition(Op1, Op0, Q.DL, false)) {
2500       // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2501       if (*Implied == false)
2502         return Op1;
2503       // If Op1 is false implies Op0 is true, then at least one is always true.
2504       if (*Implied == true)
2505         return ConstantInt::getTrue(Op1->getType());
2506     }
2507   }
2508 
2509   if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2510     return V;
2511 
2512   return nullptr;
2513 }
2514 
2515 Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2516   return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
2517 }
2518 
2519 /// Given operands for a Xor, see if we can fold the result.
2520 /// If not, this returns null.
2521 static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2522                               unsigned MaxRecurse) {
2523   if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2524     return C;
2525 
2526   // X ^ poison -> poison
2527   if (isa<PoisonValue>(Op1))
2528     return Op1;
2529 
2530   // A ^ undef -> undef
2531   if (Q.isUndefValue(Op1))
2532     return Op1;
2533 
2534   // A ^ 0 = A
2535   if (match(Op1, m_Zero()))
2536     return Op0;
2537 
2538   // A ^ A = 0
2539   if (Op0 == Op1)
2540     return Constant::getNullValue(Op0->getType());
2541 
2542   // A ^ ~A  =  ~A ^ A  =  -1
2543   if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2544     return Constant::getAllOnesValue(Op0->getType());
2545 
2546   auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2547     Value *A, *B;
2548     // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
2549     if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2550         match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2551       return A;
2552 
2553     // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2554     // The 'not' op must contain a complete -1 operand (no undef elements for
2555     // vector) for the transform to be safe.
2556     Value *NotA;
2557     if (match(X,
2558               m_c_Or(m_CombineAnd(m_NotForbidUndef(m_Value(A)), m_Value(NotA)),
2559                      m_Value(B))) &&
2560         match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2561       return NotA;
2562 
2563     return nullptr;
2564   };
2565   if (Value *R = foldAndOrNot(Op0, Op1))
2566     return R;
2567   if (Value *R = foldAndOrNot(Op1, Op0))
2568     return R;
2569 
2570   if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2571     return V;
2572 
2573   // Try some generic simplifications for associative operations.
2574   if (Value *V =
2575           simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2576     return V;
2577 
2578   // Threading Xor over selects and phi nodes is pointless, so don't bother.
2579   // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2580   // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2581   // only if B and C are equal.  If B and C are equal then (since we assume
2582   // that operands have already been simplified) "select(cond, B, C)" should
2583   // have been simplified to the common value of B and C already.  Analysing
2584   // "A^B" and "A^C" thus gains nothing, but costs compile time.  Similarly
2585   // for threading over phi nodes.
2586 
2587   if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2588     return V;
2589 
2590   return nullptr;
2591 }
2592 
2593 Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2594   return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
2595 }
2596 
2597 static Type *getCompareTy(Value *Op) {
2598   return CmpInst::makeCmpResultType(Op->getType());
2599 }
2600 
2601 /// Rummage around inside V looking for something equivalent to the comparison
2602 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2603 /// Helper function for analyzing max/min idioms.
2604 static Value *extractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2605                                          Value *LHS, Value *RHS) {
2606   SelectInst *SI = dyn_cast<SelectInst>(V);
2607   if (!SI)
2608     return nullptr;
2609   CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2610   if (!Cmp)
2611     return nullptr;
2612   Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2613   if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2614     return Cmp;
2615   if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2616       LHS == CmpRHS && RHS == CmpLHS)
2617     return Cmp;
2618   return nullptr;
2619 }
2620 
2621 /// Return true if the underlying object (storage) must be disjoint from
2622 /// storage returned by any noalias return call.
2623 static bool isAllocDisjoint(const Value *V) {
2624   // For allocas, we consider only static ones (dynamic
2625   // allocas might be transformed into calls to malloc not simultaneously
2626   // live with the compared-to allocation). For globals, we exclude symbols
2627   // that might be resolved lazily to symbols in another dynamically-loaded
2628   // library (and, thus, could be malloc'ed by the implementation).
2629   if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2630     return AI->isStaticAlloca();
2631   if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2632     return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2633             GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2634            !GV->isThreadLocal();
2635   if (const Argument *A = dyn_cast<Argument>(V))
2636     return A->hasByValAttr();
2637   return false;
2638 }
2639 
2640 /// Return true if V1 and V2 are each the base of some distinct storage region
2641 /// [V, object_size(V)] which do not overlap.  Note that zero sized regions
2642 /// *are* possible, and that zero sized regions do not overlap with any other.
2643 static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
2644   // Global variables always exist, so they always exist during the lifetime
2645   // of each other and all allocas.  Global variables themselves usually have
2646   // non-overlapping storage, but since their addresses are constants, the
2647   // case involving two globals does not reach here and is instead handled in
2648   // constant folding.
2649   //
2650   // Two different allocas usually have different addresses...
2651   //
2652   // However, if there's an @llvm.stackrestore dynamically in between two
2653   // allocas, they may have the same address. It's tempting to reduce the
2654   // scope of the problem by only looking at *static* allocas here. That would
2655   // cover the majority of allocas while significantly reducing the likelihood
2656   // of having an @llvm.stackrestore pop up in the middle. However, it's not
2657   // actually impossible for an @llvm.stackrestore to pop up in the middle of
2658   // an entry block. Also, if we have a block that's not attached to a
2659   // function, we can't tell if it's "static" under the current definition.
2660   // Theoretically, this problem could be fixed by creating a new
2661   // instruction kind specifically for static allocas. Such a new instruction
2662   // could be required to be at the top of the entry block, thus preventing it
2663   // from being subject to a @llvm.stackrestore. Instcombine could even
2664   // convert regular allocas into these special allocas. It'd be nifty.
2665   // However, until then, this problem remains open.
2666   //
2667   // So, we'll assume that two non-empty allocas have different addresses
2668   // for now.
2669   auto isByValArg = [](const Value *V) {
2670     const Argument *A = dyn_cast<Argument>(V);
2671     return A && A->hasByValAttr();
2672   };
2673 
2674   // Byval args are backed by storage that does not overlap with other byval
2675   // args, allocas, or globals.
2676   if (isByValArg(V1))
2677     return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2678   if (isByValArg(V2))
2679     return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2680 
2681   return isa<AllocaInst>(V1) &&
2682          (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2683 }
2684 
2685 // A significant optimization not implemented here is assuming that alloca
2686 // addresses are not equal to incoming argument values. They don't *alias*,
2687 // as we say, but that doesn't mean they aren't equal, so we take a
2688 // conservative approach.
2689 //
2690 // This is inspired in part by C++11 5.10p1:
2691 //   "Two pointers of the same type compare equal if and only if they are both
2692 //    null, both point to the same function, or both represent the same
2693 //    address."
2694 //
2695 // This is pretty permissive.
2696 //
2697 // It's also partly due to C11 6.5.9p6:
2698 //   "Two pointers compare equal if and only if both are null pointers, both are
2699 //    pointers to the same object (including a pointer to an object and a
2700 //    subobject at its beginning) or function, both are pointers to one past the
2701 //    last element of the same array object, or one is a pointer to one past the
2702 //    end of one array object and the other is a pointer to the start of a
2703 //    different array object that happens to immediately follow the first array
2704   //    object in the address space."
2705 //
2706 // C11's version is more restrictive, however there's no reason why an argument
2707 // couldn't be a one-past-the-end value for a stack object in the caller and be
2708 // equal to the beginning of a stack object in the callee.
2709 //
2710 // If the C and C++ standards are ever made sufficiently restrictive in this
2711 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2712 // this optimization.
2713 static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
2714                                     Value *RHS, const SimplifyQuery &Q) {
2715   assert(LHS->getType() == RHS->getType() && "Must have same types");
2716   const DataLayout &DL = Q.DL;
2717   const TargetLibraryInfo *TLI = Q.TLI;
2718   const DominatorTree *DT = Q.DT;
2719   const Instruction *CxtI = Q.CxtI;
2720 
2721   // We can only fold certain predicates on pointer comparisons.
2722   switch (Pred) {
2723   default:
2724     return nullptr;
2725 
2726     // Equality comparisons are easy to fold.
2727   case CmpInst::ICMP_EQ:
2728   case CmpInst::ICMP_NE:
2729     break;
2730 
2731     // We can only handle unsigned relational comparisons because 'inbounds' on
2732     // a GEP only protects against unsigned wrapping.
2733   case CmpInst::ICMP_UGT:
2734   case CmpInst::ICMP_UGE:
2735   case CmpInst::ICMP_ULT:
2736   case CmpInst::ICMP_ULE:
2737     // However, we have to switch them to their signed variants to handle
2738     // negative indices from the base pointer.
2739     Pred = ICmpInst::getSignedPredicate(Pred);
2740     break;
2741   }
2742 
2743   // Strip off any constant offsets so that we can reason about them.
2744   // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2745   // here and compare base addresses like AliasAnalysis does, however there are
2746   // numerous hazards. AliasAnalysis and its utilities rely on special rules
2747   // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2748   // doesn't need to guarantee pointer inequality when it says NoAlias.
2749 
2750   // Even if a non-inbounds GEP occurs along the path we can still optimize
2751   // equality comparisons concerning the result.
2752   bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2753   unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2754   APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2755   LHS = LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
2756   RHS = RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);
2757 
2758   // If LHS and RHS are related via constant offsets to the same base
2759   // value, we can replace it with an icmp which just compares the offsets.
2760   if (LHS == RHS)
2761     return ConstantInt::get(getCompareTy(LHS),
2762                             ICmpInst::compare(LHSOffset, RHSOffset, Pred));
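  // Illustrative example (assumed IR names): with
  //   %p = getelementptr inbounds i8, ptr %base, i64 4
  //   %q = getelementptr inbounds i8, ptr %base, i64 8
  // both pointers strip to %base, and "icmp ult ptr %p, %q" reduces to
  // comparing the accumulated offsets 4 and 8, folding to true.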
2763 
2764   // Various optimizations for (in)equality comparisons.
2765   if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2766     // Different non-empty allocations that exist at the same time have
2767     // different addresses (if the program can tell). If the offsets are
2768     // within the bounds of their allocations (and not one-past-the-end!
2769     // so we can't use inbounds!), and their allocations aren't the same,
2770     // the pointers are not equal.
2771     if (haveNonOverlappingStorage(LHS, RHS)) {
2772       uint64_t LHSSize, RHSSize;
2773       ObjectSizeOpts Opts;
2774       Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2775       auto *F = [](Value *V) -> Function * {
2776         if (auto *I = dyn_cast<Instruction>(V))
2777           return I->getFunction();
2778         if (auto *A = dyn_cast<Argument>(V))
2779           return A->getParent();
2780         return nullptr;
2781       }(LHS);
2782       Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2783       if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2784           getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2785         APInt Dist = LHSOffset - RHSOffset;
2786         if (Dist.isNonNegative() ? Dist.ult(LHSSize) : (-Dist).ult(RHSSize))
2787           return ConstantInt::get(getCompareTy(LHS),
2788                                   !CmpInst::isTrueWhenEqual(Pred));
2789       }
2790     }
2791 
2792     // If one side of the equality comparison must come from a noalias call
2793     // (meaning a system memory allocation function), and the other side must
2794     // come from a pointer that cannot overlap with dynamically-allocated
2795     // memory within the lifetime of the current function (allocas, byval
2796     // arguments, globals), then determine the comparison result here.
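    // Illustrative example (assumed IR names, static alloca):
    //   %m = call noalias ptr @malloc(i64 16)
    //   %a = alloca i32
    // "icmp eq ptr %m, %a" folds to false here (and "icmp ne" to true).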
2797     SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2798     getUnderlyingObjects(LHS, LHSUObjs);
2799     getUnderlyingObjects(RHS, RHSUObjs);
2800 
2801     // Is the set of underlying objects all noalias calls?
2802     auto IsNAC = [](ArrayRef<const Value *> Objects) {
2803       return all_of(Objects, isNoAliasCall);
2804     };
2805 
2806     // Is the set of underlying objects all things which must be disjoint from
2807     // noalias calls?  We assume that indexing from such disjoint storage
2808     // into the heap is undefined, and thus offsets can be safely ignored.
2809     auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2810       return all_of(Objects, ::isAllocDisjoint);
2811     };
2812 
2813     if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2814         (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2815       return ConstantInt::get(getCompareTy(LHS),
2816                               !CmpInst::isTrueWhenEqual(Pred));
2817 
2818     // Fold comparisons for a non-escaping pointer even if the allocation call
2819     // cannot be elided. We cannot fold a malloc comparison to null. Also, the
2820     // dynamic allocation call could be either of the operands.  Note that
2821     // the other operand cannot be based on the alloc - if it were, then
2822     // the cmp itself would be a capture.
2823     Value *MI = nullptr;
2824     if (isAllocLikeFn(LHS, TLI) &&
2825         llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2826       MI = LHS;
2827     else if (isAllocLikeFn(RHS, TLI) &&
2828              llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2829       MI = RHS;
2830     if (MI) {
2831       // FIXME: This is incorrect, see PR54002. While we can assume that the
2832       // allocation is at an address that makes the comparison false, this
2833       // requires that *all* comparisons to that address be false, which
2834       // InstSimplify cannot guarantee.
2835       struct CustomCaptureTracker : public CaptureTracker {
2836         bool Captured = false;
2837         void tooManyUses() override { Captured = true; }
2838         bool captured(const Use *U) override {
2839           if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2840             // Comparison against a value loaded from a global variable. Since
2841             // the pointer does not escape, its value cannot have been guessed and
2842             // stored separately in a global variable, so this use is not a capture.
2843             unsigned OtherIdx = 1 - U->getOperandNo();
2844             auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2845             if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2846               return false;
2847           }
2848 
2849           Captured = true;
2850           return true;
2851         }
2852       };
2853       CustomCaptureTracker Tracker;
2854       PointerMayBeCaptured(MI, &Tracker);
2855       if (!Tracker.Captured)
2856         return ConstantInt::get(getCompareTy(LHS),
2857                                 CmpInst::isFalseWhenEqual(Pred));
2858     }
2859   }
2860 
2861   // Otherwise, fail.
2862   return nullptr;
2863 }
2864 
2865 /// Fold an icmp when its operands have i1 scalar type.
2866 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2867                                   Value *RHS, const SimplifyQuery &Q) {
2868   Type *ITy = getCompareTy(LHS); // The return type.
2869   Type *OpTy = LHS->getType();   // The operand type.
2870   if (!OpTy->isIntOrIntVectorTy(1))
2871     return nullptr;
2872 
2873   // A boolean compared to true/false can be reduced in 14 out of the 20
2874   // (10 predicates * 2 constants) possible combinations. The other
2875   // 6 cases require a 'not' of the LHS.
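  // Illustrative examples (assumed i1 values):
  //   icmp ne  i1 %x, false                  -> %x
  //   icmp ult i1 %x, false                  -> false
  //   icmp eq  i1 (xor i1 %x, true), false   -> %x   (uses the 'not' of %x)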
2876 
2877   auto ExtractNotLHS = [](Value *V) -> Value * {
2878     Value *X;
2879     if (match(V, m_Not(m_Value(X))))
2880       return X;
2881     return nullptr;
2882   };
2883 
2884   if (match(RHS, m_Zero())) {
2885     switch (Pred) {
2886     case CmpInst::ICMP_NE:  // X !=  0 -> X
2887     case CmpInst::ICMP_UGT: // X >u  0 -> X
2888     case CmpInst::ICMP_SLT: // X <s  0 -> X
2889       return LHS;
2890 
2891     case CmpInst::ICMP_EQ:  // not(X) ==  0 -> X != 0 -> X
2892     case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2893     case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2894       if (Value *X = ExtractNotLHS(LHS))
2895         return X;
2896       break;
2897 
2898     case CmpInst::ICMP_ULT: // X <u  0 -> false
2899     case CmpInst::ICMP_SGT: // X >s  0 -> false
2900       return getFalse(ITy);
2901 
2902     case CmpInst::ICMP_UGE: // X >=u 0 -> true
2903     case CmpInst::ICMP_SLE: // X <=s 0 -> true
2904       return getTrue(ITy);
2905 
2906     default:
2907       break;
2908     }
2909   } else if (match(RHS, m_One())) {
2910     switch (Pred) {
2911     case CmpInst::ICMP_EQ:  // X ==   1 -> X
2912     case CmpInst::ICMP_UGE: // X >=u  1 -> X
2913     case CmpInst::ICMP_SLE: // X <=s -1 -> X
2914       return LHS;
2915 
2916     case CmpInst::ICMP_NE:  // not(X) !=  1 -> X ==   1 -> X
2917     case CmpInst::ICMP_ULT: // not(X) <u  1 -> X >=u  1 -> X
2918     case CmpInst::ICMP_SGT: // not(X) >s -1 -> X <=s -1 -> X
2919       if (Value *X = ExtractNotLHS(LHS))
2920         return X;
2921       break;
2922 
2923     case CmpInst::ICMP_UGT: // X >u   1 -> false
2924     case CmpInst::ICMP_SLT: // X <s  -1 -> false
2925       return getFalse(ITy);
2926 
2927     case CmpInst::ICMP_ULE: // X <=u  1 -> true
2928     case CmpInst::ICMP_SGE: // X >=s -1 -> true
2929       return getTrue(ITy);
2930 
2931     default:
2932       break;
2933     }
2934   }
2935 
2936   switch (Pred) {
2937   default:
2938     break;
2939   case ICmpInst::ICMP_UGE:
2940     if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2941       return getTrue(ITy);
2942     break;
2943   case ICmpInst::ICMP_SGE:
2944     /// For a signed comparison, the i1 values false and true map to 0 and -1
2945     /// respectively. This gives the truth table:
2946     /// LHS | RHS | LHS >=s RHS   | LHS implies RHS
2947     ///  0  |  0  |  1 (0 >= 0)   |  1
2948     ///  0  |  1  |  1 (0 >= -1)  |  1
2949     ///  1  |  0  |  0 (-1 >= 0)  |  0
2950     ///  1  |  1  |  1 (-1 >= -1) |  1
2951     if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2952       return getTrue(ITy);
2953     break;
2954   case ICmpInst::ICMP_ULE:
2955     if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2956       return getTrue(ITy);
2957     break;
2958   case ICmpInst::ICMP_SLE:
2959     /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2960     if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2961       return getTrue(ITy);
2962     break;
2963   }
2964 
2965   return nullptr;
2966 }
2967 
2968 /// Try hard to fold icmp with zero RHS because this is a common case.
2969 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2970                                    Value *RHS, const SimplifyQuery &Q) {
2971   if (!match(RHS, m_Zero()))
2972     return nullptr;
2973 
2974   Type *ITy = getCompareTy(LHS); // The return type.
2975   switch (Pred) {
2976   default:
2977     llvm_unreachable("Unknown ICmp predicate!");
2978   case ICmpInst::ICMP_ULT:
2979     return getFalse(ITy);
2980   case ICmpInst::ICMP_UGE:
2981     return getTrue(ITy);
2982   case ICmpInst::ICMP_EQ:
2983   case ICmpInst::ICMP_ULE:
2984     if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2985       return getFalse(ITy);
2986     break;
2987   case ICmpInst::ICMP_NE:
2988   case ICmpInst::ICMP_UGT:
2989     if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2990       return getTrue(ITy);
2991     break;
2992   case ICmpInst::ICMP_SLT: {
2993     KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
2994     if (LHSKnown.isNegative())
2995       return getTrue(ITy);
2996     if (LHSKnown.isNonNegative())
2997       return getFalse(ITy);
2998     break;
2999   }
3000   case ICmpInst::ICMP_SLE: {
3001     KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
3002     if (LHSKnown.isNegative())
3003       return getTrue(ITy);
3004     if (LHSKnown.isNonNegative() &&
3005         isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
3006       return getFalse(ITy);
3007     break;
3008   }
3009   case ICmpInst::ICMP_SGE: {
3010     KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
3011     if (LHSKnown.isNegative())
3012       return getFalse(ITy);
3013     if (LHSKnown.isNonNegative())
3014       return getTrue(ITy);
3015     break;
3016   }
3017   case ICmpInst::ICMP_SGT: {
3018     KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
3019     if (LHSKnown.isNegative())
3020       return getFalse(ITy);
3021     if (LHSKnown.isNonNegative() &&
3022         isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
3023       return getTrue(ITy);
3024     break;
3025   }
3026   }
3027 
3028   return nullptr;
3029 }
3030 
3031 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
3032                                        Value *RHS, const InstrInfoQuery &IIQ) {
3033   Type *ITy = getCompareTy(RHS); // The return type.
3034 
3035   Value *X;
3036   // Sign-bit checks can be optimized to true/false after unsigned
3037   // floating-point casts:
3038   // icmp slt (bitcast (uitofp X)),  0 --> false
3039   // icmp sgt (bitcast (uitofp X)), -1 --> true
3040   if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
3041     if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
3042       return ConstantInt::getFalse(ITy);
3043     if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
3044       return ConstantInt::getTrue(ITy);
3045   }
3046 
3047   const APInt *C;
3048   if (!match(RHS, m_APIntAllowUndef(C)))
3049     return nullptr;
3050 
3051   // Rule out tautological comparisons (e.g., ult 0 or uge 0).
3052   ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
3053   if (RHS_CR.isEmptySet())
3054     return ConstantInt::getFalse(ITy);
3055   if (RHS_CR.isFullSet())
3056     return ConstantInt::getTrue(ITy);
3057 
3058   ConstantRange LHS_CR =
3059       computeConstantRange(LHS, CmpInst::isSigned(Pred), IIQ.UseInstrInfo);
3060   if (!LHS_CR.isFullSet()) {
3061     if (RHS_CR.contains(LHS_CR))
3062       return ConstantInt::getTrue(ITy);
3063     if (RHS_CR.inverse().contains(LHS_CR))
3064       return ConstantInt::getFalse(ITy);
3065   }
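  // Illustrative example: if LHS is known to lie in [0, 8), then
  // "icmp ult i8 LHS, 16" folds to true and "icmp ugt i8 LHS, 7" folds to
  // false, since the RHS region fully contains (or fully excludes) LHS's range.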
3066 
3067   // (mul nuw/nsw X, MulC) != C --> true  (if C is not a multiple of MulC)
3068   // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
3069   const APInt *MulC;
3070   if (IIQ.UseInstrInfo && ICmpInst::isEquality(Pred) &&
3071       ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
3072         *MulC != 0 && C->urem(*MulC) != 0) ||
3073        (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
3074         *MulC != 0 && C->srem(*MulC) != 0)))
3075     return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
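  // Illustrative example (assumed IR names):
  //   icmp eq i8 (mul nuw i8 %x, 4), 7 -> false, since 7 is not a multiple of
  //   4 (and the corresponding icmp ne folds to true).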
3076 
3077   return nullptr;
3078 }
3079 
3080 static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
3081                                          BinaryOperator *LBO, Value *RHS,
3082                                          const SimplifyQuery &Q,
3083                                          unsigned MaxRecurse) {
3084   Type *ITy = getCompareTy(RHS); // The return type.
3085 
3086   Value *Y = nullptr;
3087   // icmp pred (or X, Y), X
3088   if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
3089     if (Pred == ICmpInst::ICMP_ULT)
3090       return getFalse(ITy);
3091     if (Pred == ICmpInst::ICMP_UGE)
3092       return getTrue(ITy);
3093 
3094     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3095       KnownBits RHSKnown = computeKnownBits(RHS, /* Depth */ 0, Q);
3096       KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
3097       if (RHSKnown.isNonNegative() && YKnown.isNegative())
3098         return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
3099       if (RHSKnown.isNegative() || YKnown.isNonNegative())
3100         return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
3101     }
3102   }
3103 
3104   // icmp pred (and X, Y), X
3105   if (match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
3106     if (Pred == ICmpInst::ICMP_UGT)
3107       return getFalse(ITy);
3108     if (Pred == ICmpInst::ICMP_ULE)
3109       return getTrue(ITy);
3110   }
3111 
3112   // icmp pred (urem X, Y), Y
3113   if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
3114     switch (Pred) {
3115     default:
3116       break;
3117     case ICmpInst::ICMP_SGT:
3118     case ICmpInst::ICMP_SGE: {
3119       KnownBits Known = computeKnownBits(RHS, /* Depth */ 0, Q);
3120       if (!Known.isNonNegative())
3121         break;
3122       [[fallthrough]];
3123     }
3124     case ICmpInst::ICMP_EQ:
3125     case ICmpInst::ICMP_UGT:
3126     case ICmpInst::ICMP_UGE:
3127       return getFalse(ITy);
3128     case ICmpInst::ICMP_SLT:
3129     case ICmpInst::ICMP_SLE: {
3130       KnownBits Known = computeKnownBits(RHS, /* Depth */ 0, Q);
3131       if (!Known.isNonNegative())
3132         break;
3133       [[fallthrough]];
3134     }
3135     case ICmpInst::ICMP_NE:
3136     case ICmpInst::ICMP_ULT:
3137     case ICmpInst::ICMP_ULE:
3138       return getTrue(ITy);
3139     }
3140   }
3141 
3142   // icmp pred (urem X, Y), X
3143   if (match(LBO, m_URem(m_Specific(RHS), m_Value()))) {
3144     if (Pred == ICmpInst::ICMP_ULE)
3145       return getTrue(ITy);
3146     if (Pred == ICmpInst::ICMP_UGT)
3147       return getFalse(ITy);
3148   }
3149 
3150   // x >>u y <=u x --> true.
3151   // x >>u y >u  x --> false.
3152   // x udiv y <=u x --> true.
3153   // x udiv y >u  x --> false.
3154   if (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
3155       match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
3156     // icmp pred (X op Y), X
3157     if (Pred == ICmpInst::ICMP_UGT)
3158       return getFalse(ITy);
3159     if (Pred == ICmpInst::ICMP_ULE)
3160       return getTrue(ITy);
3161   }
3162 
3163   // If x is nonzero:
3164   // x >>u C <u  x --> true  for C != 0.
3165   // x >>u C !=  x --> true  for C != 0.
3166   // x >>u C >=u x --> false for C != 0.
3167   // x >>u C ==  x --> false for C != 0.
3168   // x udiv C <u  x --> true  for C != 1.
3169   // x udiv C !=  x --> true  for C != 1.
3170   // x udiv C >=u x --> false for C != 1.
3171   // x udiv C ==  x --> false for C != 1.
3172   // TODO: allow non-constant shift amount/divisor
3173   const APInt *C;
3174   if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3175       (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3176     if (isKnownNonZero(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) {
3177       switch (Pred) {
3178       default:
3179         break;
3180       case ICmpInst::ICMP_EQ:
3181       case ICmpInst::ICMP_UGE:
3182         return getFalse(ITy);
3183       case ICmpInst::ICMP_NE:
3184       case ICmpInst::ICMP_ULT:
3185         return getTrue(ITy);
3186       case ICmpInst::ICMP_UGT:
3187       case ICmpInst::ICMP_ULE:
3188         // UGT/ULE are handled by the more general case just above
3189         llvm_unreachable("Unexpected UGT/ULE, should have been handled");
3190       }
3191     }
3192   }
3193 
3194   // (x*C1)/C2 <= x for C1 <= C2.
3195   // This holds even if the multiplication overflows: Assume that x != 0 and
3196   // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3197   // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3198   //
3199   // Additionally, either the multiplication or the division might be
3200   // represented as a shift:
3201   // (x*C1)>>C2 <= x for C1 < 2**C2.
3202   // (x<<C1)/C2 <= x for 2**C1 < C2.
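  // Illustrative worked example (assumed i8 arithmetic): with C1 = 3, C2 = 4
  // and x = 100, x*C1 = 300 wraps to 44, and 44 /u 4 = 11 <= 100, so
  // "icmp ule (udiv (mul i8 %x, 3), 4), %x" folds to true.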
3203   const APInt *C1, *C2;
3204   if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3205        C1->ule(*C2)) ||
3206       (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3207        C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3208       (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3209        (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3210     if (Pred == ICmpInst::ICMP_UGT)
3211       return getFalse(ITy);
3212     if (Pred == ICmpInst::ICMP_ULE)
3213       return getTrue(ITy);
3214   }
3215 
3216   // (sub C, X) == X, C is odd  --> false
3217   // (sub C, X) != X, C is odd  --> true
3218   if (match(LBO, m_Sub(m_APIntAllowUndef(C), m_Specific(RHS))) &&
3219       (*C & 1) == 1 && ICmpInst::isEquality(Pred))
3220     return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
3221 
3222   return nullptr;
3223 }
3224 
3225 // If only one of the icmp's operands has NSW flags, try to prove that:
3226 //
3227 //   icmp slt (x + C1), (x +nsw C2)
3228 //
3229 // is equivalent to:
3230 //
3231 //   icmp slt C1, C2
3232 //
3233 // which is true if x + C2 has the NSW flags set and:
3234 // *) C1 < C2 && C1 >= 0, or
3235 // *) C2 < C1 && C1 <= 0.
3236 //
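// Illustrative example (assumed IR names): for
//   icmp slt (add i8 %x, 1), (add nsw i8 %x, 3)
// we have C1 = 1 < C2 = 3 with C1 >= 0, so the compare reduces to
// "icmp slt i8 1, 3" and folds to true.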
3237 static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
3238                                     Value *RHS, const InstrInfoQuery &IIQ) {
3239   // TODO: only support icmp slt for now.
3240   if (Pred != CmpInst::ICMP_SLT || !IIQ.UseInstrInfo)
3241     return false;
3242 
3243   // Canonicalize nsw add as RHS.
3244   if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3245     std::swap(LHS, RHS);
3246   if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3247     return false;
3248 
3249   Value *X;
3250   const APInt *C1, *C2;
3251   if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
3252       !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
3253     return false;
3254 
3255   return (C1->slt(*C2) && C1->isNonNegative()) ||
3256          (C2->slt(*C1) && C1->isNonPositive());
3257 }
3258 
3259 /// TODO: A large part of this logic is duplicated in InstCombine's
3260 /// foldICmpBinOp(). We should be able to share that and avoid the code
3261 /// duplication.
3262 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
3263                                     Value *RHS, const SimplifyQuery &Q,
3264                                     unsigned MaxRecurse) {
3265   BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3266   BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3267   if (MaxRecurse && (LBO || RBO)) {
3268     // Analyze the case when either LHS or RHS is an add instruction.
3269     Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3270     // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3271     bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3272     if (LBO && LBO->getOpcode() == Instruction::Add) {
3273       A = LBO->getOperand(0);
3274       B = LBO->getOperand(1);
3275       NoLHSWrapProblem =
3276           ICmpInst::isEquality(Pred) ||
3277           (CmpInst::isUnsigned(Pred) &&
3278            Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3279           (CmpInst::isSigned(Pred) &&
3280            Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3281     }
3282     if (RBO && RBO->getOpcode() == Instruction::Add) {
3283       C = RBO->getOperand(0);
3284       D = RBO->getOperand(1);
3285       NoRHSWrapProblem =
3286           ICmpInst::isEquality(Pred) ||
3287           (CmpInst::isUnsigned(Pred) &&
3288            Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3289           (CmpInst::isSigned(Pred) &&
3290            Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3291     }
3292 
3293     // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3294     if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3295       if (Value *V = simplifyICmpInst(Pred, A == RHS ? B : A,
3296                                       Constant::getNullValue(RHS->getType()), Q,
3297                                       MaxRecurse - 1))
3298         return V;
3299 
3300     // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3301     if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3302       if (Value *V =
3303               simplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3304                                C == LHS ? D : C, Q, MaxRecurse - 1))
3305         return V;
3306 
3307     // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3308     bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3309                        trySimplifyICmpWithAdds(Pred, LHS, RHS, Q.IIQ);
3310     if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3311       // Determine Y and Z in the form icmp (X+Y), (X+Z).
3312       Value *Y, *Z;
3313       if (A == C) {
3314         // C + B == C + D  ->  B == D
3315         Y = B;
3316         Z = D;
3317       } else if (A == D) {
3318         // D + B == C + D  ->  B == C
3319         Y = B;
3320         Z = C;
3321       } else if (B == C) {
3322         // A + C == C + D  ->  A == D
3323         Y = A;
3324         Z = D;
3325       } else {
3326         assert(B == D);
3327         // A + D == C + D  ->  A == C
3328         Y = A;
3329         Z = C;
3330       }
3331       if (Value *V = simplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3332         return V;
3333     }
3334   }
3335 
3336   if (LBO)
3337     if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3338       return V;
3339 
3340   if (RBO)
3341     if (Value *V = simplifyICmpWithBinOpOnLHS(
3342             ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3343       return V;
3344 
3345   // 0 - (zext X) pred C
3346   if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3347     const APInt *C;
3348     if (match(RHS, m_APInt(C))) {
3349       if (C->isStrictlyPositive()) {
3350         if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3351           return ConstantInt::getTrue(getCompareTy(RHS));
3352         if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3353           return ConstantInt::getFalse(getCompareTy(RHS));
3354       }
3355       if (C->isNonNegative()) {
3356         if (Pred == ICmpInst::ICMP_SLE)
3357           return ConstantInt::getTrue(getCompareTy(RHS));
3358         if (Pred == ICmpInst::ICMP_SGT)
3359           return ConstantInt::getFalse(getCompareTy(RHS));
3360       }
3361     }
3362   }
3363 
3364   //   If C2 is a power-of-2 and C is not:
3365   //   (C2 << X) == C --> false
3366   //   (C2 << X) != C --> true
3367   const APInt *C;
3368   if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3369       match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
3370     // C2 << X can equal zero in some circumstances.
3371     // This simplification might be unsafe if C is zero.
3372     //
3373     // We know it is safe if:
3374     // - The shift is nsw. We can't shift out the one bit.
3375     // - The shift is nuw. We can't shift out the one bit.
3376     // - C2 is one.
3377     // - C isn't zero.
3378     if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3379         Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3380         match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3381       if (Pred == ICmpInst::ICMP_EQ)
3382         return ConstantInt::getFalse(getCompareTy(RHS));
3383       if (Pred == ICmpInst::ICMP_NE)
3384         return ConstantInt::getTrue(getCompareTy(RHS));
3385     }
3386   }
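  // Illustrative example (assumed IR names):
  //   icmp eq i8 (shl nuw i8 4, %x), 6 -> false, since a nuw shift of the
  //   power-of-2 constant 4 can only produce another power of 2, never 6.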
3387 
3388   // If C is a power-of-2:
3389   // (C << X)  >u 0x8000 --> false
3390   // (C << X) <=u 0x8000 --> true
3391   if (match(LHS, m_Shl(m_Power2(), m_Value())) && match(RHS, m_SignMask())) {
3392     if (Pred == ICmpInst::ICMP_UGT)
3393       return ConstantInt::getFalse(getCompareTy(RHS));
3394     if (Pred == ICmpInst::ICMP_ULE)
3395       return ConstantInt::getTrue(getCompareTy(RHS));
3396   }
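  // Illustrative example (assumed i16 values):
  //   icmp ugt i16 (shl i16 2, %x), 32768 -> false, since 0x8000 is the
  //   largest value a power-of-2 left shift can produce.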
3397 
3398   if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3399     return nullptr;
3400 
3401   if (LBO->getOperand(0) == RBO->getOperand(0)) {
3402     switch (LBO->getOpcode()) {
3403     default:
3404       break;
3405     case Instruction::Shl: {
3406       bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3407       bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3408       if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3409           !isKnownNonZero(LBO->getOperand(0), Q.DL))
3410         break;
3411       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
3412                                       RBO->getOperand(1), Q, MaxRecurse - 1))
3413         return V;
3414       break;
3415     }
3416     // If C1 & C2 == C1, A = X and/or C1, B = X and/or C2:
3417     // icmp ule A, B -> true
3418     // icmp ugt A, B -> false
3419     // icmp sle A, B -> true (C1 and C2 are the same sign)
3420     // icmp sgt A, B -> false (C1 and C2 are the same sign)
3421     case Instruction::And:
3422     case Instruction::Or: {
3423       const APInt *C1, *C2;
3424       if (ICmpInst::isRelational(Pred) &&
3425           match(LBO->getOperand(1), m_APInt(C1)) &&
3426           match(RBO->getOperand(1), m_APInt(C2))) {
3427         if (!C1->isSubsetOf(*C2)) {
3428           std::swap(C1, C2);
3429           Pred = ICmpInst::getSwappedPredicate(Pred);
3430         }
3431         if (C1->isSubsetOf(*C2)) {
3432           if (Pred == ICmpInst::ICMP_ULE)
3433             return ConstantInt::getTrue(getCompareTy(LHS));
3434           if (Pred == ICmpInst::ICMP_UGT)
3435             return ConstantInt::getFalse(getCompareTy(LHS));
3436           if (C1->isNonNegative() == C2->isNonNegative()) {
3437             if (Pred == ICmpInst::ICMP_SLE)
3438               return ConstantInt::getTrue(getCompareTy(LHS));
3439             if (Pred == ICmpInst::ICMP_SGT)
3440               return ConstantInt::getFalse(getCompareTy(LHS));
3441           }
3442         }
3443       }
3444       break;
3445     }
3446     }
3447   }
3448 
3449   if (LBO->getOperand(1) == RBO->getOperand(1)) {
3450     switch (LBO->getOpcode()) {
3451     default:
3452       break;
3453     case Instruction::UDiv:
3454     case Instruction::LShr:
3455       if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3456           !Q.IIQ.isExact(RBO))
3457         break;
3458       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3459                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3460         return V;
3461       break;
3462     case Instruction::SDiv:
3463       if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3464           !Q.IIQ.isExact(RBO))
3465         break;
3466       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3467                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3468         return V;
3469       break;
3470     case Instruction::AShr:
3471       if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3472         break;
3473       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3474                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3475         return V;
3476       break;
3477     case Instruction::Shl: {
3478       bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3479       bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3480       if (!NUW && !NSW)
3481         break;
3482       if (!NSW && ICmpInst::isSigned(Pred))
3483         break;
3484       if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3485                                       RBO->getOperand(0), Q, MaxRecurse - 1))
3486         return V;
3487       break;
3488     }
3489     }
3490   }
3491   return nullptr;
3492 }
3493 
3494 /// Simplify integer comparisons where at least one operand of the compare
3495 /// matches an integer min/max idiom.
3496 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3497                                      Value *RHS, const SimplifyQuery &Q,
3498                                      unsigned MaxRecurse) {
3499   Type *ITy = getCompareTy(LHS); // The return type.
3500   Value *A, *B;
3501   CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3502   CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3503 
3504   // Signed variants on "max(a,b)>=a -> true".
3505   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3506     if (A != RHS)
3507       std::swap(A, B);       // smax(A, B) pred A.
3508     EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3509     // We analyze this as smax(A, B) pred A.
3510     P = Pred;
3511   } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3512              (A == LHS || B == LHS)) {
3513     if (A != LHS)
3514       std::swap(A, B);       // A pred smax(A, B).
3515     EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3516     // We analyze this as smax(A, B) swapped-pred A.
3517     P = CmpInst::getSwappedPredicate(Pred);
3518   } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3519              (A == RHS || B == RHS)) {
3520     if (A != RHS)
3521       std::swap(A, B);       // smin(A, B) pred A.
3522     EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3523     // We analyze this as smax(-A, -B) swapped-pred -A.
3524     // Note that we do not need to actually form -A or -B thanks to EqP.
3525     P = CmpInst::getSwappedPredicate(Pred);
3526   } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3527              (A == LHS || B == LHS)) {
3528     if (A != LHS)
3529       std::swap(A, B);       // A pred smin(A, B).
3530     EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3531     // We analyze this as smax(-A, -B) pred -A.
3532     // Note that we do not need to actually form -A or -B thanks to EqP.
3533     P = Pred;
3534   }
3535   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3536     // Cases correspond to "max(A, B) p A".
3537     switch (P) {
3538     default:
3539       break;
3540     case CmpInst::ICMP_EQ:
3541     case CmpInst::ICMP_SLE:
3542       // Equivalent to "A EqP B".  This may be the same as the condition tested
3543       // in the max/min; if so, we can just return that.
3544       if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3545         return V;
3546       if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3547         return V;
3548       // Otherwise, see if "A EqP B" simplifies.
3549       if (MaxRecurse)
3550         if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3551           return V;
3552       break;
3553     case CmpInst::ICMP_NE:
3554     case CmpInst::ICMP_SGT: {
3555       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3556       // Equivalent to "A InvEqP B".  This may be the same as the condition
3557       // tested in the max/min; if so, we can just return that.
3558       if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3559         return V;
3560       if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3561         return V;
3562       // Otherwise, see if "A InvEqP B" simplifies.
3563       if (MaxRecurse)
3564         if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3565           return V;
3566       break;
3567     }
3568     case CmpInst::ICMP_SGE:
3569       // Always true.
3570       return getTrue(ITy);
3571     case CmpInst::ICMP_SLT:
3572       // Always false.
3573       return getFalse(ITy);
3574     }
3575   }
3576 
3577   // Unsigned variants on "max(a,b)>=a -> true".
3578   P = CmpInst::BAD_ICMP_PREDICATE;
3579   if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3580     if (A != RHS)
3581       std::swap(A, B);       // umax(A, B) pred A.
3582     EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3583     // We analyze this as umax(A, B) pred A.
3584     P = Pred;
3585   } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3586              (A == LHS || B == LHS)) {
3587     if (A != LHS)
3588       std::swap(A, B);       // A pred umax(A, B).
3589     EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3590     // We analyze this as umax(A, B) swapped-pred A.
3591     P = CmpInst::getSwappedPredicate(Pred);
3592   } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3593              (A == RHS || B == RHS)) {
3594     if (A != RHS)
3595       std::swap(A, B);       // umin(A, B) pred A.
3596     EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3597     // We analyze this as umax(-A, -B) swapped-pred -A.
3598     // Note that we do not need to actually form -A or -B thanks to EqP.
3599     P = CmpInst::getSwappedPredicate(Pred);
3600   } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3601              (A == LHS || B == LHS)) {
3602     if (A != LHS)
3603       std::swap(A, B);       // A pred umin(A, B).
3604     EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3605     // We analyze this as umax(-A, -B) pred -A.
3606     // Note that we do not need to actually form -A or -B thanks to EqP.
3607     P = Pred;
3608   }
3609   if (P != CmpInst::BAD_ICMP_PREDICATE) {
3610     // Cases correspond to "max(A, B) p A".
3611     switch (P) {
3612     default:
3613       break;
3614     case CmpInst::ICMP_EQ:
3615     case CmpInst::ICMP_ULE:
3616       // Equivalent to "A EqP B".  This may be the same as the condition tested
3617       // in the max/min; if so, we can just return that.
3618       if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3619         return V;
3620       if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3621         return V;
3622       // Otherwise, see if "A EqP B" simplifies.
3623       if (MaxRecurse)
3624         if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3625           return V;
3626       break;
3627     case CmpInst::ICMP_NE:
3628     case CmpInst::ICMP_UGT: {
3629       CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3630       // Equivalent to "A InvEqP B".  This may be the same as the condition
3631       // tested in the max/min; if so, we can just return that.
3632       if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3633         return V;
3634       if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3635         return V;
3636       // Otherwise, see if "A InvEqP B" simplifies.
3637       if (MaxRecurse)
3638         if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3639           return V;
3640       break;
3641     }
3642     case CmpInst::ICMP_UGE:
3643       return getTrue(ITy);
3644     case CmpInst::ICMP_ULT:
3645       return getFalse(ITy);
3646     }
3647   }
3648 
3649   // Comparing 1 each of min/max with a common operand?
3650   // Canonicalize min operand to RHS.
3651   if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3652       match(LHS, m_SMin(m_Value(), m_Value()))) {
3653     std::swap(LHS, RHS);
3654     Pred = ICmpInst::getSwappedPredicate(Pred);
3655   }
3656 
3657   Value *C, *D;
3658   if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3659       match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3660       (A == C || A == D || B == C || B == D)) {
3661     // smax(A, B) >=s smin(A, D) --> true
3662     if (Pred == CmpInst::ICMP_SGE)
3663       return getTrue(ITy);
3664     // smax(A, B) <s smin(A, D) --> false
3665     if (Pred == CmpInst::ICMP_SLT)
3666       return getFalse(ITy);
3667   } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3668              match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3669              (A == C || A == D || B == C || B == D)) {
3670     // umax(A, B) >=u umin(A, D) --> true
3671     if (Pred == CmpInst::ICMP_UGE)
3672       return getTrue(ITy);
3673     // umax(A, B) <u umin(A, D) --> false
3674     if (Pred == CmpInst::ICMP_ULT)
3675       return getFalse(ITy);
3676   }
3677 
3678   return nullptr;
3679 }
3680 
3681 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3682                                                Value *LHS, Value *RHS,
3683                                                const SimplifyQuery &Q) {
3684   // Gracefully handle instructions that have not been inserted yet.
3685   if (!Q.AC || !Q.CxtI)
3686     return nullptr;
3687 
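  // Illustrative example (assumed IR names): given a dominating
  //   call void @llvm.assume(i1 %c)   where %c = icmp ugt i32 %x, 10
  // a later "icmp ugt i32 %x, 5" simplifies to true via the implied condition.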
3688   for (Value *AssumeBaseOp : {LHS, RHS}) {
3689     for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3690       if (!AssumeVH)
3691         continue;
3692 
3693       CallInst *Assume = cast<CallInst>(AssumeVH);
3694       if (std::optional<bool> Imp = isImpliedCondition(
3695               Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3696         if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3697           return ConstantInt::get(getCompareTy(LHS), *Imp);
3698     }
3699   }
3700 
3701   return nullptr;
3702 }
3703 
3704 static Value *simplifyICmpWithIntrinsicOnLHS(CmpInst::Predicate Pred,
3705                                              Value *LHS, Value *RHS) {
3706   auto *II = dyn_cast<IntrinsicInst>(LHS);
3707   if (!II)
3708     return nullptr;
3709 
3710   switch (II->getIntrinsicID()) {
3711   case Intrinsic::uadd_sat:
3712     // uadd.sat(X, Y) uge X, uadd.sat(X, Y) uge Y
3713     if (II->getArgOperand(0) == RHS || II->getArgOperand(1) == RHS) {
3714       if (Pred == ICmpInst::ICMP_UGE)
3715         return ConstantInt::getTrue(getCompareTy(II));
3716       if (Pred == ICmpInst::ICMP_ULT)
3717         return ConstantInt::getFalse(getCompareTy(II));
3718     }
3719     return nullptr;
3720   case Intrinsic::usub_sat:
3721     // usub.sat(X, Y) ule X
3722     if (II->getArgOperand(0) == RHS) {
3723       if (Pred == ICmpInst::ICMP_ULE)
3724         return ConstantInt::getTrue(getCompareTy(II));
3725       if (Pred == ICmpInst::ICMP_UGT)
3726         return ConstantInt::getFalse(getCompareTy(II));
3727     }
3728     return nullptr;
3729   default:
3730     return nullptr;
3731   }
3732 }
3733 
3734 /// Given operands for an ICmpInst, see if we can fold the result.
3735 /// If not, this returns null.
3736 static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3737                                const SimplifyQuery &Q, unsigned MaxRecurse) {
3738   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3739   assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3740 
3741   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3742     if (Constant *CRHS = dyn_cast<Constant>(RHS))
3743       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3744 
3745     // If we have a constant, make sure it is on the RHS.
3746     std::swap(LHS, RHS);
3747     Pred = CmpInst::getSwappedPredicate(Pred);
3748   }
3749   assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3750 
3751   Type *ITy = getCompareTy(LHS); // The return type.
3752 
3753   // icmp poison, X -> poison
3754   if (isa<PoisonValue>(RHS))
3755     return PoisonValue::get(ITy);
3756 
3757   // For EQ and NE, we can always pick a value for the undef to make the
3758   // predicate pass or fail, so we can return undef.
3759   // Matches behavior in llvm::ConstantFoldCompareInstruction.
3760   if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3761     return UndefValue::get(ITy);
3762 
3763   // icmp X, X -> true/false
3764   // icmp X, undef -> true/false because undef could be X.
3765   if (LHS == RHS || Q.isUndefValue(RHS))
3766     return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3767 
3768   if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3769     return V;
3770 
3771   // TODO: Sink/common this with other potentially expensive calls that use
3772   //       ValueTracking? See comment below for isKnownNonEqual().
3773   if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3774     return V;
3775 
3776   if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3777     return V;
3778 
3779   // If both operands have range metadata, use the metadata
3780   // to simplify the comparison.
3781   if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3782     auto RHS_Instr = cast<Instruction>(RHS);
3783     auto LHS_Instr = cast<Instruction>(LHS);
3784 
3785     if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3786         Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3787       auto RHS_CR = getConstantRangeFromMetadata(
3788           *RHS_Instr->getMetadata(LLVMContext::MD_range));
3789       auto LHS_CR = getConstantRangeFromMetadata(
3790           *LHS_Instr->getMetadata(LLVMContext::MD_range));
3791 
3792       if (LHS_CR.icmp(Pred, RHS_CR))
3793         return ConstantInt::getTrue(RHS->getContext());
3794 
3795       if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
3796         return ConstantInt::getFalse(RHS->getContext());
3797     }
3798   }
3799 
3800   // Compare of cast, for example (zext X) != 0 -> X != 0
3801   if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3802     Instruction *LI = cast<CastInst>(LHS);
3803     Value *SrcOp = LI->getOperand(0);
3804     Type *SrcTy = SrcOp->getType();
3805     Type *DstTy = LI->getType();
3806 
3807     // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3808     // if the integer type is the same size as the pointer type.
3809     if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3810         Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3811       if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3812         // Transfer the cast to the constant.
3813         if (Value *V = simplifyICmpInst(Pred, SrcOp,
3814                                         ConstantExpr::getIntToPtr(RHSC, SrcTy),
3815                                         Q, MaxRecurse - 1))
3816           return V;
3817       } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3818         if (RI->getOperand(0)->getType() == SrcTy)
3819           // Compare without the cast.
3820           if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3821                                           MaxRecurse - 1))
3822             return V;
3823       }
3824     }
3825 
3826     if (isa<ZExtInst>(LHS)) {
3827       // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3828       // same type.
3829       if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3830         if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3831           // Compare X and Y.  Note that signed predicates become unsigned.
3832           if (Value *V =
3833                   simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), SrcOp,
3834                                    RI->getOperand(0), Q, MaxRecurse - 1))
3835             return V;
3836       }
3837       // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3838       else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3839         if (SrcOp == RI->getOperand(0)) {
3840           if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3841             return ConstantInt::getTrue(ITy);
3842           if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3843             return ConstantInt::getFalse(ITy);
3844         }
3845       }
3846       // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3847       // too.  If not, then try to deduce the result of the comparison.
3848       else if (match(RHS, m_ImmConstant())) {
3849         Constant *C = dyn_cast<Constant>(RHS);
3850         assert(C != nullptr);
3851 
3852         // Compute the constant that would result if we truncated to SrcTy then
3853         // reextended to DstTy.
3854         Constant *Trunc =
3855             ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3856         assert(Trunc && "Constant-fold of ImmConstant should not fail");
3857         Constant *RExt =
3858             ConstantFoldCastOperand(CastInst::ZExt, Trunc, DstTy, Q.DL);
3859         assert(RExt && "Constant-fold of ImmConstant should not fail");
3860         Constant *AnyEq =
3861             ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3862         assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3863 
3864         // If the re-extended constant didn't change any of the elements then
3865         // this is effectively also a case of comparing two zero-extended
3866         // values.
3867         if (AnyEq->isAllOnesValue() && MaxRecurse)
3868           if (Value *V = simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3869                                           SrcOp, Trunc, Q, MaxRecurse - 1))
3870             return V;
3871 
3872         // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3873         // there.  Use this to work out the result of the comparison.
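        // Illustrative example (assumed values): for
        // "icmp ult (zext i8 %x to i32), 300", trunc(300) = 44 re-extends to
        // 44 != 300, so the zext'd value is always <u 300 and the compare
        // folds to true.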
3874         if (AnyEq->isNullValue()) {
3875           switch (Pred) {
3876           default:
3877             llvm_unreachable("Unknown ICmp predicate!");
3878           // LHS <u RHS.
3879           case ICmpInst::ICMP_EQ:
3880           case ICmpInst::ICMP_UGT:
3881           case ICmpInst::ICMP_UGE:
3882             return Constant::getNullValue(ITy);
3883 
3884           case ICmpInst::ICMP_NE:
3885           case ICmpInst::ICMP_ULT:
3886           case ICmpInst::ICMP_ULE:
3887             return Constant::getAllOnesValue(ITy);
3888 
3889           // LHS is non-negative.  If RHS is negative then LHS >s RHS.  If RHS
3890           // is non-negative then LHS <s RHS.
3891           case ICmpInst::ICMP_SGT:
3892           case ICmpInst::ICMP_SGE:
3893             return ConstantFoldCompareInstOperands(
3894                 ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
3895                 Q.DL);
3896           case ICmpInst::ICMP_SLT:
3897           case ICmpInst::ICMP_SLE:
3898             return ConstantFoldCompareInstOperands(
3899                 ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
3900                 Q.DL);
3901           }
3902         }
3903       }
3904     }
3905 
3906     if (isa<SExtInst>(LHS)) {
3907       // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3908       // same type.
3909       if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3910         if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3911           // Compare X and Y.  Note that the predicate does not change.
3912           if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3913                                           MaxRecurse - 1))
3914             return V;
3915       }
3916       // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3917       else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3918         if (SrcOp == RI->getOperand(0)) {
3919           if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3920             return ConstantInt::getTrue(ITy);
3921           if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3922             return ConstantInt::getFalse(ITy);
3923         }
3924       }
3925       // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3926       // too.  If not, then try to deduce the result of the comparison.
3927       else if (match(RHS, m_ImmConstant())) {
3928         Constant *C = cast<Constant>(RHS);
3929 
3930         // Compute the constant that would happen if we truncated to SrcTy then
3931         // reextended to DstTy.
3932         Constant *Trunc =
3933             ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3934         assert(Trunc && "Constant-fold of ImmConstant should not fail");
3935         Constant *RExt =
3936             ConstantFoldCastOperand(CastInst::SExt, Trunc, DstTy, Q.DL);
3937         assert(RExt && "Constant-fold of ImmConstant should not fail");
3938         Constant *AnyEq =
3939             ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3940         assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3941 
3942         // If the re-extended constant didn't change then this is effectively
3943         // also a case of comparing two sign-extended values.
3944         if (AnyEq->isAllOnesValue() && MaxRecurse)
3945           if (Value *V =
3946                   simplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse - 1))
3947             return V;
3948 
3949         // Otherwise the upper bits of LHS are all equal, while RHS has varying
3950         // bits there.  Use this to work out the result of the comparison.
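        // Illustrative example (assumed values): for
        // "icmp slt (sext i8 %x to i32), 200", trunc(200) = -56 re-extends to
        // -56 != 200; since 200 >=s 0, the sext'd value is always <s 200 and
        // the compare folds to true.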
3951         if (AnyEq->isNullValue()) {
3952           switch (Pred) {
3953           default:
3954             llvm_unreachable("Unknown ICmp predicate!");
3955           case ICmpInst::ICMP_EQ:
3956             return Constant::getNullValue(ITy);
3957           case ICmpInst::ICMP_NE:
3958             return Constant::getAllOnesValue(ITy);
3959 
3960           // If RHS is non-negative then LHS <s RHS.  If RHS is negative then
3961           // LHS >s RHS.
3962           case ICmpInst::ICMP_SGT:
3963           case ICmpInst::ICMP_SGE:
3964             return ConstantExpr::getICmp(ICmpInst::ICMP_SLT, C,
3965                                          Constant::getNullValue(C->getType()));
3966           case ICmpInst::ICMP_SLT:
3967           case ICmpInst::ICMP_SLE:
3968             return ConstantExpr::getICmp(ICmpInst::ICMP_SGE, C,
3969                                          Constant::getNullValue(C->getType()));
3970 
3971           // If LHS is non-negative then LHS <u RHS.  If LHS is negative then
3972           // LHS >u RHS.
3973           case ICmpInst::ICMP_UGT:
3974           case ICmpInst::ICMP_UGE:
3975             // Comparison is true iff the LHS <s 0.
3976             if (MaxRecurse)
3977               if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3978                                               Constant::getNullValue(SrcTy), Q,
3979                                               MaxRecurse - 1))
3980                 return V;
3981             break;
3982           case ICmpInst::ICMP_ULT:
3983           case ICmpInst::ICMP_ULE:
3984             // Comparison is true iff the LHS >=s 0.
3985             if (MaxRecurse)
3986               if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3987                                               Constant::getNullValue(SrcTy), Q,
3988                                               MaxRecurse - 1))
3989                 return V;
3990             break;
3991           }
3992         }
3993       }
3994     }
3995   }
3996 
3997   // icmp eq|ne X, Y -> false|true if X != Y
3998   // This is potentially expensive, and we have already computed known bits for
3999   // compares with 0 above here, so only try this for a non-zero compare.
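  // An illustrative case (assuming isKnownNonEqual can prove the operands
  // differ):
  //   icmp eq i32 %a, (add nuw nsw i32 %a, 1) --> false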
4000   if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
4001       isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
4002     return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
4003   }
4004 
4005   if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
4006     return V;
4007 
4008   if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
4009     return V;
4010 
4011   if (Value *V = simplifyICmpWithIntrinsicOnLHS(Pred, LHS, RHS))
4012     return V;
4013   if (Value *V = simplifyICmpWithIntrinsicOnLHS(
4014           ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
4015     return V;
4016 
4017   if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
4018     return V;
4019 
4020   if (std::optional<bool> Res =
4021           isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4022     return ConstantInt::getBool(ITy, *Res);
4023 
4024   // Simplify comparisons of related pointers using a powerful, recursive
4025   // GEP-walk when we have target data available.
4026   if (LHS->getType()->isPointerTy())
4027     if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
4028       return C;
4029   if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
4030     if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
4031       if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4032           Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
4033               Q.DL.getTypeSizeInBits(CLHS->getType()))
4034         if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
4035                                          CRHS->getPointerOperand(), Q))
4036           return C;
4037 
4038   // If the comparison is with the result of a select instruction, check whether
4039   // comparing with either branch of the select always yields the same value.
4040   if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4041     if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4042       return V;
4043 
4044   // If the comparison is with the result of a phi instruction, check whether
4045   // doing the compare with each incoming phi value yields a common result.
4046   if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4047     if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4048       return V;
4049 
4050   return nullptr;
4051 }
4052 
4053 Value *llvm::simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4054                               const SimplifyQuery &Q) {
4055   return ::simplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4056 }
4057 
4058 /// Given operands for an FCmpInst, see if we can fold the result.
4059 /// If not, this returns null.
4060 static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4061                                FastMathFlags FMF, const SimplifyQuery &Q,
4062                                unsigned MaxRecurse) {
4063   CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
4064   assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4065 
4066   if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4067     if (Constant *CRHS = dyn_cast<Constant>(RHS))
4068       return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI,
4069                                              Q.CxtI);
4070 
4071     // If we have a constant, make sure it is on the RHS.
4072     std::swap(LHS, RHS);
4073     Pred = CmpInst::getSwappedPredicate(Pred);
4074   }
4075 
4076   // Fold trivial predicates.
4077   Type *RetTy = getCompareTy(LHS);
4078   if (Pred == FCmpInst::FCMP_FALSE)
4079     return getFalse(RetTy);
4080   if (Pred == FCmpInst::FCMP_TRUE)
4081     return getTrue(RetTy);
4082 
4083   // fcmp pred x, poison and  fcmp pred poison, x
4084   // fold to poison
4085   if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4086     return PoisonValue::get(RetTy);
4087 
4088   // fcmp pred x, undef  and  fcmp pred undef, x
4089   // fold to true if unordered, false if ordered
4090   if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
4091     // Choosing NaN for the undef will always make unordered comparison succeed
4092     // and ordered comparison fail.
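    // e.g. fcmp ult float %x, undef --> true, while
    //      fcmp olt float %x, undef --> false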
4093     return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4094   }
4095 
4096   // fcmp x,x -> true/false.  Not all compares are foldable.
4097   if (LHS == RHS) {
4098     if (CmpInst::isTrueWhenEqual(Pred))
4099       return getTrue(RetTy);
4100     if (CmpInst::isFalseWhenEqual(Pred))
4101       return getFalse(RetTy);
4102   }
4103 
4104   // Fold (un)ordered comparison if we can determine there are no NaNs.
4105   //
4106   // This catches the two-variable input case; constants are handled below as a
4107   // class-like compare.
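  //
  // e.g. when the nnan flag is present (or both operands are known never NaN):
  //   fcmp ord float %x, %y --> true
  //   fcmp uno float %x, %y --> false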
4108   if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4109     if (FMF.noNaNs() ||
4110         (isKnownNeverNaN(RHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
4111          isKnownNeverNaN(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT)))
4112       return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4113   }
4114 
4115   const APFloat *C = nullptr;
4116   match(RHS, m_APFloatAllowUndef(C));
4117   std::optional<KnownFPClass> FullKnownClassLHS;
4118 
4119   // Lazily compute the possible classes for LHS. Avoid computing it twice if
4120   // RHS is a 0.
4121   auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4122                                                      fcAllFlags) {
4123     if (FullKnownClassLHS)
4124       return *FullKnownClassLHS;
4125     return computeKnownFPClass(LHS, FMF, Q.DL, InterestedFlags, 0, Q.TLI, Q.AC,
4126                                Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo);
4127   };
4128 
4129   if (C && Q.CxtI) {
4130     // Fold out compares that express a class test.
4131     //
4132     // FIXME: Should be able to perform folds without context
4133     // instruction. Always pass in the context function?
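    //
    // As an illustration, "fcmp oeq float %x, +infinity" is the class test
    // fcPosInf: if %x is known never to be +infinity it folds to false, and if
    // %x can only be +infinity it folds to true.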
4134 
4135     const Function *ParentF = Q.CxtI->getFunction();
4136     auto [ClassVal, ClassTest] = fcmpToClassTest(Pred, *ParentF, LHS, C);
4137     if (ClassVal) {
4138       FullKnownClassLHS = computeLHSClass();
4139       if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4140         return getFalse(RetTy);
4141       if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4142         return getTrue(RetTy);
4143     }
4144   }
4145 
4146   // Handle fcmp with constant RHS.
4147   if (C) {
4148     // TODO: If we always required a context function, we wouldn't need to
4149     // special case nans.
4150     if (C->isNaN())
4151       return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4152 
4153     // TODO: Need a version of fcmpToClassTest that returns the implied class
4154     // when the compare isn't a complete class test, e.g. > 1.0 implies
4155     // fcPositive but isn't implementable as a class call.
4156     if (C->isNegative() && !C->isNegZero()) {
4157       FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4158 
4159       // TODO: We can catch more cases by using a range check rather than
4160       //       relying on CannotBeOrderedLessThanZero.
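      //
      // e.g. if %x = call float @llvm.fabs.f32(float %a), then %x can never be
      // ordered-less-than-zero, so:
      //   fcmp ugt float %x, -1.0 --> true
      //   fcmp olt float %x, -1.0 --> false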
4161       switch (Pred) {
4162       case FCmpInst::FCMP_UGE:
4163       case FCmpInst::FCMP_UGT:
4164       case FCmpInst::FCMP_UNE: {
4165         KnownFPClass KnownClass = computeLHSClass(Interested);
4166 
4167         // (X >= 0) implies (X > C) when (C < 0)
4168         if (KnownClass.cannotBeOrderedLessThanZero())
4169           return getTrue(RetTy);
4170         break;
4171       }
4172       case FCmpInst::FCMP_OEQ:
4173       case FCmpInst::FCMP_OLE:
4174       case FCmpInst::FCMP_OLT: {
4175         KnownFPClass KnownClass = computeLHSClass(Interested);
4176 
4177         // (X >= 0) implies !(X < C) when (C < 0)
4178         if (KnownClass.cannotBeOrderedLessThanZero())
4179           return getFalse(RetTy);
4180         break;
4181       }
4182       default:
4183         break;
4184       }
4185     }
4186     // Check comparison of [minnum/maxnum with constant] with other constant.
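    // e.g. minnum(%x, 1.0) can never exceed 1.0, so comparing it with 2.0:
    //   minnum(%x, 1.0) <  2.0 --> true
    //   minnum(%x, 1.0) >  2.0 --> false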
4187     const APFloat *C2;
4188     if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
4189          *C2 < *C) ||
4190         (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
4191          *C2 > *C)) {
4192       bool IsMaxNum =
4193           cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4194       // The ordered relationship and minnum/maxnum guarantee that we do not
4195       // have NaN constants, so ordered/unordered preds are handled the same.
4196       switch (Pred) {
4197       case FCmpInst::FCMP_OEQ:
4198       case FCmpInst::FCMP_UEQ:
4199         // minnum(X, LesserC)  == C --> false
4200         // maxnum(X, GreaterC) == C --> false
4201         return getFalse(RetTy);
4202       case FCmpInst::FCMP_ONE:
4203       case FCmpInst::FCMP_UNE:
4204         // minnum(X, LesserC)  != C --> true
4205         // maxnum(X, GreaterC) != C --> true
4206         return getTrue(RetTy);
4207       case FCmpInst::FCMP_OGE:
4208       case FCmpInst::FCMP_UGE:
4209       case FCmpInst::FCMP_OGT:
4210       case FCmpInst::FCMP_UGT:
4211         // minnum(X, LesserC)  >= C --> false
4212         // minnum(X, LesserC)  >  C --> false
4213         // maxnum(X, GreaterC) >= C --> true
4214         // maxnum(X, GreaterC) >  C --> true
4215         return ConstantInt::get(RetTy, IsMaxNum);
4216       case FCmpInst::FCMP_OLE:
4217       case FCmpInst::FCMP_ULE:
4218       case FCmpInst::FCMP_OLT:
4219       case FCmpInst::FCMP_ULT:
4220         // minnum(X, LesserC)  <= C --> true
4221         // minnum(X, LesserC)  <  C --> true
4222         // maxnum(X, GreaterC) <= C --> false
4223         // maxnum(X, GreaterC) <  C --> false
4224         return ConstantInt::get(RetTy, !IsMaxNum);
4225       default:
4226         // TRUE/FALSE/ORD/UNO should be handled before this.
4227         llvm_unreachable("Unexpected fcmp predicate");
4228       }
4229     }
4230   }
4231 
4232   // TODO: Could fold this with above if there were a matcher which returned all
4233   // classes in a non-splat vector.
4234   if (match(RHS, m_AnyZeroFP())) {
4235     switch (Pred) {
4236     case FCmpInst::FCMP_OGE:
4237     case FCmpInst::FCMP_ULT: {
4238       FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4239       if (!FMF.noNaNs())
4240         Interested |= fcNan;
4241 
4242       KnownFPClass Known = computeLHSClass(Interested);
4243 
4244       // Positive or zero X >= 0.0 --> true
4245       // Positive or zero X <  0.0 --> false
4246       if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4247           Known.cannotBeOrderedLessThanZero())
4248         return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
4249       break;
4250     }
4251     case FCmpInst::FCMP_UGE:
4252     case FCmpInst::FCMP_OLT: {
4253       FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4254       KnownFPClass Known = computeLHSClass(Interested);
4255 
4256       // Positive or zero or nan X >= 0.0 --> true
4257       // Positive or zero or nan X <  0.0 --> false
4258       if (Known.cannotBeOrderedLessThanZero())
4259         return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
4260       break;
4261     }
4262     default:
4263       break;
4264     }
4265   }
4266 
4267   // If the comparison is with the result of a select instruction, check whether
4268   // comparing with either branch of the select always yields the same value.
4269   if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4270     if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4271       return V;
4272 
4273   // If the comparison is with the result of a phi instruction, check whether
4274   // doing the compare with each incoming phi value yields a common result.
4275   if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4276     if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4277       return V;
4278 
4279   return nullptr;
4280 }
4281 
4282 Value *llvm::simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4283                               FastMathFlags FMF, const SimplifyQuery &Q) {
4284   return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
4285 }
4286 
4287 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4288                                      const SimplifyQuery &Q,
4289                                      bool AllowRefinement,
4290                                      SmallVectorImpl<Instruction *> *DropFlags,
4291                                      unsigned MaxRecurse) {
4292   // Trivial replacement.
4293   if (V == Op)
4294     return RepOp;
4295 
4296   if (!MaxRecurse--)
4297     return nullptr;
4298 
4299   // We cannot replace a constant, and shouldn't even try.
4300   if (isa<Constant>(Op))
4301     return nullptr;
4302 
4303   auto *I = dyn_cast<Instruction>(V);
4304   if (!I)
4305     return nullptr;
4306 
4307   // The arguments of a phi node might refer to a value from a previous
4308   // cycle iteration.
4309   if (isa<PHINode>(I))
4310     return nullptr;
4311 
4312   if (Op->getType()->isVectorTy()) {
4313     // For vector types, the simplification must hold per-lane, so forbid
4314     // potentially cross-lane operations like shufflevector.
4315     if (!I->getType()->isVectorTy() || isa<ShuffleVectorInst>(I) ||
4316         isa<CallBase>(I) || isa<BitCastInst>(I))
4317       return nullptr;
4318   }
4319 
4320   // Don't fold away llvm.is.constant checks based on assumptions.
4321   if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
4322     return nullptr;
4323 
4324   // Replace Op with RepOp in instruction operands.
4325   SmallVector<Value *, 8> NewOps;
4326   bool AnyReplaced = false;
4327   for (Value *InstOp : I->operands()) {
4328     if (Value *NewInstOp = simplifyWithOpReplaced(
4329             InstOp, Op, RepOp, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4330       NewOps.push_back(NewInstOp);
4331       AnyReplaced = InstOp != NewInstOp;
4332     } else {
4333       NewOps.push_back(InstOp);
4334     }
4335   }
4336 
4337   if (!AnyReplaced)
4338     return nullptr;
4339 
4340   if (!AllowRefinement) {
4341     // General InstSimplify functions may refine the result, e.g. by returning
4342     // a constant for a potentially poison value. To avoid this, implement only
4343     // a few non-refining but profitable transforms here.
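    //
    // For instance, simplifying "mul i32 %x, 0" to 0 would be a refinement
    // when %x may be poison (a poison result would be replaced by a defined
    // value), which is exactly what must be avoided on this path.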
4344 
4345     if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4346       unsigned Opcode = BO->getOpcode();
4347       // id op x -> x, x op id -> x
4348       if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4349         return NewOps[1];
4350       if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4351                                                       /* RHS */ true))
4352         return NewOps[0];
4353 
4354       // x & x -> x, x | x -> x
4355       if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4356           NewOps[0] == NewOps[1]) {
4357         // or disjoint x, x results in poison.
4358         if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4359           if (PDI->isDisjoint()) {
4360             if (!DropFlags)
4361               return nullptr;
4362             DropFlags->push_back(BO);
4363           }
4364         }
4365         return NewOps[0];
4366       }
4367 
4368       // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4369       // by assumption and this case never wraps, so nowrap flags can be
4370       // ignored.
4371       if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4372           NewOps[0] == RepOp && NewOps[1] == RepOp)
4373         return Constant::getNullValue(I->getType());
4374 
4375       // If we are substituting an absorber constant into a binop and extra
4376       // poison can't leak if we remove the select -- because both operands of
4377       // the binop are based on the same value -- then it may be safe to replace
4378       // the value with the absorber constant. Examples:
4379       // (Op == 0) ? 0 : (Op & -Op)            --> Op & -Op
4380       // (Op == 0) ? 0 : (Op * (binop Op, C))  --> Op * (binop Op, C)
4381       // (Op == -1) ? -1 : (Op | (binop C, Op)) --> Op | (binop C, Op)
4382       Constant *Absorber =
4383           ConstantExpr::getBinOpAbsorber(Opcode, I->getType());
4384       if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4385           impliesPoison(BO, Op))
4386         return Absorber;
4387     }
4388 
4389     if (isa<GetElementPtrInst>(I)) {
4390       // getelementptr x, 0 -> x.
4391       // This never returns poison, even if inbounds is set.
4392       if (NewOps.size() == 2 && match(NewOps[1], m_Zero()))
4393         return NewOps[0];
4394     }
4395   } else {
4396     // The simplification queries below may return the original value. Consider:
4397     //   %div = udiv i32 %arg, %arg2
4398     //   %mul = mul nsw i32 %div, %arg2
4399     //   %cmp = icmp eq i32 %mul, %arg
4400     //   %sel = select i1 %cmp, i32 %div, i32 undef
4401     // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4402     // simplifies back to %arg. This can only happen because %mul does not
4403     // dominate %div. To ensure a consistent return value contract, we make sure
4404     // that this case returns nullptr as well.
4405     auto PreventSelfSimplify = [V](Value *Simplified) {
4406       return Simplified != V ? Simplified : nullptr;
4407     };
4408 
4409     return PreventSelfSimplify(
4410         ::simplifyInstructionWithOperands(I, NewOps, Q, MaxRecurse));
4411   }
4412 
4413   // If all operands are constant after substituting Op for RepOp then we can
4414   // constant fold the instruction.
4415   SmallVector<Constant *, 8> ConstOps;
4416   for (Value *NewOp : NewOps) {
4417     if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4418       ConstOps.push_back(ConstOp);
4419     else
4420       return nullptr;
4421   }
4422 
4423   // Consider:
4424   //   %cmp = icmp eq i32 %x, 2147483647
4425   //   %add = add nsw i32 %x, 1
4426   //   %sel = select i1 %cmp, i32 -2147483648, i32 %add
4427   //
4428   // We can't replace %sel with %add unless we strip away the flags (which
4429   // will be done in InstCombine).
4430   // TODO: This may be unsound, because it only catches some forms of
4431   // refinement.
4432   if (!AllowRefinement) {
4433     if (canCreatePoison(cast<Operator>(I), !DropFlags)) {
4434       // abs cannot create poison if the value is known to never be int_min.
4435       if (auto *II = dyn_cast<IntrinsicInst>(I);
4436           II && II->getIntrinsicID() == Intrinsic::abs) {
4437         if (!ConstOps[0]->isNotMinSignedValue())
4438           return nullptr;
4439       } else
4440         return nullptr;
4441     }
4442     Constant *Res = ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
4443     if (DropFlags && Res && I->hasPoisonGeneratingFlagsOrMetadata())
4444       DropFlags->push_back(I);
4445     return Res;
4446   }
4447 
4448   return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
4449 }
4450 
4451 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4452                                     const SimplifyQuery &Q,
4453                                     bool AllowRefinement,
4454                                     SmallVectorImpl<Instruction *> *DropFlags) {
4455   return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4456                                   RecursionLimit);
4457 }
4458 
4459 /// Try to simplify a select instruction when its condition operand is an
4460 /// integer comparison where one operand of the compare is a constant.
4461 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4462                                     const APInt *Y, bool TrueWhenUnset) {
4463   const APInt *C;
4464 
4465   // (X & Y) == 0 ? X & ~Y : X  --> X
4466   // (X & Y) != 0 ? X & ~Y : X  --> X & ~Y
4467   if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4468       *Y == ~*C)
4469     return TrueWhenUnset ? FalseVal : TrueVal;
4470 
4471   // (X & Y) == 0 ? X : X & ~Y  --> X & ~Y
4472   // (X & Y) != 0 ? X : X & ~Y  --> X
4473   if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4474       *Y == ~*C)
4475     return TrueWhenUnset ? FalseVal : TrueVal;
4476 
4477   if (Y->isPowerOf2()) {
4478     // (X & Y) == 0 ? X | Y : X  --> X | Y
4479     // (X & Y) != 0 ? X | Y : X  --> X
4480     if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4481         *Y == *C) {
4482       // We can't return the or if it has the disjoint flag.
4483       if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4484         return nullptr;
4485       return TrueWhenUnset ? TrueVal : FalseVal;
4486     }
4487 
4488     // (X & Y) == 0 ? X : X | Y  --> X
4489     // (X & Y) != 0 ? X : X | Y  --> X | Y
4490     if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4491         *Y == *C) {
4492       // We can't return the or if it has the disjoint flag.
4493       if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4494         return nullptr;
4495       return TrueWhenUnset ? TrueVal : FalseVal;
4496     }
4497   }
4498 
4499   return nullptr;
4500 }
4501 
4502 static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4503                                      ICmpInst::Predicate Pred, Value *TVal,
4504                                      Value *FVal) {
4505   // Canonicalize common cmp+sel operand as CmpLHS.
4506   if (CmpRHS == TVal || CmpRHS == FVal) {
4507     std::swap(CmpLHS, CmpRHS);
4508     Pred = ICmpInst::getSwappedPredicate(Pred);
4509   }
4510 
4511   // Canonicalize common cmp+sel operand as TVal.
4512   if (CmpLHS == FVal) {
4513     std::swap(TVal, FVal);
4514     Pred = ICmpInst::getInversePredicate(Pred);
4515   }
4516 
4517   // A vector select may be shuffling together elements that are equivalent
4518   // based on the max/min/select relationship.
4519   Value *X = CmpLHS, *Y = CmpRHS;
4520   bool PeekedThroughSelectShuffle = false;
4521   auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4522   if (Shuf && Shuf->isSelect()) {
4523     if (Shuf->getOperand(0) == Y)
4524       FVal = Shuf->getOperand(1);
4525     else if (Shuf->getOperand(1) == Y)
4526       FVal = Shuf->getOperand(0);
4527     else
4528       return nullptr;
4529     PeekedThroughSelectShuffle = true;
4530   }
4531 
4532   // (X pred Y) ? X : max/min(X, Y)
4533   auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4534   if (!MMI || TVal != X ||
4535       !match(FVal, m_c_MaxOrMin(m_Specific(X), m_Specific(Y))))
4536     return nullptr;
4537 
4538   // (X >  Y) ? X : max(X, Y) --> max(X, Y)
4539   // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4540   // (X <  Y) ? X : min(X, Y) --> min(X, Y)
4541   // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4542   //
4543   // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4544   // (X > Y) ? X : (Z ? max(X, Y) : Y)
4545   // If Z is true, this reduces as above, and if Z is false:
4546   // (X > Y) ? X : Y --> max(X, Y)
4547   ICmpInst::Predicate MMPred = MMI->getPredicate();
4548   if (MMPred == CmpInst::getStrictPredicate(Pred))
4549     return MMI;
4550 
4551   // Other transforms are not valid with a shuffle.
4552   if (PeekedThroughSelectShuffle)
4553     return nullptr;
4554 
4555   // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4556   if (Pred == CmpInst::ICMP_EQ)
4557     return MMI;
4558 
4559   // (X != Y) ? X : max/min(X, Y) --> X
4560   if (Pred == CmpInst::ICMP_NE)
4561     return X;
4562 
4563   // (X <  Y) ? X : max(X, Y) --> X
4564   // (X <= Y) ? X : max(X, Y) --> X
4565   // (X >  Y) ? X : min(X, Y) --> X
4566   // (X >= Y) ? X : min(X, Y) --> X
4567   ICmpInst::Predicate InvPred = CmpInst::getInversePredicate(Pred);
4568   if (MMPred == CmpInst::getStrictPredicate(InvPred))
4569     return X;
4570 
4571   return nullptr;
4572 }
4573 
4574 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
4575 /// eq/ne.
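///
/// For example, for i8 values (X s< 0) tests the same thing as
/// ((X & 0x80) != 0), so a select such as
///   (X s< 0) ? (X | 0x80) : X
/// simplifies to X.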
4576 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4577                                            ICmpInst::Predicate Pred,
4578                                            Value *TrueVal, Value *FalseVal) {
4579   Value *X;
4580   APInt Mask;
4581   if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
4582     return nullptr;
4583 
4584   return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
4585                                Pred == ICmpInst::ICMP_EQ);
4586 }
4587 
4588 /// Try to simplify a select instruction when its condition operand is an
4589 /// integer equality comparison.
4590 static Value *simplifySelectWithICmpEq(Value *CmpLHS, Value *CmpRHS,
4591                                        Value *TrueVal, Value *FalseVal,
4592                                        const SimplifyQuery &Q,
4593                                        unsigned MaxRecurse) {
4594   if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4595                              /* AllowRefinement */ false,
4596                              /* DropFlags */ nullptr, MaxRecurse) == TrueVal)
4597     return FalseVal;
4598   if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4599                              /* AllowRefinement */ true,
4600                              /* DropFlags */ nullptr, MaxRecurse) == FalseVal)
4601     return FalseVal;
4602 
4603   return nullptr;
4604 }
4605 
4606 /// Try to simplify a select instruction when its condition operand is an
4607 /// integer comparison.
4608 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4609                                          Value *FalseVal,
4610                                          const SimplifyQuery &Q,
4611                                          unsigned MaxRecurse) {
4612   ICmpInst::Predicate Pred;
4613   Value *CmpLHS, *CmpRHS;
4614   if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4615     return nullptr;
4616 
4617   if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4618     return V;
4619 
4620   // Canonicalize ne to eq predicate.
4621   if (Pred == ICmpInst::ICMP_NE) {
4622     Pred = ICmpInst::ICMP_EQ;
4623     std::swap(TrueVal, FalseVal);
4624   }
4625 
4626   // Check for integer min/max with a limit constant:
4627   // X > MIN_INT ? X : MIN_INT --> X
4628   // X < MAX_INT ? X : MAX_INT --> X
4629   if (TrueVal->getType()->isIntOrIntVectorTy()) {
4630     Value *X, *Y;
4631     SelectPatternFlavor SPF =
4632         matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4633                                      X, Y)
4634             .Flavor;
4635     if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4636       APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4637                                     X->getType()->getScalarSizeInBits());
4638       if (match(Y, m_SpecificInt(LimitC)))
4639         return X;
4640     }
4641   }
4642 
4643   if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4644     Value *X;
4645     const APInt *Y;
4646     if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4647       if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4648                                            /*TrueWhenUnset=*/true))
4649         return V;
4650 
4651     // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4652     Value *ShAmt;
4653     auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4654                              m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4655     // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4656     // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4657     if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4658       return X;
4659 
4660     // Test for a zero-shift-guard-op around rotates. These are used to
4661     // avoid UB from oversized shifts in raw IR rotate patterns, but the
4662     // intrinsics do not have that problem.
4663     // We do not allow this transform for the general funnel shift case because
4664     // that would not preserve the poison safety of the original code.
4665     auto isRotate =
4666         m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4667                     m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4668     // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4669     // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4670     if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4671         Pred == ICmpInst::ICMP_EQ)
4672       return FalseVal;
4673 
4674     // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4675     // X == 0 ? -abs(X) : abs(X) --> abs(X)
4676     if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4677         match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4678       return FalseVal;
4679     if (match(TrueVal,
4680               m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4681         match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4682       return FalseVal;
4683   }
4684 
4685   // Check for other compares that behave like bit test.
4686   if (Value *V =
4687           simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4688     return V;
4689 
4690   // If we have a scalar equality comparison, then we know the value in one of
4691   // the arms of the select. See if substituting this value into the arm and
4692   // simplifying the result yields the same value as the other arm.
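  //
  // e.g. (X == 0) ? (X + Y) : Y --> Y, because substituting 0 for X in the
  // true arm simplifies it to Y, which matches the false arm.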
4693   if (Pred == ICmpInst::ICMP_EQ) {
4694     if (Value *V = simplifySelectWithICmpEq(CmpLHS, CmpRHS, TrueVal, FalseVal,
4695                                             Q, MaxRecurse))
4696       return V;
4697     if (Value *V = simplifySelectWithICmpEq(CmpRHS, CmpLHS, TrueVal, FalseVal,
4698                                             Q, MaxRecurse))
4699       return V;
4700 
4701     Value *X;
4702     Value *Y;
4703     // ((X | Y) == 0) ? X : 0 --> 0 (commuted 2 ways)
4704     if (match(CmpLHS, m_Or(m_Value(X), m_Value(Y))) &&
4705         match(CmpRHS, m_Zero())) {
4706       // (X | Y) == 0 implies X == 0 and Y == 0.
4707       if (Value *V = simplifySelectWithICmpEq(X, CmpRHS, TrueVal, FalseVal, Q,
4708                                               MaxRecurse))
4709         return V;
4710       if (Value *V = simplifySelectWithICmpEq(Y, CmpRHS, TrueVal, FalseVal, Q,
4711                                               MaxRecurse))
4712         return V;
4713     }
4714 
4715     // ((X & Y) == -1) ? X : -1 --> -1 (commuted 2 ways)
4716     if (match(CmpLHS, m_And(m_Value(X), m_Value(Y))) &&
4717         match(CmpRHS, m_AllOnes())) {
4718       // (X & Y) == -1 implies X == -1 and Y == -1.
4719       if (Value *V = simplifySelectWithICmpEq(X, CmpRHS, TrueVal, FalseVal, Q,
4720                                               MaxRecurse))
4721         return V;
4722       if (Value *V = simplifySelectWithICmpEq(Y, CmpRHS, TrueVal, FalseVal, Q,
4723                                               MaxRecurse))
4724         return V;
4725     }
4726   }
4727 
4728   return nullptr;
4729 }
4730 
4731 /// Try to simplify a select instruction when its condition operand is a
4732 /// floating-point comparison.
4733 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4734                                      const SimplifyQuery &Q) {
4735   FCmpInst::Predicate Pred;
4736   if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4737       !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4738     return nullptr;
4739 
4740   // This transform is safe if we do not have (do not care about) -0.0 or if
4741   // at least one operand is known to not be -0.0. Otherwise, the select can
4742   // change the sign of a zero operand.
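  //
  // e.g. with T = +0.0 and F = -0.0 the compare (T == F) is true, so folding
  // the select to F would turn a +0.0 result into -0.0.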
4743   bool HasNoSignedZeros =
4744       Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros();
4745   const APFloat *C;
4746   if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
4747       (match(F, m_APFloat(C)) && C->isNonZero())) {
4748     // (T == F) ? T : F --> F
4749     // (F == T) ? T : F --> F
4750     if (Pred == FCmpInst::FCMP_OEQ)
4751       return F;
4752 
4753     // (T != F) ? T : F --> T
4754     // (F != T) ? T : F --> T
4755     if (Pred == FCmpInst::FCMP_UNE)
4756       return T;
4757   }
4758 
4759   return nullptr;
4760 }
4761 
4762 /// Given operands for a SelectInst, see if we can fold the result.
4763 /// If not, this returns null.
4764 static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4765                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
4766   if (auto *CondC = dyn_cast<Constant>(Cond)) {
4767     if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4768       if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4769         if (Constant *C = ConstantFoldSelectInstruction(CondC, TrueC, FalseC))
4770           return C;
4771 
4772     // select poison, X, Y -> poison
4773     if (isa<PoisonValue>(CondC))
4774       return PoisonValue::get(TrueVal->getType());
4775 
4776     // select undef, X, Y -> X or Y
4777     if (Q.isUndefValue(CondC))
4778       return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4779 
4780     // select true,  X, Y --> X
4781     // select false, X, Y --> Y
4782     // For vectors, allow undef/poison elements in the condition to match the
4783     // defined elements, so we can eliminate the select.
4784     if (match(CondC, m_One()))
4785       return TrueVal;
4786     if (match(CondC, m_Zero()))
4787       return FalseVal;
4788   }
4789 
4790   assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4791          "Select must have bool or bool vector condition");
4792   assert(TrueVal->getType() == FalseVal->getType() &&
4793          "Select must have same types for true/false ops");
4794 
4795   if (Cond->getType() == TrueVal->getType()) {
4796     // select i1 Cond, i1 true, i1 false --> i1 Cond
4797     if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4798       return Cond;
4799 
4800     // (X && Y) ? X : Y --> Y (commuted 2 ways)
4801     if (match(Cond, m_c_LogicalAnd(m_Specific(TrueVal), m_Specific(FalseVal))))
4802       return FalseVal;
4803 
4804     // (X || Y) ? X : Y --> X (commuted 2 ways)
4805     if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Specific(FalseVal))))
4806       return TrueVal;
4807 
4808     // (X || Y) ? false : X --> false (commuted 2 ways)
4809     if (match(Cond, m_c_LogicalOr(m_Specific(FalseVal), m_Value())) &&
4810         match(TrueVal, m_ZeroInt()))
4811       return ConstantInt::getFalse(Cond->getType());
4812 
4813     // Match patterns that end in logical-and.
4814     if (match(FalseVal, m_ZeroInt())) {
4815       // !(X || Y) && X --> false (commuted 2 ways)
4816       if (match(Cond, m_Not(m_c_LogicalOr(m_Specific(TrueVal), m_Value()))))
4817         return ConstantInt::getFalse(Cond->getType());
4818       // X && !(X || Y) --> false (commuted 2 ways)
4819       if (match(TrueVal, m_Not(m_c_LogicalOr(m_Specific(Cond), m_Value()))))
4820         return ConstantInt::getFalse(Cond->getType());
4821 
4822       // (X || Y) && Y --> Y (commuted 2 ways)
4823       if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Value())))
4824         return TrueVal;
4825       // Y && (X || Y) --> Y (commuted 2 ways)
4826       if (match(TrueVal, m_c_LogicalOr(m_Specific(Cond), m_Value())))
4827         return Cond;
4828 
4829       // (X || Y) && (X || !Y) --> X (commuted 8 ways)
4830       Value *X, *Y;
4831       if (match(Cond, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4832           match(TrueVal, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4833         return X;
4834       if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4835           match(Cond, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4836         return X;
4837     }
4838 
4839     // Match patterns that end in logical-or.
4840     if (match(TrueVal, m_One())) {
4841       // !(X && Y) || X --> true (commuted 2 ways)
4842       if (match(Cond, m_Not(m_c_LogicalAnd(m_Specific(FalseVal), m_Value()))))
4843         return ConstantInt::getTrue(Cond->getType());
4844       // X || !(X && Y) --> true (commuted 2 ways)
4845       if (match(FalseVal, m_Not(m_c_LogicalAnd(m_Specific(Cond), m_Value()))))
4846         return ConstantInt::getTrue(Cond->getType());
4847 
4848       // (X && Y) || Y --> Y (commuted 2 ways)
4849       if (match(Cond, m_c_LogicalAnd(m_Specific(FalseVal), m_Value())))
4850         return FalseVal;
4851       // Y || (X && Y) --> Y (commuted 2 ways)
4852       if (match(FalseVal, m_c_LogicalAnd(m_Specific(Cond), m_Value())))
4853         return Cond;
4854     }
4855   }
4856 
4857   // select ?, X, X -> X
4858   if (TrueVal == FalseVal)
4859     return TrueVal;
4860 
4861   if (Cond == TrueVal) {
4862     // select i1 X, i1 X, i1 false --> X (logical-and)
4863     if (match(FalseVal, m_ZeroInt()))
4864       return Cond;
4865     // select i1 X, i1 X, i1 true --> true
4866     if (match(FalseVal, m_One()))
4867       return ConstantInt::getTrue(Cond->getType());
4868   }
4869   if (Cond == FalseVal) {
4870     // select i1 X, i1 true, i1 X --> X (logical-or)
4871     if (match(TrueVal, m_One()))
4872       return Cond;
4873     // select i1 X, i1 false, i1 X --> false
4874     if (match(TrueVal, m_ZeroInt()))
4875       return ConstantInt::getFalse(Cond->getType());
4876   }
4877 
4878   // If the true or false value is poison, we can fold to the other value.
4879   // If the true or false value is undef, we can fold to the other value as
4880   // long as the other value isn't poison.
4881   // select ?, poison, X -> X
4882   // select ?, undef,  X -> X
4883   if (isa<PoisonValue>(TrueVal) ||
4884       (Q.isUndefValue(TrueVal) && impliesPoison(FalseVal, Cond)))
4885     return FalseVal;
4886   // select ?, X, poison -> X
4887   // select ?, X, undef  -> X
4888   if (isa<PoisonValue>(FalseVal) ||
4889       (Q.isUndefValue(FalseVal) && impliesPoison(TrueVal, Cond)))
4890     return TrueVal;
4891 
4892   // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
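  // e.g. select <2 x i1> %c, <i32 7, i32 undef>, <i32 undef, i32 3>
  //      --> <i32 7, i32 3>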
4893   Constant *TrueC, *FalseC;
4894   if (isa<FixedVectorType>(TrueVal->getType()) &&
4895       match(TrueVal, m_Constant(TrueC)) &&
4896       match(FalseVal, m_Constant(FalseC))) {
4897     unsigned NumElts =
4898         cast<FixedVectorType>(TrueC->getType())->getNumElements();
4899     SmallVector<Constant *, 16> NewC;
4900     for (unsigned i = 0; i != NumElts; ++i) {
4901       // Bail out on incomplete vector constants.
4902       Constant *TEltC = TrueC->getAggregateElement(i);
4903       Constant *FEltC = FalseC->getAggregateElement(i);
4904       if (!TEltC || !FEltC)
4905         break;
4906 
4907       // If the elements match (undef or not), that value is the result. If only
4908       // one element is undef, choose the defined element as the safe result.
4909       if (TEltC == FEltC)
4910         NewC.push_back(TEltC);
4911       else if (isa<PoisonValue>(TEltC) ||
4912                (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
4913         NewC.push_back(FEltC);
4914       else if (isa<PoisonValue>(FEltC) ||
4915                (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
4916         NewC.push_back(TEltC);
4917       else
4918         break;
4919     }
4920     if (NewC.size() == NumElts)
4921       return ConstantVector::get(NewC);
4922   }
4923 
4924   if (Value *V =
4925           simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4926     return V;
4927 
4928   if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
4929     return V;
4930 
4931   if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
4932     return V;
4933 
4934   std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4935   if (Imp)
4936     return *Imp ? TrueVal : FalseVal;
4937 
4938   return nullptr;
4939 }
4940 
4941 Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4942                                 const SimplifyQuery &Q) {
4943   return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4944 }
4945 
4946 /// Given operands for a GetElementPtrInst, see if we can fold the result.
4947 /// If not, this returns null.
4948 static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
4949                               ArrayRef<Value *> Indices, bool InBounds,
4950                               const SimplifyQuery &Q, unsigned) {
4951   // The type of the GEP pointer operand.
4952   unsigned AS =
4953       cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
4954 
4955   // getelementptr P -> P.
4956   if (Indices.empty())
4957     return Ptr;
4958 
4959   // Compute the (pointer) type returned by the GEP instruction.
4960   Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
4961   Type *GEPTy = Ptr->getType();
4962   if (!GEPTy->isVectorTy()) {
4963     for (Value *Op : Indices) {
4964       // If one of the operands is a vector, the result type is a vector of
4965       // pointers. All vector operands must have the same number of elements.
4966       if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4967         GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4968         break;
4969       }
4970     }
4971   }
4972 
4973   // All-zero GEP is a no-op, unless it performs a vector splat.
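  // e.g. getelementptr [4 x i32], ptr %p, i64 0, i64 0 --> %p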
4974   if (Ptr->getType() == GEPTy &&
4975       all_of(Indices, [](const auto *V) { return match(V, m_Zero()); }))
4976     return Ptr;
4977 
4978   // getelementptr poison, idx -> poison
4979   // getelementptr baseptr, poison -> poison
4980   if (isa<PoisonValue>(Ptr) ||
4981       any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
4982     return PoisonValue::get(GEPTy);
4983 
4984   // getelementptr undef, idx -> undef
4985   if (Q.isUndefValue(Ptr))
4986     return UndefValue::get(GEPTy);
4987 
4988   bool IsScalableVec =
4989       SrcTy->isScalableTy() || any_of(Indices, [](const Value *V) {
4990         return isa<ScalableVectorType>(V->getType());
4991       });
4992 
4993   if (Indices.size() == 1) {
4994     Type *Ty = SrcTy;
4995     if (!IsScalableVec && Ty->isSized()) {
4996       Value *P;
4997       uint64_t C;
4998       uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
4999       // getelementptr P, N -> P if P points to a type of zero size.
5000       if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5001         return Ptr;
5002 
5003       // The following transforms are only safe if the ptrtoint cast
5004       // doesn't truncate the pointers.
5005       if (Indices[0]->getType()->getScalarSizeInBits() ==
5006           Q.DL.getPointerSizeInBits(AS)) {
5007         auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5008           return P->getType() == GEPTy &&
5009                  getUnderlyingObject(P) == getUnderlyingObject(Ptr);
5010         };
5011         // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
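        // e.g. assuming %p and %v point into the same underlying object:
        //   %d = sub i64 (ptrtoint ptr %p to i64), (ptrtoint ptr %v to i64)
        //   getelementptr i8, ptr %v, i64 %d --> %p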
5012         if (TyAllocSize == 1 &&
5013             match(Indices[0],
5014                   m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
5015             CanSimplify())
5016           return P;
5017 
5018         // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
5019         // size 1 << C.
5020         if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
5021                                            m_PtrToInt(m_Specific(Ptr))),
5022                                      m_ConstantInt(C))) &&
5023             TyAllocSize == 1ULL << C && CanSimplify())
5024           return P;
5025 
5026         // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
5027         // size C.
5028         if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
5029                                            m_PtrToInt(m_Specific(Ptr))),
5030                                      m_SpecificInt(TyAllocSize))) &&
5031             CanSimplify())
5032           return P;
5033       }
5034     }
5035   }
5036 
5037   if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
5038       all_of(Indices.drop_back(1),
5039              [](Value *Idx) { return match(Idx, m_Zero()); })) {
5040     unsigned IdxWidth =
5041         Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
5042     if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
5043       APInt BasePtrOffset(IdxWidth, 0);
5044       Value *StrippedBasePtr =
5045           Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5046 
5047       // Avoid creating inttoptr of zero here: while LLVM's treatment of
5048       // inttoptr is generally conservative, this particular case is folded to
5049       // a null pointer, which will have incorrect provenance.
5050 
5051       // gep (gep V, C), (sub 0, V) -> C
5052       if (match(Indices.back(),
5053                 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
5054           !BasePtrOffset.isZero()) {
5055         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5056         return ConstantExpr::getIntToPtr(CI, GEPTy);
5057       }
5058       // gep (gep V, C), (xor V, -1) -> C-1
5059       if (match(Indices.back(),
5060                 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
5061           !BasePtrOffset.isOne()) {
5062         auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5063         return ConstantExpr::getIntToPtr(CI, GEPTy);
5064       }
5065     }
5066   }
5067 
5068   // Check to see if this is constant foldable.
5069   if (!isa<Constant>(Ptr) ||
5070       !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
5071     return nullptr;
5072 
5073   if (!ConstantExpr::isSupportedGetElementPtr(SrcTy))
5074     return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), InBounds,
5075                                      std::nullopt, Indices);
5076 
5077   auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices,
5078                                             InBounds);
5079   return ConstantFoldConstant(CE, Q.DL);
5080 }
5081 
5082 Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
5083                              bool InBounds, const SimplifyQuery &Q) {
5084   return ::simplifyGEPInst(SrcTy, Ptr, Indices, InBounds, Q, RecursionLimit);
5085 }
5086 
5087 /// Given operands for an InsertValueInst, see if we can fold the result.
5088 /// If not, this returns null.
5089 static Value *simplifyInsertValueInst(Value *Agg, Value *Val,
5090                                       ArrayRef<unsigned> Idxs,
5091                                       const SimplifyQuery &Q, unsigned) {
5092   if (Constant *CAgg = dyn_cast<Constant>(Agg))
5093     if (Constant *CVal = dyn_cast<Constant>(Val))
5094       return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
5095 
5096   // insertvalue x, poison, n -> x
5097   // insertvalue x, undef, n -> x if x cannot be poison
5098   if (isa<PoisonValue>(Val) ||
5099       (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Agg)))
5100     return Agg;
5101 
5102   // insertvalue x, (extractvalue y, n), n
5103   if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
5104     if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5105         EV->getIndices() == Idxs) {
5106       // insertvalue poison, (extractvalue y, n), n -> y
5107       // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5108       if (isa<PoisonValue>(Agg) ||
5109           (Q.isUndefValue(Agg) &&
5110            isGuaranteedNotToBePoison(EV->getAggregateOperand())))
5111         return EV->getAggregateOperand();
5112 
5113       // insertvalue y, (extractvalue y, n), n -> y
5114       if (Agg == EV->getAggregateOperand())
5115         return Agg;
5116     }
5117 
5118   return nullptr;
5119 }
5120 
5121 Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val,
5122                                      ArrayRef<unsigned> Idxs,
5123                                      const SimplifyQuery &Q) {
5124   return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5125 }
5126 
5127 Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
5128                                        const SimplifyQuery &Q) {
5129   // Try to constant fold.
5130   auto *VecC = dyn_cast<Constant>(Vec);
5131   auto *ValC = dyn_cast<Constant>(Val);
5132   auto *IdxC = dyn_cast<Constant>(Idx);
5133   if (VecC && ValC && IdxC)
5134     return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
5135 
5136   // For fixed-length vector, fold into poison if index is out of bounds.
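  // e.g. insertelement <4 x i32> %v, i32 %x, i64 7 --> poison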
5137   if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5138     if (isa<FixedVectorType>(Vec->getType()) &&
5139         CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5140       return PoisonValue::get(Vec->getType());
5141   }
5142 
5143   // If the index is undef, it might be out of bounds (see the case above).
5144   if (Q.isUndefValue(Idx))
5145     return PoisonValue::get(Vec->getType());
5146 
5147   // If the scalar is poison, or it is undef and there is no risk of
5148   // propagating poison from the vector value, simplify to the vector value.
5149   if (isa<PoisonValue>(Val) ||
5150       (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
5151     return Vec;
5152 
5153   // If we are extracting a value from a vector, then inserting it into the same
5154   // place, that's the input vector:
5155   // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5156   if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
5157     return Vec;
5158 
5159   return nullptr;
5160 }
5161 
5162 /// Given operands for an ExtractValueInst, see if we can fold the result.
5163 /// If not, this returns null.
5164 static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5165                                        const SimplifyQuery &, unsigned) {
5166   if (auto *CAgg = dyn_cast<Constant>(Agg))
5167     return ConstantFoldExtractValueInstruction(CAgg, Idxs);
5168 
5169   // extractvalue (insertvalue y, elt, n), n -> elt
5170   unsigned NumIdxs = Idxs.size();
5171   for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5172        IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5173     ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5174     unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5175     unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5176     if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5177         Idxs.slice(0, NumCommonIdxs)) {
5178       if (NumIdxs == NumInsertValueIdxs)
5179         return IVI->getInsertedValueOperand();
5180       break;
5181     }
5182   }
5183 
5184   return nullptr;
5185 }
5186 
5187 Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5188                                       const SimplifyQuery &Q) {
5189   return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
5190 }
5191 
5192 /// Given operands for an ExtractElementInst, see if we can fold the result.
5193 /// If not, this returns null.
5194 static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
5195                                          const SimplifyQuery &Q, unsigned) {
5196   auto *VecVTy = cast<VectorType>(Vec->getType());
5197   if (auto *CVec = dyn_cast<Constant>(Vec)) {
5198     if (auto *CIdx = dyn_cast<Constant>(Idx))
5199       return ConstantExpr::getExtractElement(CVec, CIdx);
5200 
5201     if (Q.isUndefValue(Vec))
5202       return UndefValue::get(VecVTy->getElementType());
5203   }
5204 
5205   // An undef extract index can be arbitrarily chosen to be an out-of-range
5206   // index value, which would result in the instruction being poison.
5207   if (Q.isUndefValue(Idx))
5208     return PoisonValue::get(VecVTy->getElementType());
5209 
5210   // If extracting a specified index from the vector, see if we can recursively
5211   // find a previously computed scalar that was inserted into the vector.
5212   if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5213     // For fixed-length vector, fold into undef if index is out of bounds.
5214     unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5215     if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5216       return PoisonValue::get(VecVTy->getElementType());
5217     // Handle case where an element is extracted from a splat.
5218     if (IdxC->getValue().ult(MinNumElts))
5219       if (auto *Splat = getSplatValue(Vec))
5220         return Splat;
5221     if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
5222       return Elt;
5223   } else {
5224     // extractelt (insertelt y, elt, n), n -> elt
5225     // If the possibly-variable indices are trivially known to be equal
5226     // (because they are the same operand) then use the value that was
5227     // inserted directly.
5228     auto *IE = dyn_cast<InsertElementInst>(Vec);
5229     if (IE && IE->getOperand(2) == Idx)
5230       return IE->getOperand(1);
5231 
5232     // The index is not relevant if our vector is a splat.
5233     if (Value *Splat = getSplatValue(Vec))
5234       return Splat;
5235   }
5236   return nullptr;
5237 }
5238 
5239 Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
5240                                         const SimplifyQuery &Q) {
5241   return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5242 }
5243 
5244 /// See if we can fold the given phi. If not, returns null.
5245 static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
5246                               const SimplifyQuery &Q) {
5247   // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
5248   //          here, because the PHI we may succeed simplifying to was not
5249   //          def-reachable from the original PHI!
5250 
5251   // If all of the PHI's incoming values are the same then replace the PHI node
5252   // with the common value.
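  // e.g. %p = phi i32 [ %v, %bb1 ], [ %v, %bb2 ], [ undef, %bb3 ] --> %v,
  // provided %v dominates the PHI's block (checked below for the undef case).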
5253   Value *CommonValue = nullptr;
5254   bool HasUndefInput = false;
5255   for (Value *Incoming : IncomingValues) {
5256     // If the incoming value is the phi node itself, it can safely be skipped.
5257     if (Incoming == PN)
5258       continue;
5259     if (Q.isUndefValue(Incoming)) {
5260       // Remember that we saw an undef value, but otherwise ignore them.
5261       HasUndefInput = true;
5262       continue;
5263     }
5264     if (CommonValue && Incoming != CommonValue)
5265       return nullptr; // Not the same, bail out.
5266     CommonValue = Incoming;
5267   }
5268 
5269   // If CommonValue is null then all of the incoming values were either undef or
5270   // equal to the phi node itself.
5271   if (!CommonValue)
5272     return UndefValue::get(PN->getType());
5273 
5274   if (HasUndefInput) {
5275     // If we have a PHI node like phi(X, undef, X), where X is defined by some
5276     // instruction, we cannot return X as the result of the PHI node unless it
5277     // dominates the PHI block.
5278     return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
5279   }
5280 
5281   return CommonValue;
5282 }
5283 
5284 static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5285                                const SimplifyQuery &Q, unsigned MaxRecurse) {
5286   if (auto *C = dyn_cast<Constant>(Op))
5287     return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
5288 
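  // Look through a pair of casts: if we are casting back to the type of the
  // inner cast's source operand and the two casts together are equivalent to
  // a bitcast of that same type (i.e. a no-op), the result is simply the
  // original source value.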
5289   if (auto *CI = dyn_cast<CastInst>(Op)) {
5290     auto *Src = CI->getOperand(0);
5291     Type *SrcTy = Src->getType();
5292     Type *MidTy = CI->getType();
5293     Type *DstTy = Ty;
5294     if (Src->getType() == Ty) {
5295       auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
5296       auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5297       Type *SrcIntPtrTy =
5298           SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
5299       Type *MidIntPtrTy =
5300           MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
5301       Type *DstIntPtrTy =
5302           DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
5303       if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5304                                          SrcIntPtrTy, MidIntPtrTy,
5305                                          DstIntPtrTy) == Instruction::BitCast)
5306         return Src;
5307     }
5308   }
5309 
5310   // bitcast x -> x
5311   if (CastOpc == Instruction::BitCast)
5312     if (Op->getType() == Ty)
5313       return Op;
5314 
5315   return nullptr;
5316 }
5317 
5318 Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5319                               const SimplifyQuery &Q) {
5320   return ::simplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
5321 }
5322 
5323 /// For the given destination element of a shuffle, peek through shuffles to
5324 /// match a root vector source operand that contains that element in the same
5325 /// vector lane (i.e., the same mask index), so we can eliminate the shuffle(s).
5326 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5327                                    int MaskVal, Value *RootVec,
5328                                    unsigned MaxRecurse) {
5329   if (!MaxRecurse--)
5330     return nullptr;
5331 
5332   // Bail out if any mask value is undefined. That kind of shuffle may be
5333   // simplified further based on demanded bits or other folds.
5334   if (MaskVal == -1)
5335     return nullptr;
5336 
5337   // The mask value chooses which source operand we need to look at next.
5338   int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5339   int RootElt = MaskVal;
5340   Value *SourceOp = Op0;
5341   if (MaskVal >= InVecNumElts) {
5342     RootElt = MaskVal - InVecNumElts;
5343     SourceOp = Op1;
5344   }
5345 
5346   // If the source operand is a shuffle itself, look through it to find the
5347   // matching root vector.
5348   if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5349     return foldIdentityShuffles(
5350         DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5351         SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5352   }
5353 
5354   // TODO: Look through bitcasts? What if the bitcast changes the vector element
5355   // size?
5356 
5357   // The source operand is not a shuffle. Initialize the root vector value for
5358   // this shuffle if that has not been done yet.
5359   if (!RootVec)
5360     RootVec = SourceOp;
5361 
5362   // Give up as soon as a source operand does not match the existing root value.
5363   if (RootVec != SourceOp)
5364     return nullptr;
5365 
5366   // The element must be coming from the same lane in the source vector
5367   // (although it may have crossed lanes in intermediate shuffles).
5368   if (RootElt != DestElt)
5369     return nullptr;
5370 
5371   return RootVec;
5372 }
5373 
5374 static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5375                                         ArrayRef<int> Mask, Type *RetTy,
5376                                         const SimplifyQuery &Q,
5377                                         unsigned MaxRecurse) {
5378   if (all_of(Mask, [](int Elem) { return Elem == PoisonMaskElem; }))
5379     return PoisonValue::get(RetTy);
5380 
5381   auto *InVecTy = cast<VectorType>(Op0->getType());
5382   unsigned MaskNumElts = Mask.size();
5383   ElementCount InVecEltCount = InVecTy->getElementCount();
5384 
5385   bool Scalable = InVecEltCount.isScalable();
5386 
5387   SmallVector<int, 32> Indices;
5388   Indices.assign(Mask.begin(), Mask.end());
5389 
5390   // Canonicalization: If mask does not select elements from an input vector,
5391   // replace that input vector with poison.
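  // For example, a mask of <0, 1, 0, 1> on two <4 x i32> inputs never reads
  // from Op1, so Op1 can be replaced by poison.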
5392   if (!Scalable) {
5393     bool MaskSelects0 = false, MaskSelects1 = false;
5394     unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5395     for (unsigned i = 0; i != MaskNumElts; ++i) {
5396       if (Indices[i] == -1)
5397         continue;
5398       if ((unsigned)Indices[i] < InVecNumElts)
5399         MaskSelects0 = true;
5400       else
5401         MaskSelects1 = true;
5402     }
5403     if (!MaskSelects0)
5404       Op0 = PoisonValue::get(InVecTy);
5405     if (!MaskSelects1)
5406       Op1 = PoisonValue::get(InVecTy);
5407   }
5408 
5409   auto *Op0Const = dyn_cast<Constant>(Op0);
5410   auto *Op1Const = dyn_cast<Constant>(Op1);
5411 
5412   // If all operands are constant, constant fold the shuffle. This
5413   // transformation depends on the value of the mask, which is not known at
5414   // compile time for scalable vectors.
5415   if (Op0Const && Op1Const)
5416     return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5417 
5418   // Canonicalization: if only one input vector is constant, it shall be the
5419   // second one. This transformation depends on the value of the mask, which
5420   // is not known at compile time for scalable vectors.
5421   if (!Scalable && Op0Const && !Op1Const) {
5422     std::swap(Op0, Op1);
5423     ShuffleVectorInst::commuteShuffleMask(Indices,
5424                                           InVecEltCount.getKnownMinValue());
5425   }
5426 
5427   // A splat of an inserted scalar constant becomes a vector constant:
5428   // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5429   // NOTE: We may have commuted above, so analyze the updated Indices, not the
5430   //       original mask constant.
5431   // NOTE: This transformation depends on the value of the mask which is not
5432   // known at compile time for scalable vectors
5433   Constant *C;
5434   ConstantInt *IndexC;
5435   if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
5436                                           m_ConstantInt(IndexC)))) {
5437     // Match a splat shuffle mask of the insert index allowing undef elements.
5438     int InsertIndex = IndexC->getZExtValue();
5439     if (all_of(Indices, [InsertIndex](int MaskElt) {
5440           return MaskElt == InsertIndex || MaskElt == -1;
5441         })) {
5442       assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5443 
5444       // Shuffle mask poisons become poison constant result elements.
5445       SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5446       for (unsigned i = 0; i != MaskNumElts; ++i)
5447         if (Indices[i] == -1)
5448           VecC[i] = PoisonValue::get(C->getType());
5449       return ConstantVector::get(VecC);
5450     }
5451   }
5452 
5453   // A shuffle of a splat is always the splat itself. Legal if the shuffle's
5454   // value type is the same as the input vectors' type.
5455   if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5456     if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
5457         all_equal(OpShuf->getShuffleMask()))
5458       return Op0;
5459 
5460   // All remaining transformations depend on the value of the mask, which is
5461   // not known at compile time for scalable vectors.
5462   if (Scalable)
5463     return nullptr;
5464 
5465   // Don't fold a shuffle with undef mask elements. This may get folded in a
5466   // better way using demanded bits or other analysis.
5467   // TODO: Should we allow this?
5468   if (is_contained(Indices, -1))
5469     return nullptr;
5470 
5471   // Check if every element of this shuffle can be mapped back to the
5472   // corresponding element of a single root vector. If so, we don't need this
5473   // shuffle. This handles simple identity shuffles as well as chains of
5474   // shuffles that may widen/narrow and/or move elements across lanes and back.
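  // For example, reversing a vector twice:
  //   %t = shufflevector <2 x i32> %x, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
  //   %r = shufflevector <2 x i32> %t, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
  // maps each lane of %r back to the same lane of %x, so %r simplifies to %x.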
5475   Value *RootVec = nullptr;
5476   for (unsigned i = 0; i != MaskNumElts; ++i) {
5477     // Note that recursion is limited for each vector element, so if any element
5478     // exceeds the limit, this will fail to simplify.
5479     RootVec =
5480         foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5481 
5482     // We can't replace a widening/narrowing shuffle with one of its operands.
5483     if (!RootVec || RootVec->getType() != RetTy)
5484       return nullptr;
5485   }
5486   return RootVec;
5487 }
5488 
5489 /// Given operands for a ShuffleVectorInst, fold the result or return null.
5490 Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5491                                        ArrayRef<int> Mask, Type *RetTy,
5492                                        const SimplifyQuery &Q) {
5493   return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
5494 }
5495 
5496 static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
5497                               const SimplifyQuery &Q) {
5498   if (auto *C = dyn_cast<Constant>(Op))
5499     return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
5500   return nullptr;
5501 }
5502 
5503 /// Given the operand for an FNeg, see if we can fold the result.  If not, this
5504 /// returns null.
5505 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
5506                                const SimplifyQuery &Q, unsigned MaxRecurse) {
5507   if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
5508     return C;
5509 
5510   Value *X;
5511   // fneg (fneg X) ==> X
5512   if (match(Op, m_FNeg(m_Value(X))))
5513     return X;
5514 
5515   return nullptr;
5516 }
5517 
5518 Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF,
5519                               const SimplifyQuery &Q) {
5520   return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
5521 }
5522 
5523 /// Try to propagate existing NaN values when possible. If not, replace the
5524 /// constant or elements in the constant with a canonical NaN.
5525 static Constant *propagateNaN(Constant *In) {
5526   Type *Ty = In->getType();
5527   if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5528     unsigned NumElts = VecTy->getNumElements();
5529     SmallVector<Constant *, 32> NewC(NumElts);
5530     for (unsigned i = 0; i != NumElts; ++i) {
5531       Constant *EltC = In->getAggregateElement(i);
5532       // Poison elements propagate. NaNs propagate, with signaling NaNs quieted.
5533       // Replace unknown or undef elements with canonical NaN.
5534       if (EltC && isa<PoisonValue>(EltC))
5535         NewC[i] = EltC;
5536       else if (EltC && EltC->isNaN())
5537         NewC[i] = ConstantFP::get(
5538             EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5539       else
5540         NewC[i] = ConstantFP::getNaN(VecTy->getElementType());
5541     }
5542     return ConstantVector::get(NewC);
5543   }
5544 
5545   // If it is not a fixed vector, but not a simple NaN either, return a
5546   // canonical NaN.
5547   if (!In->isNaN())
5548     return ConstantFP::getNaN(Ty);
5549 
5550   // If we know this is a NaN and it's a scalable vector, we must have a splat
5551   // on our hands. Grab that before splatting a QNaN constant.
5552   if (isa<ScalableVectorType>(Ty)) {
5553     auto *Splat = In->getSplatValue();
5554     assert(Splat && Splat->isNaN() &&
5555            "Found a scalable-vector NaN but not a splat");
5556     In = Splat;
5557   }
5558 
5559   // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
5560   // preserve the sign/payload.
5561   return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5562 }
5563 
5564 /// Perform folds that are common to any floating-point operation. This implies
5565 /// transforms based on poison/undef/NaN because the operation itself makes no
5566 /// difference to the result.
5567 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
5568                               const SimplifyQuery &Q,
5569                               fp::ExceptionBehavior ExBehavior,
5570                               RoundingMode Rounding) {
5571   // Poison is independent of anything else. It always propagates from an
5572   // operand to a math result.
5573   if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
5574     return PoisonValue::get(Ops[0]->getType());
5575 
5576   for (Value *V : Ops) {
5577     bool IsNan = match(V, m_NaN());
5578     bool IsInf = match(V, m_Inf());
5579     bool IsUndef = Q.isUndefValue(V);
5580 
5581     // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
5582     // (an undef operand can be chosen to be NaN/Inf), then the result of
5583     // this operation is poison.
5584     if (FMF.noNaNs() && (IsNan || IsUndef))
5585       return PoisonValue::get(V->getType());
5586     if (FMF.noInfs() && (IsInf || IsUndef))
5587       return PoisonValue::get(V->getType());
5588 
5589     if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5590       // Undef does not propagate because undef means that all bits can take on
5591       // any value. If this is undef * NaN for example, then the result values
5592       // (at least the exponent bits) are limited. Assume the undef is a
5593       // canonical NaN and propagate that.
5594       if (IsUndef)
5595         return ConstantFP::getNaN(V->getType());
5596       if (IsNan)
5597         return propagateNaN(cast<Constant>(V));
5598     } else if (ExBehavior != fp::ebStrict) {
5599       if (IsNan)
5600         return propagateNaN(cast<Constant>(V));
5601     }
5602   }
5603   return nullptr;
5604 }
5605 
5606 /// Given operands for an FAdd, see if we can fold the result.  If not, this
5607 /// returns null.
5608 static Value *
5609 simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5610                  const SimplifyQuery &Q, unsigned MaxRecurse,
5611                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5612                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5613   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5614     if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5615       return C;
5616 
5617   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5618     return C;
5619 
5620   // fadd X, -0 ==> X
5621   // With strict/constrained FP, we have these possible edge cases that do
5622   // not simplify to Op0:
5623   // fadd SNaN, -0.0 --> QNaN
5624   // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5625   if (canIgnoreSNaN(ExBehavior, FMF) &&
5626       (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5627        FMF.noSignedZeros()))
5628     if (match(Op1, m_NegZeroFP()))
5629       return Op0;
5630 
5631   // fadd X, 0 ==> X, when we know X is not -0
5632   if (canIgnoreSNaN(ExBehavior, FMF))
5633     if (match(Op1, m_PosZeroFP()) &&
5634         (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
5635       return Op0;
5636 
5637   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5638     return nullptr;
5639 
5640   if (FMF.noNaNs()) {
5641     // With nnan: X + {+/-}Inf --> {+/-}Inf
5642     if (match(Op1, m_Inf()))
5643       return Op1;
5644 
5645     // With nnan: -X + X --> 0.0 (and commuted variant)
5646     // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5647     // Negative zeros are allowed because we always end up with positive zero:
5648     // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5649     // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5650     // X =  0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5651     // X =  0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5652     if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5653         match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5654       return ConstantFP::getZero(Op0->getType());
5655 
5656     if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5657         match(Op1, m_FNeg(m_Specific(Op0))))
5658       return ConstantFP::getZero(Op0->getType());
5659   }
5660 
5661   // (X - Y) + Y --> X
5662   // Y + (X - Y) --> X
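  // reassoc is needed because the intermediate rounding of X - Y can lose X
  // (e.g. X = 1.0, Y = 1e20 gives (X - Y) + Y == 0.0), and nsz because
  // X = -0.0, Y = 0.0 gives (X - Y) + Y == +0.0 rather than -0.0.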
5663   Value *X;
5664   if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5665       (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5666        match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5667     return X;
5668 
5669   return nullptr;
5670 }
5671 
5672 /// Given operands for an FSub, see if we can fold the result.  If not, this
5673 /// returns null.
5674 static Value *
5675 simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5676                  const SimplifyQuery &Q, unsigned MaxRecurse,
5677                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5678                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5679   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5680     if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5681       return C;
5682 
5683   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5684     return C;
5685 
5686   // fsub X, +0 ==> X
5687   if (canIgnoreSNaN(ExBehavior, FMF) &&
5688       (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5689        FMF.noSignedZeros()))
5690     if (match(Op1, m_PosZeroFP()))
5691       return Op0;
5692 
5693   // fsub X, -0 ==> X, when we know X is not -0
5694   if (canIgnoreSNaN(ExBehavior, FMF))
5695     if (match(Op1, m_NegZeroFP()) &&
5696         (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
5697       return Op0;
5698 
5699   // fsub -0.0, (fsub -0.0, X) ==> X
5700   // fsub -0.0, (fneg X) ==> X
5701   Value *X;
5702   if (canIgnoreSNaN(ExBehavior, FMF))
5703     if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
5704       return X;
5705 
5706   // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5707   // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5708   if (canIgnoreSNaN(ExBehavior, FMF))
5709     if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
5710         (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
5711          match(Op1, m_FNeg(m_Value(X)))))
5712       return X;
5713 
5714   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5715     return nullptr;
5716 
5717   if (FMF.noNaNs()) {
5718     // fsub nnan x, x ==> 0.0
5719     if (Op0 == Op1)
5720       return Constant::getNullValue(Op0->getType());
5721 
5722     // With nnan: {+/-}Inf - X --> {+/-}Inf
5723     if (match(Op0, m_Inf()))
5724       return Op0;
5725 
5726     // With nnan: X - {+/-}Inf --> {-/+}Inf
5727     if (match(Op1, m_Inf()))
5728       return foldConstant(Instruction::FNeg, Op1, Q);
5729   }
5730 
5731   // Y - (Y - X) --> X
5732   // (X + Y) - Y --> X
5733   if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5734       (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
5735        match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
5736     return X;
5737 
5738   return nullptr;
5739 }
5740 
5741 static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5742                               const SimplifyQuery &Q, unsigned MaxRecurse,
5743                               fp::ExceptionBehavior ExBehavior,
5744                               RoundingMode Rounding) {
5745   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5746     return C;
5747 
5748   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5749     return nullptr;
5750 
5751   // Canonicalize special constants as operand 1.
5752   if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
5753     std::swap(Op0, Op1);
5754 
5755   // X * 1.0 --> X
5756   if (match(Op1, m_FPOne()))
5757     return Op0;
5758 
5759   if (match(Op1, m_AnyZeroFP())) {
5760     // X * 0.0 --> 0.0 (with nnan and nsz)
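    // nnan rules out X being NaN or infinity (either would make the product
    // NaN), and nsz lets us ignore the sign of the zero result (e.g.
    // -1.0 * 0.0 is -0.0).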
5761     if (FMF.noNaNs() && FMF.noSignedZeros())
5762       return ConstantFP::getZero(Op0->getType());
5763 
5764     // +normal number * (-)0.0 --> (-)0.0
5765     if (isKnownNeverInfOrNaN(Op0, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
5766         // TODO: Check SignBit from computeKnownFPClass when it's more complete.
5767         SignBitMustBeZero(Op0, Q.DL, Q.TLI))
5768       return Op1;
5769   }
5770 
5771   // sqrt(X) * sqrt(X) --> X, if we can:
5772   // 1. Remove the intermediate rounding (reassociate).
5773   // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
5774   // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
5775   Value *X;
5776   if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
5777       FMF.noNaNs() && FMF.noSignedZeros())
5778     return X;
5779 
5780   return nullptr;
5781 }
5782 
5783 /// Given the operands for an FMul, see if we can fold the result
5784 static Value *
5785 simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5786                  const SimplifyQuery &Q, unsigned MaxRecurse,
5787                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5788                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5789   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5790     if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
5791       return C;
5792 
5793   // Now apply simplifications that do not require rounding.
5794   return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5795 }
5796 
5797 Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5798                               const SimplifyQuery &Q,
5799                               fp::ExceptionBehavior ExBehavior,
5800                               RoundingMode Rounding) {
5801   return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5802                             Rounding);
5803 }
5804 
5805 Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5806                               const SimplifyQuery &Q,
5807                               fp::ExceptionBehavior ExBehavior,
5808                               RoundingMode Rounding) {
5809   return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5810                             Rounding);
5811 }
5812 
5813 Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5814                               const SimplifyQuery &Q,
5815                               fp::ExceptionBehavior ExBehavior,
5816                               RoundingMode Rounding) {
5817   return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5818                             Rounding);
5819 }
5820 
5821 Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5822                              const SimplifyQuery &Q,
5823                              fp::ExceptionBehavior ExBehavior,
5824                              RoundingMode Rounding) {
5825   return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5826                            Rounding);
5827 }
5828 
5829 static Value *
5830 simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5831                  const SimplifyQuery &Q, unsigned,
5832                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5833                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5834   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5835     if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5836       return C;
5837 
5838   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5839     return C;
5840 
5841   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5842     return nullptr;
5843 
5844   // X / 1.0 -> X
5845   if (match(Op1, m_FPOne()))
5846     return Op0;
5847 
5848   // 0 / X -> 0
5849   // Requires that NaNs are off (X could be zero) and signed zeroes are
5850   // ignored (X could be positive or negative, so the output sign is unknown).
5851   if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5852     return ConstantFP::getZero(Op0->getType());
5853 
5854   if (FMF.noNaNs()) {
5855     // X / X -> 1.0 is legal when NaNs are ignored.
5856     // We can ignore infinities because INF/INF is NaN.
5857     if (Op0 == Op1)
5858       return ConstantFP::get(Op0->getType(), 1.0);
5859 
5860     // (X * Y) / Y --> X if we can reassociate to the above form.
5861     Value *X;
5862     if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
5863       return X;
5864 
5865     // -X /  X -> -1.0 and
5866     //  X / -X -> -1.0 are legal when NaNs are ignored.
5867     // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5868     if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
5869         match(Op1, m_FNegNSZ(m_Specific(Op0))))
5870       return ConstantFP::get(Op0->getType(), -1.0);
5871 
5872     // nnan ninf X / [-]0.0 -> poison
5873     if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
5874       return PoisonValue::get(Op1->getType());
5875   }
5876 
5877   return nullptr;
5878 }
5879 
5880 Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5881                               const SimplifyQuery &Q,
5882                               fp::ExceptionBehavior ExBehavior,
5883                               RoundingMode Rounding) {
5884   return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5885                             Rounding);
5886 }
5887 
5888 static Value *
5889 simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5890                  const SimplifyQuery &Q, unsigned,
5891                  fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5892                  RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5893   if (isDefaultFPEnvironment(ExBehavior, Rounding))
5894     if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5895       return C;
5896 
5897   if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5898     return C;
5899 
5900   if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5901     return nullptr;
5902 
5903   // Unlike fdiv, the result of frem always matches the sign of the dividend.
5904   // The constant match may include undef elements in a vector, so return a full
5905   // zero constant as the result.
5906   if (FMF.noNaNs()) {
5907     // +0 % X -> 0
5908     if (match(Op0, m_PosZeroFP()))
5909       return ConstantFP::getZero(Op0->getType());
5910     // -0 % X -> -0
5911     if (match(Op0, m_NegZeroFP()))
5912       return ConstantFP::getNegativeZero(Op0->getType());
5913   }
5914 
5915   return nullptr;
5916 }
5917 
5918 Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5919                               const SimplifyQuery &Q,
5920                               fp::ExceptionBehavior ExBehavior,
5921                               RoundingMode Rounding) {
5922   return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5923                             Rounding);
5924 }
5925 
5926 //=== Helper functions for higher up the class hierarchy.
5927 
5928 /// Given the operand for a UnaryOperator, see if we can fold the result.
5929 /// If not, this returns null.
5930 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
5931                            unsigned MaxRecurse) {
5932   switch (Opcode) {
5933   case Instruction::FNeg:
5934     return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
5935   default:
5936     llvm_unreachable("Unexpected opcode");
5937   }
5938 }
5939 
5940 /// Given the operand for a UnaryOperator, see if we can fold the result.
5941 /// If not, this returns null.
5942 /// Try to use FastMathFlags when folding the result.
5943 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
5944                              const FastMathFlags &FMF, const SimplifyQuery &Q,
5945                              unsigned MaxRecurse) {
5946   switch (Opcode) {
5947   case Instruction::FNeg:
5948     return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
5949   default:
5950     return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
5951   }
5952 }
5953 
5954 Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
5955   return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
5956 }
5957 
5958 Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
5959                           const SimplifyQuery &Q) {
5960   return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
5961 }
5962 
5963 /// Given operands for a BinaryOperator, see if we can fold the result.
5964 /// If not, this returns null.
5965 static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5966                             const SimplifyQuery &Q, unsigned MaxRecurse) {
5967   switch (Opcode) {
5968   case Instruction::Add:
5969     return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5970                            MaxRecurse);
5971   case Instruction::Sub:
5972     return simplifySubInst(LHS, RHS,  /* IsNSW */ false, /* IsNUW */ false, Q,
5973                            MaxRecurse);
5974   case Instruction::Mul:
5975     return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5976                            MaxRecurse);
5977   case Instruction::SDiv:
5978     return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5979   case Instruction::UDiv:
5980     return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5981   case Instruction::SRem:
5982     return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
5983   case Instruction::URem:
5984     return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
5985   case Instruction::Shl:
5986     return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5987                            MaxRecurse);
5988   case Instruction::LShr:
5989     return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5990   case Instruction::AShr:
5991     return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5992   case Instruction::And:
5993     return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
5994   case Instruction::Or:
5995     return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
5996   case Instruction::Xor:
5997     return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
5998   case Instruction::FAdd:
5999     return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6000   case Instruction::FSub:
6001     return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6002   case Instruction::FMul:
6003     return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6004   case Instruction::FDiv:
6005     return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6006   case Instruction::FRem:
6007     return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6008   default:
6009     llvm_unreachable("Unexpected opcode");
6010   }
6011 }
6012 
6013 /// Given operands for a BinaryOperator, see if we can fold the result.
6014 /// If not, this returns null.
6015 /// Try to use FastMathFlags when folding the result.
6016 static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6017                             const FastMathFlags &FMF, const SimplifyQuery &Q,
6018                             unsigned MaxRecurse) {
6019   switch (Opcode) {
6020   case Instruction::FAdd:
6021     return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
6022   case Instruction::FSub:
6023     return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
6024   case Instruction::FMul:
6025     return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
6026   case Instruction::FDiv:
6027     return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
6028   default:
6029     return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
6030   }
6031 }
6032 
6033 Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6034                            const SimplifyQuery &Q) {
6035   return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
6036 }
6037 
6038 Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6039                            FastMathFlags FMF, const SimplifyQuery &Q) {
6040   return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
6041 }
6042 
6043 /// Given operands for a CmpInst, see if we can fold the result.
6044 static Value *simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
6045                               const SimplifyQuery &Q, unsigned MaxRecurse) {
6046   if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
6047     return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
6048   return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6049 }
6050 
6051 Value *llvm::simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
6052                              const SimplifyQuery &Q) {
6053   return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
6054 }
6055 
6056 static bool isIdempotent(Intrinsic::ID ID) {
6057   switch (ID) {
6058   default:
6059     return false;
6060 
6061   // Unary idempotent: f(f(x)) = f(x)
6062   case Intrinsic::fabs:
6063   case Intrinsic::floor:
6064   case Intrinsic::ceil:
6065   case Intrinsic::trunc:
6066   case Intrinsic::rint:
6067   case Intrinsic::nearbyint:
6068   case Intrinsic::round:
6069   case Intrinsic::roundeven:
6070   case Intrinsic::canonicalize:
6071   case Intrinsic::arithmetic_fence:
6072     return true;
6073   }
6074 }
6075 
6076 /// Return true if the intrinsic rounds a floating-point value to an integral
6077 /// floating-point value (not an integer type).
6078 static bool removesFPFraction(Intrinsic::ID ID) {
6079   switch (ID) {
6080   default:
6081     return false;
6082 
6083   case Intrinsic::floor:
6084   case Intrinsic::ceil:
6085   case Intrinsic::trunc:
6086   case Intrinsic::rint:
6087   case Intrinsic::nearbyint:
6088   case Intrinsic::round:
6089   case Intrinsic::roundeven:
6090     return true;
6091   }
6092 }
6093 
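/// Fold the result of llvm.load.relative for constant Ptr and Offset: the
/// intrinsic returns Ptr plus the 32-bit value loaded from Ptr + Offset, so if
/// that loaded value is the (possibly truncated) difference
/// 'ptrtoint(Target) - ptrtoint(Ptr)', the result is simply Target.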
6094 static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
6095                                    const DataLayout &DL) {
6096   GlobalValue *PtrSym;
6097   APInt PtrOffset;
6098   if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
6099     return nullptr;
6100 
6101   Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
6102 
6103   auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6104   if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6105     return nullptr;
6106 
6107   APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc(
6108       DL.getIndexTypeSizeInBits(Ptr->getType()));
6109   if (OffsetInt.srem(4) != 0)
6110     return nullptr;
6111 
6112   Constant *Loaded = ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, OffsetInt, DL);
6113   if (!Loaded)
6114     return nullptr;
6115 
6116   auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6117   if (!LoadedCE)
6118     return nullptr;
6119 
6120   if (LoadedCE->getOpcode() == Instruction::Trunc) {
6121     LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6122     if (!LoadedCE)
6123       return nullptr;
6124   }
6125 
6126   if (LoadedCE->getOpcode() != Instruction::Sub)
6127     return nullptr;
6128 
6129   auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6130   if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6131     return nullptr;
6132   auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6133 
6134   Constant *LoadedRHS = LoadedCE->getOperand(1);
6135   GlobalValue *LoadedRHSSym;
6136   APInt LoadedRHSOffset;
6137   if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
6138                                   DL) ||
6139       PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6140     return nullptr;
6141 
6142   return LoadedLHSPtr;
6143 }
6144 
6145 // TODO: Need to pass in FastMathFlags
6146 static Value *simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q,
6147                             bool IsStrict) {
6148   // ldexp(poison, x) -> poison
6149   // ldexp(x, poison) -> poison
6150   if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6151     return Op0;
6152 
6153   // ldexp(undef, x) -> nan
6154   if (Q.isUndefValue(Op0))
6155     return ConstantFP::getNaN(Op0->getType());
6156 
6157   if (!IsStrict) {
6158     // TODO: Could insert a canonicalize for strict
6159 
6160     // ldexp(x, undef) -> x
6161     if (Q.isUndefValue(Op1))
6162       return Op0;
6163   }
6164 
6165   const APFloat *C = nullptr;
6166   match(Op0, PatternMatch::m_APFloat(C));
6167 
6168   // These cases should be safe, even with strictfp.
6169   // ldexp(0.0, x) -> 0.0
6170   // ldexp(-0.0, x) -> -0.0
6171   // ldexp(inf, x) -> inf
6172   // ldexp(-inf, x) -> -inf
6173   if (C && (C->isZero() || C->isInfinity()))
6174     return Op0;
6175 
6176   // These are canonicalization dropping, could do it if we knew how we could
6177   // ignore denormal flushes and target handling of nan payload bits.
6178   if (IsStrict)
6179     return nullptr;
6180 
6181   // TODO: Could quiet this with strictfp if the exception mode isn't strict.
6182   if (C && C->isNaN())
6183     return ConstantFP::get(Op0->getType(), C->makeQuiet());
6184 
6185   // ldexp(x, 0) -> x
6186 
6187   // TODO: Could fold this if we know the exception mode isn't
6188   // strict, we know the denormal mode and other target modes.
6189   if (match(Op1, PatternMatch::m_ZeroInt()))
6190     return Op0;
6191 
6192   return nullptr;
6193 }
6194 
6195 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
6196                                      const SimplifyQuery &Q,
6197                                      const CallBase *Call) {
6198   // Idempotent functions return the same result when called repeatedly.
6199   Intrinsic::ID IID = F->getIntrinsicID();
6200   if (isIdempotent(IID))
6201     if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6202       if (II->getIntrinsicID() == IID)
6203         return II;
6204 
6205   if (removesFPFraction(IID)) {
6206     // Converting from int or calling a rounding function always results in a
6207     // finite integral number or infinity. For those inputs, rounding functions
6208     // always return the same value, so the (2nd) rounding is eliminated. Ex:
6209     // floor (sitofp x) -> sitofp x
6210     // round (ceil x) -> ceil x
6211     auto *II = dyn_cast<IntrinsicInst>(Op0);
6212     if ((II && removesFPFraction(II->getIntrinsicID())) ||
6213         match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
6214       return Op0;
6215   }
6216 
6217   Value *X;
6218   switch (IID) {
6219   case Intrinsic::fabs:
6220     if (SignBitMustBeZero(Op0, Q.DL, Q.TLI))
6221       return Op0;
6222     break;
6223   case Intrinsic::bswap:
6224     // bswap(bswap(x)) -> x
6225     if (match(Op0, m_BSwap(m_Value(X))))
6226       return X;
6227     break;
6228   case Intrinsic::bitreverse:
6229     // bitreverse(bitreverse(x)) -> x
6230     if (match(Op0, m_BitReverse(m_Value(X))))
6231       return X;
6232     break;
6233   case Intrinsic::ctpop: {
6234     // ctpop(X) -> 1 iff X is a non-zero power of 2.
6235     if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
6236                                Q.DT))
6237       return ConstantInt::get(Op0->getType(), 1);
6238     // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
6239     // ctpop(and X, 1) --> and X, 1
6240     unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6241     if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
6242                           Q))
6243       return Op0;
6244     break;
6245   }
6246   case Intrinsic::exp:
6247     // exp(log(x)) -> x
6248     if (Call->hasAllowReassoc() &&
6249         match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
6250       return X;
6251     break;
6252   case Intrinsic::exp2:
6253     // exp2(log2(x)) -> x
6254     if (Call->hasAllowReassoc() &&
6255         match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
6256       return X;
6257     break;
6258   case Intrinsic::exp10:
6259     // exp10(log10(x)) -> x
6260     if (Call->hasAllowReassoc() &&
6261         match(Op0, m_Intrinsic<Intrinsic::log10>(m_Value(X))))
6262       return X;
6263     break;
6264   case Intrinsic::log:
6265     // log(exp(x)) -> x
6266     if (Call->hasAllowReassoc() &&
6267         match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
6268       return X;
6269     break;
6270   case Intrinsic::log2:
6271     // log2(exp2(x)) -> x
6272     if (Call->hasAllowReassoc() &&
6273         (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
6274          match(Op0,
6275                m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
6276       return X;
6277     break;
6278   case Intrinsic::log10:
6279     // log10(pow(10.0, x)) -> x
6280     // log10(exp10(x)) -> x
6281     if (Call->hasAllowReassoc() &&
6282         (match(Op0, m_Intrinsic<Intrinsic::exp10>(m_Value(X))) ||
6283          match(Op0,
6284                m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X)))))
6285       return X;
6286     break;
6287   case Intrinsic::experimental_vector_reverse:
6288     // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
6289     if (match(Op0, m_VecReverse(m_Value(X))))
6290       return X;
6291     // experimental.vector.reverse(splat(X)) -> splat(X)
6292     if (isSplatValue(Op0))
6293       return Op0;
6294     break;
6295   case Intrinsic::frexp: {
6296     // Frexp is idempotent with the added complication of the struct return.
6297     if (match(Op0, m_ExtractValue<0>(m_Value(X)))) {
6298       if (match(X, m_Intrinsic<Intrinsic::frexp>(m_Value())))
6299         return X;
6300     }
6301 
6302     break;
6303   }
6304   default:
6305     break;
6306   }
6307 
6308   return nullptr;
6309 }
6310 
6311 /// Given a min/max intrinsic, see if it can be removed based on having an
6312 /// operand that is another min/max intrinsic with shared operand(s). The caller
6313 /// is expected to swap the operand arguments to handle commutation.
6314 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
6315   Value *X, *Y;
6316   if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
6317     return nullptr;
6318 
6319   auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6320   if (!MM0)
6321     return nullptr;
6322   Intrinsic::ID IID0 = MM0->getIntrinsicID();
6323 
6324   if (Op1 == X || Op1 == Y ||
6325       match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
6326     // max (max X, Y), X --> max X, Y
6327     if (IID0 == IID)
6328       return MM0;
6329     // max (min X, Y), X --> X
6330     if (IID0 == getInverseMinMaxIntrinsic(IID))
6331       return Op1;
6332   }
6333   return nullptr;
6334 }
6335 
6336 /// Given a min/max intrinsic, see if it can be removed based on having an
6337 /// operand that is another min/max intrinsic with shared operand(s). The caller
6338 /// is expected to swap the operand arguments to handle commutation.
6339 static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
6340                                          Value *Op1) {
6341   assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6342           IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6343          "Unsupported intrinsic");
6344 
6345   auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6346   // If Op0 is not the same intrinsic as IID, do not process it.
6347   // This differs from the integer min/max handling: we do not handle cases
6348   // like max(min(X,Y),min(X,Y)) => min(X,Y), but GVN can.
6349   if (!M0 || M0->getIntrinsicID() != IID)
6350     return nullptr;
6351   Value *X0 = M0->getOperand(0);
6352   Value *Y0 = M0->getOperand(1);
6353   // Simple case, m(m(X,Y), X) => m(X, Y)
6354   //              m(m(X,Y), Y) => m(X, Y)
6355   // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
6356   // For minimum/maximum, Y is NaN => m(X, NaN) == NaN  and m(NaN, NaN) == NaN.
6357   // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
6358   // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X.
6359   if (X0 == Op1 || Y0 == Op1)
6360     return M0;
6361 
6362   auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6363   if (!M1)
6364     return nullptr;
6365   Value *X1 = M1->getOperand(0);
6366   Value *Y1 = M1->getOperand(1);
6367   Intrinsic::ID IID1 = M1->getIntrinsicID();
6368   // We have the case m(m(X,Y), m'(X,Y)), noting that m' is commutative.
6369   // If m' is m or the inverse of m, then m(m(X,Y), m'(X,Y)) == m(X,Y).
6370   // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
6371   // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
6372   // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
6373   // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
6374   if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6375     if (IID1 == IID || getInverseMinMaxIntrinsic(IID1) == IID)
6376       return M0;
6377 
6378   return nullptr;
6379 }
6380 
6381 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
6382                                       const SimplifyQuery &Q,
6383                                       const CallBase *Call) {
6384   Intrinsic::ID IID = F->getIntrinsicID();
6385   Type *ReturnType = F->getReturnType();
6386   unsigned BitWidth = ReturnType->getScalarSizeInBits();
6387   switch (IID) {
6388   case Intrinsic::abs:
6389     // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6390     // It is always ok to pick the earlier abs. We'll just lose nsw if it's only
6391     // on the outer abs.
6392     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
6393       return Op0;
6394     break;
6395 
6396   case Intrinsic::cttz: {
6397     Value *X;
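    // cttz(1 << X) --> X: the shift leaves exactly X trailing zero bits.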
6398     if (match(Op0, m_Shl(m_One(), m_Value(X))))
6399       return X;
6400     break;
6401   }
6402   case Intrinsic::ctlz: {
6403     Value *X;
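    // ctlz((sign-bit-set constant) >>u X) --> X: logically shifting a negative
    // constant right by X leaves exactly X leading zero bits.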
6404     if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
6405       return X;
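    // ctlz((sign-bit-set constant) >>s X) --> 0: an arithmetic shift keeps the
    // sign bit set, so there are no leading zeros.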
6406     if (match(Op0, m_AShr(m_Negative(), m_Value())))
6407       return Constant::getNullValue(ReturnType);
6408     break;
6409   }
6410   case Intrinsic::ptrmask: {
6411     if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6412       return PoisonValue::get(Op0->getType());
6413 
6414     // NOTE: We can't apply these simplifications based on the value of Op1
6415     // because we need to preserve provenance.
6416     if (Q.isUndefValue(Op0) || match(Op0, m_Zero()))
6417       return Constant::getNullValue(Op0->getType());
6418 
6419     assert(Op1->getType()->getScalarSizeInBits() ==
6420                Q.DL.getIndexTypeSizeInBits(Op0->getType()) &&
6421            "Invalid mask width");
6422     // If the index width (mask size) is less than the pointer size, then the
6423     // mask is 1-extended.
6424     if (match(Op1, m_PtrToInt(m_Specific(Op0))))
6425       return Op0;
6426 
6427     // NOTE: We may have attributes associated with the return value of the
6428     // llvm.ptrmask intrinsic that will be lost when we just return the
6429     // operand. We should try to preserve them.
6430     if (match(Op1, m_AllOnes()) || Q.isUndefValue(Op1))
6431       return Op0;
6432 
6433     Constant *C;
6434     if (match(Op1, m_ImmConstant(C))) {
6435       KnownBits PtrKnown = computeKnownBits(Op0, /*Depth=*/0, Q);
6436       // See if we are only masking off bits we know are already zero due to
6437       // alignment.
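      // For example, if the pointer is known to be 8-byte aligned, its low 3
      // bits are known zero, so masking with -8 (which only clears those bits)
      // leaves the pointer unchanged.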
6438       APInt IrrelevantPtrBits =
6439           PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
6440       C = ConstantFoldBinaryOpOperands(
6441           Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6442           Q.DL);
6443       if (C != nullptr && C->isAllOnesValue())
6444         return Op0;
6445     }
6446     break;
6447   }
6448   case Intrinsic::smax:
6449   case Intrinsic::smin:
6450   case Intrinsic::umax:
6451   case Intrinsic::umin: {
6452     // If the arguments are the same, this is a no-op.
6453     if (Op0 == Op1)
6454       return Op0;
6455 
6456     // Canonicalize immediate constant operand as Op1.
6457     if (match(Op0, m_ImmConstant()))
6458       std::swap(Op0, Op1);
6459 
6460     // Assume undef is the limit value.
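    // For example, umax(X, undef) folds to -1 (all-ones) and umin(X, undef)
    // folds to 0.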
6461     if (Q.isUndefValue(Op1))
6462       return ConstantInt::get(
6463           ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
6464 
6465     const APInt *C;
6466     if (match(Op1, m_APIntAllowUndef(C))) {
6467       // Clamp to limit value. For example:
6468       // umax(i8 %x, i8 255) --> 255
6469       if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
6470         return ConstantInt::get(ReturnType, *C);
6471 
6472       // If the constant op is the opposite of the limit value, the other must
6473       // be larger/smaller or equal. For example:
6474       // umin(i8 %x, i8 255) --> %x
6475       if (*C == MinMaxIntrinsic::getSaturationPoint(
6476                     getInverseMinMaxIntrinsic(IID), BitWidth))
6477         return Op0;
6478 
6479       // Remove nested call if constant operands allow it. Example:
6480       // max (max X, 7), 5 -> max X, 7
6481       auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6482       if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6483         // TODO: loosen undef/splat restrictions for vector constants.
6484         Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6485         const APInt *InnerC;
6486         if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
6487             ICmpInst::compare(*InnerC, *C,
6488                               ICmpInst::getNonStrictPredicate(
6489                                   MinMaxIntrinsic::getPredicate(IID))))
6490           return Op0;
6491       }
6492     }
6493 
6494     if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
6495       return V;
6496     if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6497       return V;
6498 
6499     ICmpInst::Predicate Pred =
6500         ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
6501     if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
6502       return Op0;
6503     if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6504       return Op1;
6505 
6506     break;
6507   }
6508   case Intrinsic::usub_with_overflow:
6509   case Intrinsic::ssub_with_overflow:
6510     // X - X -> { 0, false }
6511     // X - undef -> { 0, false }
6512     // undef - X -> { 0, false }
6513     if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6514       return Constant::getNullValue(ReturnType);
6515     break;
6516   case Intrinsic::uadd_with_overflow:
6517   case Intrinsic::sadd_with_overflow:
6518     // X + undef -> { -1, false }
6519     // undef + x -> { -1, false }
6520     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
6521       return ConstantStruct::get(
6522           cast<StructType>(ReturnType),
6523           {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
6524            Constant::getNullValue(ReturnType->getStructElementType(1))});
6525     }
6526     break;
6527   case Intrinsic::umul_with_overflow:
6528   case Intrinsic::smul_with_overflow:
6529     // 0 * X -> { 0, false }
6530     // X * 0 -> { 0, false }
6531     if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
6532       return Constant::getNullValue(ReturnType);
6533     // undef * X -> { 0, false }
6534     // X * undef -> { 0, false }
6535     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6536       return Constant::getNullValue(ReturnType);
6537     break;
6538   case Intrinsic::uadd_sat:
6539     // sat(MAX + X) -> MAX
6540     // sat(X + MAX) -> MAX
6541     if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
6542       return Constant::getAllOnesValue(ReturnType);
6543     [[fallthrough]];
6544   case Intrinsic::sadd_sat:
6545     // sat(X + undef) -> -1
6546     // sat(undef + X) -> -1
6547     // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
6548     // For signed: Assume undef is ~X, in which case X + ~X = -1.
6549     if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6550       return Constant::getAllOnesValue(ReturnType);
6551 
6552     // X + 0 -> X
6553     if (match(Op1, m_Zero()))
6554       return Op0;
6555     // 0 + X -> X
6556     if (match(Op0, m_Zero()))
6557       return Op1;
6558     break;
6559   case Intrinsic::usub_sat:
6560     // sat(0 - X) -> 0, sat(X - MAX) -> 0
6561     if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
6562       return Constant::getNullValue(ReturnType);
6563     [[fallthrough]];
6564   case Intrinsic::ssub_sat:
6565     // X - X -> 0, X - undef -> 0, undef - X -> 0
6566     if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6567       return Constant::getNullValue(ReturnType);
6568     // X - 0 -> X
6569     if (match(Op1, m_Zero()))
6570       return Op0;
6571     break;
6572   case Intrinsic::load_relative:
6573     if (auto *C0 = dyn_cast<Constant>(Op0))
6574       if (auto *C1 = dyn_cast<Constant>(Op1))
6575         return simplifyRelativeLoad(C0, C1, Q.DL);
6576     break;
6577   case Intrinsic::powi:
6578     if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6579       // powi(x, 0) -> 1.0
6580       if (Power->isZero())
6581         return ConstantFP::get(Op0->getType(), 1.0);
6582       // powi(x, 1) -> x
6583       if (Power->isOne())
6584         return Op0;
6585     }
6586     break;
6587   case Intrinsic::ldexp:
6588     return simplifyLdexp(Op0, Op1, Q, false);
6589   case Intrinsic::copysign:
6590     // copysign X, X --> X
6591     if (Op0 == Op1)
6592       return Op0;
6593     // copysign -X, X --> X
6594     // copysign X, -X --> -X
6595     if (match(Op0, m_FNeg(m_Specific(Op1))) ||
6596         match(Op1, m_FNeg(m_Specific(Op0))))
6597       return Op1;
6598     break;
6599   case Intrinsic::is_fpclass: {
6600     if (isa<PoisonValue>(Op0))
6601       return PoisonValue::get(ReturnType);
6602 
6603     uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6604     // If all tests are made, it doesn't matter what the value is.
6605     if ((Mask & fcAllFlags) == fcAllFlags)
6606       return ConstantInt::get(ReturnType, true);
6607     if ((Mask & fcAllFlags) == 0)
6608       return ConstantInt::get(ReturnType, false);
6609     if (Q.isUndefValue(Op0))
6610       return UndefValue::get(ReturnType);
6611     break;
6612   }
6613   case Intrinsic::maxnum:
6614   case Intrinsic::minnum:
6615   case Intrinsic::maximum:
6616   case Intrinsic::minimum: {
6617     // If the arguments are the same, this is a no-op.
6618     if (Op0 == Op1)
6619       return Op0;
6620 
6621     // Canonicalize constant operand as Op1.
6622     if (isa<Constant>(Op0))
6623       std::swap(Op0, Op1);
6624 
6625     // If an argument is undef, return the other argument.
6626     if (Q.isUndefValue(Op1))
6627       return Op0;
6628 
6629     bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6630     bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6631 
6632     // minnum(X, nan) -> X
6633     // maxnum(X, nan) -> X
6634     // minimum(X, nan) -> nan
6635     // maximum(X, nan) -> nan
6636     if (match(Op1, m_NaN()))
6637       return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
6638 
6639     // In the following folds, inf can be replaced with the largest finite
6640     // float, if the ninf flag is set.
6641     const APFloat *C;
6642     if (match(Op1, m_APFloat(C)) &&
6643         (C->isInfinity() || (Call->hasNoInfs() && C->isLargest()))) {
6644       // minnum(X, -inf) -> -inf
6645       // maxnum(X, +inf) -> +inf
6646       // minimum(X, -inf) -> -inf if nnan
6647       // maximum(X, +inf) -> +inf if nnan
6648       if (C->isNegative() == IsMin && (!PropagateNaN || Call->hasNoNaNs()))
6649         return ConstantFP::get(ReturnType, *C);
6650 
6651       // minnum(X, +inf) -> X if nnan
6652       // maxnum(X, -inf) -> X if nnan
6653       // minimum(X, +inf) -> X
6654       // maximum(X, -inf) -> X
6655       if (C->isNegative() != IsMin && (PropagateNaN || Call->hasNoNaNs()))
6656         return Op0;
6657     }
6658 
6659     // Min/max of the same operation with common operand:
6660     // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
6661     if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1))
6662       return V;
6663     if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
6664       return V;
6665 
6666     break;
6667   }
6668   case Intrinsic::vector_extract: {
6669     Type *ReturnType = F->getReturnType();
6670 
6671     // (extract_vector (insert_vector _, X, 0), 0) -> X
6672     unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6673     Value *X = nullptr;
6674     if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
6675                                                          m_Zero())) &&
6676         IdxN == 0 && X->getType() == ReturnType)
6677       return X;
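         // (The fold requires index 0 and an exact type match so that the
         // extracted subvector is exactly the X that was just inserted, with
         // no lanes of the original vector showing through.)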
6678 
6679     break;
6680   }
6681   default:
6682     break;
6683   }
6684 
6685   return nullptr;
6686 }
6687 
6688 static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
6689                                 ArrayRef<Value *> Args,
6690                                 const SimplifyQuery &Q) {
6691   // Operand bundles should not be in Args.
6692   assert(Call->arg_size() == Args.size());
6693   unsigned NumOperands = Args.size();
6694   Function *F = cast<Function>(Callee);
6695   Intrinsic::ID IID = F->getIntrinsicID();
6696 
6697   // Most of the intrinsics with no operands have some kind of side effect.
6698   // Don't simplify.
6699   if (!NumOperands) {
6700     switch (IID) {
6701     case Intrinsic::vscale: {
6702       Type *RetTy = F->getReturnType();
6703       ConstantRange CR = getVScaleRange(Call->getFunction(), 64);
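           // (getVScaleRange is derived from the function's vscale_range
           // attribute; when it pins a single value, vscale is a compile-time
           // constant.)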
6704       if (const APInt *C = CR.getSingleElement())
6705         return ConstantInt::get(RetTy, C->getZExtValue());
6706       return nullptr;
6707     }
6708     default:
6709       return nullptr;
6710     }
6711   }
6712 
6713   if (NumOperands == 1)
6714     return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
6715 
6716   if (NumOperands == 2)
6717     return simplifyBinaryIntrinsic(F, Args[0], Args[1], Q, Call);
6718 
6719   // Handle intrinsics with 3 or more arguments.
6720   switch (IID) {
6721   case Intrinsic::masked_load:
6722   case Intrinsic::masked_gather: {
6723     Value *MaskArg = Args[2];
6724     Value *PassthruArg = Args[3];
6725     // If the mask is all zeros or undef, the "passthru" argument is the result.
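         // For illustration: a masked gather whose mask is zeroinitializer
         // reads no memory, so the call is simply its passthru operand.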
6726     if (maskIsAllZeroOrUndef(MaskArg))
6727       return PassthruArg;
6728     return nullptr;
6729   }
6730   case Intrinsic::fshl:
6731   case Intrinsic::fshr: {
6732     Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6733 
6734     // If both operands are undef, the result is undef.
6735     if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
6736       return UndefValue::get(F->getReturnType());
6737 
6738     // If shift amount is undef, assume it is zero.
6739     if (Q.isUndefValue(ShAmtArg))
6740       return Args[IID == Intrinsic::fshl ? 0 : 1];
6741 
6742     const APInt *ShAmtC;
6743     if (match(ShAmtArg, m_APInt(ShAmtC))) {
6744       // If there's effectively no shift, return the 1st arg or 2nd arg.
6745       APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
6746       if (ShAmtC->urem(BitWidth).isZero())
6747         return Args[IID == Intrinsic::fshl ? 0 : 1];
6748     }
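         // For illustration (i32 operands, placeholders %x/%y): fshl(%x, %y, 32)
         // folds to %x, because the shift amount is taken modulo the bit width
         // and a multiple of 32 shifts in no bits of %y.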
6749 
6750     // Rotating zero by anything is zero.
6751     if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
6752       return ConstantInt::getNullValue(F->getReturnType());
6753 
6754     // Rotating -1 by anything is -1.
6755     if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
6756       return ConstantInt::getAllOnesValue(F->getReturnType());
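         // (With both halves constant zero, or both all-ones, the concatenated
         // 2 * BitWidth value is a uniform bit pattern, so any shift amount
         // extracts that same pattern; no check of ShAmtArg is needed.)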
6757 
6758     return nullptr;
6759   }
6760   case Intrinsic::experimental_constrained_fma: {
6761     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6762     if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
6763                                 *FPI->getRoundingMode()))
6764       return V;
6765     return nullptr;
6766   }
6767   case Intrinsic::fma:
6768   case Intrinsic::fmuladd: {
6769     if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
6770                                 RoundingMode::NearestTiesToEven))
6771       return V;
6772     return nullptr;
6773   }
6774   case Intrinsic::smul_fix:
6775   case Intrinsic::smul_fix_sat: {
6776     Value *Op0 = Args[0];
6777     Value *Op1 = Args[1];
6778     Value *Op2 = Args[2];
6779     Type *ReturnType = F->getReturnType();
6780 
6781     // Canonicalize constant operand as Op1 (ConstantFolding handles the case
6782     // when both Op0 and Op1 are constant so we do not care about that special
6783     // case here).
6784     if (isa<Constant>(Op0))
6785       std::swap(Op0, Op1);
6786 
6787     // X * 0 -> 0
6788     if (match(Op1, m_Zero()))
6789       return Constant::getNullValue(ReturnType);
6790 
6791     // X * undef -> 0
6792     if (Q.isUndefValue(Op1))
6793       return Constant::getNullValue(ReturnType);
6794 
6795     // X * (1 << Scale) -> X
6796     APInt ScaledOne =
6797         APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
6798                             cast<ConstantInt>(Op2)->getZExtValue());
6799     if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
6800       return Op0;
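         // (In a fixed-point value with scale Scale, the integer 1 << Scale
         // represents 1.0, so multiplying by it is an identity. The
         // isNonNegative() check rejects the case where that bit would be the
         // sign bit.)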
6801 
6802     return nullptr;
6803   }
6804   case Intrinsic::vector_insert: {
6805     Value *Vec = Args[0];
6806     Value *SubVec = Args[1];
6807     Value *Idx = Args[2];
6808     Type *ReturnType = F->getReturnType();
6809 
6810     // (insert_vector Y, (extract_vector X, 0), 0) -> X
6811     // where: Y is X, or Y is undef
6812     unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6813     Value *X = nullptr;
6814     if (match(SubVec,
6815               m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
6816         (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
6817         X->getType() == ReturnType)
6818       return X;
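         // (Reinserting, at index 0, the subvector that was just extracted
         // from X at index 0 reproduces X, provided the destination is X
         // itself or undef and the types match exactly.)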
6819 
6820     return nullptr;
6821   }
6822   case Intrinsic::experimental_constrained_fadd: {
6823     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6824     return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6825                             *FPI->getExceptionBehavior(),
6826                             *FPI->getRoundingMode());
6827   }
6828   case Intrinsic::experimental_constrained_fsub: {
6829     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6830     return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6831                             *FPI->getExceptionBehavior(),
6832                             *FPI->getRoundingMode());
6833   }
6834   case Intrinsic::experimental_constrained_fmul: {
6835     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6836     return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6837                             *FPI->getExceptionBehavior(),
6838                             *FPI->getRoundingMode());
6839   }
6840   case Intrinsic::experimental_constrained_fdiv: {
6841     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6842     return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6843                             *FPI->getExceptionBehavior(),
6844                             *FPI->getRoundingMode());
6845   }
6846   case Intrinsic::experimental_constrained_frem: {
6847     auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6848     return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6849                             *FPI->getExceptionBehavior(),
6850                             *FPI->getRoundingMode());
6851   }
6852   case Intrinsic::experimental_constrained_ldexp:
6853     return simplifyLdexp(Args[0], Args[1], Q, true);
6854   default:
6855     return nullptr;
6856   }
6857 }
6858 
6859 static Value *tryConstantFoldCall(CallBase *Call, Value *Callee,
6860                                   ArrayRef<Value *> Args,
6861                                   const SimplifyQuery &Q) {
6862   auto *F = dyn_cast<Function>(Callee);
6863   if (!F || !canConstantFoldCallTo(Call, F))
6864     return nullptr;
6865 
6866   SmallVector<Constant *, 4> ConstantArgs;
6867   ConstantArgs.reserve(Args.size());
6868   for (Value *Arg : Args) {
6869     Constant *C = dyn_cast<Constant>(Arg);
6870     if (!C) {
6871       if (isa<MetadataAsValue>(Arg))
6872         continue;
6873       return nullptr;
6874     }
6875     ConstantArgs.push_back(C);
6876   }
6877 
6878   return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
6879 }
6880 
6881 Value *llvm::simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
6882                           const SimplifyQuery &Q) {
6883   // Args should not contain operand bundle operands.
6884   assert(Call->arg_size() == Args.size());
6885 
6886   // musttail calls can only be simplified if they are also DCEd.
6887   // As we can't guarantee this here, don't simplify them.
6888   if (Call->isMustTailCall())
6889     return nullptr;
6890 
6891   // call undef -> poison
6892   // call null -> poison
6893   if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6894     return PoisonValue::get(Call->getType());
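       // (Calling a null or undef callee is immediate undefined behavior, so
       // poison is a legal result for the call.)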
6895 
6896   if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q))
6897     return V;
6898 
6899   auto *F = dyn_cast<Function>(Callee);
6900   if (F && F->isIntrinsic())
6901     if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q))
6902       return Ret;
6903 
6904   return nullptr;
6905 }
6906 
6907 Value *llvm::simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q) {
6908   assert(isa<ConstrainedFPIntrinsic>(Call));
6909   SmallVector<Value *, 4> Args(Call->args());
6910   if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
6911     return V;
6912   if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
6913     return Ret;
6914   return nullptr;
6915 }
6916 
6917 /// Given operands for a Freeze, see if we can fold the result.
6918 static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6919   // Use a utility function defined in ValueTracking.
6920   if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
6921     return Op0;
6922   // We have room for improvement.
6923   return nullptr;
6924 }
6925 
6926 Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6927   return ::simplifyFreezeInst(Op0, Q);
6928 }
6929 
6930 Value *llvm::simplifyLoadInst(LoadInst *LI, Value *PtrOp,
6931                               const SimplifyQuery &Q) {
6932   if (LI->isVolatile())
6933     return nullptr;
6934 
6935   if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
6936     return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
6937 
6938   // We can only fold the load if it is from a constant global with definitive
6939   // initializer. Skip expensive logic if this is not the case.
6940   auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
6941   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
6942     return nullptr;
6943 
6944   // If GlobalVariable's initializer is uniform, then return the constant
6945   // regardless of its offset.
6946   if (Constant *C =
6947           ConstantFoldLoadFromUniformValue(GV->getInitializer(), LI->getType()))
6948     return C;
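       // For illustration (hypothetical global @g): a load of any type at any
       // offset from
       //   @g = constant <4 x i32> zeroinitializer
       // folds to zero without computing the exact offset.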
6949 
6950   // Try to convert operand into a constant by stripping offsets while looking
6951   // through invariant.group intrinsics.
6952   APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
6953   PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
6954       Q.DL, Offset, /* AllowNonInbounds */ true,
6955       /* AllowInvariantGroup */ true);
6956   if (PtrOp == GV) {
6957     // Index size may have changed due to address space casts.
6958     Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
6959     return ConstantFoldLoadFromConstPtr(GV, LI->getType(), Offset, Q.DL);
6960   }
6961 
6962   return nullptr;
6963 }
6964 
6965 /// See if we can compute a simplified version of this instruction.
6966 /// If not, this returns null.
6967 
6968 static Value *simplifyInstructionWithOperands(Instruction *I,
6969                                               ArrayRef<Value *> NewOps,
6970                                               const SimplifyQuery &SQ,
6971                                               unsigned MaxRecurse) {
6972   assert(I->getFunction() && "instruction should be inserted in a function");
6973   assert((!SQ.CxtI || SQ.CxtI->getFunction() == I->getFunction()) &&
6974          "context instruction should be in the same function");
6975 
6976   const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
6977 
6978   switch (I->getOpcode()) {
6979   default:
6980     if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
6981       SmallVector<Constant *, 8> NewConstOps(NewOps.size());
6982       transform(NewOps, NewConstOps.begin(),
6983                 [](Value *V) { return cast<Constant>(V); });
6984       return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
6985     }
6986     return nullptr;
6987   case Instruction::FNeg:
6988     return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
6989   case Instruction::FAdd:
6990     return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
6991                             MaxRecurse);
6992   case Instruction::Add:
6993     return simplifyAddInst(
6994         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6995         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
6996   case Instruction::FSub:
6997     return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
6998                             MaxRecurse);
6999   case Instruction::Sub:
7000     return simplifySubInst(
7001         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7002         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7003   case Instruction::FMul:
7004     return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7005                             MaxRecurse);
7006   case Instruction::Mul:
7007     return simplifyMulInst(
7008         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7009         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7010   case Instruction::SDiv:
7011     return simplifySDivInst(NewOps[0], NewOps[1],
7012                             Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7013                             MaxRecurse);
7014   case Instruction::UDiv:
7015     return simplifyUDivInst(NewOps[0], NewOps[1],
7016                             Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7017                             MaxRecurse);
7018   case Instruction::FDiv:
7019     return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7020                             MaxRecurse);
7021   case Instruction::SRem:
7022     return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7023   case Instruction::URem:
7024     return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7025   case Instruction::FRem:
7026     return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7027                             MaxRecurse);
7028   case Instruction::Shl:
7029     return simplifyShlInst(
7030         NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7031         Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7032   case Instruction::LShr:
7033     return simplifyLShrInst(NewOps[0], NewOps[1],
7034                             Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7035                             MaxRecurse);
7036   case Instruction::AShr:
7037     return simplifyAShrInst(NewOps[0], NewOps[1],
7038                             Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7039                             MaxRecurse);
7040   case Instruction::And:
7041     return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7042   case Instruction::Or:
7043     return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7044   case Instruction::Xor:
7045     return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7046   case Instruction::ICmp:
7047     return simplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
7048                             NewOps[1], Q, MaxRecurse);
7049   case Instruction::FCmp:
7050     return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
7051                             NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
7052   case Instruction::Select:
7053     return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
7055   case Instruction::GetElementPtr: {
7056     auto *GEPI = cast<GetElementPtrInst>(I);
7057     return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
7058                            ArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q,
7059                            MaxRecurse);
7060   }
7061   case Instruction::InsertValue: {
7062     InsertValueInst *IV = cast<InsertValueInst>(I);
7063     return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
7064                                    MaxRecurse);
7065   }
7066   case Instruction::InsertElement:
7067     return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
7068   case Instruction::ExtractValue: {
7069     auto *EVI = cast<ExtractValueInst>(I);
7070     return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
7071                                     MaxRecurse);
7072   }
7073   case Instruction::ExtractElement:
7074     return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7075   case Instruction::ShuffleVector: {
7076     auto *SVI = cast<ShuffleVectorInst>(I);
7077     return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
7078                                      SVI->getShuffleMask(), SVI->getType(), Q,
7079                                      MaxRecurse);
7080   }
7081   case Instruction::PHI:
7082     return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
7083   case Instruction::Call:
7084     return simplifyCall(
7085         cast<CallInst>(I), NewOps.back(),
7086         NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7087   case Instruction::Freeze:
7088     return llvm::simplifyFreezeInst(NewOps[0], Q);
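       // Every cast opcode shares one handler: Instruction.def expands
       // HANDLE_CAST_INST into a case label per cast (Trunc, ZExt, BitCast,
       // ...), all of which reach simplifyCastInst below.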
7089 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7090 #include "llvm/IR/Instruction.def"
7091 #undef HANDLE_CAST_INST
7092     return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
7093                             MaxRecurse);
7094   case Instruction::Alloca:
7095     // No simplifications exist for Alloca, and it cannot be constant folded.
7096     return nullptr;
7097   case Instruction::Load:
7098     return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
7099   }
7100 }
7101 
7102 Value *llvm::simplifyInstructionWithOperands(Instruction *I,
7103                                              ArrayRef<Value *> NewOps,
7104                                              const SimplifyQuery &SQ) {
7105   assert(NewOps.size() == I->getNumOperands() &&
7106          "Number of operands should match the instruction!");
7107   return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7108 }
7109 
7110 Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) {
7111   SmallVector<Value *, 8> Ops(I->operands());
7112   Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, RecursionLimit);
7113 
7114   /// If called on unreachable code, the instruction may simplify to itself.
7115   /// Make life easier for users by detecting that case here, and returning a
7116   /// safe value instead.
7117   return Result == I ? UndefValue::get(I->getType()) : Result;
7118 }
7119 
7120 /// Implementation of recursive simplification through an instruction's
7121 /// uses.
7122 ///
7123 /// This is the common implementation of the recursive simplification routines.
7124 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
7125 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
7126 /// instructions to process and attempt to simplify it using
7127 /// InstructionSimplify. Recursively visited users which could not be
7128 /// simplified themselves are to the optional UnsimplifiedUsers set for
7129 /// further processing by the caller.
7130 ///
7131 /// This routine returns 'true' only when *it* simplifies something. The passed
7132 /// in simplified value does not count toward this.
7133 static bool replaceAndRecursivelySimplifyImpl(
7134     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7135     const DominatorTree *DT, AssumptionCache *AC,
7136     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
7137   bool Simplified = false;
7138   SmallSetVector<Instruction *, 8> Worklist;
7139   const DataLayout &DL = I->getModule()->getDataLayout();
7140 
7141   // If we have an explicit value to collapse to, do that round of the
7142   // simplification loop by hand initially.
7143   if (SimpleV) {
7144     for (User *U : I->users())
7145       if (U != I)
7146         Worklist.insert(cast<Instruction>(U));
7147 
7148     // Replace the instruction with its simplified value.
7149     I->replaceAllUsesWith(SimpleV);
7150 
7151     if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7152       I->eraseFromParent();
7153   } else {
7154     Worklist.insert(I);
7155   }
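       // (Using a SetVector as the worklist deduplicates repeatedly-seen users
       // while keeping a deterministic visitation order.)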
7156 
7157   // Note that we must test the size on each iteration, the worklist can grow.
7158   for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
7159     I = Worklist[Idx];
7160 
7161     // See if this instruction simplifies.
7162     SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
7163     if (!SimpleV) {
7164       if (UnsimplifiedUsers)
7165         UnsimplifiedUsers->insert(I);
7166       continue;
7167     }
7168 
7169     Simplified = true;
7170 
7171     // Stash away all the uses of the old instruction so we can check them for
7172     // recursive simplifications after a RAUW. This is cheaper than checking all
7173     // uses of To on the recursive step in most cases.
7174     for (User *U : I->users())
7175       Worklist.insert(cast<Instruction>(U));
7176 
7177     // Replace the instruction with its simplified value.
7178     I->replaceAllUsesWith(SimpleV);
7179 
7180     if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7181       I->eraseFromParent();
7182   }
7183   return Simplified;
7184 }
7185 
7186 bool llvm::replaceAndRecursivelySimplify(
7187     Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7188     const DominatorTree *DT, AssumptionCache *AC,
7189     SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
7190   assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
7191   assert(SimpleV && "Must provide a simplified value.");
7192   return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
7193                                            UnsimplifiedUsers);
7194 }
7195 
7196 namespace llvm {
7197 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
7198   auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
7199   auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
7200   auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7201   auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
7202   auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
7203   auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
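       // Only analyses that happen to be available already are used; none are
       // computed here.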
7204   return {F.getParent()->getDataLayout(), TLI, DT, AC};
7205 }
7206 
7207 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
7208                                          const DataLayout &DL) {
7209   return {DL, &AR.TLI, &AR.DT, &AR.AC};
7210 }
7211 
7212 template <class T, class... TArgs>
7213 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
7214                                          Function &F) {
7215   auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7216   auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7217   auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7218   return {F.getParent()->getDataLayout(), TLI, DT, AC};
7219 }
7220 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
7221                                                   Function &);
7222 } // namespace llvm
7223 
7224 void InstSimplifyFolder::anchor() {}
7225