xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineSelect.cpp (revision e6bfd18d21b225af6a0ed67ceeaf1293b7b9eba5)
1 //===- InstCombineSelect.cpp ----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitSelect function.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/ADT/Optional.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/AssumptionCache.h"
19 #include "llvm/Analysis/CmpInstAnalysis.h"
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/Analysis/OverflowInstAnalysis.h"
22 #include "llvm/Analysis/ValueTracking.h"
23 #include "llvm/IR/BasicBlock.h"
24 #include "llvm/IR/Constant.h"
25 #include "llvm/IR/ConstantRange.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DerivedTypes.h"
28 #include "llvm/IR/IRBuilder.h"
29 #include "llvm/IR/InstrTypes.h"
30 #include "llvm/IR/Instruction.h"
31 #include "llvm/IR/Instructions.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/IR/Intrinsics.h"
34 #include "llvm/IR/Operator.h"
35 #include "llvm/IR/PatternMatch.h"
36 #include "llvm/IR/Type.h"
37 #include "llvm/IR/User.h"
38 #include "llvm/IR/Value.h"
39 #include "llvm/Support/Casting.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/KnownBits.h"
42 #include "llvm/Transforms/InstCombine/InstCombiner.h"
43 #include <cassert>
44 #include <utility>
45 
46 #define DEBUG_TYPE "instcombine"
47 #include "llvm/Transforms/Utils/InstructionWorklist.h"
48 
49 using namespace llvm;
50 using namespace PatternMatch;
51 
52 
53 /// Replace a select operand based on an equality comparison with the identity
54 /// constant of a binop.
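/// For example (illustrative IR, using 0 as the identity constant of 'add'):
///   %cmp = icmp eq i32 %x, 0
///   %bo  = add i32 %y, %x
///   %sel = select i1 %cmp, i32 %bo, i32 %f
/// In the true arm %x is known to be 0, so %bo can be replaced by %y there.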
55 static Instruction *foldSelectBinOpIdentity(SelectInst &Sel,
56                                             const TargetLibraryInfo &TLI,
57                                             InstCombinerImpl &IC) {
58   // The select condition must be an equality compare with a constant operand.
59   Value *X;
60   Constant *C;
61   CmpInst::Predicate Pred;
62   if (!match(Sel.getCondition(), m_Cmp(Pred, m_Value(X), m_Constant(C))))
63     return nullptr;
64 
65   bool IsEq;
66   if (ICmpInst::isEquality(Pred))
67     IsEq = Pred == ICmpInst::ICMP_EQ;
68   else if (Pred == FCmpInst::FCMP_OEQ)
69     IsEq = true;
70   else if (Pred == FCmpInst::FCMP_UNE)
71     IsEq = false;
72   else
73     return nullptr;
74 
75   // A select operand must be a binop.
76   BinaryOperator *BO;
77   if (!match(Sel.getOperand(IsEq ? 1 : 2), m_BinOp(BO)))
78     return nullptr;
79 
80   // The compare constant must be the identity constant for that binop.
81   // If this is a floating-point compare with 0.0, any zero constant will do.
82   Type *Ty = BO->getType();
83   Constant *IdC = ConstantExpr::getBinOpIdentity(BO->getOpcode(), Ty, true);
84   if (IdC != C) {
85     if (!IdC || !CmpInst::isFPPredicate(Pred))
86       return nullptr;
87     if (!match(IdC, m_AnyZeroFP()) || !match(C, m_AnyZeroFP()))
88       return nullptr;
89   }
90 
91   // Last, match the compare variable operand with a binop operand.
92   Value *Y;
93   if (!BO->isCommutative() && !match(BO, m_BinOp(m_Value(Y), m_Specific(X))))
94     return nullptr;
95   if (!match(BO, m_c_BinOp(m_Value(Y), m_Specific(X))))
96     return nullptr;
97 
98   // +0.0 compares equal to -0.0, and so it does not behave as required for this
99   // transform. Bail out if we cannot exclude that possibility.
100   if (isa<FPMathOperator>(BO))
101     if (!BO->hasNoSignedZeros() && !CannotBeNegativeZero(Y, &TLI))
102       return nullptr;
103 
104   // BO = binop Y, X
105   // S = { select (cmp eq X, C), BO, ? } or { select (cmp ne X, C), ?, BO }
106   // =>
107   // S = { select (cmp eq X, C),  Y, ? } or { select (cmp ne X, C), ?,  Y }
108   return IC.replaceOperand(Sel, IsEq ? 1 : 2, Y);
109 }
110 
111 /// This folds:
112 ///  select (icmp eq (and X, C1)), TC, FC
113 ///    iff C1 is a power of 2 and the difference between TC and FC is a power of 2.
114 /// To something like:
115 ///  (shr (and (X, C1)), (log2(C1) - log2(TC-FC))) + FC
116 /// Or:
117 ///  (shl (and (X, C1)), (log2(TC-FC) - log2(C1))) + FC
118 /// With some variations depending on whether FC is larger than TC, or the
119 /// shift isn't needed, or the bit widths don't match.
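/// Example (illustrative, i32, with C1 = 8, TC = 0, FC = 2):
///   select (icmp eq (and X, 8), 0), 0, 2  -->  lshr (and X, 8), 2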
120 static Value *foldSelectICmpAnd(SelectInst &Sel, ICmpInst *Cmp,
121                                 InstCombiner::BuilderTy &Builder) {
122   const APInt *SelTC, *SelFC;
123   if (!match(Sel.getTrueValue(), m_APInt(SelTC)) ||
124       !match(Sel.getFalseValue(), m_APInt(SelFC)))
125     return nullptr;
126 
127   // If this is a vector select, we need a vector compare.
128   Type *SelType = Sel.getType();
129   if (SelType->isVectorTy() != Cmp->getType()->isVectorTy())
130     return nullptr;
131 
132   Value *V;
133   APInt AndMask;
134   bool CreateAnd = false;
135   ICmpInst::Predicate Pred = Cmp->getPredicate();
136   if (ICmpInst::isEquality(Pred)) {
137     if (!match(Cmp->getOperand(1), m_Zero()))
138       return nullptr;
139 
140     V = Cmp->getOperand(0);
141     const APInt *AndRHS;
142     if (!match(V, m_And(m_Value(), m_Power2(AndRHS))))
143       return nullptr;
144 
145     AndMask = *AndRHS;
146   } else if (decomposeBitTestICmp(Cmp->getOperand(0), Cmp->getOperand(1),
147                                   Pred, V, AndMask)) {
148     assert(ICmpInst::isEquality(Pred) && "Not equality test?");
149     if (!AndMask.isPowerOf2())
150       return nullptr;
151 
152     CreateAnd = true;
153   } else {
154     return nullptr;
155   }
156 
157   // In general, when both constants are non-zero, we would need an offset to
158   // replace the select. This would require more instructions than we started
159   // with. But there's one special case that we handle here because it can
160   // simplify/reduce the instructions.
161   APInt TC = *SelTC;
162   APInt FC = *SelFC;
163   if (!TC.isZero() && !FC.isZero()) {
164     // If the select constants differ by exactly one bit and that's the same
165     // bit that is masked and checked by the select condition, the select can
166     // be replaced by bitwise logic to set/clear one bit of the constant result.
167     if (TC.getBitWidth() != AndMask.getBitWidth() || (TC ^ FC) != AndMask)
168       return nullptr;
169     if (CreateAnd) {
170       // If we have to create an 'and', then we must kill the cmp to not
171       // increase the instruction count.
172       if (!Cmp->hasOneUse())
173         return nullptr;
174       V = Builder.CreateAnd(V, ConstantInt::get(SelType, AndMask));
175     }
176     bool ExtraBitInTC = TC.ugt(FC);
177     if (Pred == ICmpInst::ICMP_EQ) {
178       // If the masked bit in V is clear, clear or set the bit in the result:
179       // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) ^ TC
180       // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) | TC
181       Constant *C = ConstantInt::get(SelType, TC);
182       return ExtraBitInTC ? Builder.CreateXor(V, C) : Builder.CreateOr(V, C);
183     }
184     if (Pred == ICmpInst::ICMP_NE) {
185       // If the masked bit in V is set, set or clear the bit in the result:
186       // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) | FC
187       // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) ^ FC
188       Constant *C = ConstantInt::get(SelType, FC);
189       return ExtraBitInTC ? Builder.CreateOr(V, C) : Builder.CreateXor(V, C);
190     }
191     llvm_unreachable("Only expecting equality predicates");
192   }
193 
194   // Make sure one of the select arms is a power-of-2.
195   if (!TC.isPowerOf2() && !FC.isPowerOf2())
196     return nullptr;
197 
198   // Determine which shift is needed to transform result of the 'and' into the
199   // desired result.
200   const APInt &ValC = !TC.isZero() ? TC : FC;
201   unsigned ValZeros = ValC.logBase2();
202   unsigned AndZeros = AndMask.logBase2();
203 
204   // Insert the 'and' instruction on the input to any zext/trunc created below.
205   if (CreateAnd)
206     V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), AndMask));
207 
208   // If types don't match, we can still convert the select by introducing a zext
209   // or a trunc of the 'and'.
210   if (ValZeros > AndZeros) {
211     V = Builder.CreateZExtOrTrunc(V, SelType);
212     V = Builder.CreateShl(V, ValZeros - AndZeros);
213   } else if (ValZeros < AndZeros) {
214     V = Builder.CreateLShr(V, AndZeros - ValZeros);
215     V = Builder.CreateZExtOrTrunc(V, SelType);
216   } else {
217     V = Builder.CreateZExtOrTrunc(V, SelType);
218   }
219 
220   // Okay, now we know that everything is set up; we just don't know whether we
221   // have an icmp_ne or icmp_eq and whether the true or false value is zero.
222   bool ShouldNotVal = !TC.isZero();
223   ShouldNotVal ^= Pred == ICmpInst::ICMP_NE;
224   if (ShouldNotVal)
225     V = Builder.CreateXor(V, ValC);
226 
227   return V;
228 }
229 
230 /// We want to turn code that looks like this:
231 ///   %C = or %A, %B
232 ///   %D = select %cond, %C, %A
233 /// into:
234 ///   %C = select %cond, %B, 0
235 ///   %D = or %A, %C
236 ///
237 /// Assuming that the specified instruction is an operand to the select, return
238 /// a bitmask indicating which operands of this instruction are foldable if they
239 /// equal the other incoming value of the select.
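/// The result is a bitmask over the instruction's operands: bit 0 (value 1)
/// set means the fold applies when operand 0 equals the other select value,
/// bit 1 (value 2) likewise for operand 1, and 3 allows either operand.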
240 static unsigned getSelectFoldableOperands(BinaryOperator *I) {
241   switch (I->getOpcode()) {
242   case Instruction::Add:
243   case Instruction::FAdd:
244   case Instruction::Mul:
245   case Instruction::FMul:
246   case Instruction::And:
247   case Instruction::Or:
248   case Instruction::Xor:
249     return 3;              // Can fold through either operand.
250   case Instruction::Sub:   // Can only fold on the amount subtracted.
251   case Instruction::FSub:
252   case Instruction::FDiv:  // Can only fold on the divisor amount.
253   case Instruction::Shl:   // Can only fold on the shift amount.
254   case Instruction::LShr:
255   case Instruction::AShr:
256     return 1;
257   default:
258     return 0;              // Cannot fold
259   }
260 }
261 
262 /// We have (select c, TI, FI), and we know that TI and FI have the same opcode.
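/// For example (illustrative), when the operations share an operand:
///   select %c, (add %x, %y), (add %x, %z)  -->  add %x, (select %c, %y, %z)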
263 Instruction *InstCombinerImpl::foldSelectOpOp(SelectInst &SI, Instruction *TI,
264                                               Instruction *FI) {
265   // Don't break up min/max patterns. The hasOneUse checks below prevent that
266   // for most cases, but vector min/max with bitcasts can be transformed. If the
267   // one-use restrictions are eased for other patterns, we still don't want to
268   // obfuscate min/max.
269   if ((match(&SI, m_SMin(m_Value(), m_Value())) ||
270        match(&SI, m_SMax(m_Value(), m_Value())) ||
271        match(&SI, m_UMin(m_Value(), m_Value())) ||
272        match(&SI, m_UMax(m_Value(), m_Value()))))
273     return nullptr;
274 
275   // If this is a cast from the same type, merge.
276   Value *Cond = SI.getCondition();
277   Type *CondTy = Cond->getType();
278   if (TI->getNumOperands() == 1 && TI->isCast()) {
279     Type *FIOpndTy = FI->getOperand(0)->getType();
280     if (TI->getOperand(0)->getType() != FIOpndTy)
281       return nullptr;
282 
283     // The select condition may be a vector. We may only change the operand
284     // type if the vector width remains the same (and matches the condition).
285     if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
286       if (!FIOpndTy->isVectorTy() ||
287           CondVTy->getElementCount() !=
288               cast<VectorType>(FIOpndTy)->getElementCount())
289         return nullptr;
290 
291       // TODO: If the backend knew how to deal with casts better, we could
292       // remove this limitation. For now, there's too much potential to create
293       // worse codegen by promoting the select ahead of size-altering casts
294       // (PR28160).
295       //
296       // Note that ValueTracking's matchSelectPattern() looks through casts
297       // without checking 'hasOneUse' when it matches min/max patterns, so this
298       // transform may end up happening anyway.
299       if (TI->getOpcode() != Instruction::BitCast &&
300           (!TI->hasOneUse() || !FI->hasOneUse()))
301         return nullptr;
302     } else if (!TI->hasOneUse() || !FI->hasOneUse()) {
303       // TODO: The one-use restrictions for a scalar select could be eased if
304       // the fold of a select in visitLoadInst() was enhanced to match a pattern
305       // that includes a cast.
306       return nullptr;
307     }
308 
309     // Fold this by inserting a select from the input values.
310     Value *NewSI =
311         Builder.CreateSelect(Cond, TI->getOperand(0), FI->getOperand(0),
312                              SI.getName() + ".v", &SI);
313     return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
314                             TI->getType());
315   }
316 
317   // Cond ? -X : -Y --> -(Cond ? X : Y)
318   Value *X, *Y;
319   if (match(TI, m_FNeg(m_Value(X))) && match(FI, m_FNeg(m_Value(Y))) &&
320       (TI->hasOneUse() || FI->hasOneUse())) {
321     // Intersect FMF from the fneg instructions and union those with the select.
322     FastMathFlags FMF = TI->getFastMathFlags();
323     FMF &= FI->getFastMathFlags();
324     FMF |= SI.getFastMathFlags();
325     Value *NewSel = Builder.CreateSelect(Cond, X, Y, SI.getName() + ".v", &SI);
326     if (auto *NewSelI = dyn_cast<Instruction>(NewSel))
327       NewSelI->setFastMathFlags(FMF);
328     Instruction *NewFNeg = UnaryOperator::CreateFNeg(NewSel);
329     NewFNeg->setFastMathFlags(FMF);
330     return NewFNeg;
331   }
332 
333   // Min/max intrinsic with a common operand can have the common operand pulled
334   // after the select. This is the same transform as below for binops, but
335   // specialized for intrinsic matching and without the restrictive uses clause.
336   auto *TII = dyn_cast<IntrinsicInst>(TI);
337   auto *FII = dyn_cast<IntrinsicInst>(FI);
338   if (TII && FII && TII->getIntrinsicID() == FII->getIntrinsicID() &&
339       (TII->hasOneUse() || FII->hasOneUse())) {
340     Value *T0, *T1, *F0, *F1;
341     if (match(TII, m_MaxOrMin(m_Value(T0), m_Value(T1))) &&
342         match(FII, m_MaxOrMin(m_Value(F0), m_Value(F1)))) {
343       if (T0 == F0) {
344         Value *NewSel = Builder.CreateSelect(Cond, T1, F1, "minmaxop", &SI);
345         return CallInst::Create(TII->getCalledFunction(), {NewSel, T0});
346       }
347       if (T0 == F1) {
348         Value *NewSel = Builder.CreateSelect(Cond, T1, F0, "minmaxop", &SI);
349         return CallInst::Create(TII->getCalledFunction(), {NewSel, T0});
350       }
351       if (T1 == F0) {
352         Value *NewSel = Builder.CreateSelect(Cond, T0, F1, "minmaxop", &SI);
353         return CallInst::Create(TII->getCalledFunction(), {NewSel, T1});
354       }
355       if (T1 == F1) {
356         Value *NewSel = Builder.CreateSelect(Cond, T0, F0, "minmaxop", &SI);
357         return CallInst::Create(TII->getCalledFunction(), {NewSel, T1});
358       }
359     }
360   }
361 
362   // Only handle binary operators (including two-operand getelementptr) with
363   // one-use here. As with the cast case above, it may be possible to relax the
364   // one-use constraint, but that needs to be examined carefully since it may not
365   // reduce the total number of instructions.
366   if (TI->getNumOperands() != 2 || FI->getNumOperands() != 2 ||
367       !TI->isSameOperationAs(FI) ||
368       (!isa<BinaryOperator>(TI) && !isa<GetElementPtrInst>(TI)) ||
369       !TI->hasOneUse() || !FI->hasOneUse())
370     return nullptr;
371 
372   // Figure out if the operations have any operands in common.
373   Value *MatchOp, *OtherOpT, *OtherOpF;
374   bool MatchIsOpZero;
375   if (TI->getOperand(0) == FI->getOperand(0)) {
376     MatchOp  = TI->getOperand(0);
377     OtherOpT = TI->getOperand(1);
378     OtherOpF = FI->getOperand(1);
379     MatchIsOpZero = true;
380   } else if (TI->getOperand(1) == FI->getOperand(1)) {
381     MatchOp  = TI->getOperand(1);
382     OtherOpT = TI->getOperand(0);
383     OtherOpF = FI->getOperand(0);
384     MatchIsOpZero = false;
385   } else if (!TI->isCommutative()) {
386     return nullptr;
387   } else if (TI->getOperand(0) == FI->getOperand(1)) {
388     MatchOp  = TI->getOperand(0);
389     OtherOpT = TI->getOperand(1);
390     OtherOpF = FI->getOperand(0);
391     MatchIsOpZero = true;
392   } else if (TI->getOperand(1) == FI->getOperand(0)) {
393     MatchOp  = TI->getOperand(1);
394     OtherOpT = TI->getOperand(0);
395     OtherOpF = FI->getOperand(1);
396     MatchIsOpZero = true;
397   } else {
398     return nullptr;
399   }
400 
401   // If the select condition is a vector, the operands of the original select's
402   // operands must also be vectors. This may not be the case for getelementptr
403   // for example.
404   if (CondTy->isVectorTy() && (!OtherOpT->getType()->isVectorTy() ||
405                                !OtherOpF->getType()->isVectorTy()))
406     return nullptr;
407 
408   // If we reach here, they do have operations in common.
409   Value *NewSI = Builder.CreateSelect(Cond, OtherOpT, OtherOpF,
410                                       SI.getName() + ".v", &SI);
411   Value *Op0 = MatchIsOpZero ? MatchOp : NewSI;
412   Value *Op1 = MatchIsOpZero ? NewSI : MatchOp;
413   if (auto *BO = dyn_cast<BinaryOperator>(TI)) {
414     BinaryOperator *NewBO = BinaryOperator::Create(BO->getOpcode(), Op0, Op1);
415     NewBO->copyIRFlags(TI);
416     NewBO->andIRFlags(FI);
417     return NewBO;
418   }
419   if (auto *TGEP = dyn_cast<GetElementPtrInst>(TI)) {
420     auto *FGEP = cast<GetElementPtrInst>(FI);
421     Type *ElementType = TGEP->getResultElementType();
422     return TGEP->isInBounds() && FGEP->isInBounds()
423                ? GetElementPtrInst::CreateInBounds(ElementType, Op0, {Op1})
424                : GetElementPtrInst::Create(ElementType, Op0, {Op1});
425   }
426   llvm_unreachable("Expected BinaryOperator or GEP");
427   return nullptr;
428 }
429 
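/// Return true if one of the constants is zero and the other is 1 or -1;
/// foldSelectIntoOp only creates a new select of constants for such pairs.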
430 static bool isSelect01(const APInt &C1I, const APInt &C2I) {
431   if (!C1I.isZero() && !C2I.isZero()) // One side must be zero.
432     return false;
433   return C1I.isOne() || C1I.isAllOnes() || C2I.isOne() || C2I.isAllOnes();
434 }
435 
436 /// Try to fold the select into one of the operands to allow further
437 /// optimization.
438 Instruction *InstCombinerImpl::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
439                                                 Value *FalseVal) {
440   // See the comment above getSelectFoldableOperands for a description of the
441   // transformation we are doing here.
442   auto TryFoldSelectIntoOp = [&](SelectInst &SI, Value *TrueVal,
443                                  Value *FalseVal,
444                                  bool Swapped) -> Instruction * {
445     if (auto *TVI = dyn_cast<BinaryOperator>(TrueVal)) {
446       if (TVI->hasOneUse() && !isa<Constant>(FalseVal)) {
447         if (unsigned SFO = getSelectFoldableOperands(TVI)) {
448           unsigned OpToFold = 0;
449           if ((SFO & 1) && FalseVal == TVI->getOperand(0))
450             OpToFold = 1;
451           else if ((SFO & 2) && FalseVal == TVI->getOperand(1))
452             OpToFold = 2;
453 
454           if (OpToFold) {
455             FastMathFlags FMF;
456             // TODO: We probably ought to revisit cases where the select and FP
457             // instructions have different flags and add tests to ensure the
458             // behaviour is correct.
459             if (isa<FPMathOperator>(&SI))
460               FMF = SI.getFastMathFlags();
461             Constant *C = ConstantExpr::getBinOpIdentity(
462                 TVI->getOpcode(), TVI->getType(), true, FMF.noSignedZeros());
463             Value *OOp = TVI->getOperand(2 - OpToFold);
464             // Avoid creating a select between 2 constants unless it's selecting
465             // between 0 and either 1 or -1.
466             const APInt *OOpC;
467             bool OOpIsAPInt = match(OOp, m_APInt(OOpC));
468             if (!isa<Constant>(OOp) ||
469                 (OOpIsAPInt && isSelect01(C->getUniqueInteger(), *OOpC))) {
470               Value *NewSel = Builder.CreateSelect(
471                   SI.getCondition(), Swapped ? C : OOp, Swapped ? OOp : C);
472               if (isa<FPMathOperator>(&SI))
473                 cast<Instruction>(NewSel)->setFastMathFlags(FMF);
474               NewSel->takeName(TVI);
475               BinaryOperator *BO =
476                   BinaryOperator::Create(TVI->getOpcode(), FalseVal, NewSel);
477               BO->copyIRFlags(TVI);
478               return BO;
479             }
480           }
481         }
482       }
483     }
484     return nullptr;
485   };
486 
487   if (Instruction *R = TryFoldSelectIntoOp(SI, TrueVal, FalseVal, false))
488     return R;
489 
490   if (Instruction *R = TryFoldSelectIntoOp(SI, FalseVal, TrueVal, true))
491     return R;
492 
493   return nullptr;
494 }
495 
496 /// We want to turn:
497 ///   (select (icmp eq (and X, Y), 0), (and (lshr X, Z), 1), 1)
498 /// into:
499 ///   zext (icmp ne i32 (and X, (or Y, (shl 1, Z))), 0)
500 /// Note:
501 ///   Z may be 0 if lshr is missing.
502 /// Worst-case scenario is that we will replace 5 instructions with 5 different
503 /// instructions, but we get rid of the select.
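/// Example (illustrative, with Z = 2):
///   ((X & Y) == 0) ? ((X >> 2) & 1) : 1  -->  zext ((X & (Y | 4)) != 0)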
504 static Instruction *foldSelectICmpAndAnd(Type *SelType, const ICmpInst *Cmp,
505                                          Value *TVal, Value *FVal,
506                                          InstCombiner::BuilderTy &Builder) {
507   if (!(Cmp->hasOneUse() && Cmp->getOperand(0)->hasOneUse() &&
508         Cmp->getPredicate() == ICmpInst::ICMP_EQ &&
509         match(Cmp->getOperand(1), m_Zero()) && match(FVal, m_One())))
510     return nullptr;
511 
512   // The TrueVal has the general form:  and %B, 1
513   Value *B;
514   if (!match(TVal, m_OneUse(m_And(m_Value(B), m_One()))))
515     return nullptr;
516 
517   // Where %B may be optionally shifted:  lshr %X, %Z.
518   Value *X, *Z;
519   const bool HasShift = match(B, m_OneUse(m_LShr(m_Value(X), m_Value(Z))));
520 
521   // The shift must be valid.
522   // TODO: This restricts the fold to constant shift amounts. Is there a way to
523   //       handle variable shifts safely? PR47012
524   if (HasShift &&
525       !match(Z, m_SpecificInt_ICMP(CmpInst::ICMP_ULT,
526                                    APInt(SelType->getScalarSizeInBits(),
527                                          SelType->getScalarSizeInBits()))))
528     return nullptr;
529 
530   if (!HasShift)
531     X = B;
532 
533   Value *Y;
534   if (!match(Cmp->getOperand(0), m_c_And(m_Specific(X), m_Value(Y))))
535     return nullptr;
536 
537   // ((X & Y) == 0) ? ((X >> Z) & 1) : 1 --> (X & (Y | (1 << Z))) != 0
538   // ((X & Y) == 0) ? (X & 1) : 1 --> (X & (Y | 1)) != 0
539   Constant *One = ConstantInt::get(SelType, 1);
540   Value *MaskB = HasShift ? Builder.CreateShl(One, Z) : One;
541   Value *FullMask = Builder.CreateOr(Y, MaskB);
542   Value *MaskedX = Builder.CreateAnd(X, FullMask);
543   Value *ICmpNeZero = Builder.CreateIsNotNull(MaskedX);
544   return new ZExtInst(ICmpNeZero, SelType);
545 }
546 
547 /// We want to turn:
548 ///   (select (icmp sgt X, C), lshr (X, Y), ashr (X, Y)); iff C s>= -1
549 ///   (select (icmp slt X, C), ashr (X, Y), lshr (X, Y)); iff C s>= 0
550 /// into:
551 ///   ashr (X, Y)
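///
/// This is valid because the compare guarantees that X is non-negative
/// whenever the lshr arm is selected, and lshr equals ashr for non-negative X,
/// so both arms compute ashr (X, Y).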
552 static Value *foldSelectICmpLshrAshr(const ICmpInst *IC, Value *TrueVal,
553                                      Value *FalseVal,
554                                      InstCombiner::BuilderTy &Builder) {
555   ICmpInst::Predicate Pred = IC->getPredicate();
556   Value *CmpLHS = IC->getOperand(0);
557   Value *CmpRHS = IC->getOperand(1);
558   if (!CmpRHS->getType()->isIntOrIntVectorTy())
559     return nullptr;
560 
561   Value *X, *Y;
562   unsigned Bitwidth = CmpRHS->getType()->getScalarSizeInBits();
563   if ((Pred != ICmpInst::ICMP_SGT ||
564        !match(CmpRHS,
565               m_SpecificInt_ICMP(ICmpInst::ICMP_SGE, APInt(Bitwidth, -1)))) &&
566       (Pred != ICmpInst::ICMP_SLT ||
567        !match(CmpRHS,
568               m_SpecificInt_ICMP(ICmpInst::ICMP_SGE, APInt(Bitwidth, 0)))))
569     return nullptr;
570 
571   // Canonicalize so that ashr is in FalseVal.
572   if (Pred == ICmpInst::ICMP_SLT)
573     std::swap(TrueVal, FalseVal);
574 
575   if (match(TrueVal, m_LShr(m_Value(X), m_Value(Y))) &&
576       match(FalseVal, m_AShr(m_Specific(X), m_Specific(Y))) &&
577       match(CmpLHS, m_Specific(X))) {
578     const auto *Ashr = cast<Instruction>(FalseVal);
579     // If lshr is not exact and ashr is, this new ashr must not be exact.
580     bool IsExact = Ashr->isExact() && cast<Instruction>(TrueVal)->isExact();
581     return Builder.CreateAShr(X, Y, IC->getName(), IsExact);
582   }
583 
584   return nullptr;
585 }
586 
587 /// We want to turn:
588 ///   (select (icmp eq (and X, C1), 0), Y, (or Y, C2))
589 /// into:
590 ///   (or (shl (and X, C1), C3), Y)
591 /// iff:
592 ///   C1 and C2 are both powers of 2
593 /// where:
594 ///   C3 = Log(C2) - Log(C1)
595 ///
596 /// This transform handles cases where:
597 /// 1. The icmp predicate is inverted
598 /// 2. The select operands are reversed
599 /// 3. The magnitudes of C2 and C1 are flipped
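///
/// Example (illustrative, with C1 = 2, C2 = 8, so C3 = log2(8) - log2(2) = 2):
///   (select (icmp eq (and X, 2), 0), Y, (or Y, 8))
///     --> (or (shl (and X, 2), 2), Y)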
600 static Value *foldSelectICmpAndOr(const ICmpInst *IC, Value *TrueVal,
601                                   Value *FalseVal,
602                                   InstCombiner::BuilderTy &Builder) {
603   // Only handle integer compares. Also, if this is a vector select, we need a
604   // vector compare.
605   if (!TrueVal->getType()->isIntOrIntVectorTy() ||
606       TrueVal->getType()->isVectorTy() != IC->getType()->isVectorTy())
607     return nullptr;
608 
609   Value *CmpLHS = IC->getOperand(0);
610   Value *CmpRHS = IC->getOperand(1);
611 
612   Value *V;
613   unsigned C1Log;
614   bool IsEqualZero;
615   bool NeedAnd = false;
616   if (IC->isEquality()) {
617     if (!match(CmpRHS, m_Zero()))
618       return nullptr;
619 
620     const APInt *C1;
621     if (!match(CmpLHS, m_And(m_Value(), m_Power2(C1))))
622       return nullptr;
623 
624     V = CmpLHS;
625     C1Log = C1->logBase2();
626     IsEqualZero = IC->getPredicate() == ICmpInst::ICMP_EQ;
627   } else if (IC->getPredicate() == ICmpInst::ICMP_SLT ||
628              IC->getPredicate() == ICmpInst::ICMP_SGT) {
629     // We also need to recognize (icmp slt (trunc (X)), 0) and
630     // (icmp sgt (trunc (X)), -1).
631     IsEqualZero = IC->getPredicate() == ICmpInst::ICMP_SGT;
632     if ((IsEqualZero && !match(CmpRHS, m_AllOnes())) ||
633         (!IsEqualZero && !match(CmpRHS, m_Zero())))
634       return nullptr;
635 
636     if (!match(CmpLHS, m_OneUse(m_Trunc(m_Value(V)))))
637       return nullptr;
638 
639     C1Log = CmpLHS->getType()->getScalarSizeInBits() - 1;
640     NeedAnd = true;
641   } else {
642     return nullptr;
643   }
644 
645   const APInt *C2;
646   bool OrOnTrueVal = false;
647   bool OrOnFalseVal = match(FalseVal, m_Or(m_Specific(TrueVal), m_Power2(C2)));
648   if (!OrOnFalseVal)
649     OrOnTrueVal = match(TrueVal, m_Or(m_Specific(FalseVal), m_Power2(C2)));
650 
651   if (!OrOnFalseVal && !OrOnTrueVal)
652     return nullptr;
653 
654   Value *Y = OrOnFalseVal ? TrueVal : FalseVal;
655 
656   unsigned C2Log = C2->logBase2();
657 
658   bool NeedXor = (!IsEqualZero && OrOnFalseVal) || (IsEqualZero && OrOnTrueVal);
659   bool NeedShift = C1Log != C2Log;
660   bool NeedZExtTrunc = Y->getType()->getScalarSizeInBits() !=
661                        V->getType()->getScalarSizeInBits();
662 
663   // Make sure we don't create more instructions than we save.
664   Value *Or = OrOnFalseVal ? FalseVal : TrueVal;
665   if ((NeedShift + NeedXor + NeedZExtTrunc) >
666       (IC->hasOneUse() + Or->hasOneUse()))
667     return nullptr;
668 
669   if (NeedAnd) {
670     // Insert the AND instruction on the input to the truncate.
671     APInt C1 = APInt::getOneBitSet(V->getType()->getScalarSizeInBits(), C1Log);
672     V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), C1));
673   }
674 
675   if (C2Log > C1Log) {
676     V = Builder.CreateZExtOrTrunc(V, Y->getType());
677     V = Builder.CreateShl(V, C2Log - C1Log);
678   } else if (C1Log > C2Log) {
679     V = Builder.CreateLShr(V, C1Log - C2Log);
680     V = Builder.CreateZExtOrTrunc(V, Y->getType());
681   } else
682     V = Builder.CreateZExtOrTrunc(V, Y->getType());
683 
684   if (NeedXor)
685     V = Builder.CreateXor(V, *C2);
686 
687   return Builder.CreateOr(V, Y);
688 }
689 
690 /// Canonicalize setting or clearing a masked set of constant bits into
691 /// select-of-constants form.
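/// For example (illustrative, with C = 8, so ~C = -9):
///   Cond ? (X & -9) : (X | 8)  -->  (X & -9) | (Cond ? 0 : 8)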
692 static Instruction *foldSetClearBits(SelectInst &Sel,
693                                      InstCombiner::BuilderTy &Builder) {
694   Value *Cond = Sel.getCondition();
695   Value *T = Sel.getTrueValue();
696   Value *F = Sel.getFalseValue();
697   Type *Ty = Sel.getType();
698   Value *X;
699   const APInt *NotC, *C;
700 
701   // Cond ? (X & ~C) : (X | C) --> (X & ~C) | (Cond ? 0 : C)
702   if (match(T, m_And(m_Value(X), m_APInt(NotC))) &&
703       match(F, m_OneUse(m_Or(m_Specific(X), m_APInt(C)))) && *NotC == ~(*C)) {
704     Constant *Zero = ConstantInt::getNullValue(Ty);
705     Constant *OrC = ConstantInt::get(Ty, *C);
706     Value *NewSel = Builder.CreateSelect(Cond, Zero, OrC, "masksel", &Sel);
707     return BinaryOperator::CreateOr(T, NewSel);
708   }
709 
710   // Cond ? (X | C) : (X & ~C) --> (X & ~C) | (Cond ? C : 0)
711   if (match(F, m_And(m_Value(X), m_APInt(NotC))) &&
712       match(T, m_OneUse(m_Or(m_Specific(X), m_APInt(C)))) && *NotC == ~(*C)) {
713     Constant *Zero = ConstantInt::getNullValue(Ty);
714     Constant *OrC = ConstantInt::get(Ty, *C);
715     Value *NewSel = Builder.CreateSelect(Cond, OrC, Zero, "masksel", &Sel);
716     return BinaryOperator::CreateOr(F, NewSel);
717   }
718 
719   return nullptr;
720 }
721 
722 //   select (x == 0), 0, x * y --> freeze(y) * x
723 //   select (y == 0), 0, x * y --> freeze(x) * y
724 //   select (x == 0), undef, x * y --> freeze(y) * x
725 //   select (x == undef), 0, x * y --> freeze(y) * x
726 // Using the mul result instead of 0 may make the result more poisonous,
727 // so the operand that was not checked in the condition should be frozen.
728 // The latter folding is applied only when the constant compared with x
729 // is a vector consisting of 0 and undefs. If the constant compared with x
730 // is a scalar undefined value or an undefined vector, then the expression
731 // should already have been folded into a constant.
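// For example, in "select (x == 0), 0, x * y" with a poison y, the original
// select still yields 0 when x == 0, but an unfrozen "y * x" would be poison,
// so y is frozen before the select is removed.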
732 static Instruction *foldSelectZeroOrMul(SelectInst &SI, InstCombinerImpl &IC) {
733   auto *CondVal = SI.getCondition();
734   auto *TrueVal = SI.getTrueValue();
735   auto *FalseVal = SI.getFalseValue();
736   Value *X, *Y;
737   ICmpInst::Predicate Predicate;
738 
739   // We assume the constant compared with zero is not undef (but it may be
740   // a vector with some undef elements). Otherwise (when the constant is undef)
741   // the select expression should already have been simplified.
742   if (!match(CondVal, m_ICmp(Predicate, m_Value(X), m_Zero())) ||
743       !ICmpInst::isEquality(Predicate))
744     return nullptr;
745 
746   if (Predicate == ICmpInst::ICMP_NE)
747     std::swap(TrueVal, FalseVal);
748 
749   // Check that TrueVal is a constant instead of matching it with m_Zero()
750   // to handle the case when it is a scalar undef value or a vector containing
751   // non-zero elements that are masked by undef elements in the compare
752   // constant.
753   auto *TrueValC = dyn_cast<Constant>(TrueVal);
754   if (TrueValC == nullptr ||
755       !match(FalseVal, m_c_Mul(m_Specific(X), m_Value(Y))) ||
756       !isa<Instruction>(FalseVal))
757     return nullptr;
758 
759   auto *ZeroC = cast<Constant>(cast<Instruction>(CondVal)->getOperand(1));
760   auto *MergedC = Constant::mergeUndefsWith(TrueValC, ZeroC);
761   // If X is compared with 0 then TrueVal could be either zero or undef.
762   // m_Zero matches vectors containing some undef elements, but for scalars
763   // m_Undef should be used explicitly.
764   if (!match(MergedC, m_Zero()) && !match(MergedC, m_Undef()))
765     return nullptr;
766 
767   auto *FalseValI = cast<Instruction>(FalseVal);
768   auto *FrY = IC.InsertNewInstBefore(new FreezeInst(Y, Y->getName() + ".fr"),
769                                      *FalseValI);
770   IC.replaceOperand(*FalseValI, FalseValI->getOperand(0) == Y ? 0 : 1, FrY);
771   return IC.replaceInstUsesWith(SI, FalseValI);
772 }
773 
774 /// Transform patterns such as (a > b) ? a - b : 0 into usub.sat(a, b).
775 /// There are 8 commuted/swapped variants of this pattern.
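/// Example (illustrative IR):
///   %cmp = icmp ugt i32 %a, %b
///   %sub = sub i32 %a, %b
///   %sel = select i1 %cmp, i32 %sub, i32 0
///     --> %sel = call i32 @llvm.usub.sat.i32(i32 %a, i32 %b)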
776 /// TODO: Also support a - UMIN(a,b) patterns.
777 static Value *canonicalizeSaturatedSubtract(const ICmpInst *ICI,
778                                             const Value *TrueVal,
779                                             const Value *FalseVal,
780                                             InstCombiner::BuilderTy &Builder) {
781   ICmpInst::Predicate Pred = ICI->getPredicate();
782   if (!ICmpInst::isUnsigned(Pred))
783     return nullptr;
784 
785   // (b > a) ? 0 : a - b -> (b <= a) ? a - b : 0
786   if (match(TrueVal, m_Zero())) {
787     Pred = ICmpInst::getInversePredicate(Pred);
788     std::swap(TrueVal, FalseVal);
789   }
790   if (!match(FalseVal, m_Zero()))
791     return nullptr;
792 
793   Value *A = ICI->getOperand(0);
794   Value *B = ICI->getOperand(1);
795   if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_ULT) {
796     // (b < a) ? a - b : 0 -> (a > b) ? a - b : 0
797     std::swap(A, B);
798     Pred = ICmpInst::getSwappedPredicate(Pred);
799   }
800 
801   assert((Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_UGT) &&
802          "Unexpected isUnsigned predicate!");
803 
804   // Ensure the sub is of the form:
805   //  (a > b) ? a - b : 0 -> usub.sat(a, b)
806   //  (a > b) ? b - a : 0 -> -usub.sat(a, b)
807   // Checking for both a-b and a+(-b) as a constant.
808   bool IsNegative = false;
809   const APInt *C;
810   if (match(TrueVal, m_Sub(m_Specific(B), m_Specific(A))) ||
811       (match(A, m_APInt(C)) &&
812        match(TrueVal, m_Add(m_Specific(B), m_SpecificInt(-*C)))))
813     IsNegative = true;
814   else if (!match(TrueVal, m_Sub(m_Specific(A), m_Specific(B))) &&
815            !(match(B, m_APInt(C)) &&
816              match(TrueVal, m_Add(m_Specific(A), m_SpecificInt(-*C)))))
817     return nullptr;
818 
819   // If we are adding a negate and the sub and icmp are used anywhere else, we
820   // would end up with more instructions.
821   if (IsNegative && !TrueVal->hasOneUse() && !ICI->hasOneUse())
822     return nullptr;
823 
824   // (a > b) ? a - b : 0 -> usub.sat(a, b)
825   // (a > b) ? b - a : 0 -> -usub.sat(a, b)
826   Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, A, B);
827   if (IsNegative)
828     Result = Builder.CreateNeg(Result);
829   return Result;
830 }
831 
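/// Try to match an unsigned saturated add and canonicalize it to uadd.sat,
/// e.g. (illustrative):
///   (X u< ~C) ? (X + C) : -1  -->  uadd.sat(X, C)
///   (~X u< Y) ? -1 : (X + Y)  -->  uadd.sat(X, Y)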
832 static Value *canonicalizeSaturatedAdd(ICmpInst *Cmp, Value *TVal, Value *FVal,
833                                        InstCombiner::BuilderTy &Builder) {
834   if (!Cmp->hasOneUse())
835     return nullptr;
836 
837   // Match unsigned saturated add with constant.
838   Value *Cmp0 = Cmp->getOperand(0);
839   Value *Cmp1 = Cmp->getOperand(1);
840   ICmpInst::Predicate Pred = Cmp->getPredicate();
841   Value *X;
842   const APInt *C, *CmpC;
843   if (Pred == ICmpInst::ICMP_ULT &&
844       match(TVal, m_Add(m_Value(X), m_APInt(C))) && X == Cmp0 &&
845       match(FVal, m_AllOnes()) && match(Cmp1, m_APInt(CmpC)) && *CmpC == ~*C) {
846     // (X u< ~C) ? (X + C) : -1 --> uadd.sat(X, C)
847     return Builder.CreateBinaryIntrinsic(
848         Intrinsic::uadd_sat, X, ConstantInt::get(X->getType(), *C));
849   }
850 
851   // Match unsigned saturated add of 2 variables with an unnecessary 'not'.
852   // There are 8 commuted variants.
853   // Canonicalize -1 (the saturated result) to the true value of the select.
854   if (match(FVal, m_AllOnes())) {
855     std::swap(TVal, FVal);
856     Pred = CmpInst::getInversePredicate(Pred);
857   }
858   if (!match(TVal, m_AllOnes()))
859     return nullptr;
860 
861   // Canonicalize predicate to less-than or less-or-equal-than.
862   if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
863     std::swap(Cmp0, Cmp1);
864     Pred = CmpInst::getSwappedPredicate(Pred);
865   }
866   if (Pred != ICmpInst::ICMP_ULT && Pred != ICmpInst::ICMP_ULE)
867     return nullptr;
868 
869   // Match unsigned saturated add of 2 variables with an unnecessary 'not'.
870   // Strictness of the comparison is irrelevant.
871   Value *Y;
872   if (match(Cmp0, m_Not(m_Value(X))) &&
873       match(FVal, m_c_Add(m_Specific(X), m_Value(Y))) && Y == Cmp1) {
874     // (~X u< Y) ? -1 : (X + Y) --> uadd.sat(X, Y)
875     // (~X u< Y) ? -1 : (Y + X) --> uadd.sat(X, Y)
876     return Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat, X, Y);
877   }
878   // The 'not' op may be included in the sum but not the compare.
879   // Strictness of the comparison is irrelevant.
880   X = Cmp0;
881   Y = Cmp1;
882   if (match(FVal, m_c_Add(m_Not(m_Specific(X)), m_Specific(Y)))) {
883     // (X u< Y) ? -1 : (~X + Y) --> uadd.sat(~X, Y)
884     // (X u< Y) ? -1 : (Y + ~X) --> uadd.sat(Y, ~X)
885     BinaryOperator *BO = cast<BinaryOperator>(FVal);
886     return Builder.CreateBinaryIntrinsic(
887         Intrinsic::uadd_sat, BO->getOperand(0), BO->getOperand(1));
888   }
889   // The overflow may be detected via the add wrapping around.
890   // This is only valid for strict comparison!
891   if (Pred == ICmpInst::ICMP_ULT &&
892       match(Cmp0, m_c_Add(m_Specific(Cmp1), m_Value(Y))) &&
893       match(FVal, m_c_Add(m_Specific(Cmp1), m_Specific(Y)))) {
894     // ((X + Y) u< X) ? -1 : (X + Y) --> uadd.sat(X, Y)
895     // ((X + Y) u< Y) ? -1 : (X + Y) --> uadd.sat(X, Y)
896     return Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat, Cmp1, Y);
897   }
898 
899   return nullptr;
900 }
901 
902 /// Fold the following code sequence:
903 /// \code
904 ///   int a = ctlz(x & -x);
905 ///   x ? 31 - a : a;
906 /// \endcode
907 ///
908 /// into:
909 ///   cttz(x)
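///
/// The mask (x & -x) isolates the lowest set bit of x, so for non-zero x
/// ctlz(x & -x) == (BitWidth - 1) - cttz(x); subtracting from BitWidth - 1
/// (matched below as an xor with BitWidth - 1) therefore recovers cttz(x).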
910 static Instruction *foldSelectCtlzToCttz(ICmpInst *ICI, Value *TrueVal,
911                                          Value *FalseVal,
912                                          InstCombiner::BuilderTy &Builder) {
913   unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
914   if (!ICI->isEquality() || !match(ICI->getOperand(1), m_Zero()))
915     return nullptr;
916 
917   if (ICI->getPredicate() == ICmpInst::ICMP_NE)
918     std::swap(TrueVal, FalseVal);
919 
920   if (!match(FalseVal,
921              m_Xor(m_Deferred(TrueVal), m_SpecificInt(BitWidth - 1))))
922     return nullptr;
923 
924   if (!match(TrueVal, m_Intrinsic<Intrinsic::ctlz>()))
925     return nullptr;
926 
927   Value *X = ICI->getOperand(0);
928   auto *II = cast<IntrinsicInst>(TrueVal);
929   if (!match(II->getOperand(0), m_c_And(m_Specific(X), m_Neg(m_Specific(X)))))
930     return nullptr;
931 
932   Function *F = Intrinsic::getDeclaration(II->getModule(), Intrinsic::cttz,
933                                           II->getType());
934   return CallInst::Create(F, {X, II->getArgOperand(1)});
935 }
936 
937 /// Attempt to fold a cttz/ctlz followed by an icmp plus select into a single
938 /// call to cttz/ctlz with flag 'is_zero_poison' cleared.
939 ///
940 /// For example, we can fold the following code sequence:
941 /// \code
942 ///   %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
943 ///   %1 = icmp ne i32 %x, 0
944 ///   %2 = select i1 %1, i32 %0, i32 32
945 /// \endcode
946 ///
947 /// into:
948 ///   %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
949 static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
950                                  InstCombiner::BuilderTy &Builder) {
951   ICmpInst::Predicate Pred = ICI->getPredicate();
952   Value *CmpLHS = ICI->getOperand(0);
953   Value *CmpRHS = ICI->getOperand(1);
954 
955   // Check if the condition value compares a value for equality against zero.
956   if (!ICI->isEquality() || !match(CmpRHS, m_Zero()))
957     return nullptr;
958 
959   Value *SelectArg = FalseVal;
960   Value *ValueOnZero = TrueVal;
961   if (Pred == ICmpInst::ICMP_NE)
962     std::swap(SelectArg, ValueOnZero);
963 
964   // Skip zero extend/truncate.
965   Value *Count = nullptr;
966   if (!match(SelectArg, m_ZExt(m_Value(Count))) &&
967       !match(SelectArg, m_Trunc(m_Value(Count))))
968     Count = SelectArg;
969 
970   // Check that 'Count' is a call to intrinsic cttz/ctlz. Also check that the
971   // input to the cttz/ctlz is used as LHS for the compare instruction.
972   if (!match(Count, m_Intrinsic<Intrinsic::cttz>(m_Specific(CmpLHS))) &&
973       !match(Count, m_Intrinsic<Intrinsic::ctlz>(m_Specific(CmpLHS))))
974     return nullptr;
975 
976   IntrinsicInst *II = cast<IntrinsicInst>(Count);
977 
978   // Check if the value propagated on zero is a constant number equal to the
979   // size in bits of 'Count'.
980   unsigned SizeOfInBits = Count->getType()->getScalarSizeInBits();
981   if (match(ValueOnZero, m_SpecificInt(SizeOfInBits))) {
982     // Explicitly clear the 'is_zero_poison' flag. It's always valid to go from
983     // true to false on this flag, so we can replace it for all users.
984     II->setArgOperand(1, ConstantInt::getFalse(II->getContext()));
985     return SelectArg;
986   }
987 
988   // The ValueOnZero is not the bitwidth. But if the cttz/ctlz (and optional
989   // zext/trunc) have one use (ending at the select), the cttz/ctlz result will
990   // not be used if the input is zero. Relax to 'zero is poison' for that case.
991   if (II->hasOneUse() && SelectArg->hasOneUse() &&
992       !match(II->getArgOperand(1), m_One()))
993     II->setArgOperand(1, ConstantInt::getTrue(II->getContext()));
994 
995   return nullptr;
996 }
997 
998 /// Return true if we find and adjust an icmp+select pattern where the compare
999 /// is with a constant that can be incremented or decremented to match the
1000 /// minimum or maximum idiom.
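///
/// For example (illustrative):
///   X > 5 ? X : 6  is rewritten as  X < 6 ? 6 : X
/// so that both arms refer to the same constant and the select can then be
/// recognized as smax(X, 6).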
1001 static bool adjustMinMax(SelectInst &Sel, ICmpInst &Cmp) {
1002   ICmpInst::Predicate Pred = Cmp.getPredicate();
1003   Value *CmpLHS = Cmp.getOperand(0);
1004   Value *CmpRHS = Cmp.getOperand(1);
1005   Value *TrueVal = Sel.getTrueValue();
1006   Value *FalseVal = Sel.getFalseValue();
1007 
1008   // We may move or edit the compare, so make sure the select is the only user.
1009   const APInt *CmpC;
1010   if (!Cmp.hasOneUse() || !match(CmpRHS, m_APInt(CmpC)))
1011     return false;
1012 
1013   // These transforms only work for selects of integers or vector selects of
1014   // integer vectors.
1015   Type *SelTy = Sel.getType();
1016   auto *SelEltTy = dyn_cast<IntegerType>(SelTy->getScalarType());
1017   if (!SelEltTy || SelTy->isVectorTy() != Cmp.getType()->isVectorTy())
1018     return false;
1019 
1020   Constant *AdjustedRHS;
1021   if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SGT)
1022     AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC + 1);
1023   else if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT)
1024     AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC - 1);
1025   else
1026     return false;
1027 
1028   // X > C ? X : C+1  -->  X < C+1 ? C+1 : X
1029   // X < C ? X : C-1  -->  X > C-1 ? C-1 : X
1030   if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
1031       (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
1032     ; // Nothing to do here. Values match without any sign/zero extension.
1033   }
1034   // Types do not match. Instead of calculating this with mixed types, promote
1035   // all to the larger type. This enables scalar evolution to analyze this
1036   // expression.
1037   else if (CmpRHS->getType()->getScalarSizeInBits() < SelEltTy->getBitWidth()) {
1038     Constant *SextRHS = ConstantExpr::getSExt(AdjustedRHS, SelTy);
1039 
1040     // X = sext x; x >s c ? X : C+1 --> X = sext x; X <s C+1 ? C+1 : X
1041     // X = sext x; x <s c ? X : C-1 --> X = sext x; X >s C-1 ? C-1 : X
1042     // X = sext x; x >u c ? X : C+1 --> X = sext x; X <u C+1 ? C+1 : X
1043     // X = sext x; x <u c ? X : C-1 --> X = sext x; X >u C-1 ? C-1 : X
1044     if (match(TrueVal, m_SExt(m_Specific(CmpLHS))) && SextRHS == FalseVal) {
1045       CmpLHS = TrueVal;
1046       AdjustedRHS = SextRHS;
1047     } else if (match(FalseVal, m_SExt(m_Specific(CmpLHS))) &&
1048                SextRHS == TrueVal) {
1049       CmpLHS = FalseVal;
1050       AdjustedRHS = SextRHS;
1051     } else if (Cmp.isUnsigned()) {
1052       Constant *ZextRHS = ConstantExpr::getZExt(AdjustedRHS, SelTy);
1053       // X = zext x; x >u c ? X : C+1 --> X = zext x; X <u C+1 ? C+1 : X
1054       // X = zext x; x <u c ? X : C-1 --> X = zext x; X >u C-1 ? C-1 : X
1055       // zext + signed compare cannot be changed:
1056       //    0xff <s 0x00, but 0x00ff >s 0x0000
1057       if (match(TrueVal, m_ZExt(m_Specific(CmpLHS))) && ZextRHS == FalseVal) {
1058         CmpLHS = TrueVal;
1059         AdjustedRHS = ZextRHS;
1060       } else if (match(FalseVal, m_ZExt(m_Specific(CmpLHS))) &&
1061                  ZextRHS == TrueVal) {
1062         CmpLHS = FalseVal;
1063         AdjustedRHS = ZextRHS;
1064       } else {
1065         return false;
1066       }
1067     } else {
1068       return false;
1069     }
1070   } else {
1071     return false;
1072   }
1073 
1074   Pred = ICmpInst::getSwappedPredicate(Pred);
1075   CmpRHS = AdjustedRHS;
1076   std::swap(FalseVal, TrueVal);
1077   Cmp.setPredicate(Pred);
1078   Cmp.setOperand(0, CmpLHS);
1079   Cmp.setOperand(1, CmpRHS);
1080   Sel.setOperand(1, TrueVal);
1081   Sel.setOperand(2, FalseVal);
1082   Sel.swapProfMetadata();
1083 
1084   // Move the compare instruction right before the select instruction. Otherwise
1085   // the sext/zext value may be defined after the compare instruction uses it.
1086   Cmp.moveBefore(&Sel);
1087 
1088   return true;
1089 }
1090 
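/// Canonicalize select-based abs/nabs and min/max patterns to the
/// corresponding intrinsics, e.g. (illustrative):
///   %neg = sub i32 0, %x
///   %cmp = icmp slt i32 %x, 0
///   %abs = select i1 %cmp, i32 %neg, i32 %x
///     --> %abs = call i32 @llvm.abs.i32(i32 %x, i1 false)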
1091 static Instruction *canonicalizeSPF(SelectInst &Sel, ICmpInst &Cmp,
1092                                     InstCombinerImpl &IC) {
1093   Value *LHS, *RHS;
1094   // TODO: What to do with pointer min/max patterns?
1095   if (!Sel.getType()->isIntOrIntVectorTy())
1096     return nullptr;
1097 
1098   SelectPatternFlavor SPF = matchSelectPattern(&Sel, LHS, RHS).Flavor;
1099   if (SPF == SelectPatternFlavor::SPF_ABS ||
1100       SPF == SelectPatternFlavor::SPF_NABS) {
1101     if (!Cmp.hasOneUse() && !RHS->hasOneUse())
1102       return nullptr; // TODO: Relax this restriction.
1103 
1104     // Note that NSW flag can only be propagated for normal, non-negated abs!
1105     bool IntMinIsPoison = SPF == SelectPatternFlavor::SPF_ABS &&
1106                           match(RHS, m_NSWNeg(m_Specific(LHS)));
1107     Constant *IntMinIsPoisonC =
1108         ConstantInt::get(Type::getInt1Ty(Sel.getContext()), IntMinIsPoison);
1109     Instruction *Abs =
1110         IC.Builder.CreateBinaryIntrinsic(Intrinsic::abs, LHS, IntMinIsPoisonC);
1111 
1112     if (SPF == SelectPatternFlavor::SPF_NABS)
1113       return BinaryOperator::CreateNeg(Abs); // Always without NSW flag!
1114     return IC.replaceInstUsesWith(Sel, Abs);
1115   }
1116 
1117   if (SelectPatternResult::isMinOrMax(SPF)) {
1118     Intrinsic::ID IntrinsicID;
1119     switch (SPF) {
1120     case SelectPatternFlavor::SPF_UMIN:
1121       IntrinsicID = Intrinsic::umin;
1122       break;
1123     case SelectPatternFlavor::SPF_UMAX:
1124       IntrinsicID = Intrinsic::umax;
1125       break;
1126     case SelectPatternFlavor::SPF_SMIN:
1127       IntrinsicID = Intrinsic::smin;
1128       break;
1129     case SelectPatternFlavor::SPF_SMAX:
1130       IntrinsicID = Intrinsic::smax;
1131       break;
1132     default:
1133       llvm_unreachable("Unexpected SPF");
1134     }
1135     return IC.replaceInstUsesWith(
1136         Sel, IC.Builder.CreateBinaryIntrinsic(IntrinsicID, LHS, RHS));
1137   }
1138 
1139   return nullptr;
1140 }
1141 
1142 /// If we have a select with an equality comparison, then we know the value in
1143 /// one of the arms of the select. See if substituting this value into an arm
1144 /// and simplifying the result yields the same value as the other arm.
1145 ///
1146 /// To make this transform safe, we must drop poison-generating flags
1147 /// (nsw, etc) if we simplified to a binop because the select may be guarding
1148 /// that poison from propagating. If the existing binop already had no
1149 /// poison-generating flags, then this transform can be done by instsimplify.
1150 ///
1151 /// Consider:
1152 ///   %cmp = icmp eq i32 %x, 2147483647
1153 ///   %add = add nsw i32 %x, 1
1154 ///   %sel = select i1 %cmp, i32 -2147483648, i32 %add
1155 ///
1156 /// We can't replace %sel with %add unless we strip away the flags.
1157 /// TODO: Wrapping flags could be preserved in some cases with better analysis.
1158 Instruction *InstCombinerImpl::foldSelectValueEquivalence(SelectInst &Sel,
1159                                                           ICmpInst &Cmp) {
1160   // Value equivalence substitution requires an all-or-nothing replacement.
1161   // It does not make sense for a vector compare where each lane is chosen
1162   // independently.
1163   if (!Cmp.isEquality() || Cmp.getType()->isVectorTy())
1164     return nullptr;
1165 
1166   // Canonicalize the pattern to ICMP_EQ by swapping the select operands.
1167   Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue();
1168   bool Swapped = false;
1169   if (Cmp.getPredicate() == ICmpInst::ICMP_NE) {
1170     std::swap(TrueVal, FalseVal);
1171     Swapped = true;
1172   }
1173 
1174   // In X == Y ? f(X) : Z, try to evaluate f(Y) and replace the operand.
1175   // Make sure Y cannot be undef though, as we might pick different values for
1176   // undef in the icmp and in f(Y). Additionally, take care to avoid replacing
1177   // X == Y ? X : Z with X == Y ? Y : Z, as that would lead to an infinite
1178   // replacement cycle.
1179   Value *CmpLHS = Cmp.getOperand(0), *CmpRHS = Cmp.getOperand(1);
1180   if (TrueVal != CmpLHS &&
1181       isGuaranteedNotToBeUndefOrPoison(CmpRHS, SQ.AC, &Sel, &DT)) {
1182     if (Value *V = simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, SQ,
1183                                           /* AllowRefinement */ true))
1184       return replaceOperand(Sel, Swapped ? 2 : 1, V);
1185 
1186     // Even if TrueVal does not simplify, we can directly replace a use of
1187     // CmpLHS with CmpRHS, as long as the instruction is not used anywhere
1188     // else and is safe to speculatively execute (we may end up executing it
1189     // with different operands, which should not cause side-effects or trigger
1190     // undefined behavior). Only do this if CmpRHS is a constant, as
1191     // profitability is not clear for other cases.
1192     // FIXME: The replacement could be performed recursively.
1193     if (match(CmpRHS, m_ImmConstant()) && !match(CmpLHS, m_ImmConstant()))
1194       if (auto *I = dyn_cast<Instruction>(TrueVal))
1195         if (I->hasOneUse() && isSafeToSpeculativelyExecute(I))
1196           for (Use &U : I->operands())
1197             if (U == CmpLHS) {
1198               replaceUse(U, CmpRHS);
1199               return &Sel;
1200             }
1201   }
1202   if (TrueVal != CmpRHS &&
1203       isGuaranteedNotToBeUndefOrPoison(CmpLHS, SQ.AC, &Sel, &DT))
1204     if (Value *V = simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, SQ,
1205                                           /* AllowRefinement */ true))
1206       return replaceOperand(Sel, Swapped ? 2 : 1, V);
1207 
1208   auto *FalseInst = dyn_cast<Instruction>(FalseVal);
1209   if (!FalseInst)
1210     return nullptr;
1211 
1212   // InstSimplify already performed this fold if it was possible subject to
1213   // current poison-generating flags. Try the transform again with
1214   // poison-generating flags temporarily dropped.
1215   bool WasNUW = false, WasNSW = false, WasExact = false, WasInBounds = false;
1216   if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(FalseVal)) {
1217     WasNUW = OBO->hasNoUnsignedWrap();
1218     WasNSW = OBO->hasNoSignedWrap();
1219     FalseInst->setHasNoUnsignedWrap(false);
1220     FalseInst->setHasNoSignedWrap(false);
1221   }
1222   if (auto *PEO = dyn_cast<PossiblyExactOperator>(FalseVal)) {
1223     WasExact = PEO->isExact();
1224     FalseInst->setIsExact(false);
1225   }
1226   if (auto *GEP = dyn_cast<GetElementPtrInst>(FalseVal)) {
1227     WasInBounds = GEP->isInBounds();
1228     GEP->setIsInBounds(false);
1229   }
1230 
1231   // Try each equivalence substitution possibility.
1232   // We have an 'EQ' comparison, so the select's false value will propagate.
1233   // Example:
1234   // (X == 42) ? 43 : (X + 1) --> (X == 42) ? (X + 1) : (X + 1) --> X + 1
1235   if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, SQ,
1236                              /* AllowRefinement */ false) == TrueVal ||
1237       simplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, SQ,
1238                              /* AllowRefinement */ false) == TrueVal) {
1239     return replaceInstUsesWith(Sel, FalseVal);
1240   }
1241 
1242   // Restore poison-generating flags if the transform did not apply.
1243   if (WasNUW)
1244     FalseInst->setHasNoUnsignedWrap();
1245   if (WasNSW)
1246     FalseInst->setHasNoSignedWrap();
1247   if (WasExact)
1248     FalseInst->setIsExact();
1249   if (WasInBounds)
1250     cast<GetElementPtrInst>(FalseInst)->setIsInBounds();
1251 
1252   return nullptr;
1253 }
1254 
1255 // See if this is a pattern like:
1256 //   %old_cmp1 = icmp slt i32 %x, C2
1257 //   %old_replacement = select i1 %old_cmp1, i32 %target_low, i32 %target_high
1258 //   %old_x_offseted = add i32 %x, C1
1259 //   %old_cmp0 = icmp ult i32 %old_x_offseted, C0
1260 //   %r = select i1 %old_cmp0, i32 %x, i32 %old_replacement
1261 // This can be rewritten as the more canonical pattern:
1262 //   %new_cmp1 = icmp slt i32 %x, -C1
1263 //   %new_cmp2 = icmp sge i32 %x, C0-C1
1264 //   %new_clamped_low = select i1 %new_cmp1, i32 %target_low, i32 %x
1265 //   %r = select i1 %new_cmp2, i32 %target_high, i32 %new_clamped_low
1266 // Iff -C1 s<= C2 s<= C0-C1
1267 // Also, the ULT predicate can instead be UGT iff C0 != -1 (+invert result)
1268 //      the SLT predicate can instead be SGT iff C2 != INT_MAX (+invert res.)
1269 static Value *canonicalizeClampLike(SelectInst &Sel0, ICmpInst &Cmp0,
1270                                     InstCombiner::BuilderTy &Builder) {
1271   Value *X = Sel0.getTrueValue();
1272   Value *Sel1 = Sel0.getFalseValue();
1273 
1274   // First match the condition of the outermost select.
1275   // Said condition must be one-use.
1276   if (!Cmp0.hasOneUse())
1277     return nullptr;
1278   ICmpInst::Predicate Pred0 = Cmp0.getPredicate();
1279   Value *Cmp00 = Cmp0.getOperand(0);
1280   Constant *C0;
1281   if (!match(Cmp0.getOperand(1),
1282              m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C0))))
1283     return nullptr;
1284 
1285   if (!isa<SelectInst>(Sel1)) {
1286     Pred0 = ICmpInst::getInversePredicate(Pred0);
1287     std::swap(X, Sel1);
1288   }
1289 
1290   // Canonicalize Cmp0 into ult or uge.
1291   // FIXME: we shouldn't care about lanes that are 'undef' in the end?
1292   switch (Pred0) {
1293   case ICmpInst::Predicate::ICMP_ULT:
1294   case ICmpInst::Predicate::ICMP_UGE:
1295     // Although icmp ult %x, 0 is an unusual thing to try and should generally
1296     // have been simplified, it does not verify with undef inputs so ensure we
1297     // are not in a strange state.
1298     if (!match(C0, m_SpecificInt_ICMP(
1299                        ICmpInst::Predicate::ICMP_NE,
1300                        APInt::getZero(C0->getType()->getScalarSizeInBits()))))
1301       return nullptr;
1302     break; // Great!
1303   case ICmpInst::Predicate::ICMP_ULE:
1304   case ICmpInst::Predicate::ICMP_UGT:
1305     // We want to canonicalize it to 'ult' or 'uge', so we'll need to increment
1306     // C0, which again means it must not have any all-ones elements.
1307     if (!match(C0,
1308                m_SpecificInt_ICMP(
1309                    ICmpInst::Predicate::ICMP_NE,
1310                    APInt::getAllOnes(C0->getType()->getScalarSizeInBits()))))
1311       return nullptr; // Can't do, have all-ones element[s].
1312     Pred0 = ICmpInst::getFlippedStrictnessPredicate(Pred0);
1313     C0 = InstCombiner::AddOne(C0);
1314     break;
1315   default:
1316     return nullptr; // Unknown predicate.
1317   }
1318 
1319   // Now that we've canonicalized the ICmp, we know the X we expect;
1320   // the select, on the other hand, should be one-use.
1321   if (!Sel1->hasOneUse())
1322     return nullptr;
1323 
1324   // If the types do not match, look through any truncs to the underlying
1325   // instruction.
1326   if (Cmp00->getType() != X->getType() && X->hasOneUse())
1327     match(X, m_TruncOrSelf(m_Value(X)));
1328 
1329   // We can now finish matching the condition of the outermost select:
1330   // it should either be the X itself, or an addition of some constant to X.
1331   Constant *C1;
1332   if (Cmp00 == X)
1333     C1 = ConstantInt::getNullValue(X->getType());
1334   else if (!match(Cmp00,
1335                   m_Add(m_Specific(X),
1336                         m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C1)))))
1337     return nullptr;
1338 
1339   Value *Cmp1;
1340   ICmpInst::Predicate Pred1;
1341   Constant *C2;
1342   Value *ReplacementLow, *ReplacementHigh;
1343   if (!match(Sel1, m_Select(m_Value(Cmp1), m_Value(ReplacementLow),
1344                             m_Value(ReplacementHigh))) ||
1345       !match(Cmp1,
1346              m_ICmp(Pred1, m_Specific(X),
1347                     m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C2)))))
1348     return nullptr;
1349 
1350   if (!Cmp1->hasOneUse() && (Cmp00 == X || !Cmp00->hasOneUse()))
1351     return nullptr; // Not enough one-use instructions for the fold.
1352   // FIXME: this restriction could be relaxed if Cmp1 can be reused as one of
1353   //        the two comparisons we'll need to build.
1354 
1355   // Canonicalize Cmp1 into the form we expect.
1356   // FIXME: we shouldn't care about lanes that are 'undef' in the end?
1357   switch (Pred1) {
1358   case ICmpInst::Predicate::ICMP_SLT:
1359     break;
1360   case ICmpInst::Predicate::ICMP_SLE:
1361     // We'd have to increment C2 by one, and for that it must not have signed
1362     // max element, but then it would have been canonicalized to 'slt' before
1363     // we get here. So we can't do anything useful with 'sle'.
1364     return nullptr;
1365   case ICmpInst::Predicate::ICMP_SGT:
1366     // We want to canonicalize it to 'slt', so we'll need to increment C2,
1367     // which again means it must not have any signed max elements.
1368     if (!match(C2,
1369                m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
1370                                   APInt::getSignedMaxValue(
1371                                       C2->getType()->getScalarSizeInBits()))))
1372       return nullptr; // Can't do, have signed max element[s].
1373     C2 = InstCombiner::AddOne(C2);
1374     LLVM_FALLTHROUGH;
1375   case ICmpInst::Predicate::ICMP_SGE:
1376     // Also non-canonical, but here we don't need to change C2, so there are
1377     // no restrictions on it and we can just handle it.
1378     Pred1 = ICmpInst::Predicate::ICMP_SLT;
1379     std::swap(ReplacementLow, ReplacementHigh);
1380     break;
1381   default:
1382     return nullptr; // Unknown predicate.
1383   }
1384   assert(Pred1 == ICmpInst::Predicate::ICMP_SLT &&
1385          "Unexpected predicate type.");
1386 
1387   // The thresholds of this clamp-like pattern.
1388   auto *ThresholdLowIncl = ConstantExpr::getNeg(C1);
1389   auto *ThresholdHighExcl = ConstantExpr::getSub(C0, C1);
1390 
1391   assert((Pred0 == ICmpInst::Predicate::ICMP_ULT ||
1392           Pred0 == ICmpInst::Predicate::ICMP_UGE) &&
1393          "Unexpected predicate type.");
1394   if (Pred0 == ICmpInst::Predicate::ICMP_UGE)
1395     std::swap(ThresholdLowIncl, ThresholdHighExcl);
1396 
1397   // The fold's first precondition: C2 s>= ThresholdLow
1398   auto *Precond1 = ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_SGE, C2,
1399                                          ThresholdLowIncl);
1400   if (!match(Precond1, m_One()))
1401     return nullptr;
1402   // The fold's second precondition: C2 s<= ThresholdHigh
1403   auto *Precond2 = ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_SLE, C2,
1404                                          ThresholdHighExcl);
1405   if (!match(Precond2, m_One()))
1406     return nullptr;
1407 
1408   // If we are matching from a truncated input, we need to sext the
1409   // ReplacementLow and ReplacementHigh values. Only do the transform if they
1410   // are free to extend due to being constants.
1411   if (X->getType() != Sel0.getType()) {
1412     Constant *LowC, *HighC;
1413     if (!match(ReplacementLow, m_ImmConstant(LowC)) ||
1414         !match(ReplacementHigh, m_ImmConstant(HighC)))
1415       return nullptr;
1416     ReplacementLow = ConstantExpr::getSExt(LowC, X->getType());
1417     ReplacementHigh = ConstantExpr::getSExt(HighC, X->getType());
1418   }
1419 
1420   // All good, finally emit the new pattern.
1421   Value *ShouldReplaceLow = Builder.CreateICmpSLT(X, ThresholdLowIncl);
1422   Value *ShouldReplaceHigh = Builder.CreateICmpSGE(X, ThresholdHighExcl);
1423   Value *MaybeReplacedLow =
1424       Builder.CreateSelect(ShouldReplaceLow, ReplacementLow, X);
1425 
1426   // Create the final select. If we looked through a truncate above, we will
1427   // need to retruncate the result.
1428   Value *MaybeReplacedHigh = Builder.CreateSelect(
1429       ShouldReplaceHigh, ReplacementHigh, MaybeReplacedLow);
1430   return Builder.CreateTrunc(MaybeReplacedHigh, Sel0.getType());
1431 }
1432 
1433 // If we have
1434 //  %cmp = icmp [canonical predicate] i32 %x, C0
1435 //  %r = select i1 %cmp, i32 %y, i32 C1
1436 // Where C0 != C1 and %x may be different from %y, see if the constant that we
1437 // will have if we flip the strictness of the predicate (i.e. without changing
1438 // the result) is identical to the C1 in the select. If it matches, we can
1439 // change the original comparison to one with the swapped predicate, reuse
1440 // the constant, and swap the arms of the select.
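// A minimal illustrative instance (constants and names assumed): for
//   %cmp = icmp ult i32 %x, 16
//   %r = select i1 %cmp, i32 %y, i32 15
// flipping 'ult 16' to 'ule 15' shows that 15 is already one of the select
// arms, so this can become
//   %cmp.inv = icmp ugt i32 %x, 15
//   %r = select i1 %cmp.inv, i32 15, i32 %y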
1441 static Instruction *
1442 tryToReuseConstantFromSelectInComparison(SelectInst &Sel, ICmpInst &Cmp,
1443                                          InstCombinerImpl &IC) {
1444   ICmpInst::Predicate Pred;
1445   Value *X;
1446   Constant *C0;
1447   if (!match(&Cmp, m_OneUse(m_ICmp(
1448                        Pred, m_Value(X),
1449                        m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C0))))))
1450     return nullptr;
1451 
1452   // If the comparison predicate is non-relational, we won't be able to do anything.
1453   if (ICmpInst::isEquality(Pred))
1454     return nullptr;
1455 
1456   // If the comparison predicate is non-canonical, we certainly won't be able
1457   // to make it canonical; canonicalizeCmpWithConstant() already tried.
1458   if (!InstCombiner::isCanonicalPredicate(Pred))
1459     return nullptr;
1460 
1461   // If the [input] type of the comparison and the select type differ, let's abort
1462   // for now. We could try to compare constants with trunc/[zs]ext though.
1463   if (C0->getType() != Sel.getType())
1464     return nullptr;
1465 
1466   // ULT with 'add' of a constant is canonical. See foldICmpAddConstant().
1467   // FIXME: Are there more magic icmp predicate+constant pairs we must avoid?
1468   //        Or should we just abandon this transform entirely?
1469   if (Pred == CmpInst::ICMP_ULT && match(X, m_Add(m_Value(), m_Constant())))
1470     return nullptr;
1471 
1472 
1473   Value *SelVal0, *SelVal1; // We do not care which one is from where.
1474   match(&Sel, m_Select(m_Value(), m_Value(SelVal0), m_Value(SelVal1)));
1475   // At least one of these values we are selecting between must be a constant
1476   // else we'll never succeed.
1477   if (!match(SelVal0, m_AnyIntegralConstant()) &&
1478       !match(SelVal1, m_AnyIntegralConstant()))
1479     return nullptr;
1480 
1481   // Does this constant C match any of the `select` values?
1482   auto MatchesSelectValue = [SelVal0, SelVal1](Constant *C) {
1483     return C->isElementWiseEqual(SelVal0) || C->isElementWiseEqual(SelVal1);
1484   };
1485 
1486   // If C0 *already* matches true/false value of select, we are done.
1487   if (MatchesSelectValue(C0))
1488     return nullptr;
1489 
1490   // Check the constant we'd have with flipped-strictness predicate.
1491   auto FlippedStrictness =
1492       InstCombiner::getFlippedStrictnessPredicateAndConstant(Pred, C0);
1493   if (!FlippedStrictness)
1494     return nullptr;
1495 
1496   // If said constant doesn't match either, then there is no hope.
1497   if (!MatchesSelectValue(FlippedStrictness->second))
1498     return nullptr;
1499 
1500   // It matched! Let's insert the new comparison just before the select.
1501   InstCombiner::BuilderTy::InsertPointGuard Guard(IC.Builder);
1502   IC.Builder.SetInsertPoint(&Sel);
1503 
1504   Pred = ICmpInst::getSwappedPredicate(Pred); // Yes, swapped.
1505   Value *NewCmp = IC.Builder.CreateICmp(Pred, X, FlippedStrictness->second,
1506                                         Cmp.getName() + ".inv");
1507   IC.replaceOperand(Sel, 0, NewCmp);
1508   Sel.swapValues();
1509   Sel.swapProfMetadata();
1510 
1511   return &Sel;
1512 }
1513 
1514 static Instruction *foldSelectZeroOrOnes(ICmpInst *Cmp, Value *TVal,
1515                                          Value *FVal,
1516                                          InstCombiner::BuilderTy &Builder) {
1517   if (!Cmp->hasOneUse())
1518     return nullptr;
1519 
1520   const APInt *CmpC;
1521   if (!match(Cmp->getOperand(1), m_APIntAllowUndef(CmpC)))
1522     return nullptr;
1523 
1524   // (X u< 2) ? -X : -1 --> sext (X != 0)
1525   Value *X = Cmp->getOperand(0);
1526   if (Cmp->getPredicate() == ICmpInst::ICMP_ULT && *CmpC == 2 &&
1527       match(TVal, m_Neg(m_Specific(X))) && match(FVal, m_AllOnes()))
1528     return new SExtInst(Builder.CreateIsNotNull(X), TVal->getType());
1529 
1530   // (X u> 1) ? -1 : -X --> sext (X != 0)
1531   if (Cmp->getPredicate() == ICmpInst::ICMP_UGT && *CmpC == 1 &&
1532       match(FVal, m_Neg(m_Specific(X))) && match(TVal, m_AllOnes()))
1533     return new SExtInst(Builder.CreateIsNotNull(X), TVal->getType());
1534 
1535   return nullptr;
1536 }
1537 
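// Fold a select between a constant and a binop on the compared value into just
// the binop when the compare restricts the binop's input enough that the binop
// must produce that same constant. A minimal sketch with assumed values:
//   %c = icmp ugt i8 %x, 3
//   %s = lshr i8 %x, 2
//   %r = select i1 %c, i8 %s, i8 0
// Since %x u<= 3 implies (%x >> 2) == 0, %r is always equal to %s.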
1538 static Value *foldSelectInstWithICmpConst(SelectInst &SI, ICmpInst *ICI) {
1539   const APInt *CmpC;
1540   Value *V;
1541   CmpInst::Predicate Pred;
1542   if (!match(ICI, m_ICmp(Pred, m_Value(V), m_APInt(CmpC))))
1543     return nullptr;
1544 
1545   BinaryOperator *BO;
1546   const APInt *C;
1547   CmpInst::Predicate CPred;
1548   if (match(&SI, m_Select(m_Specific(ICI), m_APInt(C), m_BinOp(BO))))
1549     CPred = ICI->getPredicate();
1550   else if (match(&SI, m_Select(m_Specific(ICI), m_BinOp(BO), m_APInt(C))))
1551     CPred = ICI->getInversePredicate();
1552   else
1553     return nullptr;
1554 
1555   const APInt *BinOpC;
1556   if (!match(BO, m_BinOp(m_Specific(V), m_APInt(BinOpC))))
1557     return nullptr;
1558 
1559   ConstantRange R = ConstantRange::makeExactICmpRegion(CPred, *CmpC)
1560                         .binaryOp(BO->getOpcode(), *BinOpC);
1561   if (R == *C) {
1562     BO->dropPoisonGeneratingFlags();
1563     return BO;
1564   }
1565   return nullptr;
1566 }
1567 
1568 /// Visit a SelectInst that has an ICmpInst as its first operand.
1569 Instruction *InstCombinerImpl::foldSelectInstWithICmp(SelectInst &SI,
1570                                                       ICmpInst *ICI) {
1571   if (Instruction *NewSel = foldSelectValueEquivalence(SI, *ICI))
1572     return NewSel;
1573 
1574   if (Instruction *NewSPF = canonicalizeSPF(SI, *ICI, *this))
1575     return NewSPF;
1576 
1577   if (Value *V = foldSelectInstWithICmpConst(SI, ICI))
1578     return replaceInstUsesWith(SI, V);
1579 
1580   if (Value *V = canonicalizeClampLike(SI, *ICI, Builder))
1581     return replaceInstUsesWith(SI, V);
1582 
1583   if (Instruction *NewSel =
1584           tryToReuseConstantFromSelectInComparison(SI, *ICI, *this))
1585     return NewSel;
1586 
1587   bool Changed = adjustMinMax(SI, *ICI);
1588 
1589   if (Value *V = foldSelectICmpAnd(SI, ICI, Builder))
1590     return replaceInstUsesWith(SI, V);
1591 
1592   // NOTE: if we wanted to, this is where to detect integer MIN/MAX
1593   Value *TrueVal = SI.getTrueValue();
1594   Value *FalseVal = SI.getFalseValue();
1595   ICmpInst::Predicate Pred = ICI->getPredicate();
1596   Value *CmpLHS = ICI->getOperand(0);
1597   Value *CmpRHS = ICI->getOperand(1);
1598   if (CmpRHS != CmpLHS && isa<Constant>(CmpRHS)) {
1599     if (CmpLHS == TrueVal && Pred == ICmpInst::ICMP_EQ) {
1600       // Transform (X == C) ? X : Y -> (X == C) ? C : Y
1601       SI.setOperand(1, CmpRHS);
1602       Changed = true;
1603     } else if (CmpLHS == FalseVal && Pred == ICmpInst::ICMP_NE) {
1604       // Transform (X != C) ? Y : X -> (X != C) ? Y : C
1605       SI.setOperand(2, CmpRHS);
1606       Changed = true;
1607     }
1608   }
1609 
1610   // Canonicalize a signbit condition to use zero constant by swapping:
1611   // (CmpLHS > -1) ? TV : FV --> (CmpLHS < 0) ? FV : TV
1612   // To avoid conflicts (infinite loops) with other canonicalizations, this is
1613   // not applied with any constant select arm.
1614   if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes()) &&
1615       !match(TrueVal, m_Constant()) && !match(FalseVal, m_Constant()) &&
1616       ICI->hasOneUse()) {
1617     InstCombiner::BuilderTy::InsertPointGuard Guard(Builder);
1618     Builder.SetInsertPoint(&SI);
1619     Value *IsNeg = Builder.CreateIsNeg(CmpLHS, ICI->getName());
1620     replaceOperand(SI, 0, IsNeg);
1621     SI.swapValues();
1622     SI.swapProfMetadata();
1623     return &SI;
1624   }
1625 
1626   // FIXME: This code is nearly duplicated in InstSimplify. Using/refactoring
1627   // decomposeBitTestICmp() might help.
1628   {
1629     unsigned BitWidth =
1630         DL.getTypeSizeInBits(TrueVal->getType()->getScalarType());
1631     APInt MinSignedValue = APInt::getSignedMinValue(BitWidth);
1632     Value *X;
1633     const APInt *Y, *C;
1634     bool TrueWhenUnset;
1635     bool IsBitTest = false;
1636     if (ICmpInst::isEquality(Pred) &&
1637         match(CmpLHS, m_And(m_Value(X), m_Power2(Y))) &&
1638         match(CmpRHS, m_Zero())) {
1639       IsBitTest = true;
1640       TrueWhenUnset = Pred == ICmpInst::ICMP_EQ;
1641     } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
1642       X = CmpLHS;
1643       Y = &MinSignedValue;
1644       IsBitTest = true;
1645       TrueWhenUnset = false;
1646     } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
1647       X = CmpLHS;
1648       Y = &MinSignedValue;
1649       IsBitTest = true;
1650       TrueWhenUnset = true;
1651     }
1652     if (IsBitTest) {
1653       Value *V = nullptr;
1654       // (X & Y) == 0 ? X : X ^ Y  --> X & ~Y
1655       if (TrueWhenUnset && TrueVal == X &&
1656           match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
1657         V = Builder.CreateAnd(X, ~(*Y));
1658       // (X & Y) != 0 ? X ^ Y : X  --> X & ~Y
1659       else if (!TrueWhenUnset && FalseVal == X &&
1660                match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
1661         V = Builder.CreateAnd(X, ~(*Y));
1662       // (X & Y) == 0 ? X ^ Y : X  --> X | Y
1663       else if (TrueWhenUnset && FalseVal == X &&
1664                match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
1665         V = Builder.CreateOr(X, *Y);
1666       // (X & Y) != 0 ? X : X ^ Y  --> X | Y
1667       else if (!TrueWhenUnset && TrueVal == X &&
1668                match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
1669         V = Builder.CreateOr(X, *Y);
1670 
1671       if (V)
1672         return replaceInstUsesWith(SI, V);
1673     }
1674   }
1675 
1676   if (Instruction *V =
1677           foldSelectICmpAndAnd(SI.getType(), ICI, TrueVal, FalseVal, Builder))
1678     return V;
1679 
1680   if (Instruction *V = foldSelectCtlzToCttz(ICI, TrueVal, FalseVal, Builder))
1681     return V;
1682 
1683   if (Instruction *V = foldSelectZeroOrOnes(ICI, TrueVal, FalseVal, Builder))
1684     return V;
1685 
1686   if (Value *V = foldSelectICmpAndOr(ICI, TrueVal, FalseVal, Builder))
1687     return replaceInstUsesWith(SI, V);
1688 
1689   if (Value *V = foldSelectICmpLshrAshr(ICI, TrueVal, FalseVal, Builder))
1690     return replaceInstUsesWith(SI, V);
1691 
1692   if (Value *V = foldSelectCttzCtlz(ICI, TrueVal, FalseVal, Builder))
1693     return replaceInstUsesWith(SI, V);
1694 
1695   if (Value *V = canonicalizeSaturatedSubtract(ICI, TrueVal, FalseVal, Builder))
1696     return replaceInstUsesWith(SI, V);
1697 
1698   if (Value *V = canonicalizeSaturatedAdd(ICI, TrueVal, FalseVal, Builder))
1699     return replaceInstUsesWith(SI, V);
1700 
1701   return Changed ? &SI : nullptr;
1702 }
1703 
1704 /// SI is a select whose condition is a PHI node (but the two may be in
1705 /// different blocks). See if the true/false values (V) are live in all of the
1706 /// predecessor blocks of the PHI. For example, cases like this can't be mapped:
1707 ///
1708 ///   X = phi [ C1, BB1], [C2, BB2]
1709 ///   Y = add
1710 ///   Z = select X, Y, 0
1711 ///
1712 /// because Y is not live in BB1/BB2.
1713 static bool canSelectOperandBeMappingIntoPredBlock(const Value *V,
1714                                                    const SelectInst &SI) {
1715   // If the value is a non-instruction value like a constant or argument, it
1716   // can always be mapped.
1717   const Instruction *I = dyn_cast<Instruction>(V);
1718   if (!I) return true;
1719 
1720   // If V is a PHI node defined in the same block as the condition PHI, we can
1721   // map the arguments.
1722   const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
1723 
1724   if (const PHINode *VP = dyn_cast<PHINode>(I))
1725     if (VP->getParent() == CondPHI->getParent())
1726       return true;
1727 
1728   // Otherwise, if the PHI and select are defined in the same block and if V is
1729   // defined in a different block, then we can transform it.
1730   if (SI.getParent() == CondPHI->getParent() &&
1731       I->getParent() != CondPHI->getParent())
1732     return true;
1733 
1734   // Otherwise we have a 'hard' case and we can't tell without doing more
1735   // detailed dominator based analysis, punt.
1736   return false;
1737 }
1738 
1739 /// We have an SPF (e.g. a min or max) of an SPF of the form:
1740 ///   SPF2(SPF1(A, B), C)
1741 Instruction *InstCombinerImpl::foldSPFofSPF(Instruction *Inner,
1742                                             SelectPatternFlavor SPF1, Value *A,
1743                                             Value *B, Instruction &Outer,
1744                                             SelectPatternFlavor SPF2,
1745                                             Value *C) {
1746   if (Outer.getType() != Inner->getType())
1747     return nullptr;
1748 
1749   if (C == A || C == B) {
1750     // MAX(MAX(A, B), B) -> MAX(A, B)
1751     // MIN(MIN(a, b), a) -> MIN(a, b)
1752     // TODO: This could be done in instsimplify.
1753     if (SPF1 == SPF2 && SelectPatternResult::isMinOrMax(SPF1))
1754       return replaceInstUsesWith(Outer, Inner);
1755   }
1756 
1757   return nullptr;
1758 }
1759 
1760 /// Turn select C, (X + Y), (X - Y) --> (X + (select C, Y, (-Y))).
1761 /// This is even legal for FP.
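/// A minimal illustrative example (value names assumed):
///   %add = add i32 %x, %y
///   %sub = sub i32 %x, %y
///   %r   = select i1 %c, i32 %add, i32 %sub
/// becomes
///   %y.neg = sub i32 0, %y
///   %y.sel = select i1 %c, i32 %y, i32 %y.neg
///   %r     = add i32 %x, %y.sel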
1762 static Instruction *foldAddSubSelect(SelectInst &SI,
1763                                      InstCombiner::BuilderTy &Builder) {
1764   Value *CondVal = SI.getCondition();
1765   Value *TrueVal = SI.getTrueValue();
1766   Value *FalseVal = SI.getFalseValue();
1767   auto *TI = dyn_cast<Instruction>(TrueVal);
1768   auto *FI = dyn_cast<Instruction>(FalseVal);
1769   if (!TI || !FI || !TI->hasOneUse() || !FI->hasOneUse())
1770     return nullptr;
1771 
1772   Instruction *AddOp = nullptr, *SubOp = nullptr;
1773   if ((TI->getOpcode() == Instruction::Sub &&
1774        FI->getOpcode() == Instruction::Add) ||
1775       (TI->getOpcode() == Instruction::FSub &&
1776        FI->getOpcode() == Instruction::FAdd)) {
1777     AddOp = FI;
1778     SubOp = TI;
1779   } else if ((FI->getOpcode() == Instruction::Sub &&
1780               TI->getOpcode() == Instruction::Add) ||
1781              (FI->getOpcode() == Instruction::FSub &&
1782               TI->getOpcode() == Instruction::FAdd)) {
1783     AddOp = TI;
1784     SubOp = FI;
1785   }
1786 
1787   if (AddOp) {
1788     Value *OtherAddOp = nullptr;
1789     if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
1790       OtherAddOp = AddOp->getOperand(1);
1791     } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
1792       OtherAddOp = AddOp->getOperand(0);
1793     }
1794 
1795     if (OtherAddOp) {
1796       // So at this point we know we have (Y -> OtherAddOp):
1797       //        select C, (add X, Y), (sub X, Z)
1798       Value *NegVal; // Compute -Z
1799       if (SI.getType()->isFPOrFPVectorTy()) {
1800         NegVal = Builder.CreateFNeg(SubOp->getOperand(1));
1801         if (Instruction *NegInst = dyn_cast<Instruction>(NegVal)) {
1802           FastMathFlags Flags = AddOp->getFastMathFlags();
1803           Flags &= SubOp->getFastMathFlags();
1804           NegInst->setFastMathFlags(Flags);
1805         }
1806       } else {
1807         NegVal = Builder.CreateNeg(SubOp->getOperand(1));
1808       }
1809 
1810       Value *NewTrueOp = OtherAddOp;
1811       Value *NewFalseOp = NegVal;
1812       if (AddOp != TI)
1813         std::swap(NewTrueOp, NewFalseOp);
1814       Value *NewSel = Builder.CreateSelect(CondVal, NewTrueOp, NewFalseOp,
1815                                            SI.getName() + ".p", &SI);
1816 
1817       if (SI.getType()->isFPOrFPVectorTy()) {
1818         Instruction *RI =
1819             BinaryOperator::CreateFAdd(SubOp->getOperand(0), NewSel);
1820 
1821         FastMathFlags Flags = AddOp->getFastMathFlags();
1822         Flags &= SubOp->getFastMathFlags();
1823         RI->setFastMathFlags(Flags);
1824         return RI;
1825       } else
1826         return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
1827     }
1828   }
1829   return nullptr;
1830 }
1831 
1832 /// Turn X + Y overflows ? -1 : X + Y -> uadd_sat X, Y
1833 /// And X - Y overflows ? 0 : X - Y -> usub_sat X, Y
1834 /// Along with a number of patterns similar to:
1835 /// X + Y overflows ? (X < 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1836 /// X - Y overflows ? (X > 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
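/// For instance, the unsigned-add case in IR (illustrative):
///   %agg = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
///   %ov  = extractvalue { i32, i1 } %agg, 1
///   %sum = extractvalue { i32, i1 } %agg, 0
///   %r   = select i1 %ov, i32 -1, i32 %sum
/// becomes
///   %r = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)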
1837 static Instruction *
1838 foldOverflowingAddSubSelect(SelectInst &SI, InstCombiner::BuilderTy &Builder) {
1839   Value *CondVal = SI.getCondition();
1840   Value *TrueVal = SI.getTrueValue();
1841   Value *FalseVal = SI.getFalseValue();
1842 
1843   WithOverflowInst *II;
1844   if (!match(CondVal, m_ExtractValue<1>(m_WithOverflowInst(II))) ||
1845       !match(FalseVal, m_ExtractValue<0>(m_Specific(II))))
1846     return nullptr;
1847 
1848   Value *X = II->getLHS();
1849   Value *Y = II->getRHS();
1850 
1851   auto IsSignedSaturateLimit = [&](Value *Limit, bool IsAdd) {
1852     Type *Ty = Limit->getType();
1853 
1854     ICmpInst::Predicate Pred;
1855     Value *TrueVal, *FalseVal, *Op;
1856     const APInt *C;
1857     if (!match(Limit, m_Select(m_ICmp(Pred, m_Value(Op), m_APInt(C)),
1858                                m_Value(TrueVal), m_Value(FalseVal))))
1859       return false;
1860 
1861     auto IsZeroOrOne = [](const APInt &C) { return C.isZero() || C.isOne(); };
1862     auto IsMinMax = [&](Value *Min, Value *Max) {
1863       APInt MinVal = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
1864       APInt MaxVal = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
1865       return match(Min, m_SpecificInt(MinVal)) &&
1866              match(Max, m_SpecificInt(MaxVal));
1867     };
1868 
1869     if (Op != X && Op != Y)
1870       return false;
1871 
1872     if (IsAdd) {
1873       // X + Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1874       // X + Y overflows ? (X <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1875       // X + Y overflows ? (Y <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1876       // X + Y overflows ? (Y <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1877       if (Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C) &&
1878           IsMinMax(TrueVal, FalseVal))
1879         return true;
1880       // X + Y overflows ? (X >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1881       // X + Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1882       // X + Y overflows ? (Y >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1883       // X + Y overflows ? (Y >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1884       if (Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 1) &&
1885           IsMinMax(FalseVal, TrueVal))
1886         return true;
1887     } else {
1888       // X - Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1889       // X - Y overflows ? (X <s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1890       if (Op == X && Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C + 1) &&
1891           IsMinMax(TrueVal, FalseVal))
1892         return true;
1893       // X - Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1894       // X - Y overflows ? (X >s -2 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1895       if (Op == X && Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 2) &&
1896           IsMinMax(FalseVal, TrueVal))
1897         return true;
1898       // X - Y overflows ? (Y <s 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1899       // X - Y overflows ? (Y <s 1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1900       if (Op == Y && Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C) &&
1901           IsMinMax(FalseVal, TrueVal))
1902         return true;
1903       // X - Y overflows ? (Y >s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1904       // X - Y overflows ? (Y >s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1905       if (Op == Y && Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 1) &&
1906           IsMinMax(TrueVal, FalseVal))
1907         return true;
1908     }
1909 
1910     return false;
1911   };
1912 
1913   Intrinsic::ID NewIntrinsicID;
1914   if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow &&
1915       match(TrueVal, m_AllOnes()))
1916     // X + Y overflows ? -1 : X + Y -> uadd_sat X, Y
1917     NewIntrinsicID = Intrinsic::uadd_sat;
1918   else if (II->getIntrinsicID() == Intrinsic::usub_with_overflow &&
1919            match(TrueVal, m_Zero()))
1920     // X - Y overflows ? 0 : X - Y -> usub_sat X, Y
1921     NewIntrinsicID = Intrinsic::usub_sat;
1922   else if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow &&
1923            IsSignedSaturateLimit(TrueVal, /*IsAdd=*/true))
1924     // X + Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1925     // X + Y overflows ? (X <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1926     // X + Y overflows ? (X >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1927     // X + Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1928     // X + Y overflows ? (Y <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1929     // X + Y overflows ? (Y <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
1930     // X + Y overflows ? (Y >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1931     // X + Y overflows ? (Y >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
1932     NewIntrinsicID = Intrinsic::sadd_sat;
1933   else if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow &&
1934            IsSignedSaturateLimit(TrueVal, /*IsAdd=*/false))
1935     // X - Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1936     // X - Y overflows ? (X <s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1937     // X - Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1938     // X - Y overflows ? (X >s -2 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1939     // X - Y overflows ? (Y <s 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1940     // X - Y overflows ? (Y <s 1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
1941     // X - Y overflows ? (Y >s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1942     // X - Y overflows ? (Y >s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
1943     NewIntrinsicID = Intrinsic::ssub_sat;
1944   else
1945     return nullptr;
1946 
1947   Function *F =
1948       Intrinsic::getDeclaration(SI.getModule(), NewIntrinsicID, SI.getType());
1949   return CallInst::Create(F, {X, Y});
1950 }
1951 
1952 Instruction *InstCombinerImpl::foldSelectExtConst(SelectInst &Sel) {
1953   Constant *C;
1954   if (!match(Sel.getTrueValue(), m_Constant(C)) &&
1955       !match(Sel.getFalseValue(), m_Constant(C)))
1956     return nullptr;
1957 
1958   Instruction *ExtInst;
1959   if (!match(Sel.getTrueValue(), m_Instruction(ExtInst)) &&
1960       !match(Sel.getFalseValue(), m_Instruction(ExtInst)))
1961     return nullptr;
1962 
1963   auto ExtOpcode = ExtInst->getOpcode();
1964   if (ExtOpcode != Instruction::ZExt && ExtOpcode != Instruction::SExt)
1965     return nullptr;
1966 
1967   // If we are extending from a boolean type or if we can create a select that
1968   // has the same size operands as its condition, try to narrow the select.
1969   Value *X = ExtInst->getOperand(0);
1970   Type *SmallType = X->getType();
1971   Value *Cond = Sel.getCondition();
1972   auto *Cmp = dyn_cast<CmpInst>(Cond);
1973   if (!SmallType->isIntOrIntVectorTy(1) &&
1974       (!Cmp || Cmp->getOperand(0)->getType() != SmallType))
1975     return nullptr;
1976 
1977   // If the constant is the same after truncation to the smaller type and
1978   // extension to the original type, we can narrow the select.
1979   Type *SelType = Sel.getType();
1980   Constant *TruncC = ConstantExpr::getTrunc(C, SmallType);
1981   Constant *ExtC = ConstantExpr::getCast(ExtOpcode, TruncC, SelType);
1982   if (ExtC == C && ExtInst->hasOneUse()) {
1983     Value *TruncCVal = cast<Value>(TruncC);
1984     if (ExtInst == Sel.getFalseValue())
1985       std::swap(X, TruncCVal);
1986 
1987     // select Cond, (ext X), C --> ext(select Cond, X, C')
1988     // select Cond, C, (ext X) --> ext(select Cond, C', X)
1989     Value *NewSel = Builder.CreateSelect(Cond, X, TruncCVal, "narrow", &Sel);
1990     return CastInst::Create(Instruction::CastOps(ExtOpcode), NewSel, SelType);
1991   }
1992 
1993   // If one arm of the select is the extend of the condition, replace that arm
1994   // with the extension of the appropriate known bool value.
1995   if (Cond == X) {
1996     if (ExtInst == Sel.getTrueValue()) {
1997       // select X, (sext X), C --> select X, -1, C
1998       // select X, (zext X), C --> select X,  1, C
1999       Constant *One = ConstantInt::getTrue(SmallType);
2000       Constant *AllOnesOrOne = ConstantExpr::getCast(ExtOpcode, One, SelType);
2001       return SelectInst::Create(Cond, AllOnesOrOne, C, "", nullptr, &Sel);
2002     } else {
2003       // select X, C, (sext X) --> select X, C, 0
2004       // select X, C, (zext X) --> select X, C, 0
2005       Constant *Zero = ConstantInt::getNullValue(SelType);
2006       return SelectInst::Create(Cond, C, Zero, "", nullptr, &Sel);
2007     }
2008   }
2009 
2010   return nullptr;
2011 }
2012 
2013 /// Try to transform a vector select with a constant condition vector into a
2014 /// shuffle for easier combining with other shuffles and insert/extract.
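/// A small illustrative case:
///   select <2 x i1> <i1 true, i1 false>, <2 x i8> %a, <2 x i8> %b
/// becomes
///   shufflevector <2 x i8> %a, <2 x i8> %b, <2 x i32> <i32 0, i32 3>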
2015 static Instruction *canonicalizeSelectToShuffle(SelectInst &SI) {
2016   Value *CondVal = SI.getCondition();
2017   Constant *CondC;
2018   auto *CondValTy = dyn_cast<FixedVectorType>(CondVal->getType());
2019   if (!CondValTy || !match(CondVal, m_Constant(CondC)))
2020     return nullptr;
2021 
2022   unsigned NumElts = CondValTy->getNumElements();
2023   SmallVector<int, 16> Mask;
2024   Mask.reserve(NumElts);
2025   for (unsigned i = 0; i != NumElts; ++i) {
2026     Constant *Elt = CondC->getAggregateElement(i);
2027     if (!Elt)
2028       return nullptr;
2029 
2030     if (Elt->isOneValue()) {
2031       // If the select condition element is true, choose from the 1st vector.
2032       Mask.push_back(i);
2033     } else if (Elt->isNullValue()) {
2034       // If the select condition element is false, choose from the 2nd vector.
2035       Mask.push_back(i + NumElts);
2036     } else if (isa<UndefValue>(Elt)) {
2037       // Undef in a select condition (choose one of the operands) does not mean
2038       // the same thing as undef in a shuffle mask (any value is acceptable), so
2039       // give up.
2040       return nullptr;
2041     } else {
2042       // Bail out on a constant expression.
2043       return nullptr;
2044     }
2045   }
2046 
2047   return new ShuffleVectorInst(SI.getTrueValue(), SI.getFalseValue(), Mask);
2048 }
2049 
2050 /// If we have a select of vectors with a scalar condition, try to convert that
2051 /// to a vector select by splatting the condition. A splat may get folded with
2052 /// other operations in IR and having all operands of a select be vector types
2053 /// is likely better for vector codegen.
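/// A minimal illustrative case (names assumed; the extract must be single-use):
///   %c = extractelement <4 x i1> %cv, i64 0
///   %r = select i1 %c, <4 x i32> %t, <4 x i32> %f
/// is rewritten so the condition is a splat of %c:
///   %r = select <4 x i1> %c.splat, <4 x i32> %t, <4 x i32> %f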
2054 static Instruction *canonicalizeScalarSelectOfVecs(SelectInst &Sel,
2055                                                    InstCombinerImpl &IC) {
2056   auto *Ty = dyn_cast<VectorType>(Sel.getType());
2057   if (!Ty)
2058     return nullptr;
2059 
2060   // We can replace a single-use extract with constant index.
2061   Value *Cond = Sel.getCondition();
2062   if (!match(Cond, m_OneUse(m_ExtractElt(m_Value(), m_ConstantInt()))))
2063     return nullptr;
2064 
2065   // select (extelt V, Index), T, F --> select (splat V, Index), T, F
2066   // Splatting the extracted condition reduces code (we could directly create a
2067   // splat shuffle of the source vector to eliminate the intermediate step).
2068   return IC.replaceOperand(
2069       Sel, 0, IC.Builder.CreateVectorSplat(Ty->getElementCount(), Cond));
2070 }
2071 
2072 /// Reuse bitcasted operands between a compare and select:
2073 /// select (cmp (bitcast C), (bitcast D)), (bitcast' C), (bitcast' D) -->
2074 /// bitcast (select (cmp (bitcast C), (bitcast D)), (bitcast C), (bitcast D))
2075 static Instruction *foldSelectCmpBitcasts(SelectInst &Sel,
2076                                           InstCombiner::BuilderTy &Builder) {
2077   Value *Cond = Sel.getCondition();
2078   Value *TVal = Sel.getTrueValue();
2079   Value *FVal = Sel.getFalseValue();
2080 
2081   CmpInst::Predicate Pred;
2082   Value *A, *B;
2083   if (!match(Cond, m_Cmp(Pred, m_Value(A), m_Value(B))))
2084     return nullptr;
2085 
2086   // The select condition is a compare instruction. If the select's true/false
2087   // values are already the same as the compare operands, there's nothing to do.
2088   if (TVal == A || TVal == B || FVal == A || FVal == B)
2089     return nullptr;
2090 
2091   Value *C, *D;
2092   if (!match(A, m_BitCast(m_Value(C))) || !match(B, m_BitCast(m_Value(D))))
2093     return nullptr;
2094 
2095   // select (cmp (bitcast C), (bitcast D)), (bitcast TSrc), (bitcast FSrc)
2096   Value *TSrc, *FSrc;
2097   if (!match(TVal, m_BitCast(m_Value(TSrc))) ||
2098       !match(FVal, m_BitCast(m_Value(FSrc))))
2099     return nullptr;
2100 
2101   // If the select true/false values are *different bitcasts* of the same source
2102   // operands, make the select operands the same as the compare operands and
2103   // cast the result. This is the canonical select form for min/max.
2104   Value *NewSel;
2105   if (TSrc == C && FSrc == D) {
2106     // select (cmp (bitcast C), (bitcast D)), (bitcast' C), (bitcast' D) -->
2107     // bitcast (select (cmp A, B), A, B)
2108     NewSel = Builder.CreateSelect(Cond, A, B, "", &Sel);
2109   } else if (TSrc == D && FSrc == C) {
2110     // select (cmp (bitcast C), (bitcast D)), (bitcast' D), (bitcast' C) -->
2111     // bitcast (select (cmp A, B), B, A)
2112     NewSel = Builder.CreateSelect(Cond, B, A, "", &Sel);
2113   } else {
2114     return nullptr;
2115   }
2116   return CastInst::CreateBitOrPointerCast(NewSel, Sel.getType());
2117 }
2118 
2119 /// Try to eliminate select instructions that test the returned flag of cmpxchg
2120 /// instructions.
2121 ///
2122 /// If a select instruction tests the returned flag of a cmpxchg instruction and
2123 /// selects between the returned value of the cmpxchg instruction and its compare
2124 /// operand, the result of the select will always be equal to its false value.
2125 /// For example:
2126 ///
2127 ///   %0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
2128 ///   %1 = extractvalue { i64, i1 } %0, 1
2129 ///   %2 = extractvalue { i64, i1 } %0, 0
2130 ///   %3 = select i1 %1, i64 %compare, i64 %2
2131 ///   ret i64 %3
2132 ///
2133 /// The returned value of the cmpxchg instruction (%2) is the original value
2134 /// located at %ptr prior to any update. If the cmpxchg operation succeeds, %2
2135 /// must have been equal to %compare. Thus, the result of the select is always
2136 /// equal to %2, and the code can be simplified to:
2137 ///
2138 ///   %0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
2139 ///   %1 = extractvalue { i64, i1 } %0, 0
2140 ///   ret i64 %1
2141 ///
2142 static Value *foldSelectCmpXchg(SelectInst &SI) {
2143   // A helper that determines if V is an extractvalue instruction whose
2144   // aggregate operand is a cmpxchg instruction and whose single index is equal
2145   // to I. If such conditions are true, the helper returns the cmpxchg
2146   // instruction; otherwise, a nullptr is returned.
2147   auto isExtractFromCmpXchg = [](Value *V, unsigned I) -> AtomicCmpXchgInst * {
2148     auto *Extract = dyn_cast<ExtractValueInst>(V);
2149     if (!Extract)
2150       return nullptr;
2151     if (Extract->getIndices()[0] != I)
2152       return nullptr;
2153     return dyn_cast<AtomicCmpXchgInst>(Extract->getAggregateOperand());
2154   };
2155 
2156   // If the select has a single user, and this user is a select instruction that
2157   // we can simplify, skip the cmpxchg simplification for now.
2158   if (SI.hasOneUse())
2159     if (auto *Select = dyn_cast<SelectInst>(SI.user_back()))
2160       if (Select->getCondition() == SI.getCondition())
2161         if (Select->getFalseValue() == SI.getTrueValue() ||
2162             Select->getTrueValue() == SI.getFalseValue())
2163           return nullptr;
2164 
2165   // Ensure the select condition is the returned flag of a cmpxchg instruction.
2166   auto *CmpXchg = isExtractFromCmpXchg(SI.getCondition(), 1);
2167   if (!CmpXchg)
2168     return nullptr;
2169 
2170   // Check the true value case: The true value of the select is the returned
2171   // value of the same cmpxchg used by the condition, and the false value is the
2172   // cmpxchg instruction's compare operand.
2173   if (auto *X = isExtractFromCmpXchg(SI.getTrueValue(), 0))
2174     if (X == CmpXchg && X->getCompareOperand() == SI.getFalseValue())
2175       return SI.getFalseValue();
2176 
2177   // Check the false value case: The false value of the select is the returned
2178   // value of the same cmpxchg used by the condition, and the true value is the
2179   // cmpxchg instruction's compare operand.
2180   if (auto *X = isExtractFromCmpXchg(SI.getFalseValue(), 0))
2181     if (X == CmpXchg && X->getCompareOperand() == SI.getTrueValue())
2182       return SI.getFalseValue();
2183 
2184   return nullptr;
2185 }
2186 
2187 /// Try to reduce a funnel/rotate pattern that includes a compare and select
2188 /// into a funnel shift intrinsic. Example:
2189 /// rotl32(a, b) --> (b == 0 ? a : ((a >> (32 - b)) | (a << b)))
2190 ///              --> call llvm.fshl.i32(a, a, b)
2191 /// fshl32(a, b, c) --> (c == 0 ? a : ((b >> (32 - c)) | (a << c)))
2192 ///                 --> call llvm.fshl.i32(a, b, c)
2193 /// fshr32(a, b, c) --> (c == 0 ? b : ((a >> (32 - c)) | (b << c)))
2194 ///                 --> call llvm.fshr.i32(a, b, c)
2195 static Instruction *foldSelectFunnelShift(SelectInst &Sel,
2196                                           InstCombiner::BuilderTy &Builder) {
2197   // This must be a power-of-2 type for a bitmasking transform to be valid.
2198   unsigned Width = Sel.getType()->getScalarSizeInBits();
2199   if (!isPowerOf2_32(Width))
2200     return nullptr;
2201 
2202   BinaryOperator *Or0, *Or1;
2203   if (!match(Sel.getFalseValue(), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
2204     return nullptr;
2205 
2206   Value *SV0, *SV1, *SA0, *SA1;
2207   if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(SV0),
2208                                           m_ZExtOrSelf(m_Value(SA0))))) ||
2209       !match(Or1, m_OneUse(m_LogicalShift(m_Value(SV1),
2210                                           m_ZExtOrSelf(m_Value(SA1))))) ||
2211       Or0->getOpcode() == Or1->getOpcode())
2212     return nullptr;
2213 
2214   // Canonicalize to or(shl(SV0, SA0), lshr(SV1, SA1)).
2215   if (Or0->getOpcode() == BinaryOperator::LShr) {
2216     std::swap(Or0, Or1);
2217     std::swap(SV0, SV1);
2218     std::swap(SA0, SA1);
2219   }
2220   assert(Or0->getOpcode() == BinaryOperator::Shl &&
2221          Or1->getOpcode() == BinaryOperator::LShr &&
2222          "Illegal or(shift,shift) pair");
2223 
2224   // Check the shift amounts to see if they are an opposite pair.
2225   Value *ShAmt;
2226   if (match(SA1, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA0)))))
2227     ShAmt = SA0;
2228   else if (match(SA0, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA1)))))
2229     ShAmt = SA1;
2230   else
2231     return nullptr;
2232 
2233   // We should now have this pattern:
2234   // select ?, TVal, (or (shl SV0, SA0), (lshr SV1, SA1))
2235   // The false value of the select must be a funnel-shift of the true value:
2236   // IsFshl -> TVal must be SV0, else TVal must be SV1.
2237   bool IsFshl = (ShAmt == SA0);
2238   Value *TVal = Sel.getTrueValue();
2239   if ((IsFshl && TVal != SV0) || (!IsFshl && TVal != SV1))
2240     return nullptr;
2241 
2242   // Finally, see if the select is filtering out a shift-by-zero.
2243   Value *Cond = Sel.getCondition();
2244   ICmpInst::Predicate Pred;
2245   if (!match(Cond, m_OneUse(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()))) ||
2246       Pred != ICmpInst::ICMP_EQ)
2247     return nullptr;
2248 
2249   // If this is not a rotate then the select was blocking poison from the
2250   // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
2251   if (SV0 != SV1) {
2252     if (IsFshl && !llvm::isGuaranteedNotToBePoison(SV1))
2253       SV1 = Builder.CreateFreeze(SV1);
2254     else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(SV0))
2255       SV0 = Builder.CreateFreeze(SV0);
2256   }
2257 
2258   // This is a funnel/rotate that avoids shift-by-bitwidth UB in a suboptimal way.
2259   // Convert to funnel shift intrinsic.
2260   Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
2261   Function *F = Intrinsic::getDeclaration(Sel.getModule(), IID, Sel.getType());
2262   ShAmt = Builder.CreateZExt(ShAmt, Sel.getType());
2263   return CallInst::Create(F, { SV0, SV1, ShAmt });
2264 }
2265 
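/// Fold a sign-bit test that selects between a constant and its negation into
/// a copysign intrinsic. A minimal sketch with assumed constants:
///   %i = bitcast float %x to i32
///   %c = icmp slt i32 %i, 0
///   %r = select i1 %c, float -42.0, float 42.0
/// becomes
///   %r = call float @llvm.copysign.f32(float 42.0, float %x)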
2266 static Instruction *foldSelectToCopysign(SelectInst &Sel,
2267                                          InstCombiner::BuilderTy &Builder) {
2268   Value *Cond = Sel.getCondition();
2269   Value *TVal = Sel.getTrueValue();
2270   Value *FVal = Sel.getFalseValue();
2271   Type *SelType = Sel.getType();
2272 
2273   // Match select ?, TC, FC where the constants are equal but negated.
2274   // TODO: Generalize to handle a negated variable operand?
2275   const APFloat *TC, *FC;
2276   if (!match(TVal, m_APFloatAllowUndef(TC)) ||
2277       !match(FVal, m_APFloatAllowUndef(FC)) ||
2278       !abs(*TC).bitwiseIsEqual(abs(*FC)))
2279     return nullptr;
2280 
2281   assert(TC != FC && "Expected equal select arms to simplify");
2282 
2283   Value *X;
2284   const APInt *C;
2285   bool IsTrueIfSignSet;
2286   ICmpInst::Predicate Pred;
2287   if (!match(Cond, m_OneUse(m_ICmp(Pred, m_BitCast(m_Value(X)), m_APInt(C)))) ||
2288       !InstCombiner::isSignBitCheck(Pred, *C, IsTrueIfSignSet) ||
2289       X->getType() != SelType)
2290     return nullptr;
2291 
2292   // If needed, negate the value that will be the sign argument of the copysign:
2293   // (bitcast X) <  0 ? -TC :  TC --> copysign(TC,  X)
2294   // (bitcast X) <  0 ?  TC : -TC --> copysign(TC, -X)
2295   // (bitcast X) >= 0 ? -TC :  TC --> copysign(TC, -X)
2296   // (bitcast X) >= 0 ?  TC : -TC --> copysign(TC,  X)
2297   // Note: FMF from the select cannot be propagated to the new instructions.
2298   if (IsTrueIfSignSet ^ TC->isNegative())
2299     X = Builder.CreateFNeg(X);
2300 
2301   // Canonicalize the magnitude argument as the positive constant since we do
2302   // not care about its sign.
2303   Value *MagArg = ConstantFP::get(SelType, abs(*TC));
2304   Function *F = Intrinsic::getDeclaration(Sel.getModule(), Intrinsic::copysign,
2305                                           Sel.getType());
2306   return CallInst::Create(F, { MagArg, X });
2307 }
2308 
2309 Instruction *InstCombinerImpl::foldVectorSelect(SelectInst &Sel) {
2310   auto *VecTy = dyn_cast<FixedVectorType>(Sel.getType());
2311   if (!VecTy)
2312     return nullptr;
2313 
2314   unsigned NumElts = VecTy->getNumElements();
2315   APInt UndefElts(NumElts, 0);
2316   APInt AllOnesEltMask(APInt::getAllOnes(NumElts));
2317   if (Value *V = SimplifyDemandedVectorElts(&Sel, AllOnesEltMask, UndefElts)) {
2318     if (V != &Sel)
2319       return replaceInstUsesWith(Sel, V);
2320     return &Sel;
2321   }
2322 
2323   // A select of a "select shuffle" with a common operand can be rearranged
2324   // to select followed by "select shuffle". Because of poison, this only works
2325   // in the case of a shuffle with no undefined mask elements.
2326   Value *Cond = Sel.getCondition();
2327   Value *TVal = Sel.getTrueValue();
2328   Value *FVal = Sel.getFalseValue();
2329   Value *X, *Y;
2330   ArrayRef<int> Mask;
2331   if (match(TVal, m_OneUse(m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask)))) &&
2332       !is_contained(Mask, UndefMaskElem) &&
2333       cast<ShuffleVectorInst>(TVal)->isSelect()) {
2334     if (X == FVal) {
2335       // select Cond, (shuf_sel X, Y), X --> shuf_sel X, (select Cond, Y, X)
2336       Value *NewSel = Builder.CreateSelect(Cond, Y, X, "sel", &Sel);
2337       return new ShuffleVectorInst(X, NewSel, Mask);
2338     }
2339     if (Y == FVal) {
2340       // select Cond, (shuf_sel X, Y), Y --> shuf_sel (select Cond, X, Y), Y
2341       Value *NewSel = Builder.CreateSelect(Cond, X, Y, "sel", &Sel);
2342       return new ShuffleVectorInst(NewSel, Y, Mask);
2343     }
2344   }
2345   if (match(FVal, m_OneUse(m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask)))) &&
2346       !is_contained(Mask, UndefMaskElem) &&
2347       cast<ShuffleVectorInst>(FVal)->isSelect()) {
2348     if (X == TVal) {
2349       // select Cond, X, (shuf_sel X, Y) --> shuf_sel X, (select Cond, X, Y)
2350       Value *NewSel = Builder.CreateSelect(Cond, X, Y, "sel", &Sel);
2351       return new ShuffleVectorInst(X, NewSel, Mask);
2352     }
2353     if (Y == TVal) {
2354       // select Cond, Y, (shuf_sel X, Y) --> shuf_sel (select Cond, Y, X), Y
2355       Value *NewSel = Builder.CreateSelect(Cond, Y, X, "sel", &Sel);
2356       return new ShuffleVectorInst(NewSel, Y, Mask);
2357     }
2358   }
2359 
2360   return nullptr;
2361 }
2362 
2363 static Instruction *foldSelectToPhiImpl(SelectInst &Sel, BasicBlock *BB,
2364                                         const DominatorTree &DT,
2365                                         InstCombiner::BuilderTy &Builder) {
2366   // Find the block's immediate dominator that ends with a conditional branch
2367   // that matches select's condition (maybe inverted).
2368   auto *IDomNode = DT[BB]->getIDom();
2369   if (!IDomNode)
2370     return nullptr;
2371   BasicBlock *IDom = IDomNode->getBlock();
2372 
2373   Value *Cond = Sel.getCondition();
2374   Value *IfTrue, *IfFalse;
2375   BasicBlock *TrueSucc, *FalseSucc;
2376   if (match(IDom->getTerminator(),
2377             m_Br(m_Specific(Cond), m_BasicBlock(TrueSucc),
2378                  m_BasicBlock(FalseSucc)))) {
2379     IfTrue = Sel.getTrueValue();
2380     IfFalse = Sel.getFalseValue();
2381   } else if (match(IDom->getTerminator(),
2382                    m_Br(m_Not(m_Specific(Cond)), m_BasicBlock(TrueSucc),
2383                         m_BasicBlock(FalseSucc)))) {
2384     IfTrue = Sel.getFalseValue();
2385     IfFalse = Sel.getTrueValue();
2386   } else
2387     return nullptr;
2388 
2389   // Make sure the branches are actually different.
2390   if (TrueSucc == FalseSucc)
2391     return nullptr;
2392 
2393   // We want to replace select %cond, %a, %b with a phi that takes value %a
2394   // for all incoming edges that are dominated by condition `%cond == true`,
2395   // and value %b for edges dominated by condition `%cond == false`. If %a
2396   // or %b are also phis from the same basic block, we can go further and take
2397   // their incoming values from the corresponding blocks.
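  // An illustrative shape (names assumed; %then and %else branch straight to
  // %bb and the incoming values are available in the predecessors):
  //   idom:
  //     br i1 %cond, label %then, label %else
  //   bb:
  //     %r = select i1 %cond, i32 %a, i32 %b
  // can be rewritten in %bb as
  //     %r = phi i32 [ %a, %then ], [ %b, %else ]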
2398   BasicBlockEdge TrueEdge(IDom, TrueSucc);
2399   BasicBlockEdge FalseEdge(IDom, FalseSucc);
2400   DenseMap<BasicBlock *, Value *> Inputs;
2401   for (auto *Pred : predecessors(BB)) {
2402     // Check implication.
2403     BasicBlockEdge Incoming(Pred, BB);
2404     if (DT.dominates(TrueEdge, Incoming))
2405       Inputs[Pred] = IfTrue->DoPHITranslation(BB, Pred);
2406     else if (DT.dominates(FalseEdge, Incoming))
2407       Inputs[Pred] = IfFalse->DoPHITranslation(BB, Pred);
2408     else
2409       return nullptr;
2410     // Check availability.
2411     if (auto *Insn = dyn_cast<Instruction>(Inputs[Pred]))
2412       if (!DT.dominates(Insn, Pred->getTerminator()))
2413         return nullptr;
2414   }
2415 
2416   Builder.SetInsertPoint(&*BB->begin());
2417   auto *PN = Builder.CreatePHI(Sel.getType(), Inputs.size());
2418   for (auto *Pred : predecessors(BB))
2419     PN->addIncoming(Inputs[Pred], Pred);
2420   PN->takeName(&Sel);
2421   return PN;
2422 }
2423 
2424 static Instruction *foldSelectToPhi(SelectInst &Sel, const DominatorTree &DT,
2425                                     InstCombiner::BuilderTy &Builder) {
2426   // Try to replace this select with Phi in one of these blocks.
2427   SmallSetVector<BasicBlock *, 4> CandidateBlocks;
2428   CandidateBlocks.insert(Sel.getParent());
2429   for (Value *V : Sel.operands())
2430     if (auto *I = dyn_cast<Instruction>(V))
2431       CandidateBlocks.insert(I->getParent());
2432 
2433   for (BasicBlock *BB : CandidateBlocks)
2434     if (auto *PN = foldSelectToPhiImpl(Sel, BB, DT, Builder))
2435       return PN;
2436   return nullptr;
2437 }
2438 
2439 static Value *foldSelectWithFrozenICmp(SelectInst &Sel, InstCombiner::BuilderTy &Builder) {
2440   FreezeInst *FI = dyn_cast<FreezeInst>(Sel.getCondition());
2441   if (!FI)
2442     return nullptr;
2443 
2444   Value *Cond = FI->getOperand(0);
2445   Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue();
2446 
2447   //   select (freeze(x == y)), x, y --> y
2448   //   select (freeze(x != y)), x, y --> x
2449   // The freeze should only be used by this select. Otherwise, remaining uses of
2450   // the freeze can observe a contradictory value.
2451   //   c = freeze(x == y)   ; Let's assume that y = poison & x = 42; c is 0 or 1
2452   //   a = select c, x, y   ;
2453   //   f(a, c)              ; f(poison, 1) cannot happen, but if a is folded
2454   //                        ; to y, this can happen.
2455   CmpInst::Predicate Pred;
2456   if (FI->hasOneUse() &&
2457       match(Cond, m_c_ICmp(Pred, m_Specific(TrueVal), m_Specific(FalseVal))) &&
2458       (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)) {
2459     return Pred == ICmpInst::ICMP_EQ ? FalseVal : TrueVal;
2460   }
2461 
2462   return nullptr;
2463 }
2464 
2465 Instruction *InstCombinerImpl::foldAndOrOfSelectUsingImpliedCond(Value *Op,
2466                                                                  SelectInst &SI,
2467                                                                  bool IsAnd) {
2468   Value *CondVal = SI.getCondition();
2469   Value *A = SI.getTrueValue();
2470   Value *B = SI.getFalseValue();
2471 
2472   assert(Op->getType()->isIntOrIntVectorTy(1) &&
2473          "Op must be either i1 or vector of i1.");
2474 
2475   Optional<bool> Res = isImpliedCondition(Op, CondVal, DL, IsAnd);
2476   if (!Res)
2477     return nullptr;
2478 
2479   Value *Zero = Constant::getNullValue(A->getType());
2480   Value *One = Constant::getAllOnesValue(A->getType());
2481 
2482   if (*Res == true) {
2483     if (IsAnd)
2484       // select op, (select cond, A, B), false => select op, A, false
2485       // and    op, (select cond, A, B)        => select op, A, false
2486       //   if op = true implies condval = true.
2487       return SelectInst::Create(Op, A, Zero);
2488     else
2489       // select op, true, (select cond, A, B) => select op, true, A
2490       // or     op, (select cond, A, B)       => select op, true, A
2491       //   if op = false implies condval = true.
2492       return SelectInst::Create(Op, One, A);
2493   } else {
2494     if (IsAnd)
2495       // select op, (select cond, A, B), false => select op, B, false
2496       // and    op, (select cond, A, B)        => select op, B, false
2497       //   if op = true implies condval = false.
2498       return SelectInst::Create(Op, B, Zero);
2499     else
2500       // select op, true, (select cond, A, B) => select op, true, B
2501       // or     op, (select cond, A, B)       => select op, true, B
2502       //   if op = false implies condval = false.
2503       return SelectInst::Create(Op, One, B);
2504   }
2505 }
2506 
2507 // Canonicalize select with fcmp to fabs(). -0.0 makes this tricky. We need
2508 // fast-math-flags (nsz) or fsub with +0.0 (not fneg) for this to work.
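// One illustrative instance of the fsub-with-+0.0 form (names assumed):
//   %c = fcmp ole float %x, 0.0
//   %s = fsub float 0.0, %x
//   %r = select i1 %c, float %s, float %x
// becomes
//   %r = call float @llvm.fabs.f32(float %x)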
2509 static Instruction *foldSelectWithFCmpToFabs(SelectInst &SI,
2510                                              InstCombinerImpl &IC) {
2511   Value *CondVal = SI.getCondition();
2512 
2513   for (bool Swap : {false, true}) {
2514     Value *TrueVal = SI.getTrueValue();
2515     Value *X = SI.getFalseValue();
2516     CmpInst::Predicate Pred;
2517 
2518     if (Swap)
2519       std::swap(TrueVal, X);
2520 
2521     if (!match(CondVal, m_FCmp(Pred, m_Specific(X), m_AnyZeroFP())))
2522       continue;
2523 
2524     // fold (X <= +/-0.0) ? (0.0 - X) : X to fabs(X), when 'Swap' is false
2525     // fold (X >  +/-0.0) ? X : (0.0 - X) to fabs(X), when 'Swap' is true
2526     if (match(TrueVal, m_FSub(m_PosZeroFP(), m_Specific(X)))) {
2527       if (!Swap && (Pred == FCmpInst::FCMP_OLE || Pred == FCmpInst::FCMP_ULE)) {
2528         Value *Fabs = IC.Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, &SI);
2529         return IC.replaceInstUsesWith(SI, Fabs);
2530       }
2531       if (Swap && (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_UGT)) {
2532         Value *Fabs = IC.Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, &SI);
2533         return IC.replaceInstUsesWith(SI, Fabs);
2534       }
2535     }
2536 
2537     // With nsz, when 'Swap' is false:
2538     // fold (X < +/-0.0) ? -X : X or (X <= +/-0.0) ? -X : X to fabs(X)
2539     // fold (X > +/-0.0) ? -X : X or (X >= +/-0.0) ? -X : X to -fabs(x)
2540     // when 'Swap' is true:
2541     // fold (X > +/-0.0) ? X : -X or (X >= +/-0.0) ? X : -X to fabs(X)
2542     // fold (X < +/-0.0) ? X : -X or (X <= +/-0.0) ? X : -X to -fabs(X)
2543     if (!match(TrueVal, m_FNeg(m_Specific(X))) || !SI.hasNoSignedZeros())
2544       return nullptr;
2545 
2546     if (Swap)
2547       Pred = FCmpInst::getSwappedPredicate(Pred);
2548 
2549     bool IsLTOrLE = Pred == FCmpInst::FCMP_OLT || Pred == FCmpInst::FCMP_OLE ||
2550                     Pred == FCmpInst::FCMP_ULT || Pred == FCmpInst::FCMP_ULE;
2551     bool IsGTOrGE = Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_OGE ||
2552                     Pred == FCmpInst::FCMP_UGT || Pred == FCmpInst::FCMP_UGE;
2553 
2554     if (IsLTOrLE) {
2555       Value *Fabs = IC.Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, &SI);
2556       return IC.replaceInstUsesWith(SI, Fabs);
2557     }
2558     if (IsGTOrGE) {
2559       Value *Fabs = IC.Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, &SI);
2560       Instruction *NewFNeg = UnaryOperator::CreateFNeg(Fabs);
2561       NewFNeg->setFastMathFlags(SI.getFastMathFlags());
2562       return NewFNeg;
2563     }
2564   }
2565 
2566   return nullptr;
2567 }
2568 
2569 // Match the following IR pattern:
2570 //   %x.lowbits = and i8 %x, %lowbitmask
2571 //   %x.lowbits.are.zero = icmp eq i8 %x.lowbits, 0
2572 //   %x.biased = add i8 %x, %bias
2573 //   %x.biased.highbits = and i8 %x.biased, %highbitmask
2574 //   %x.roundedup = select i1 %x.lowbits.are.zero, i8 %x, i8 %x.biased.highbits
2575 // Define:
2576 //   %alignment = add i8 %lowbitmask, 1
2577 // Iff 1. %alignment is a power-of-two (aka, %lowbitmask is a low bit mask)
2578 // and 2. %bias is equal to either %lowbitmask or %alignment,
2579 // and 3. %highbitmask is equal to ~%lowbitmask (aka, to -%alignment)
2580 // then this pattern can be transformed into:
2581 //   %x.offset = add i8 %x, %lowbitmask
2582 //   %x.roundedup = and i8 %x.offset, %highbitmask
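// Worked example (added for illustration): with %lowbitmask = 7, %alignment is
// 8 and %highbitmask is -8; for %x = 13 the result is (13 + 7) & -8 = 16, and
// for an already-aligned %x = 16 it is (16 + 7) & -8 = 16, i.e. unchanged.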
2583 static Value *
2584 foldRoundUpIntegerWithPow2Alignment(SelectInst &SI,
2585                                     InstCombiner::BuilderTy &Builder) {
2586   Value *Cond = SI.getCondition();
2587   Value *X = SI.getTrueValue();
2588   Value *XBiasedHighBits = SI.getFalseValue();
2589 
2590   ICmpInst::Predicate Pred;
2591   Value *XLowBits;
2592   if (!match(Cond, m_ICmp(Pred, m_Value(XLowBits), m_ZeroInt())) ||
2593       !ICmpInst::isEquality(Pred))
2594     return nullptr;
2595 
2596   if (Pred == ICmpInst::Predicate::ICMP_NE)
2597     std::swap(X, XBiasedHighBits);
2598 
2599   // FIXME: we could support non-splat vector constants here.
2600 
2601   const APInt *LowBitMaskCst;
2602   if (!match(XLowBits, m_And(m_Specific(X), m_APIntAllowUndef(LowBitMaskCst))))
2603     return nullptr;
2604 
2605   const APInt *BiasCst, *HighBitMaskCst;
2606   if (!match(XBiasedHighBits,
2607              m_And(m_Add(m_Specific(X), m_APIntAllowUndef(BiasCst)),
2608                    m_APIntAllowUndef(HighBitMaskCst))))
2609     return nullptr;
2610 
2611   if (!LowBitMaskCst->isMask())
2612     return nullptr;
2613 
2614   APInt InvertedLowBitMaskCst = ~*LowBitMaskCst;
2615   if (InvertedLowBitMaskCst != *HighBitMaskCst)
2616     return nullptr;
2617 
2618   APInt AlignmentCst = *LowBitMaskCst + 1;
2619 
2620   if (*BiasCst != AlignmentCst && *BiasCst != *LowBitMaskCst)
2621     return nullptr;
2622 
2623   if (!XBiasedHighBits->hasOneUse()) {
2624     if (*BiasCst == *LowBitMaskCst)
2625       return XBiasedHighBits;
2626     return nullptr;
2627   }
2628 
2629   // FIXME: could we preserve undefs here?
2630   Type *Ty = X->getType();
2631   Value *XOffset = Builder.CreateAdd(X, ConstantInt::get(Ty, *LowBitMaskCst),
2632                                      X->getName() + ".biased");
2633   Value *R = Builder.CreateAnd(XOffset, ConstantInt::get(Ty, *HighBitMaskCst));
2634   R->takeName(&SI);
2635   return R;
2636 }
2637 
2638 Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
2639   Value *CondVal = SI.getCondition();
2640   Value *TrueVal = SI.getTrueValue();
2641   Value *FalseVal = SI.getFalseValue();
2642   Type *SelType = SI.getType();
2643 
2644   if (Value *V = simplifySelectInst(CondVal, TrueVal, FalseVal,
2645                                     SQ.getWithInstruction(&SI)))
2646     return replaceInstUsesWith(SI, V);
2647 
2648   if (Instruction *I = canonicalizeSelectToShuffle(SI))
2649     return I;
2650 
2651   if (Instruction *I = canonicalizeScalarSelectOfVecs(SI, *this))
2652     return I;
2653 
2654   // Avoid potential infinite loops by checking for non-constant condition.
2655   // TODO: Can we assert instead by improving canonicalizeSelectToShuffle()?
2656   //       Scalar select must have simplified?
2657   if (SelType->isIntOrIntVectorTy(1) && !isa<Constant>(CondVal) &&
2658       TrueVal->getType() == CondVal->getType()) {
2659     // Folding a select into and/or i1 isn't poison-safe in general;
2660     // impliesPoison checks that the fold does not convert a well-defined
2661     // value into poison.
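    // Illustrative example (added): "select i1 %b, i1 true, i1 %c" is true
    // (not poison) when %b is true even if %c is poison, while "or i1 %b, %c"
    // would be poison; impliesPoison(%c, %b) shows the condition is already
    // poison whenever %c is, so the rewrite introduces no new poison.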
2662     if (match(TrueVal, m_One())) {
2663       if (impliesPoison(FalseVal, CondVal)) {
2664         // Change: A = select B, true, C --> A = or B, C
2665         return BinaryOperator::CreateOr(CondVal, FalseVal);
2666       }
2667 
2668       if (auto *LHS = dyn_cast<FCmpInst>(CondVal))
2669         if (auto *RHS = dyn_cast<FCmpInst>(FalseVal))
2670           if (Value *V = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ false,
2671                                           /*IsSelectLogical*/ true))
2672             return replaceInstUsesWith(SI, V);
2673     }
2674     if (match(FalseVal, m_Zero())) {
2675       if (impliesPoison(TrueVal, CondVal)) {
2676         // Change: A = select B, C, false --> A = and B, C
2677         return BinaryOperator::CreateAnd(CondVal, TrueVal);
2678       }
2679 
2680       if (auto *LHS = dyn_cast<FCmpInst>(CondVal))
2681         if (auto *RHS = dyn_cast<FCmpInst>(TrueVal))
2682           if (Value *V = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ true,
2683                                           /*IsSelectLogical*/ true))
2684             return replaceInstUsesWith(SI, V);
2685     }
2686 
2687     auto *One = ConstantInt::getTrue(SelType);
2688     auto *Zero = ConstantInt::getFalse(SelType);
2689 
2690     // We match the "full" 0 or 1 constant here to avoid a potential infinite
2691     // loop with vectors that may have undefined/poison elements.
2692     // select a, false, b -> select !a, b, false
2693     if (match(TrueVal, m_Specific(Zero))) {
2694       Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
2695       return SelectInst::Create(NotCond, FalseVal, Zero);
2696     }
2697     // select a, b, true -> select !a, true, b
2698     if (match(FalseVal, m_Specific(One))) {
2699       Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
2700       return SelectInst::Create(NotCond, One, TrueVal);
2701     }
2702 
2703     // select a, a, b -> select a, true, b
2704     if (CondVal == TrueVal)
2705       return replaceOperand(SI, 1, One);
2706     // select a, b, a -> select a, b, false
2707     if (CondVal == FalseVal)
2708       return replaceOperand(SI, 2, Zero);
2709 
2710     // select a, !a, b -> select !a, b, false
2711     if (match(TrueVal, m_Not(m_Specific(CondVal))))
2712       return SelectInst::Create(TrueVal, FalseVal, Zero);
2713     // select a, b, !a -> select !a, true, b
2714     if (match(FalseVal, m_Not(m_Specific(CondVal))))
2715       return SelectInst::Create(FalseVal, One, TrueVal);
2716 
2717     Value *A, *B;
2718 
2719     // DeMorgan in select form: !a && !b --> !(a || b)
2720     // select !a, !b, false --> not (select a, true, b)
2721     if (match(&SI, m_LogicalAnd(m_Not(m_Value(A)), m_Not(m_Value(B)))) &&
2722         (CondVal->hasOneUse() || TrueVal->hasOneUse()) &&
2723         !match(A, m_ConstantExpr()) && !match(B, m_ConstantExpr()))
2724       return BinaryOperator::CreateNot(Builder.CreateSelect(A, One, B));
2725 
2726     // DeMorgan in select form: !a || !b --> !(a && b)
2727     // select !a, true, !b --> not (select a, b, false)
2728     if (match(&SI, m_LogicalOr(m_Not(m_Value(A)), m_Not(m_Value(B)))) &&
2729         (CondVal->hasOneUse() || FalseVal->hasOneUse()) &&
2730         !match(A, m_ConstantExpr()) && !match(B, m_ConstantExpr()))
2731       return BinaryOperator::CreateNot(Builder.CreateSelect(A, B, Zero));
2732 
2733     // select (select a, true, b), true, b -> select a, true, b
2734     if (match(CondVal, m_Select(m_Value(A), m_One(), m_Value(B))) &&
2735         match(TrueVal, m_One()) && match(FalseVal, m_Specific(B)))
2736       return replaceOperand(SI, 0, A);
2737     // select (select a, b, false), b, false -> select a, b, false
2738     if (match(CondVal, m_Select(m_Value(A), m_Value(B), m_Zero())) &&
2739         match(TrueVal, m_Specific(B)) && match(FalseVal, m_Zero()))
2740       return replaceOperand(SI, 0, A);
2741 
2742     Value *C;
2743     // select (~a | c), a, b -> and a, (or c, freeze(b))
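    // Note (added): b is frozen because poison in b would propagate
    // unconditionally through the new or/and, whereas the original select
    // ignores b whenever its condition is true.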
2744     if (match(CondVal, m_c_Or(m_Not(m_Specific(TrueVal)), m_Value(C))) &&
2745         CondVal->hasOneUse()) {
2746       FalseVal = Builder.CreateFreeze(FalseVal);
2747       return BinaryOperator::CreateAnd(TrueVal, Builder.CreateOr(C, FalseVal));
2748     }
2749     // select (~c & b), a, b -> and b, (or freeze(a), c)
2750     if (match(CondVal, m_c_And(m_Not(m_Value(C)), m_Specific(FalseVal))) &&
2751         CondVal->hasOneUse()) {
2752       TrueVal = Builder.CreateFreeze(TrueVal);
2753       return BinaryOperator::CreateAnd(FalseVal, Builder.CreateOr(C, TrueVal));
2754     }
2755 
2756     if (!SelType->isVectorTy()) {
2757       if (Value *S = simplifyWithOpReplaced(TrueVal, CondVal, One, SQ,
2758                                             /* AllowRefinement */ true))
2759         return replaceOperand(SI, 1, S);
2760       if (Value *S = simplifyWithOpReplaced(FalseVal, CondVal, Zero, SQ,
2761                                             /* AllowRefinement */ true))
2762         return replaceOperand(SI, 2, S);
2763     }
2764 
2765     if (match(FalseVal, m_Zero()) || match(TrueVal, m_One())) {
2766       Use *Y = nullptr;
2767       bool IsAnd = match(FalseVal, m_Zero());
2768       Value *Op1 = IsAnd ? TrueVal : FalseVal;
2769       if (isCheckForZeroAndMulWithOverflow(CondVal, Op1, IsAnd, Y)) {
2770         auto *FI = new FreezeInst(*Y, (*Y)->getName() + ".fr");
2771         InsertNewInstBefore(FI, *cast<Instruction>(Y->getUser()));
2772         replaceUse(*Y, FI);
2773         return replaceInstUsesWith(SI, Op1);
2774       }
2775 
2776       if (auto *Op1SI = dyn_cast<SelectInst>(Op1))
2777         if (auto *I = foldAndOrOfSelectUsingImpliedCond(CondVal, *Op1SI,
2778                                                         /* IsAnd */ IsAnd))
2779           return I;
2780 
2781       if (auto *ICmp0 = dyn_cast<ICmpInst>(CondVal))
2782         if (auto *ICmp1 = dyn_cast<ICmpInst>(Op1))
2783           if (auto *V = foldAndOrOfICmps(ICmp0, ICmp1, SI, IsAnd,
2784                                          /* IsLogical */ true))
2785             return replaceInstUsesWith(SI, V);
2786     }
2787 
2788     // select (select a, true, b), c, false -> select a, c, false
2789     // select c, (select a, true, b), false -> select c, a, false
2790     //   if c implies that b is false.
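    // Illustrative example (added; names are hypothetical): with
    // b = (%x ugt 10) and c = (%x ult 5), c being true implies b is false, so
    //   select (select %a, true, (%x ugt 10)), (%x ult 5), false
    // simplifies to select %a, (%x ult 5), false.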
2791     if (match(CondVal, m_Select(m_Value(A), m_One(), m_Value(B))) &&
2792         match(FalseVal, m_Zero())) {
2793       Optional<bool> Res = isImpliedCondition(TrueVal, B, DL);
2794       if (Res && *Res == false)
2795         return replaceOperand(SI, 0, A);
2796     }
2797     if (match(TrueVal, m_Select(m_Value(A), m_One(), m_Value(B))) &&
2798         match(FalseVal, m_Zero())) {
2799       Optional<bool> Res = isImpliedCondition(CondVal, B, DL);
2800       if (Res && *Res == false)
2801         return replaceOperand(SI, 1, A);
2802     }
2803     // select c, true, (select a, b, false)  -> select c, true, a
2804     // select (select a, b, false), true, c  -> select a, true, c
2805     //   if c = false implies that b = true
2806     if (match(TrueVal, m_One()) &&
2807         match(FalseVal, m_Select(m_Value(A), m_Value(B), m_Zero()))) {
2808       Optional<bool> Res = isImpliedCondition(CondVal, B, DL, false);
2809       if (Res && *Res == true)
2810         return replaceOperand(SI, 2, A);
2811     }
2812     if (match(CondVal, m_Select(m_Value(A), m_Value(B), m_Zero())) &&
2813         match(TrueVal, m_One())) {
2814       Optional<bool> Res = isImpliedCondition(FalseVal, B, DL, false);
2815       if (Res && *Res == true)
2816         return replaceOperand(SI, 0, A);
2817     }
2818 
2819     // sel (sel c, a, false), true, (sel !c, b, false) -> sel c, a, b
2820     // sel (sel !c, a, false), true, (sel c, b, false) -> sel c, b, a
2821     Value *C1, *C2;
2822     if (match(CondVal, m_Select(m_Value(C1), m_Value(A), m_Zero())) &&
2823         match(TrueVal, m_One()) &&
2824         match(FalseVal, m_Select(m_Value(C2), m_Value(B), m_Zero()))) {
2825       if (match(C2, m_Not(m_Specific(C1)))) // first case
2826         return SelectInst::Create(C1, A, B);
2827       else if (match(C1, m_Not(m_Specific(C2)))) // second case
2828         return SelectInst::Create(C2, B, A);
2829     }
2830   }
2831 
2832   // Selecting between two integer or vector splat integer constants?
2833   //
2834   // Note that we don't handle a scalar select of vectors:
2835   // select i1 %c, <2 x i8> <1, 1>, <2 x i8> <0, 0>
2836   // because that may need 3 instructions to splat the condition value:
2837   // extend, insertelement, shufflevector.
2838   //
2839   // Do not handle i1 TrueVal and FalseVal; otherwise this would result in a
2840   // zext/sext from i1 to i1.
2841   if (SelType->isIntOrIntVectorTy() && !SelType->isIntOrIntVectorTy(1) &&
2842       CondVal->getType()->isVectorTy() == SelType->isVectorTy()) {
2843     // select C, 1, 0 -> zext C to int
2844     if (match(TrueVal, m_One()) && match(FalseVal, m_Zero()))
2845       return new ZExtInst(CondVal, SelType);
2846 
2847     // select C, -1, 0 -> sext C to int
2848     if (match(TrueVal, m_AllOnes()) && match(FalseVal, m_Zero()))
2849       return new SExtInst(CondVal, SelType);
2850 
2851     // select C, 0, 1 -> zext !C to int
2852     if (match(TrueVal, m_Zero()) && match(FalseVal, m_One())) {
2853       Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
2854       return new ZExtInst(NotCond, SelType);
2855     }
2856 
2857     // select C, 0, -1 -> sext !C to int
2858     if (match(TrueVal, m_Zero()) && match(FalseVal, m_AllOnes())) {
2859       Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
2860       return new SExtInst(NotCond, SelType);
2861     }
2862   }
2863 
2864   if (auto *FCmp = dyn_cast<FCmpInst>(CondVal)) {
2865     Value *Cmp0 = FCmp->getOperand(0), *Cmp1 = FCmp->getOperand(1);
2866     // Are we selecting a value based on a comparison of the two values?
2867     if ((Cmp0 == TrueVal && Cmp1 == FalseVal) ||
2868         (Cmp0 == FalseVal && Cmp1 == TrueVal)) {
2869       // Canonicalize to use ordered comparisons by swapping the select
2870       // operands.
2871       //
2872       // e.g.
2873       // (X ugt Y) ? X : Y -> (X ole Y) ? Y : X
2874       if (FCmp->hasOneUse() && FCmpInst::isUnordered(FCmp->getPredicate())) {
2875         FCmpInst::Predicate InvPred = FCmp->getInversePredicate();
2876         IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2877         // FIXME: The FMF should propagate from the select, not the fcmp.
2878         Builder.setFastMathFlags(FCmp->getFastMathFlags());
2879         Value *NewCond = Builder.CreateFCmp(InvPred, Cmp0, Cmp1,
2880                                             FCmp->getName() + ".inv");
2881         Value *NewSel = Builder.CreateSelect(NewCond, FalseVal, TrueVal);
2882         return replaceInstUsesWith(SI, NewSel);
2883       }
2884 
2885       // NOTE: if we wanted to, this is where to detect MIN/MAX
2886     }
2887   }
2888 
2889   // Fold selecting to fabs.
2890   if (Instruction *Fabs = foldSelectWithFCmpToFabs(SI, *this))
2891     return Fabs;
2892 
2893   // See if we are selecting two values based on a comparison of the two values.
2894   if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
2895     if (Instruction *Result = foldSelectInstWithICmp(SI, ICI))
2896       return Result;
2897 
2898   if (Instruction *Add = foldAddSubSelect(SI, Builder))
2899     return Add;
2900   if (Instruction *Add = foldOverflowingAddSubSelect(SI, Builder))
2901     return Add;
2902   if (Instruction *Or = foldSetClearBits(SI, Builder))
2903     return Or;
2904   if (Instruction *Mul = foldSelectZeroOrMul(SI, *this))
2905     return Mul;
2906 
2907   // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
2908   auto *TI = dyn_cast<Instruction>(TrueVal);
2909   auto *FI = dyn_cast<Instruction>(FalseVal);
2910   if (TI && FI && TI->getOpcode() == FI->getOpcode())
2911     if (Instruction *IV = foldSelectOpOp(SI, TI, FI))
2912       return IV;
2913 
2914   if (Instruction *I = foldSelectExtConst(SI))
2915     return I;
2916 
2917   // Fold (select C, (gep Ptr, Idx), Ptr) -> (gep Ptr, (select C, Idx, 0))
2918   // Fold (select C, Ptr, (gep Ptr, Idx)) -> (gep Ptr, (select C, 0, Idx))
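  // Illustrative example (added; value names are hypothetical):
  //   %gep = getelementptr i32, ptr %p, i64 %i
  //   %sel = select i1 %c, ptr %gep, ptr %p
  // becomes
  //   %sel.idx = select i1 %c, i64 %i, i64 0
  //   %sel = getelementptr i32, ptr %p, i64 %sel.idx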
2919   auto SelectGepWithBase = [&](GetElementPtrInst *Gep, Value *Base,
2920                                bool Swap) -> GetElementPtrInst * {
2921     Value *Ptr = Gep->getPointerOperand();
2922     if (Gep->getNumOperands() != 2 || Gep->getPointerOperand() != Base ||
2923         !Gep->hasOneUse())
2924       return nullptr;
2925     Value *Idx = Gep->getOperand(1);
2926     if (isa<VectorType>(CondVal->getType()) && !isa<VectorType>(Idx->getType()))
2927       return nullptr;
2928     Type *ElementType = Gep->getResultElementType();
2929     Value *NewT = Idx;
2930     Value *NewF = Constant::getNullValue(Idx->getType());
2931     if (Swap)
2932       std::swap(NewT, NewF);
2933     Value *NewSI =
2934         Builder.CreateSelect(CondVal, NewT, NewF, SI.getName() + ".idx", &SI);
2935     return GetElementPtrInst::Create(ElementType, Ptr, {NewSI});
2936   };
2937   if (auto *TrueGep = dyn_cast<GetElementPtrInst>(TrueVal))
2938     if (auto *NewGep = SelectGepWithBase(TrueGep, FalseVal, false))
2939       return NewGep;
2940   if (auto *FalseGep = dyn_cast<GetElementPtrInst>(FalseVal))
2941     if (auto *NewGep = SelectGepWithBase(FalseGep, TrueVal, true))
2942       return NewGep;
2943 
2944   // See if we can fold the select into one of our operands.
2945   if (SelType->isIntOrIntVectorTy() || SelType->isFPOrFPVectorTy()) {
2946     if (Instruction *FoldI = foldSelectIntoOp(SI, TrueVal, FalseVal))
2947       return FoldI;
2948 
2949     Value *LHS, *RHS;
2950     Instruction::CastOps CastOp;
2951     SelectPatternResult SPR = matchSelectPattern(&SI, LHS, RHS, &CastOp);
2952     auto SPF = SPR.Flavor;
2953     if (SPF) {
2954       Value *LHS2, *RHS2;
2955       if (SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor)
2956         if (Instruction *R = foldSPFofSPF(cast<Instruction>(LHS), SPF2, LHS2,
2957                                           RHS2, SI, SPF, RHS))
2958           return R;
2959       if (SelectPatternFlavor SPF2 = matchSelectPattern(RHS, LHS2, RHS2).Flavor)
2960         if (Instruction *R = foldSPFofSPF(cast<Instruction>(RHS), SPF2, LHS2,
2961                                           RHS2, SI, SPF, LHS))
2962           return R;
2963     }
2964 
2965     if (SelectPatternResult::isMinOrMax(SPF)) {
2966       // Canonicalize so that
2967       // - type casts are outside select patterns.
2968       // - float clamp is transformed to min/max pattern
2969 
2970       bool IsCastNeeded = LHS->getType() != SelType;
2971       Value *CmpLHS = cast<CmpInst>(CondVal)->getOperand(0);
2972       Value *CmpRHS = cast<CmpInst>(CondVal)->getOperand(1);
2973       if (IsCastNeeded ||
2974           (LHS->getType()->isFPOrFPVectorTy() &&
2975            ((CmpLHS != LHS && CmpLHS != RHS) ||
2976             (CmpRHS != LHS && CmpRHS != RHS)))) {
2977         CmpInst::Predicate MinMaxPred = getMinMaxPred(SPF, SPR.Ordered);
2978 
2979         Value *Cmp;
2980         if (CmpInst::isIntPredicate(MinMaxPred)) {
2981           Cmp = Builder.CreateICmp(MinMaxPred, LHS, RHS);
2982         } else {
2983           IRBuilder<>::FastMathFlagGuard FMFG(Builder);
2984           auto FMF =
2985               cast<FPMathOperator>(SI.getCondition())->getFastMathFlags();
2986           Builder.setFastMathFlags(FMF);
2987           Cmp = Builder.CreateFCmp(MinMaxPred, LHS, RHS);
2988         }
2989 
2990         Value *NewSI = Builder.CreateSelect(Cmp, LHS, RHS, SI.getName(), &SI);
2991         if (!IsCastNeeded)
2992           return replaceInstUsesWith(SI, NewSI);
2993 
2994         Value *NewCast = Builder.CreateCast(CastOp, NewSI, SelType);
2995         return replaceInstUsesWith(SI, NewCast);
2996       }
2997     }
2998   }
2999 
3000   // Canonicalize select of FP values where NaN and -0.0 are not valid as
3001   // minnum/maxnum intrinsics.
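  // Illustrative example (added; value names are hypothetical): with nnan and
  // nsz on the select,
  //   %cmp = fcmp ogt float %x, %y
  //   %max = select nnan nsz i1 %cmp, float %x, float %y
  // becomes %max = call nnan nsz float @llvm.maxnum.f32(float %x, float %y).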
3002   if (isa<FPMathOperator>(SI) && SI.hasNoNaNs() && SI.hasNoSignedZeros()) {
3003     Value *X, *Y;
3004     if (match(&SI, m_OrdFMax(m_Value(X), m_Value(Y))))
3005       return replaceInstUsesWith(
3006           SI, Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, X, Y, &SI));
3007 
3008     if (match(&SI, m_OrdFMin(m_Value(X), m_Value(Y))))
3009       return replaceInstUsesWith(
3010           SI, Builder.CreateBinaryIntrinsic(Intrinsic::minnum, X, Y, &SI));
3011   }
3012 
3013   // See if we can fold the select into a phi node if the condition is a phi.
3014   if (auto *PN = dyn_cast<PHINode>(SI.getCondition()))
3015     // The true/false values have to be live in the PHI predecessors' blocks.
3016     if (canSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
3017         canSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
3018       if (Instruction *NV = foldOpIntoPhi(SI, PN))
3019         return NV;
3020 
3021   if (SelectInst *TrueSI = dyn_cast<SelectInst>(TrueVal)) {
3022     if (TrueSI->getCondition()->getType() == CondVal->getType()) {
3023       // select(C, select(C, a, b), c) -> select(C, a, c)
3024       if (TrueSI->getCondition() == CondVal) {
3025         if (SI.getTrueValue() == TrueSI->getTrueValue())
3026           return nullptr;
3027         return replaceOperand(SI, 1, TrueSI->getTrueValue());
3028       }
3029       // select(C0, select(C1, a, b), b) -> select(C0&C1, a, b)
3030       // We choose this as the normal form because it enables folding on the
3031       // And and shortens the paths to the values (this helps
3032       // getUnderlyingObjects(), for example).
3033       if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) {
3034         Value *And = Builder.CreateLogicalAnd(CondVal, TrueSI->getCondition());
3035         replaceOperand(SI, 0, And);
3036         replaceOperand(SI, 1, TrueSI->getTrueValue());
3037         return &SI;
3038       }
3039     }
3040   }
3041   if (SelectInst *FalseSI = dyn_cast<SelectInst>(FalseVal)) {
3042     if (FalseSI->getCondition()->getType() == CondVal->getType()) {
3043       // select(C, a, select(C, b, c)) -> select(C, a, c)
3044       if (FalseSI->getCondition() == CondVal) {
3045         if (SI.getFalseValue() == FalseSI->getFalseValue())
3046           return nullptr;
3047         return replaceOperand(SI, 2, FalseSI->getFalseValue());
3048       }
3049       // select(C0, a, select(C1, a, b)) -> select(C0|C1, a, b)
3050       if (FalseSI->getTrueValue() == TrueVal && FalseSI->hasOneUse()) {
3051         Value *Or = Builder.CreateLogicalOr(CondVal, FalseSI->getCondition());
3052         replaceOperand(SI, 0, Or);
3053         replaceOperand(SI, 2, FalseSI->getFalseValue());
3054         return &SI;
3055       }
3056     }
3057   }
3058 
3059   auto canMergeSelectThroughBinop = [](BinaryOperator *BO) {
3060     // The select might be preventing a division by 0.
3061     switch (BO->getOpcode()) {
3062     default:
3063       return true;
3064     case Instruction::SRem:
3065     case Instruction::URem:
3066     case Instruction::SDiv:
3067     case Instruction::UDiv:
3068       return false;
3069     }
3070   };
3071 
3072   // Try to simplify a binop sandwiched between 2 selects with the same
3073   // condition.
3074   // select(C, binop(select(C, X, Y), W), Z) -> select(C, binop(X, W), Z)
3075   BinaryOperator *TrueBO;
3076   if (match(TrueVal, m_OneUse(m_BinOp(TrueBO))) &&
3077       canMergeSelectThroughBinop(TrueBO)) {
3078     if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(0))) {
3079       if (TrueBOSI->getCondition() == CondVal) {
3080         replaceOperand(*TrueBO, 0, TrueBOSI->getTrueValue());
3081         Worklist.push(TrueBO);
3082         return &SI;
3083       }
3084     }
3085     if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(1))) {
3086       if (TrueBOSI->getCondition() == CondVal) {
3087         replaceOperand(*TrueBO, 1, TrueBOSI->getTrueValue());
3088         Worklist.push(TrueBO);
3089         return &SI;
3090       }
3091     }
3092   }
3093 
3094   // select(C, Z, binop(select(C, X, Y), W)) -> select(C, Z, binop(Y, W))
3095   BinaryOperator *FalseBO;
3096   if (match(FalseVal, m_OneUse(m_BinOp(FalseBO))) &&
3097       canMergeSelectThroughBinop(FalseBO)) {
3098     if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(0))) {
3099       if (FalseBOSI->getCondition() == CondVal) {
3100         replaceOperand(*FalseBO, 0, FalseBOSI->getFalseValue());
3101         Worklist.push(FalseBO);
3102         return &SI;
3103       }
3104     }
3105     if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(1))) {
3106       if (FalseBOSI->getCondition() == CondVal) {
3107         replaceOperand(*FalseBO, 1, FalseBOSI->getFalseValue());
3108         Worklist.push(FalseBO);
3109         return &SI;
3110       }
3111     }
3112   }
3113 
3114   Value *NotCond;
3115   if (match(CondVal, m_Not(m_Value(NotCond))) &&
3116       !InstCombiner::shouldAvoidAbsorbingNotIntoSelect(SI)) {
3117     replaceOperand(SI, 0, NotCond);
3118     SI.swapValues();
3119     SI.swapProfMetadata();
3120     return &SI;
3121   }
3122 
3123   if (Instruction *I = foldVectorSelect(SI))
3124     return I;
3125 
3126   // If we can compute the condition, there's no need for a select.
3127   // Like the above fold, we are attempting to reduce compile-time cost by
3128   // putting this fold here with limitations rather than in InstSimplify.
3129   // The motivation for this call into value tracking is to take advantage of
3130   // the assumption cache, so make sure that is populated.
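  // Illustrative example (added): if a dominating "call void @llvm.assume(i1
  // %cond)" establishes that %cond is true, computeKnownBits() reports it as
  // known-one and the select collapses to TrueVal.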
3131   if (!CondVal->getType()->isVectorTy() && !AC.assumptions().empty()) {
3132     KnownBits Known(1);
3133     computeKnownBits(CondVal, Known, 0, &SI);
3134     if (Known.One.isOne())
3135       return replaceInstUsesWith(SI, TrueVal);
3136     if (Known.Zero.isOne())
3137       return replaceInstUsesWith(SI, FalseVal);
3138   }
3139 
3140   if (Instruction *BitCastSel = foldSelectCmpBitcasts(SI, Builder))
3141     return BitCastSel;
3142 
3143   // Simplify selects that test the returned flag of cmpxchg instructions.
3144   if (Value *V = foldSelectCmpXchg(SI))
3145     return replaceInstUsesWith(SI, V);
3146 
3147   if (Instruction *Select = foldSelectBinOpIdentity(SI, TLI, *this))
3148     return Select;
3149 
3150   if (Instruction *Funnel = foldSelectFunnelShift(SI, Builder))
3151     return Funnel;
3152 
3153   if (Instruction *Copysign = foldSelectToCopysign(SI, Builder))
3154     return Copysign;
3155 
3156   if (Instruction *PN = foldSelectToPhi(SI, DT, Builder))
3157     return replaceInstUsesWith(SI, PN);
3158 
3159   if (Value *Fr = foldSelectWithFrozenICmp(SI, Builder))
3160     return replaceInstUsesWith(SI, Fr);
3161 
3162   if (Value *V = foldRoundUpIntegerWithPow2Alignment(SI, Builder))
3163     return replaceInstUsesWith(SI, V);
3164 
3165   // select(mask, mload(,,mask,0), 0) -> mload(,,mask,0)
3166   // Load inst is intentionally not checked for hasOneUse()
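  // Illustrative example (added; value names are hypothetical):
  //   %l = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr %p, i32 4,
  //                                                  <4 x i1> %m,
  //                                                  <4 x i32> zeroinitializer)
  //   %s = select <4 x i1> %m, <4 x i32> %l, <4 x i32> zeroinitializer
  // Lanes where %m is false are already zero in %l, so %s can be replaced by %l.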
3167   if (match(FalseVal, m_Zero()) &&
3168       (match(TrueVal, m_MaskedLoad(m_Value(), m_Value(), m_Specific(CondVal),
3169                                    m_CombineOr(m_Undef(), m_Zero()))) ||
3170        match(TrueVal, m_MaskedGather(m_Value(), m_Value(), m_Specific(CondVal),
3171                                      m_CombineOr(m_Undef(), m_Zero()))))) {
3172     auto *MaskedInst = cast<IntrinsicInst>(TrueVal);
3173     if (isa<UndefValue>(MaskedInst->getArgOperand(3)))
3174       MaskedInst->setArgOperand(3, FalseVal /* Zero */);
3175     return replaceInstUsesWith(SI, MaskedInst);
3176   }
3177 
3178   Value *Mask;
3179   if (match(TrueVal, m_Zero()) &&
3180       (match(FalseVal, m_MaskedLoad(m_Value(), m_Value(), m_Value(Mask),
3181                                     m_CombineOr(m_Undef(), m_Zero()))) ||
3182        match(FalseVal, m_MaskedGather(m_Value(), m_Value(), m_Value(Mask),
3183                                       m_CombineOr(m_Undef(), m_Zero())))) &&
3184       (CondVal->getType() == Mask->getType())) {
3185     // We can remove the select by ensuring the load already zeros all lanes
3186     // that the select would zero. We determine this by proving there is no
3187     // overlap between the load and select masks,
3188     // i.e. (load_mask & select_mask) == 0 means there is no overlap.
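    // Illustrative example (added): with a select condition of
    // <i1 true, i1 false> and a load mask of <i1 false, i1 true> the masks do
    // not overlap; lane 0 is already zero in the load (mask off, zero
    // passthru), so selecting zero there changes nothing.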
3189     bool CanMergeSelectIntoLoad = false;
3190     if (Value *V = simplifyAndInst(CondVal, Mask, SQ.getWithInstruction(&SI)))
3191       CanMergeSelectIntoLoad = match(V, m_Zero());
3192 
3193     if (CanMergeSelectIntoLoad) {
3194       auto *MaskedInst = cast<IntrinsicInst>(FalseVal);
3195       if (isa<UndefValue>(MaskedInst->getArgOperand(3)))
3196         MaskedInst->setArgOperand(3, TrueVal /* Zero */);
3197       return replaceInstUsesWith(SI, MaskedInst);
3198     }
3199   }
3200 
3201   return nullptr;
3202 }
3203