1 //===- InstCombineCompares.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitICmp and visitFCmp functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APSInt.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/ConstantFolding.h"
18 #include "llvm/Analysis/InstructionSimplify.h"
19 #include "llvm/Analysis/TargetLibraryInfo.h"
20 #include "llvm/IR/ConstantRange.h"
21 #include "llvm/IR/DataLayout.h"
22 #include "llvm/IR/GetElementPtrTypeIterator.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/PatternMatch.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/KnownBits.h"
27 
28 using namespace llvm;
29 using namespace PatternMatch;
30 
31 #define DEBUG_TYPE "instcombine"
32 
33 // How many times is a select replaced by one of its operands?
34 STATISTIC(NumSel, "Number of select opts");
35 
36 
37 /// Compute Result = In1+In2, returning true if the result overflowed for this
38 /// type.
39 static bool addWithOverflow(APInt &Result, const APInt &In1,
40                             const APInt &In2, bool IsSigned = false) {
41   bool Overflow;
42   if (IsSigned)
43     Result = In1.sadd_ov(In2, Overflow);
44   else
45     Result = In1.uadd_ov(In2, Overflow);
46 
47   return Overflow;
48 }
49 
50 /// Compute Result = In1-In2, returning true if the result overflowed for this
51 /// type.
52 static bool subWithOverflow(APInt &Result, const APInt &In1,
53                             const APInt &In2, bool IsSigned = false) {
54   bool Overflow;
55   if (IsSigned)
56     Result = In1.ssub_ov(In2, Overflow);
57   else
58     Result = In1.usub_ov(In2, Overflow);
59 
60   return Overflow;
61 }
62 
63 /// Given an icmp instruction, return true if any of its users is a branch
64 /// instruction.
65 static bool hasBranchUse(ICmpInst &I) {
66   for (auto *U : I.users())
67     if (isa<BranchInst>(U))
68       return true;
69   return false;
70 }
71 
72 /// Returns true if the exploded icmp can be expressed as a signed comparison
73 /// to zero and updates the predicate accordingly.
74 /// The signedness of the comparison is preserved.
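/// For example, 'icmp slt X, 1' can be treated as 'icmp sle X, 0', and
/// 'icmp sgt X, -1' as 'icmp sge X, 0'.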
75 /// TODO: Refactor with decomposeBitTestICmp()?
76 static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
77   if (!ICmpInst::isSigned(Pred))
78     return false;
79 
80   if (C.isNullValue())
81     return ICmpInst::isRelational(Pred);
82 
83   if (C.isOneValue()) {
84     if (Pred == ICmpInst::ICMP_SLT) {
85       Pred = ICmpInst::ICMP_SLE;
86       return true;
87     }
88   } else if (C.isAllOnesValue()) {
89     if (Pred == ICmpInst::ICMP_SGT) {
90       Pred = ICmpInst::ICMP_SGE;
91       return true;
92     }
93   }
94 
95   return false;
96 }
97 
98 /// Given a signed integer type and a set of known zero and one bits, compute
99 /// the maximum and minimum values that could have the specified known zero and
100 /// known one bits, returning them in Min/Max.
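/// For example, for an 8-bit value with Known.Zero = 0x0f and Known.One = 0x10
/// (bits 5-7 unknown), Min = 0x90 (-112) and Max = 0x70 (112), because the
/// unknown sign bit is set for Min and cleared for Max.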
101 /// TODO: Move to method on KnownBits struct?
102 static void computeSignedMinMaxValuesFromKnownBits(const KnownBits &Known,
103                                                    APInt &Min, APInt &Max) {
104   assert(Known.getBitWidth() == Min.getBitWidth() &&
105          Known.getBitWidth() == Max.getBitWidth() &&
106          "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
107   APInt UnknownBits = ~(Known.Zero|Known.One);
108 
109   // The minimum value is when all unknown bits are zeros, EXCEPT for the sign
110   // bit if it is unknown.
111   Min = Known.One;
112   Max = Known.One|UnknownBits;
113 
114   if (UnknownBits.isNegative()) { // Sign bit is unknown
115     Min.setSignBit();
116     Max.clearSignBit();
117   }
118 }
119 
120 /// Given an unsigned integer type and a set of known zero and one bits, compute
121 /// the maximum and minimum values that could have the specified known zero and
122 /// known one bits, returning them in Min/Max.
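/// For the same 8-bit example (Known.Zero = 0x0f, Known.One = 0x10), this
/// gives Min = 0x10 and Max = 0xf0.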
123 /// TODO: Move to method on KnownBits struct?
124 static void computeUnsignedMinMaxValuesFromKnownBits(const KnownBits &Known,
125                                                      APInt &Min, APInt &Max) {
126   assert(Known.getBitWidth() == Min.getBitWidth() &&
127          Known.getBitWidth() == Max.getBitWidth() &&
128          "KnownZero, KnownOne and Min, Max must have equal bitwidth.");
129   APInt UnknownBits = ~(Known.Zero|Known.One);
130 
131   // The minimum value is when the unknown bits are all zeros.
132   Min = Known.One;
133   // The maximum value is when the unknown bits are all ones.
134   Max = Known.One|UnknownBits;
135 }
136 
137 /// This is called when we see this pattern:
138 ///   cmp pred (load (gep GV, ...)), cmpcst
139 /// where GV is a global variable with a constant initializer. Try to simplify
140 /// this into some simple computation that does not need the load. For example
141 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
142 ///
143 /// If AndCst is non-null, then the loaded value is masked with that constant
144 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
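/// A run of matching elements folds to a range check, e.g. comparing the
/// elements of "abbbbc" against 'b' yields "(i-1) <u 4".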
145 Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
146                                                         GlobalVariable *GV,
147                                                         CmpInst &ICI,
148                                                         ConstantInt *AndCst) {
149   Constant *Init = GV->getInitializer();
150   if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
151     return nullptr;
152 
153   uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
154   // Don't blow up on huge arrays.
155   if (ArrayElementCount > MaxArraySizeForCombine)
156     return nullptr;
157 
158   // There are many forms of this optimization we can handle, for now, just do
159   // the simple index into a single-dimensional array.
160   //
161   // Require: GEP GV, 0, i {{, constant indices}}
162   if (GEP->getNumOperands() < 3 ||
163       !isa<ConstantInt>(GEP->getOperand(1)) ||
164       !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
165       isa<Constant>(GEP->getOperand(2)))
166     return nullptr;
167 
168   // Check that indices after the variable are constants and in-range for the
169   // type they index.  Collect the indices.  This is typically for arrays of
170   // structs.
171   SmallVector<unsigned, 4> LaterIndices;
172 
173   Type *EltTy = Init->getType()->getArrayElementType();
174   for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
175     ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
176     if (!Idx) return nullptr;  // Variable index.
177 
178     uint64_t IdxVal = Idx->getZExtValue();
179     if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.
180 
181     if (StructType *STy = dyn_cast<StructType>(EltTy))
182       EltTy = STy->getElementType(IdxVal);
183     else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
184       if (IdxVal >= ATy->getNumElements()) return nullptr;
185       EltTy = ATy->getElementType();
186     } else {
187       return nullptr; // Unknown type.
188     }
189 
190     LaterIndices.push_back(IdxVal);
191   }
192 
193   enum { Overdefined = -3, Undefined = -2 };
194 
195   // Variables for our state machines.
196 
197   // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
198   // "i == 47 | i == 87", where 47 is the first index the condition is true for,
199   // and 87 is the second (and last) index.  FirstTrueElement is -2 when
200   // undefined, otherwise set to the first true element.  SecondTrueElement is
201   // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
202   int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
203 
204   // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
205   // form "i != 47 & i != 87".  Same state transitions as for true elements.
206   int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
207 
208   /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
209   /// define a state machine that triggers for ranges of values that the index
210   /// is true or false for.  This triggers on things like "abbbbc"[i] == 'b'.
211   /// This is -2 when undefined, -3 when overdefined, and otherwise the last
212   /// index in the range (inclusive).  We use -2 for undefined here because we
213   /// use relative comparisons and don't want 0-1 to match -1.
214   int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
215 
216   // MagicBitvector - This is a magic bitvector where we set a bit if the
217   // comparison is true for element 'i'.  If there are 64 elements or less in
218   // the array, this will fully represent all the comparison results.
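  // For example, for "abbbbc"[i] == 'b' the bitvector would be 0b0011110
  // (= 30), since elements 1 through 4 compare true.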
219   uint64_t MagicBitvector = 0;
220 
221   // Scan the array and see if one of our patterns matches.
222   Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
223   for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
224     Constant *Elt = Init->getAggregateElement(i);
225     if (!Elt) return nullptr;
226 
227     // If this is indexing an array of structures, get the structure element.
228     if (!LaterIndices.empty())
229       Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);
230 
231     // If the element is masked, handle it.
232     if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);
233 
234     // Find out if the comparison would be true or false for the i'th element.
235     Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
236                                                   CompareRHS, DL, &TLI);
237     // If the result is undef for this element, ignore it.
238     if (isa<UndefValue>(C)) {
239       // Extend range state machines to cover this element in case there is an
240       // undef in the middle of the range.
241       if (TrueRangeEnd == (int)i-1)
242         TrueRangeEnd = i;
243       if (FalseRangeEnd == (int)i-1)
244         FalseRangeEnd = i;
245       continue;
246     }
247 
248     // If we can't compute the result for any of the elements, we have to give
249     // up evaluating the entire conditional.
250     if (!isa<ConstantInt>(C)) return nullptr;
251 
252     // Otherwise, we know if the comparison is true or false for this element,
253     // update our state machines.
254     bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
255 
256     // State machine for single/double/range index comparison.
257     if (IsTrueForElt) {
258       // Update the TrueElement state machine.
259       if (FirstTrueElement == Undefined)
260         FirstTrueElement = TrueRangeEnd = i;  // First true element.
261       else {
262         // Update double-compare state machine.
263         if (SecondTrueElement == Undefined)
264           SecondTrueElement = i;
265         else
266           SecondTrueElement = Overdefined;
267 
268         // Update range state machine.
269         if (TrueRangeEnd == (int)i-1)
270           TrueRangeEnd = i;
271         else
272           TrueRangeEnd = Overdefined;
273       }
274     } else {
275       // Update the FalseElement state machine.
276       if (FirstFalseElement == Undefined)
277         FirstFalseElement = FalseRangeEnd = i; // First false element.
278       else {
279         // Update double-compare state machine.
280         if (SecondFalseElement == Undefined)
281           SecondFalseElement = i;
282         else
283           SecondFalseElement = Overdefined;
284 
285         // Update range state machine.
286         if (FalseRangeEnd == (int)i-1)
287           FalseRangeEnd = i;
288         else
289           FalseRangeEnd = Overdefined;
290       }
291     }
292 
293     // If this element is in range, update our magic bitvector.
294     if (i < 64 && IsTrueForElt)
295       MagicBitvector |= 1ULL << i;
296 
297     // If all of our states become overdefined, bail out early.  Since the
298     // predicate is expensive, only check it every 8 elements.  This is only
299     // really useful for really huge arrays.
300     if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
301         SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
302         FalseRangeEnd == Overdefined)
303       return nullptr;
304   }
305 
306   // Now that we've scanned the entire array, emit our new comparison(s).  We
307   // order the state machines in complexity of the generated code.
308   Value *Idx = GEP->getOperand(2);
309 
310   // If the index is larger than the pointer size of the target, truncate the
311   // index down like the GEP would do implicitly.  We don't have to do this for
312   // an inbounds GEP because the index can't be out of range.
313   if (!GEP->isInBounds()) {
314     Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
315     unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
316     if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
317       Idx = Builder.CreateTrunc(Idx, IntPtrTy);
318   }
319 
320   // If the comparison is only true for one or two elements, emit direct
321   // comparisons.
322   if (SecondTrueElement != Overdefined) {
323     // None true -> false.
324     if (FirstTrueElement == Undefined)
325       return replaceInstUsesWith(ICI, Builder.getFalse());
326 
327     Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
328 
329     // True for one element -> 'i == 47'.
330     if (SecondTrueElement == Undefined)
331       return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
332 
333     // True for two elements -> 'i == 47 | i == 72'.
334     Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
335     Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
336     Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
337     return BinaryOperator::CreateOr(C1, C2);
338   }
339 
340   // If the comparison is only false for one or two elements, emit direct
341   // comparisons.
342   if (SecondFalseElement != Overdefined) {
343     // None false -> true.
344     if (FirstFalseElement == Undefined)
345       return replaceInstUsesWith(ICI, Builder.getTrue());
346 
347     Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
348 
349     // False for one element -> 'i != 47'.
350     if (SecondFalseElement == Undefined)
351       return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
352 
353     // False for two elements -> 'i != 47 & i != 72'.
354     Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
355     Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
356     Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
357     return BinaryOperator::CreateAnd(C1, C2);
358   }
359 
360   // If the comparison can be replaced with a range comparison for the elements
361   // where it is true, emit the range check.
362   if (TrueRangeEnd != Overdefined) {
363     assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
364 
365     // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
366     if (FirstTrueElement) {
367       Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
368       Idx = Builder.CreateAdd(Idx, Offs);
369     }
370 
371     Value *End = ConstantInt::get(Idx->getType(),
372                                   TrueRangeEnd-FirstTrueElement+1);
373     return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
374   }
375 
376   // False range check.
377   if (FalseRangeEnd != Overdefined) {
378     assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
379     // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
380     if (FirstFalseElement) {
381       Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
382       Idx = Builder.CreateAdd(Idx, Offs);
383     }
384 
385     Value *End = ConstantInt::get(Idx->getType(),
386                                   FalseRangeEnd-FirstFalseElement);
387     return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
388   }
389 
390   // If a magic bitvector captures the entire comparison state
391   // of this load, replace it with computation that does:
392   //   ((magic_cst >> i) & 1) != 0
393   {
394     Type *Ty = nullptr;
395 
396     // Look for an appropriate type:
397     // - The type of Idx if the magic fits
398     // - The smallest fitting legal type
399     if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
400       Ty = Idx->getType();
401     else
402       Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
403 
404     if (Ty) {
405       Value *V = Builder.CreateIntCast(Idx, Ty, false);
406       V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
407       V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
408       return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
409     }
410   }
411 
412   return nullptr;
413 }
414 
415 /// Return a value that can be used to compare the *offset* implied by a GEP to
416 /// zero. For example, if we have &A[i], we want to return 'i' for
417 /// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
418 /// are involved. The above expression would also be legal to codegen as
419 /// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
420 /// This latter form is less amenable to optimization though, and we are allowed
421 /// to generate the first by knowing that pointer arithmetic doesn't overflow.
422 ///
423 /// If we can't emit an optimized form for this expression, this returns null.
424 ///
425 static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
426                                           const DataLayout &DL) {
427   gep_type_iterator GTI = gep_type_begin(GEP);
428 
429   // Check to see if this gep only has a single variable index.  If so, and if
430   // any constant indices are a multiple of its scale, then we can compute this
431   // in terms of the scale of the variable index.  For example, if the GEP
432   // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
433   // because the expression will cross zero at the same point.
434   unsigned i, e = GEP->getNumOperands();
435   int64_t Offset = 0;
436   for (i = 1; i != e; ++i, ++GTI) {
437     if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
438       // Compute the aggregate offset of constant indices.
439       if (CI->isZero()) continue;
440 
441       // Handle a struct index, which adds its field offset to the pointer.
442       if (StructType *STy = GTI.getStructTypeOrNull()) {
443         Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
444       } else {
445         uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
446         Offset += Size*CI->getSExtValue();
447       }
448     } else {
449       // Found our variable index.
450       break;
451     }
452   }
453 
454   // If there are no variable indices, we must have a constant offset, just
455   // evaluate it the general way.
456   if (i == e) return nullptr;
457 
458   Value *VariableIdx = GEP->getOperand(i);
459   // Determine the scale factor of the variable element.  For example, this is
460   // 4 if the variable index is into an array of i32.
461   uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());
462 
463   // Verify that there are no other variable indices.  If so, emit the hard way.
464   for (++i, ++GTI; i != e; ++i, ++GTI) {
465     ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
466     if (!CI) return nullptr;
467 
468     // Compute the aggregate offset of constant indices.
469     if (CI->isZero()) continue;
470 
471     // Handle a struct index, which adds its field offset to the pointer.
472     if (StructType *STy = GTI.getStructTypeOrNull()) {
473       Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
474     } else {
475       uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
476       Offset += Size*CI->getSExtValue();
477     }
478   }
479 
480   // Okay, we know we have a single variable index, which must be a
481   // pointer/array/vector index.  If there is no offset, life is simple, return
482   // the index.
483   Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
484   unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
485   if (Offset == 0) {
486     // Cast to IntPtrTy in case a truncation occurs.  If an extension is needed,
487     // we don't need to bother extending: the extension won't affect where the
488     // computation crosses zero.
489     if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
490       VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
491     }
492     return VariableIdx;
493   }
494 
495   // Otherwise, there is an index.  The computation we will do will be modulo
496   // the pointer size.
497   Offset = SignExtend64(Offset, IntPtrWidth);
498   VariableScale = SignExtend64(VariableScale, IntPtrWidth);
499 
500   // To do this transformation, any constant index must be a multiple of the
501   // variable scale factor.  For example, we can evaluate "12 + 4*i" as "3 + i",
502   // but we can't evaluate "10 + 3*i" in terms of i.  Check that the offset is a
503   // multiple of the variable scale.
504   int64_t NewOffs = Offset / (int64_t)VariableScale;
505   if (Offset != NewOffs*(int64_t)VariableScale)
506     return nullptr;
507 
508   // Okay, we can do this evaluation.  Start by converting the index to intptr.
509   if (VariableIdx->getType() != IntPtrTy)
510     VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
511                                             true /*Signed*/);
512   Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
513   return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
514 }
515 
516 /// Returns true if we can rewrite Start as a GEP with pointer Base
517 /// and some integer offset. The nodes that need to be re-written
518 /// for this transformation will be added to Explored.
519 static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
520                                   const DataLayout &DL,
521                                   SetVector<Value *> &Explored) {
522   SmallVector<Value *, 16> WorkList(1, Start);
523   Explored.insert(Base);
524 
525   // The following traversal gives us an order which can be used
526   // when doing the final transformation. Since in the final
527   // transformation we create the PHI replacement instructions first,
528   // we don't have to get them in any particular order.
529   //
530   // However, for other instructions we will have to traverse the
531   // operands of an instruction first, which means that we have to
532   // do a post-order traversal.
533   while (!WorkList.empty()) {
534     SetVector<PHINode *> PHIs;
535 
536     while (!WorkList.empty()) {
537       if (Explored.size() >= 100)
538         return false;
539 
540       Value *V = WorkList.back();
541 
542       if (Explored.count(V) != 0) {
543         WorkList.pop_back();
544         continue;
545       }
546 
547       if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
548           !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
549         // We've found some value that we can't explore which is different from
550         // the base. Therefore we can't do this transformation.
551         return false;
552 
553       if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
554         auto *CI = cast<CastInst>(V);
555         if (!CI->isNoopCast(DL))
556           return false;
557 
558         if (Explored.count(CI->getOperand(0)) == 0)
559           WorkList.push_back(CI->getOperand(0));
560       }
561 
562       if (auto *GEP = dyn_cast<GEPOperator>(V)) {
563         // We're limiting the GEP to having one index. This will preserve
564         // the original pointer type. We could handle more cases in the
565         // future.
566         if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
567             GEP->getType() != Start->getType())
568           return false;
569 
570         if (Explored.count(GEP->getOperand(0)) == 0)
571           WorkList.push_back(GEP->getOperand(0));
572       }
573 
574       if (WorkList.back() == V) {
575         WorkList.pop_back();
576         // We've finished visiting this node, mark it as such.
577         Explored.insert(V);
578       }
579 
580       if (auto *PN = dyn_cast<PHINode>(V)) {
581         // We cannot transform PHIs on unsplittable basic blocks.
582         if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
583           return false;
584         Explored.insert(PN);
585         PHIs.insert(PN);
586       }
587     }
588 
589     // Explore the PHI nodes further.
590     for (auto *PN : PHIs)
591       for (Value *Op : PN->incoming_values())
592         if (Explored.count(Op) == 0)
593           WorkList.push_back(Op);
594   }
595 
596   // Make sure that we can do this. Since we can't insert GEPs in a basic
597   // block before a PHI node, we can't easily do this transformation if
598   // we have PHI node users of transformed instructions.
599   for (Value *Val : Explored) {
600     for (Value *Use : Val->uses()) {
601 
602       auto *PHI = dyn_cast<PHINode>(Use);
603       auto *Inst = dyn_cast<Instruction>(Val);
604 
605       if (Inst == Base || Inst == PHI || !Inst || !PHI ||
606           Explored.count(PHI) == 0)
607         continue;
608 
609       if (PHI->getParent() == Inst->getParent())
610         return false;
611     }
612   }
613   return true;
614 }
615 
616 // Sets the appropriate insert point on Builder where we can add
617 // a replacement Instruction for V (if that is possible).
618 static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
619                               bool Before = true) {
620   if (auto *PHI = dyn_cast<PHINode>(V)) {
621     Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
622     return;
623   }
624   if (auto *I = dyn_cast<Instruction>(V)) {
625     if (!Before)
626       I = &*std::next(I->getIterator());
627     Builder.SetInsertPoint(I);
628     return;
629   }
630   if (auto *A = dyn_cast<Argument>(V)) {
631     // Set the insertion point in the entry block.
632     BasicBlock &Entry = A->getParent()->getEntryBlock();
633     Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
634     return;
635   }
636   // Otherwise, this is a constant and we don't need to set a new
637   // insertion point.
638   assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
639 }
640 
641 /// Returns a re-written value of Start as an indexed GEP using Base as a
642 /// pointer.
643 static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
644                                  const DataLayout &DL,
645                                  SetVector<Value *> &Explored) {
646   // Perform all the substitutions. This is a bit tricky because we can
647   // have cycles in our use-def chains.
648   // 1. Create the PHI nodes without any incoming values.
649   // 2. Create all the other values.
650   // 3. Add the edges for the PHI nodes.
651   // 4. Emit GEPs to get the original pointers.
652   // 5. Remove the original instructions.
653   Type *IndexType = IntegerType::get(
654       Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));
655 
656   DenseMap<Value *, Value *> NewInsts;
657   NewInsts[Base] = ConstantInt::getNullValue(IndexType);
658 
659   // Create the new PHI nodes, without adding any incoming values.
660   for (Value *Val : Explored) {
661     if (Val == Base)
662       continue;
663     // Create empty phi nodes. This avoids cyclic dependencies when creating
664     // the remaining instructions.
665     if (auto *PHI = dyn_cast<PHINode>(Val))
666       NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
667                                       PHI->getName() + ".idx", PHI);
668   }
669   IRBuilder<> Builder(Base->getContext());
670 
671   // Create all the other instructions.
672   for (Value *Val : Explored) {
673 
674     if (NewInsts.find(Val) != NewInsts.end())
675       continue;
676 
677     if (auto *CI = dyn_cast<CastInst>(Val)) {
678       // Don't get rid of the intermediate variable here; the store can grow
679       // the map which will invalidate the reference to the input value.
680       Value *V = NewInsts[CI->getOperand(0)];
681       NewInsts[CI] = V;
682       continue;
683     }
684     if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
685       Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
686                                                   : GEP->getOperand(1);
687       setInsertionPoint(Builder, GEP);
688       // Indices might need to be sign extended. GEPs will magically do
689       // this, but we need to do it ourselves here.
690       if (Index->getType()->getScalarSizeInBits() !=
691           NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
692         Index = Builder.CreateSExtOrTrunc(
693             Index, NewInsts[GEP->getOperand(0)]->getType(),
694             GEP->getOperand(0)->getName() + ".sext");
695       }
696 
697       auto *Op = NewInsts[GEP->getOperand(0)];
698       if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
699         NewInsts[GEP] = Index;
700       else
701         NewInsts[GEP] = Builder.CreateNSWAdd(
702             Op, Index, GEP->getOperand(0)->getName() + ".add");
703       continue;
704     }
705     if (isa<PHINode>(Val))
706       continue;
707 
708     llvm_unreachable("Unexpected instruction type");
709   }
710 
711   // Add the incoming values to the PHI nodes.
712   for (Value *Val : Explored) {
713     if (Val == Base)
714       continue;
715     // All the instructions have been created, we can now add edges to the
716     // phi nodes.
717     if (auto *PHI = dyn_cast<PHINode>(Val)) {
718       PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
719       for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
720         Value *NewIncoming = PHI->getIncomingValue(I);
721 
722         if (NewInsts.find(NewIncoming) != NewInsts.end())
723           NewIncoming = NewInsts[NewIncoming];
724 
725         NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
726       }
727     }
728   }
729 
730   for (Value *Val : Explored) {
731     if (Val == Base)
732       continue;
733 
734     // Depending on the type, for external users we have to emit
735     // a GEP or a GEP + ptrtoint.
736     setInsertionPoint(Builder, Val, false);
737 
738     // If required, create an inttoptr instruction for Base.
739     Value *NewBase = Base;
740     if (!Base->getType()->isPointerTy())
741       NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
742                                                Start->getName() + "to.ptr");
743 
744     Value *GEP = Builder.CreateInBoundsGEP(
745         Start->getType()->getPointerElementType(), NewBase,
746         makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");
747 
748     if (!Val->getType()->isPointerTy()) {
749       Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
750                                               Val->getName() + ".conv");
751       GEP = Cast;
752     }
753     Val->replaceAllUsesWith(GEP);
754   }
755 
756   return NewInsts[Start];
757 }
758 
759 /// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
760 /// the input Value as a constant indexed GEP. Returns a pair containing
761 /// the GEP's pointer operand and the accumulated constant index.
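/// For example, for '%q = gep inbounds i8, i8* %p, i64 4' followed by
/// '%r = gep inbounds i8, i8* %q, i64 8', the result for %r is {%p, 12}.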
762 static std::pair<Value *, Value *>
763 getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
764   Type *IndexType = IntegerType::get(V->getContext(),
765                                      DL.getIndexTypeSizeInBits(V->getType()));
766 
767   Constant *Index = ConstantInt::getNullValue(IndexType);
768   while (true) {
769     if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
770       // We accept only inbounds GEPs here to exclude the possibility of
771       // overflow.
772       if (!GEP->isInBounds())
773         break;
774       if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
775           GEP->getType() == V->getType()) {
776         V = GEP->getOperand(0);
777         Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
778         Index = ConstantExpr::getAdd(
779             Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
780         continue;
781       }
782       break;
783     }
784     if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
785       if (!CI->isNoopCast(DL))
786         break;
787       V = CI->getOperand(0);
788       continue;
789     }
790     if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
791       if (!CI->isNoopCast(DL))
792         break;
793       V = CI->getOperand(0);
794       continue;
795     }
796     break;
797   }
798   return {V, Index};
799 }
800 
801 /// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
802 /// We can look through PHIs, GEPs and casts in order to determine a common base
803 /// between GEPLHS and RHS.
804 static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
805                                               ICmpInst::Predicate Cond,
806                                               const DataLayout &DL) {
807   // FIXME: Support vector of pointers.
808   if (GEPLHS->getType()->isVectorTy())
809     return nullptr;
810 
811   if (!GEPLHS->hasAllConstantIndices())
812     return nullptr;
813 
814   // Make sure the pointers have the same type.
815   if (GEPLHS->getType() != RHS->getType())
816     return nullptr;
817 
818   Value *PtrBase, *Index;
819   std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);
820 
821   // The set of nodes that will take part in this transformation.
822   SetVector<Value *> Nodes;
823 
824   if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
825     return nullptr;
826 
827   // We know we can re-write this as
828   //  ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
829   // Since we've only looked through inbounds GEPs we know that we
830   // can't have overflow on either side. We can therefore re-write
831   // this as:
832   //   OFFSET1 cmp OFFSET2
833   Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);
834 
835   // rewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
836   // GEP having PtrBase as the pointer base, and has returned in NewRHS the
837   // offset. Since Index is the offset of LHS to the base pointer, we will now
838   // compare the offsets instead of comparing the pointers.
839   return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
840 }
841 
842 /// Fold comparisons between a GEP instruction and something else. At this point
843 /// we know that the GEP is on the LHS of the comparison.
844 Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
845                                        ICmpInst::Predicate Cond,
846                                        Instruction &I) {
847   // Don't transform signed compares of GEPs into index compares. Even if the
848   // GEP is inbounds, the final add of the base pointer can have signed overflow
849   // and would change the result of the icmp.
850   // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
851   // the maximum signed value for the pointer type.
852   if (ICmpInst::isSigned(Cond))
853     return nullptr;
854 
855   // Look through bitcasts and addrspacecasts. We do not however want to remove
856   // 0 GEPs.
857   if (!isa<GetElementPtrInst>(RHS))
858     RHS = RHS->stripPointerCasts();
859 
860   Value *PtrBase = GEPLHS->getOperand(0);
861   // FIXME: Support vector pointer GEPs.
862   if (PtrBase == RHS && GEPLHS->isInBounds() &&
863       !GEPLHS->getType()->isVectorTy()) {
864     // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
865     // This transformation (ignoring the base and scales) is valid because we
866     // know pointers can't overflow since the gep is inbounds.  See if we can
867     // output an optimized form.
868     Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);
869 
870     // If not, synthesize the offset the hard way.
871     if (!Offset)
872       Offset = EmitGEPOffset(GEPLHS);
873     return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
874                         Constant::getNullValue(Offset->getType()));
875   }
876 
877   if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
878       isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
879       !NullPointerIsDefined(I.getFunction(),
880                             RHS->getType()->getPointerAddressSpace())) {
881     // For most address spaces, an allocation can't be placed at null, but null
882     // itself is treated as a 0 size allocation in the in bounds rules.  Thus,
883     // the only valid inbounds address derived from null, is null itself.
884     // Thus, we have four cases to consider:
885     // 1) Base == nullptr, Offset == 0 -> inbounds, null
886     // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
887     // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
888     // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
889     //
890     // (Note if we're indexing a type of size 0, that simply collapses into one
891     //  of the buckets above.)
892     //
893     // In general, we're allowed to make values less poison (i.e. remove
894     //   sources of full UB), so in this case, we just select between the two
895     //   non-poison cases (1 and 4 above).
896     //
897     // For vectors, we apply the same reasoning on a per-lane basis.
898     auto *Base = GEPLHS->getPointerOperand();
899     if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
900       int NumElts = GEPLHS->getType()->getVectorNumElements();
901       Base = Builder.CreateVectorSplat(NumElts, Base);
902     }
903     return new ICmpInst(Cond, Base,
904                         ConstantExpr::getPointerBitCastOrAddrSpaceCast(
905                             cast<Constant>(RHS), Base->getType()));
906   } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
907     // If the base pointers are different, but the indices are the same, just
908     // compare the base pointer.
909     if (PtrBase != GEPRHS->getOperand(0)) {
910       bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
911       IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
912                         GEPRHS->getOperand(0)->getType();
913       if (IndicesTheSame)
914         for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
915           if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
916             IndicesTheSame = false;
917             break;
918           }
919 
920       // If all indices are the same, just compare the base pointers.
921       Type *BaseType = GEPLHS->getOperand(0)->getType();
922       if (IndicesTheSame && CmpInst::makeCmpResultType(BaseType) == I.getType())
923         return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));
924 
925       // If we're comparing GEPs with two base pointers that only differ in type
926       // and both GEPs have only constant indices or just one use, then fold
927       // the compare with the adjusted indices.
928       // FIXME: Support vector of pointers.
929       if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
930           (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
931           (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
932           PtrBase->stripPointerCasts() ==
933               GEPRHS->getOperand(0)->stripPointerCasts() &&
934           !GEPLHS->getType()->isVectorTy()) {
935         Value *LOffset = EmitGEPOffset(GEPLHS);
936         Value *ROffset = EmitGEPOffset(GEPRHS);
937 
938         // If we looked through an addrspacecast between different sized address
939         // spaces, the LHS and RHS pointers are different sized
940         // integers. Truncate to the smaller one.
941         Type *LHSIndexTy = LOffset->getType();
942         Type *RHSIndexTy = ROffset->getType();
943         if (LHSIndexTy != RHSIndexTy) {
944           if (LHSIndexTy->getPrimitiveSizeInBits() <
945               RHSIndexTy->getPrimitiveSizeInBits()) {
946             ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
947           } else
948             LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
949         }
950 
951         Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
952                                         LOffset, ROffset);
953         return replaceInstUsesWith(I, Cmp);
954       }
955 
956       // Otherwise, the base pointers are different and the indices are
957       // different. Try convert this to an indexed compare by looking through
958       // PHIs/casts.
959       return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
960     }
961 
962     // If one of the GEPs has all zero indices, recurse.
963     // FIXME: Handle vector of pointers.
964     if (!GEPLHS->getType()->isVectorTy() && GEPLHS->hasAllZeroIndices())
965       return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
966                          ICmpInst::getSwappedPredicate(Cond), I);
967 
968     // If the other GEP has all zero indices, recurse.
969     // FIXME: Handle vector of pointers.
970     if (!GEPRHS->getType()->isVectorTy() && GEPRHS->hasAllZeroIndices())
971       return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);
972 
973     bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
974     if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
975       // If the GEPs only differ by one index, compare it.
976       unsigned NumDifferences = 0;  // Keep track of # differences.
977       unsigned DiffOperand = 0;     // The operand that differs.
978       for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
979         if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
980           Type *LHSType = GEPLHS->getOperand(i)->getType();
981           Type *RHSType = GEPRHS->getOperand(i)->getType();
982           // FIXME: Better support for vector of pointers.
983           if (LHSType->getPrimitiveSizeInBits() !=
984                    RHSType->getPrimitiveSizeInBits() ||
985               (GEPLHS->getType()->isVectorTy() &&
986                (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
987             // Irreconcilable differences.
988             NumDifferences = 2;
989             break;
990           }
991 
992           if (NumDifferences++) break;
993           DiffOperand = i;
994         }
995 
996       if (NumDifferences == 0)   // SAME GEP?
997         return replaceInstUsesWith(I, // No comparison is needed here.
998           ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
999 
1000       else if (NumDifferences == 1 && GEPsInBounds) {
1001         Value *LHSV = GEPLHS->getOperand(DiffOperand);
1002         Value *RHSV = GEPRHS->getOperand(DiffOperand);
1003         // Make sure we do a signed comparison here.
1004         return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
1005       }
1006     }
1007 
1008     // Only lower this if the icmp is the only user of the GEP or if we expect
1009     // the result to fold to a constant!
1010     if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
1011         (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
1012       // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))  --->  (OFFSET1 cmp OFFSET2)
1013       Value *L = EmitGEPOffset(GEPLHS);
1014       Value *R = EmitGEPOffset(GEPRHS);
1015       return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
1016     }
1017   }
1018 
1019   // Try convert this to an indexed compare by looking through PHIs/casts as a
1020   // last resort.
1021   return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
1022 }
1023 
1024 Instruction *InstCombiner::foldAllocaCmp(ICmpInst &ICI,
1025                                          const AllocaInst *Alloca,
1026                                          const Value *Other) {
1027   assert(ICI.isEquality() && "Cannot fold non-equality comparison.");
1028 
1029   // It would be tempting to fold away comparisons between allocas and any
1030   // pointer not based on that alloca (e.g. an argument). However, even
1031   // though such pointers cannot alias, they can still compare equal.
1032   //
1033   // But LLVM doesn't specify where allocas get their memory, so if the alloca
1034   // doesn't escape we can argue that it's impossible to guess its value, and we
1035   // can therefore act as if any such guesses are wrong.
1036   //
1037   // The code below checks that the alloca doesn't escape, and that it's only
1038   // used in a comparison once (the current instruction). The
1039   // single-comparison-use condition ensures that we're trivially folding all
1040   // comparisons against the alloca consistently, and avoids the risk of
1041   // erroneously folding a comparison of the pointer with itself.
1042 
1043   unsigned MaxIter = 32; // Break cycles and bound to constant-time.
1044 
1045   SmallVector<const Use *, 32> Worklist;
1046   for (const Use &U : Alloca->uses()) {
1047     if (Worklist.size() >= MaxIter)
1048       return nullptr;
1049     Worklist.push_back(&U);
1050   }
1051 
1052   unsigned NumCmps = 0;
1053   while (!Worklist.empty()) {
1054     assert(Worklist.size() <= MaxIter);
1055     const Use *U = Worklist.pop_back_val();
1056     const Value *V = U->getUser();
1057     --MaxIter;
1058 
1059     if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
1060         isa<SelectInst>(V)) {
1061       // Track the uses.
1062     } else if (isa<LoadInst>(V)) {
1063       // Loading from the pointer doesn't escape it.
1064       continue;
1065     } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
1066       // Storing *to* the pointer is fine, but storing the pointer escapes it.
1067       if (SI->getValueOperand() == U->get())
1068         return nullptr;
1069       continue;
1070     } else if (isa<ICmpInst>(V)) {
1071       if (NumCmps++)
1072         return nullptr; // Found more than one cmp.
1073       continue;
1074     } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
1075       switch (Intrin->getIntrinsicID()) {
1076         // These intrinsics don't escape or compare the pointer. Memset is safe
1077         // because we don't allow ptrtoint. Memcpy and memmove are safe because
1078         // we don't allow stores, so src cannot point to V.
1079         case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
1080         case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
1081           continue;
1082         default:
1083           return nullptr;
1084       }
1085     } else {
1086       return nullptr;
1087     }
1088     for (const Use &U : V->uses()) {
1089       if (Worklist.size() >= MaxIter)
1090         return nullptr;
1091       Worklist.push_back(&U);
1092     }
1093   }
1094 
1095   Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
1096   return replaceInstUsesWith(
1097       ICI,
1098       ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
1099 }
1100 
1101 /// Fold "icmp pred (X+C), X".
1102 Instruction *InstCombiner::foldICmpAddOpConst(Value *X, const APInt &C,
1103                                               ICmpInst::Predicate Pred) {
1104   // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
1105   // so the values can never be equal.  Similarly for all other "or equals"
1106   // operators.
1107   assert(!!C && "C should not be zero!");
1108 
1109   // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
1110   // (X+2) <u X        --> X >u (MAXUINT-2)        --> X >u 253
1111   // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
1112   if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
1113     Constant *R = ConstantInt::get(X->getType(),
1114                                    APInt::getMaxValue(C.getBitWidth()) - C);
1115     return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
1116   }
1117 
1118   // (X+1) >u X        --> X <u (0-1)        --> X != 255
1119   // (X+2) >u X        --> X <u (0-2)        --> X <u 254
1120   // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
1121   if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
1122     return new ICmpInst(ICmpInst::ICMP_ULT, X,
1123                         ConstantInt::get(X->getType(), -C));
1124 
1125   APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());
1126 
1127   // (X+ 1) <s X       --> X >s (MAXSINT-1)          --> X == 127
1128   // (X+ 2) <s X       --> X >s (MAXSINT-2)          --> X >s 125
1129   // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)    --> X >s 0
1130   // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)    --> X >s -1
1131   // (X+ -2) <s X      --> X >s (MAXSINT- -2)        --> X >s 126
1132   // (X+ -1) <s X      --> X >s (MAXSINT- -1)        --> X != 127
1133   if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
1134     return new ICmpInst(ICmpInst::ICMP_SGT, X,
1135                         ConstantInt::get(X->getType(), SMax - C));
1136 
1137   // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
1138   // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
1139   // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
1140   // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
1141   // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
1142   // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128
1143 
1144   assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
1145   return new ICmpInst(ICmpInst::ICMP_SLT, X,
1146                       ConstantInt::get(X->getType(), SMax - (C - 1)));
1147 }
1148 
1149 /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
1150 /// (icmp eq/ne A, Log2(AP2/AP1)) ->
1151 /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
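/// For example, "icmp eq (lshr i32 64, %A), 8" becomes "icmp eq %A, 3".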
1152 Instruction *InstCombiner::foldICmpShrConstConst(ICmpInst &I, Value *A,
1153                                                  const APInt &AP1,
1154                                                  const APInt &AP2) {
1155   assert(I.isEquality() && "Cannot fold icmp gt/lt");
1156 
1157   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1158     if (I.getPredicate() == I.ICMP_NE)
1159       Pred = CmpInst::getInversePredicate(Pred);
1160     return new ICmpInst(Pred, LHS, RHS);
1161   };
1162 
1163   // Don't bother doing any work for cases which InstSimplify handles.
1164   if (AP2.isNullValue())
1165     return nullptr;
1166 
1167   bool IsAShr = isa<AShrOperator>(I.getOperand(0));
1168   if (IsAShr) {
1169     if (AP2.isAllOnesValue())
1170       return nullptr;
1171     if (AP2.isNegative() != AP1.isNegative())
1172       return nullptr;
1173     if (AP2.sgt(AP1))
1174       return nullptr;
1175   }
1176 
1177   if (!AP1)
1178     // 'A' must be large enough to shift out the highest set bit.
1179     return getICmp(I.ICMP_UGT, A,
1180                    ConstantInt::get(A->getType(), AP2.logBase2()));
1181 
1182   if (AP1 == AP2)
1183     return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1184 
1185   int Shift;
1186   if (IsAShr && AP1.isNegative())
1187     Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
1188   else
1189     Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();
1190 
1191   if (Shift > 0) {
1192     if (IsAShr && AP1 == AP2.ashr(Shift)) {
1193       // There are multiple solutions if we are comparing against -1 and the LHS
1194       // of the ashr is not a power of two.
1195       if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
1196         return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
1197       return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1198     } else if (AP1 == AP2.lshr(Shift)) {
1199       return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1200     }
1201   }
1202 
1203   // Shifting const2 will never be equal to const1.
1204   // FIXME: This should always be handled by InstSimplify?
1205   auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1206   return replaceInstUsesWith(I, TorF);
1207 }
1208 
1209 /// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
1210 /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
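/// For example, "icmp eq (shl i32 2, %A), 16" becomes "icmp eq %A, 3".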
1211 Instruction *InstCombiner::foldICmpShlConstConst(ICmpInst &I, Value *A,
1212                                                  const APInt &AP1,
1213                                                  const APInt &AP2) {
1214   assert(I.isEquality() && "Cannot fold icmp gt/lt");
1215 
1216   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1217     if (I.getPredicate() == I.ICMP_NE)
1218       Pred = CmpInst::getInversePredicate(Pred);
1219     return new ICmpInst(Pred, LHS, RHS);
1220   };
1221 
1222   // Don't bother doing any work for cases which InstSimplify handles.
1223   if (AP2.isNullValue())
1224     return nullptr;
1225 
1226   unsigned AP2TrailingZeros = AP2.countTrailingZeros();
1227 
1228   if (!AP1 && AP2TrailingZeros != 0)
1229     return getICmp(
1230         I.ICMP_UGE, A,
1231         ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
1232 
1233   if (AP1 == AP2)
1234     return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1235 
1236   // Get the distance between the lowest bits that are set.
1237   int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;
1238 
1239   if (Shift > 0 && AP2.shl(Shift) == AP1)
1240     return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1241 
1242   // Shifting const2 will never be equal to const1.
1243   // FIXME: This should always be handled by InstSimplify?
1244   auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1245   return replaceInstUsesWith(I, TorF);
1246 }
1247 
1248 /// The caller has matched a pattern of the form:
1249 ///   I = icmp ugt (add (add A, B), CI2), CI1
1250 /// If this is of the form:
1251 ///   sum = a + b
1252 ///   if (sum+128 >u 255)
1253 /// Then replace it with llvm.sadd.with.overflow.i8.
1254 ///
1255 static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
1256                                           ConstantInt *CI2, ConstantInt *CI1,
1257                                           InstCombiner &IC) {
1258   // The transformation we're trying to do here is to transform this into an
1259   // llvm.sadd.with.overflow.  To do this, we have to replace the original add
1260   // with a narrower add, and discard the add-with-constant that is part of the
1261   // range check (if we can't eliminate it, this isn't profitable).
1262 
1263   // In order to eliminate the add-with-constant, the compare can be its only
1264   // use.
1265   Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
1266   if (!AddWithCst->hasOneUse())
1267     return nullptr;
1268 
1269   // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
1270   if (!CI2->getValue().isPowerOf2())
1271     return nullptr;
1272   unsigned NewWidth = CI2->getValue().countTrailingZeros();
1273   if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1274     return nullptr;
1275 
1276   // The width of the new add formed is 1 more than the bias.
1277   ++NewWidth;
1278 
1279   // Check to see that CI1 is an all-ones value with NewWidth bits.
1280   if (CI1->getBitWidth() == NewWidth ||
1281       CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
1282     return nullptr;
1283 
1284   // This is only really a signed overflow check if the inputs have been
1285   // sign-extended; check for that condition. For example, if CI2 is 2^31 and
1286   // the operands of the add are 64 bits wide, we need at least 33 sign bits.
1287   unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
1288   if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
1289       IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
1290     return nullptr;
1291 
1292   // In order to replace the original add with a narrower
1293   // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
1294   // and truncates that discard the high bits of the add.  Verify that this is
1295   // the case.
1296   Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
1297   for (User *U : OrigAdd->users()) {
1298     if (U == AddWithCst)
1299       continue;
1300 
1301     // Only accept truncates for now.  We would really like a nice recursive
1302     // predicate like SimplifyDemandedBits, but one that walks down the use-def
1303     // chain to see which bits of a value are actually demanded.  If the
1304     // original add had another add which was then immediately truncated, we
1305     // could still do the transformation.
1306     TruncInst *TI = dyn_cast<TruncInst>(U);
1307     if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1308       return nullptr;
1309   }
1310 
1311   // If the pattern matches, truncate the inputs to the narrower type and
1312   // use the sadd_with_overflow intrinsic to efficiently compute both the
1313   // result and the overflow bit.
1314   Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1315   Function *F = Intrinsic::getDeclaration(
1316       I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1317 
1318   InstCombiner::BuilderTy &Builder = IC.Builder;
1319 
1320   // Put the new code above the original add, in case there are any uses of the
1321   // add between the add and the compare.
1322   Builder.SetInsertPoint(OrigAdd);
1323 
1324   Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
1325   Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
1326   CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
1327   Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
1328   Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
1329 
1330   // The inner add was the result of the narrow add, zero extended to the
1331   // wider type.  Replace it with the result computed by the intrinsic.
1332   IC.replaceInstUsesWith(*OrigAdd, ZExt);
1333 
1334   // The original icmp gets replaced with the overflow value.
1335   return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1336 }
1337 
1338 /// If we have:
1339 ///   icmp eq/ne (urem/srem %x, %y), 0
1340 /// iff %y is a power-of-two, we can replace this with a bit test:
1341 ///   icmp eq/ne (and %x, (add %y, -1)), 0
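/// For example (illustrative): with %y == 8,
///   icmp eq (urem %x, 8), 0  -->  icmp eq (and %x, 7), 0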
1342 Instruction *InstCombiner::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
1343   // This fold is only valid for equality predicates.
1344   if (!I.isEquality())
1345     return nullptr;
1346   ICmpInst::Predicate Pred;
1347   Value *X, *Y, *Zero;
1348   if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
1349                         m_CombineAnd(m_Zero(), m_Value(Zero)))))
1350     return nullptr;
1351   if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, 0, &I))
1352     return nullptr;
1353   // This may increase instruction count; we don't enforce that Y is a constant.
1354   Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
1355   Value *Masked = Builder.CreateAnd(X, Mask);
1356   return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
1357 }
1358 
1359 /// Fold equality-comparison between zero and any (maybe truncated) right-shift
1360 /// by one-less-than-bitwidth into a sign test on the original value.
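/// For example (illustrative, i32 %x):
///   icmp eq (lshr %x, 31), 0                -->  icmp sge %x, 0
///   icmp ne (trunc (ashr %x, 31) to i8), 0  -->  icmp slt %x, 0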
1361 Instruction *InstCombiner::foldSignBitTest(ICmpInst &I) {
1362   Instruction *Val;
1363   ICmpInst::Predicate Pred;
1364   if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
1365     return nullptr;
1366 
1367   Value *X;
1368   Type *XTy;
1369 
1370   Constant *C;
1371   if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
1372     XTy = X->getType();
1373     unsigned XBitWidth = XTy->getScalarSizeInBits();
1374     if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1375                                      APInt(XBitWidth, XBitWidth - 1))))
1376       return nullptr;
1377   } else if (isa<BinaryOperator>(Val) &&
1378              (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
1379                   cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
1380                   /*AnalyzeForSignBitExtraction=*/true))) {
1381     XTy = X->getType();
1382   } else
1383     return nullptr;
1384 
1385   return ICmpInst::Create(Instruction::ICmp,
1386                           Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
1387                                                     : ICmpInst::ICMP_SLT,
1388                           X, ConstantInt::getNullValue(XTy));
1389 }
1390 
1391 // Handle  icmp pred X, 0
1392 Instruction *InstCombiner::foldICmpWithZero(ICmpInst &Cmp) {
1393   CmpInst::Predicate Pred = Cmp.getPredicate();
1394   if (!match(Cmp.getOperand(1), m_Zero()))
1395     return nullptr;
1396 
1397   // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
1398   if (Pred == ICmpInst::ICMP_SGT) {
1399     Value *A, *B;
1400     SelectPatternResult SPR = matchSelectPattern(Cmp.getOperand(0), A, B);
1401     if (SPR.Flavor == SPF_SMIN) {
1402       if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
1403         return new ICmpInst(Pred, B, Cmp.getOperand(1));
1404       if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
1405         return new ICmpInst(Pred, A, Cmp.getOperand(1));
1406     }
1407   }
1408 
1409   if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
1410     return New;
1411 
1412   // Given:
1413   //   icmp eq/ne (urem %x, %y), 0
1414   // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
1415   //   icmp eq/ne %x, 0
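  // Illustrative reasoning (values chosen for exposition): if %x is known to
  // be either 0 or a single power of two such as 16, a %y with two or more
  // bits set can never divide a nonzero %x, so the 'urem' result is zero
  // exactly when %x itself is zero.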
1416   Value *X, *Y;
1417   if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
1418       ICmpInst::isEquality(Pred)) {
1419     KnownBits XKnown = computeKnownBits(X, 0, &Cmp);
1420     KnownBits YKnown = computeKnownBits(Y, 0, &Cmp);
1421     if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
1422       return new ICmpInst(Pred, X, Cmp.getOperand(1));
1423   }
1424 
1425   return nullptr;
1426 }
1427 
1428 /// Fold icmp Pred X, C.
1429 /// TODO: This code structure does not make sense. The saturating add fold
1430 /// should be moved to some other helper and extended as noted below (it is also
1431 /// possible that code has been made unnecessary - do we canonicalize IR to
1432 /// overflow/saturating intrinsics or not?).
1433 Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
1434   // Match the following pattern, which is a common idiom when writing
1435   // overflow-safe integer arithmetic functions. The source performs an addition
1436   // in a wider type and explicitly checks for overflow using comparisons against
1437   // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
1438   //
1439   // TODO: This could probably be generalized to handle other overflow-safe
1440   // operations if we worked out the formulas to compute the appropriate magic
1441   // constants.
1442   //
1443   // sum = a + b
1444   // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
1445   CmpInst::Predicate Pred = Cmp.getPredicate();
1446   Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1447   Value *A, *B;
1448   ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1449   if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
1450       match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1451     if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
1452       return Res;
1453 
1454   return nullptr;
1455 }
1456 
1457 /// Canonicalize icmp instructions based on dominating conditions.
1458 Instruction *InstCombiner::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
1459   // This is a cheap/incomplete check for dominance - just match a single
1460   // predecessor with a conditional branch.
1461   BasicBlock *CmpBB = Cmp.getParent();
1462   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1463   if (!DomBB)
1464     return nullptr;
1465 
1466   Value *DomCond;
1467   BasicBlock *TrueBB, *FalseBB;
1468   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1469     return nullptr;
1470 
1471   assert((TrueBB == CmpBB || FalseBB == CmpBB) &&
1472          "Predecessor block does not point to successor?");
1473 
1474   // The branch should get simplified. Don't bother simplifying this condition.
1475   if (TrueBB == FalseBB)
1476     return nullptr;
1477 
1478   // Try to simplify this compare to T/F based on the dominating condition.
1479   Optional<bool> Imp = isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB);
1480   if (Imp)
1481     return replaceInstUsesWith(Cmp, ConstantInt::get(Cmp.getType(), *Imp));
1482 
1483   CmpInst::Predicate Pred = Cmp.getPredicate();
1484   Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
1485   ICmpInst::Predicate DomPred;
1486   const APInt *C, *DomC;
1487   if (match(DomCond, m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))) &&
1488       match(Y, m_APInt(C))) {
1489     // We have 2 compares of a variable with constants. Calculate the constant
1490     // ranges of those compares to see if we can transform the 2nd compare:
1491     // DomBB:
1492     //   DomCond = icmp DomPred X, DomC
1493     //   br DomCond, CmpBB, FalseBB
1494     // CmpBB:
1495     //   Cmp = icmp Pred X, C
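    // Worked example (illustrative): if DomCond is 'icmp ult X, 10' and CmpBB
    // is reached on its true edge, then 'icmp ult X, 20' has an empty
    // Difference and folds to true, while 'icmp eq X, 15' has an empty
    // Intersection and folds to false.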
1496     ConstantRange CR = ConstantRange::makeAllowedICmpRegion(Pred, *C);
1497     ConstantRange DominatingCR =
1498         (CmpBB == TrueBB) ? ConstantRange::makeExactICmpRegion(DomPred, *DomC)
1499                           : ConstantRange::makeExactICmpRegion(
1500                                 CmpInst::getInversePredicate(DomPred), *DomC);
1501     ConstantRange Intersection = DominatingCR.intersectWith(CR);
1502     ConstantRange Difference = DominatingCR.difference(CR);
1503     if (Intersection.isEmptySet())
1504       return replaceInstUsesWith(Cmp, Builder.getFalse());
1505     if (Difference.isEmptySet())
1506       return replaceInstUsesWith(Cmp, Builder.getTrue());
1507 
1508     // Canonicalizing a sign-bit comparison that gets used in a branch
1509     // pessimizes codegen by generating a branch-on-zero instruction instead
1510     // of a test-and-branch. So we avoid canonicalizing in such situations
1511     // because a test-and-branch instruction has better branch displacement
1512     // than a compare-and-branch instruction.
1513     bool UnusedBit;
1514     bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
1515     if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
1516       return nullptr;
1517 
1518     if (const APInt *EqC = Intersection.getSingleElement())
1519       return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
1520     if (const APInt *NeC = Difference.getSingleElement())
1521       return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
1522   }
1523 
1524   return nullptr;
1525 }
1526 
1527 /// Fold icmp (trunc X, Y), C.
1528 Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
1529                                                  TruncInst *Trunc,
1530                                                  const APInt &C) {
1531   ICmpInst::Predicate Pred = Cmp.getPredicate();
1532   Value *X = Trunc->getOperand(0);
1533   if (C.isOneValue() && C.getBitWidth() > 1) {
1534     // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
1535     Value *V = nullptr;
1536     if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1537       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1538                           ConstantInt::get(V->getType(), 1));
1539   }
1540 
1541   if (Cmp.isEquality() && Trunc->hasOneUse()) {
1542     // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1543     // of the high bits truncated out of x are known.
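    // For instance (illustrative, i32 -> i8): if bits 31..8 of %x are known
    // to be 0x000001, then icmp eq (trunc %x to i8), 42 --> icmp eq %x, 0x12A.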
1544     unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1545              SrcBits = X->getType()->getScalarSizeInBits();
1546     KnownBits Known = computeKnownBits(X, 0, &Cmp);
1547 
1548     // If all the high bits are known, we can do this xform.
1549     if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
1550       // Pull in the high bits from known-ones set.
1551       APInt NewRHS = C.zext(SrcBits);
1552       NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1553       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
1554     }
1555   }
1556 
1557   return nullptr;
1558 }
1559 
1560 /// Fold icmp (xor X, Y), C.
1561 Instruction *InstCombiner::foldICmpXorConstant(ICmpInst &Cmp,
1562                                                BinaryOperator *Xor,
1563                                                const APInt &C) {
1564   Value *X = Xor->getOperand(0);
1565   Value *Y = Xor->getOperand(1);
1566   const APInt *XorC;
1567   if (!match(Y, m_APInt(XorC)))
1568     return nullptr;
1569 
1570   // If this is a comparison that tests the sign bit (X < 0) or (X > -1),
1571   // fold the xor.
1572   ICmpInst::Predicate Pred = Cmp.getPredicate();
1573   bool TrueIfSigned = false;
1574   if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1575 
1576     // If the sign bit of the XorCst is not set, there is no change to
1577     // the operation, just stop using the Xor.
1578     if (!XorC->isNegative()) {
1579       Cmp.setOperand(0, X);
1580       Worklist.Add(Xor);
1581       return &Cmp;
1582     }
1583 
1584     // Emit the opposite comparison.
1585     if (TrueIfSigned)
1586       return new ICmpInst(ICmpInst::ICMP_SGT, X,
1587                           ConstantInt::getAllOnesValue(X->getType()));
1588     else
1589       return new ICmpInst(ICmpInst::ICMP_SLT, X,
1590                           ConstantInt::getNullValue(X->getType()));
1591   }
1592 
1593   if (Xor->hasOneUse()) {
1594     // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
1595     if (!Cmp.isEquality() && XorC->isSignMask()) {
1596       Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
1597                             : Cmp.getSignedPredicate();
1598       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1599     }
1600 
1601     // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1602     if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1603       Pred = Cmp.isSigned() ? Cmp.getUnsignedPredicate()
1604                             : Cmp.getSignedPredicate();
1605       Pred = Cmp.getSwappedPredicate(Pred);
1606       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1607     }
1608   }
1609 
1610   // Mask constant magic can eliminate an 'xor' with unsigned compares.
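  // e.g. (illustrative, i8): (xor X, ~7) >u 7 --> X <u ~7 (248 as unsigned i8),
  //      and (xor X, 7) >u 7 --> X >u 7.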
1611   if (Pred == ICmpInst::ICMP_UGT) {
1612     // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1613     if (*XorC == ~C && (C + 1).isPowerOf2())
1614       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1615     // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1616     if (*XorC == C && (C + 1).isPowerOf2())
1617       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1618   }
1619   if (Pred == ICmpInst::ICMP_ULT) {
1620     // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1621     if (*XorC == -C && C.isPowerOf2())
1622       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1623                           ConstantInt::get(X->getType(), ~C));
1624     // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1625     if (*XorC == C && (-C).isPowerOf2())
1626       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1627                           ConstantInt::get(X->getType(), ~C));
1628   }
1629   return nullptr;
1630 }
1631 
1632 /// Fold icmp (and (sh X, Y), C2), C1.
1633 Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
1634                                             const APInt &C1, const APInt &C2) {
1635   BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1636   if (!Shift || !Shift->isShift())
1637     return nullptr;
1638 
1639   // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1640   // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1641   // code produced by the clang front-end, for bitfield access.
1642   // This seemingly simple opportunity to fold away a shift turns out to be
1643   // rather complicated. See PR17827 for details.
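  // Sketch of the intended rewrite (constants chosen for exposition):
  //   ((X >>u 3) & 15) == 5  -->  (X & (15 << 3)) == (5 << 3), i.e.
  //   (X & 0x78) == 0x28.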
1644   unsigned ShiftOpcode = Shift->getOpcode();
1645   bool IsShl = ShiftOpcode == Instruction::Shl;
1646   const APInt *C3;
1647   if (match(Shift->getOperand(1), m_APInt(C3))) {
1648     bool CanFold = false;
1649     if (ShiftOpcode == Instruction::Shl) {
1650       // For a left shift, we can fold if the comparison is not signed. We can
1651       // also fold a signed comparison if the mask value and comparison value
1652       // are not negative. These constraints may not be obvious, but we can
1653       // prove that they are correct using an SMT solver.
1654       if (!Cmp.isSigned() || (!C2.isNegative() && !C1.isNegative()))
1655         CanFold = true;
1656     } else {
1657       bool IsAshr = ShiftOpcode == Instruction::AShr;
1658       // For a logical right shift, we can fold if the comparison is not signed.
1659       // We can also fold a signed comparison if the shifted mask value and the
1660       // shifted comparison value are not negative. These constraints may not be
1661       // obvious, but we can prove that they are correct using an SMT solver.
1662       // For an arithmetic shift right we can do the same, if we ensure
1663       // the And doesn't use any bits being shifted in. Normally these would
1664       // be turned into lshr by SimplifyDemandedBits, but not if there is an
1665       // additional user.
1666       if (!IsAshr || (C2.shl(*C3).lshr(*C3) == C2)) {
1667         if (!Cmp.isSigned() ||
1668             (!C2.shl(*C3).isNegative() && !C1.shl(*C3).isNegative()))
1669           CanFold = true;
1670       }
1671     }
1672 
1673     if (CanFold) {
1674       APInt NewCst = IsShl ? C1.lshr(*C3) : C1.shl(*C3);
1675       APInt SameAsC1 = IsShl ? NewCst.shl(*C3) : NewCst.lshr(*C3);
1676       // Check to see if we are shifting out any of the bits being compared.
1677       if (SameAsC1 != C1) {
1678         // If we shifted bits out, the fold is not going to work out. As a
1679         // special case, check to see if this means that the result is always
1680         // true or false now.
1681         if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1682           return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1683         if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1684           return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1685       } else {
1686         Cmp.setOperand(1, ConstantInt::get(And->getType(), NewCst));
1687         APInt NewAndCst = IsShl ? C2.lshr(*C3) : C2.shl(*C3);
1688         And->setOperand(1, ConstantInt::get(And->getType(), NewAndCst));
1689         And->setOperand(0, Shift->getOperand(0));
1690         Worklist.Add(Shift); // Shift is dead.
1691         return &Cmp;
1692       }
1693     }
1694   }
1695 
1696   // Turn ((X >> Y) & C2) == 0  into  (X & (C2 << Y)) == 0.  The latter is
1697   // preferable because it allows the C2 << Y expression to be hoisted out of a
1698   // loop if Y is invariant and X is not.
1699   if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() &&
1700       !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
1701     // Compute C2 << Y.
1702     Value *NewShift =
1703         IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1704               : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1705 
1706     // Compute X & (C2 << Y).
1707     Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1708     Cmp.setOperand(0, NewAnd);
1709     return &Cmp;
1710   }
1711 
1712   return nullptr;
1713 }
1714 
1715 /// Fold icmp (and X, C2), C1.
1716 Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
1717                                                  BinaryOperator *And,
1718                                                  const APInt &C1) {
1719   bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1720 
1721   // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1722   // TODO: We canonicalize to the longer form for scalars because we have
1723   // better analysis/folds for icmp, and codegen may be better with icmp.
1724   if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isNullValue() &&
1725       match(And->getOperand(1), m_One()))
1726     return new TruncInst(And->getOperand(0), Cmp.getType());
1727 
1728   const APInt *C2;
1729   Value *X;
1730   if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1731     return nullptr;
1732 
1733   // Don't perform the following transforms if the AND has multiple uses
1734   if (!And->hasOneUse())
1735     return nullptr;
1736 
1737   if (Cmp.isEquality() && C1.isNullValue()) {
1738     // Restrict this fold to single-use 'and' (PR10267).
1739     // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1740     if (C2->isSignMask()) {
1741       Constant *Zero = Constant::getNullValue(X->getType());
1742       auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1743       return new ICmpInst(NewPred, X, Zero);
1744     }
1745 
1746     // Restrict this fold to single-use 'and' (PR10267).
1747     // ((%x & C) == 0) --> %x u< (-C)  iff (-C) is power of two.
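    // e.g. (illustrative, i8): (%x & 0xF8) == 0  -->  %x u< 8.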
1748     if ((~(*C2) + 1).isPowerOf2()) {
1749       Constant *NegBOC =
1750           ConstantExpr::getNeg(cast<Constant>(And->getOperand(1)));
1751       auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1752       return new ICmpInst(NewPred, X, NegBOC);
1753     }
1754   }
1755 
1756   // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1757   // the input width without changing the value produced, eliminate the cast:
1758   //
1759   // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1760   //
1761   // We can do this transformation if the constants do not have their sign bits
1762   // set or if it is an equality comparison. Extending a relational comparison
1763   // when we're checking the sign bit would not work.
1764   Value *W;
1765   if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1766       (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1767     // TODO: Is this a good transform for vectors? Wider types may reduce
1768     // throughput. Should this transform be limited (even for scalars) by using
1769     // shouldChangeType()?
1770     if (!Cmp.getType()->isVectorTy()) {
1771       Type *WideType = W->getType();
1772       unsigned WideScalarBits = WideType->getScalarSizeInBits();
1773       Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1774       Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1775       Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1776       return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1777     }
1778   }
1779 
1780   if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1781     return I;
1782 
1783   // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1784   // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1785   //
1786   // iff pred isn't signed
1787   if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() &&
1788       match(And->getOperand(1), m_One())) {
1789     Constant *One = cast<Constant>(And->getOperand(1));
1790     Value *Or = And->getOperand(0);
1791     Value *A, *B, *LShr;
1792     if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1793         match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1794       unsigned UsesRemoved = 0;
1795       if (And->hasOneUse())
1796         ++UsesRemoved;
1797       if (Or->hasOneUse())
1798         ++UsesRemoved;
1799       if (LShr->hasOneUse())
1800         ++UsesRemoved;
1801 
1802       // Compute A & ((1 << B) | 1)
1803       Value *NewOr = nullptr;
1804       if (auto *C = dyn_cast<Constant>(B)) {
1805         if (UsesRemoved >= 1)
1806           NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1807       } else {
1808         if (UsesRemoved >= 3)
1809           NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1810                                                      /*HasNUW=*/true),
1811                                    One, Or->getName());
1812       }
1813       if (NewOr) {
1814         Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1815         Cmp.setOperand(0, NewAnd);
1816         return &Cmp;
1817       }
1818     }
1819   }
1820 
1821   return nullptr;
1822 }
1823 
1824 /// Fold icmp (and X, Y), C.
1825 Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
1826                                                BinaryOperator *And,
1827                                                const APInt &C) {
1828   if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1829     return I;
1830 
1831   // TODO: These all require that Y is constant too, so refactor with the above.
1832 
1833   // Try to optimize things like "A[i] & 42 == 0" to index computations.
1834   Value *X = And->getOperand(0);
1835   Value *Y = And->getOperand(1);
1836   if (auto *LI = dyn_cast<LoadInst>(X))
1837     if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1838       if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1839         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1840             !LI->isVolatile() && isa<ConstantInt>(Y)) {
1841           ConstantInt *C2 = cast<ConstantInt>(Y);
1842           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1843             return Res;
1844         }
1845 
1846   if (!Cmp.isEquality())
1847     return nullptr;
1848 
1849   // X & -C == -C -> X >  u ~C
1850   // X & -C != -C -> X <= u ~C
1851   //   iff C is a power of 2
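  // e.g. (illustrative, i8): (X & -16) == -16  -->  X >u ~16 (i.e. X >u 0xEF).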
1852   if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) {
1853     auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1854                                                           : CmpInst::ICMP_ULE;
1855     return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1856   }
1857 
1858   // (X & C2) == 0 -> (trunc X) >= 0
1859   // (X & C2) != 0 -> (trunc X) <  0
1860   //   iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
1861   const APInt *C2;
1862   if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) {
1863     int32_t ExactLogBase2 = C2->exactLogBase2();
1864     if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
1865       Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
1866       if (And->getType()->isVectorTy())
1867         NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
1868       Value *Trunc = Builder.CreateTrunc(X, NTy);
1869       auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
1870                                                             : CmpInst::ICMP_SLT;
1871       return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
1872     }
1873   }
1874 
1875   return nullptr;
1876 }
1877 
1878 /// Fold icmp (or X, Y), C.
1879 Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
1880                                               const APInt &C) {
1881   ICmpInst::Predicate Pred = Cmp.getPredicate();
1882   if (C.isOneValue()) {
1883     // icmp slt signum(V) 1 --> icmp slt V, 1
1884     Value *V = nullptr;
1885     if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1886       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1887                           ConstantInt::get(V->getType(), 1));
1888   }
1889 
1890   Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
1891   if (Cmp.isEquality() && Cmp.getOperand(1) == OrOp1) {
1892     // X | C == C --> X <=u C
1893     // X | C != C --> X  >u C
1894     //   iff C+1 is a power of 2 (C is a bitmask of the low bits)
1895     if ((C + 1).isPowerOf2()) {
1896       Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
1897       return new ICmpInst(Pred, OrOp0, OrOp1);
1898     }
1899     // More general: are all bits outside of a mask constant set or not set?
1900     // X | C == C --> (X & ~C) == 0
1901     // X | C != C --> (X & ~C) != 0
1902     if (Or->hasOneUse()) {
1903       Value *A = Builder.CreateAnd(OrOp0, ~C);
1904       return new ICmpInst(Pred, A, ConstantInt::getNullValue(OrOp0->getType()));
1905     }
1906   }
1907 
1908   if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse())
1909     return nullptr;
1910 
1911   Value *P, *Q;
1912   if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1913     // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1914     // -> and (icmp eq P, null), (icmp eq Q, null).
1915     Value *CmpP =
1916         Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
1917     Value *CmpQ =
1918         Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1919     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1920     return BinaryOperator::Create(BOpc, CmpP, CmpQ);
1921   }
1922 
1923   // Are we using xors to bitwise check for a pair of (in)equalities? Convert to
1924   // a shorter form that has more potential to be folded even further.
1925   Value *X1, *X2, *X3, *X4;
1926   if (match(OrOp0, m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) &&
1927       match(OrOp1, m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) {
1928     // ((X1 ^ X2) || (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4)
1929     // ((X1 ^ X2) || (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4)
1930     Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2);
1931     Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4);
1932     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1933     return BinaryOperator::Create(BOpc, Cmp12, Cmp34);
1934   }
1935 
1936   return nullptr;
1937 }
1938 
1939 /// Fold icmp (mul X, Y), C.
1940 Instruction *InstCombiner::foldICmpMulConstant(ICmpInst &Cmp,
1941                                                BinaryOperator *Mul,
1942                                                const APInt &C) {
1943   const APInt *MulC;
1944   if (!match(Mul->getOperand(1), m_APInt(MulC)))
1945     return nullptr;
1946 
1947   // If this is a test of the sign bit and the multiply is sign-preserving with
1948   // a constant operand, use the multiply LHS operand instead.
1949   ICmpInst::Predicate Pred = Cmp.getPredicate();
1950   if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
1951     if (MulC->isNegative())
1952       Pred = ICmpInst::getSwappedPredicate(Pred);
1953     return new ICmpInst(Pred, Mul->getOperand(0),
1954                         Constant::getNullValue(Mul->getType()));
1955   }
1956 
1957   return nullptr;
1958 }
1959 
1960 /// Fold icmp (shl 1, Y), C.
1961 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
1962                                    const APInt &C) {
1963   Value *Y;
1964   if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
1965     return nullptr;
1966 
1967   Type *ShiftType = Shl->getType();
1968   unsigned TypeBits = C.getBitWidth();
1969   bool CIsPowerOf2 = C.isPowerOf2();
1970   ICmpInst::Predicate Pred = Cmp.getPredicate();
1971   if (Cmp.isUnsigned()) {
1972     // (1 << Y) pred C -> Y pred Log2(C)
1973     if (!CIsPowerOf2) {
1974       // (1 << Y) <  30 -> Y <= 4
1975       // (1 << Y) <= 30 -> Y <= 4
1976       // (1 << Y) >= 30 -> Y >  4
1977       // (1 << Y) >  30 -> Y >  4
1978       if (Pred == ICmpInst::ICMP_ULT)
1979         Pred = ICmpInst::ICMP_ULE;
1980       else if (Pred == ICmpInst::ICMP_UGE)
1981         Pred = ICmpInst::ICMP_UGT;
1982     }
1983 
1984     // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
1985     // (1 << Y) <  2147483648 -> Y <  31 -> Y != 31
1986     unsigned CLog2 = C.logBase2();
1987     if (CLog2 == TypeBits - 1) {
1988       if (Pred == ICmpInst::ICMP_UGE)
1989         Pred = ICmpInst::ICMP_EQ;
1990       else if (Pred == ICmpInst::ICMP_ULT)
1991         Pred = ICmpInst::ICMP_NE;
1992     }
1993     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
1994   } else if (Cmp.isSigned()) {
1995     Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
1996     if (C.isAllOnesValue()) {
1997       // (1 << Y) <= -1 -> Y == 31
1998       if (Pred == ICmpInst::ICMP_SLE)
1999         return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2000 
2001       // (1 << Y) >  -1 -> Y != 31
2002       if (Pred == ICmpInst::ICMP_SGT)
2003         return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2004     } else if (!C) {
2005       // (1 << Y) <  0 -> Y == 31
2006       // (1 << Y) <= 0 -> Y == 31
2007       if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
2008         return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2009 
2010       // (1 << Y) >= 0 -> Y != 31
2011       // (1 << Y) >  0 -> Y != 31
2012       if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
2013         return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2014     }
2015   } else if (Cmp.isEquality() && CIsPowerOf2) {
2016     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2()));
2017   }
2018 
2019   return nullptr;
2020 }
2021 
2022 /// Fold icmp (shl X, Y), C.
2023 Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
2024                                                BinaryOperator *Shl,
2025                                                const APInt &C) {
2026   const APInt *ShiftVal;
2027   if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2028     return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2029 
2030   const APInt *ShiftAmt;
2031   if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2032     return foldICmpShlOne(Cmp, Shl, C);
2033 
2034   // Check that the shift amount is in range. If not, don't perform undefined
2035   // shifts. When the shift is visited, it will be simplified.
2036   unsigned TypeBits = C.getBitWidth();
2037   if (ShiftAmt->uge(TypeBits))
2038     return nullptr;
2039 
2040   ICmpInst::Predicate Pred = Cmp.getPredicate();
2041   Value *X = Shl->getOperand(0);
2042   Type *ShType = Shl->getType();
2043 
2044   // NSW guarantees that we are only shifting out sign bits from the high bits,
2045   // so we can ASHR the compare constant without needing a mask and eliminate
2046   // the shift.
2047   if (Shl->hasNoSignedWrap()) {
2048     if (Pred == ICmpInst::ICMP_SGT) {
2049       // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2050       APInt ShiftedC = C.ashr(*ShiftAmt);
2051       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2052     }
2053     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2054         C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2055       APInt ShiftedC = C.ashr(*ShiftAmt);
2056       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2057     }
2058     if (Pred == ICmpInst::ICMP_SLT) {
2059       // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2060       // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2061       // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2062       // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2063       assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2064       APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2065       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2066     }
2067     // If this is a signed comparison to 0 and the shift is sign preserving,
2068     // use the shift LHS operand instead; isSignTest may change 'Pred', so only
2069     // do that if we're sure to not continue on in this function.
2070     if (isSignTest(Pred, C))
2071       return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
2072   }
2073 
2074   // NUW guarantees that we are only shifting out zero bits from the high bits,
2075   // so we can LSHR the compare constant without needing a mask and eliminate
2076   // the shift.
2077   if (Shl->hasNoUnsignedWrap()) {
2078     if (Pred == ICmpInst::ICMP_UGT) {
2079       // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2080       APInt ShiftedC = C.lshr(*ShiftAmt);
2081       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2082     }
2083     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2084         C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2085       APInt ShiftedC = C.lshr(*ShiftAmt);
2086       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2087     }
2088     if (Pred == ICmpInst::ICMP_ULT) {
2089       // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2090       // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2091       // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2092       // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2093       assert(C.ugt(0) && "ult 0 should have been eliminated");
2094       APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2095       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2096     }
2097   }
2098 
2099   if (Cmp.isEquality() && Shl->hasOneUse()) {
2100     // Strength-reduce the shift into an 'and'.
2101     Constant *Mask = ConstantInt::get(
2102         ShType,
2103         APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2104     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2105     Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2106     return new ICmpInst(Pred, And, LShrC);
2107   }
2108 
2109   // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2110   bool TrueIfSigned = false;
2111   if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2112     // (X << 31) <s 0  --> (X & 1) != 0
2113     Constant *Mask = ConstantInt::get(
2114         ShType,
2115         APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2116     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2117     return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2118                         And, Constant::getNullValue(ShType));
2119   }
2120 
2121   // Simplify 'shl' inequality test into 'and' equality test.
2122   if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2123     // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2124     if ((C + 1).isPowerOf2() &&
2125         (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2126       Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2127       return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2128                                                      : ICmpInst::ICMP_NE,
2129                           And, Constant::getNullValue(ShType));
2130     }
2131     // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2132     if (C.isPowerOf2() &&
2133         (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2134       Value *And =
2135           Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2136       return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2137                                                      : ICmpInst::ICMP_NE,
2138                           And, Constant::getNullValue(ShType));
2139     }
2140   }
2141 
2142   // Transform (icmp pred iM (shl iM %v, N), C)
2143   // -> (icmp pred i(M-N) (trunc %v to i(M-N)), (trunc (C>>N) to i(M-N)))
2144   // Do this when (trunc (C>>N)) has no loss and M-N is a legal integer width.
2145   // This enables us to get rid of the shift in favor of a trunc that may be
2146   // free on the target. It has the additional benefit of comparing to a
2147   // smaller constant that may be more target-friendly.
2148   unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2149   if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
2150       DL.isLegalInteger(TypeBits - Amt)) {
2151     Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2152     if (ShType->isVectorTy())
2153       TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
2154     Constant *NewC =
2155         ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2156     return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
2157   }
2158 
2159   return nullptr;
2160 }
2161 
2162 /// Fold icmp ({al}shr X, Y), C.
2163 Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
2164                                                BinaryOperator *Shr,
2165                                                const APInt &C) {
2166   // An exact shr only shifts out zero bits, so:
2167   // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2168   Value *X = Shr->getOperand(0);
2169   CmpInst::Predicate Pred = Cmp.getPredicate();
2170   if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
2171       C.isNullValue())
2172     return new ICmpInst(Pred, X, Cmp.getOperand(1));
2173 
2174   const APInt *ShiftVal;
2175   if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2176     return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal);
2177 
2178   const APInt *ShiftAmt;
2179   if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2180     return nullptr;
2181 
2182   // Check that the shift amount is in range. If not, don't perform undefined
2183   // shifts. When the shift is visited it will be simplified.
2184   unsigned TypeBits = C.getBitWidth();
2185   unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2186   if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2187     return nullptr;
2188 
2189   bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2190   bool IsExact = Shr->isExact();
2191   Type *ShrTy = Shr->getType();
2192   // TODO: If we could guarantee that InstSimplify would handle all of the
2193   // constant-value-based preconditions in the folds below, then we could assert
2194   // those conditions rather than checking them. This is difficult because of
2195   // undef/poison (PR34838).
2196   if (IsAShr) {
2197     if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) {
2198       // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC)
2199       // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC)
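      // e.g. (illustrative): icmp slt (ashr X, 2), 3 --> icmp slt X, 12.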
2200       APInt ShiftedC = C.shl(ShAmtVal);
2201       if (ShiftedC.ashr(ShAmtVal) == C)
2202         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2203     }
2204     if (Pred == CmpInst::ICMP_SGT) {
2205       // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2206       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2207       if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2208           (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2209         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2210     }
2211   } else {
2212     if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2213       // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2214       // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2215       APInt ShiftedC = C.shl(ShAmtVal);
2216       if (ShiftedC.lshr(ShAmtVal) == C)
2217         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2218     }
2219     if (Pred == CmpInst::ICMP_UGT) {
2220       // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2221       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2222       if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2223         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2224     }
2225   }
2226 
2227   if (!Cmp.isEquality())
2228     return nullptr;
2229 
2230   // Handle equality comparisons of shift-by-constant.
2231 
2232   // If the comparison constant changes with the shift, the comparison cannot
2233   // succeed (bits of the comparison constant cannot match the shifted value).
2234   // This should be known by InstSimplify and already be folded to true/false.
2235   assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2236           (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2237          "Expected icmp+shr simplify did not occur.");
2238 
2239   // If the bits shifted out are known zero, compare the unshifted value:
2240   //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
2241   if (Shr->isExact())
2242     return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2243 
2244   if (Shr->hasOneUse()) {
2245     // Canonicalize the shift into an 'and':
2246     // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2247     APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2248     Constant *Mask = ConstantInt::get(ShrTy, Val);
2249     Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2250     return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2251   }
2252 
2253   return nullptr;
2254 }
2255 
2256 Instruction *InstCombiner::foldICmpSRemConstant(ICmpInst &Cmp,
2257                                                 BinaryOperator *SRem,
2258                                                 const APInt &C) {
2259   // Match an 'is positive' or 'is negative' comparison of remainder by a
2260   // constant power-of-2 value:
2261   // (X % pow2C) sgt/slt 0
2262   const ICmpInst::Predicate Pred = Cmp.getPredicate();
2263   if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT)
2264     return nullptr;
2265 
2266   // TODO: The one-use check is standard because we do not typically want to
2267   //       create longer instruction sequences, but this might be a special-case
2268   //       because srem is not good for analysis or codegen.
2269   if (!SRem->hasOneUse())
2270     return nullptr;
2271 
2272   const APInt *DivisorC;
2273   if (!C.isNullValue() || !match(SRem->getOperand(1), m_Power2(DivisorC)))
2274     return nullptr;
2275 
2276   // Mask off the sign bit and the modulo bits (low-bits).
2277   Type *Ty = SRem->getType();
2278   APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2279   Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2280   Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2281 
2282   // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2283   // bit is set. Example:
2284   // (i8 X % 32) s> 0 --> (X & 159) s> 0
2285   if (Pred == ICmpInst::ICMP_SGT)
2286     return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty));
2287 
2288   // For 'is negative?' check that the sign-bit is set and at least 1 masked
2289   // bit is set. Example:
2290   // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2291   return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2292 }
2293 
2294 /// Fold icmp (udiv X, Y), C.
2295 Instruction *InstCombiner::foldICmpUDivConstant(ICmpInst &Cmp,
2296                                                 BinaryOperator *UDiv,
2297                                                 const APInt &C) {
2298   const APInt *C2;
2299   if (!match(UDiv->getOperand(0), m_APInt(C2)))
2300     return nullptr;
2301 
2302   assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2303 
2304   // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
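  // e.g. (illustrative): icmp ugt (udiv 15, Y), 3  -->  icmp ule Y, 3.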
2305   Value *Y = UDiv->getOperand(1);
2306   if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
2307     assert(!C.isMaxValue() &&
2308            "icmp ugt X, UINT_MAX should have been simplified already.");
2309     return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2310                         ConstantInt::get(Y->getType(), C2->udiv(C + 1)));
2311   }
2312 
2313   // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2314   if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
2315     assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2316     return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2317                         ConstantInt::get(Y->getType(), C2->udiv(C)));
2318   }
2319 
2320   return nullptr;
2321 }
2322 
2323 /// Fold icmp ({su}div X, Y), C.
2324 Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
2325                                                BinaryOperator *Div,
2326                                                const APInt &C) {
2327   // Fold: icmp pred ([us]div X, C2), C -> range test
2328   // Fold this div into the comparison, producing a range check.
2329   // Determine, based on the divide type, what the range is being
2330   // checked.  If there is an overflow on the low or high side, remember
2331   // it, otherwise compute the range [low, hi) bounding the new value.
2332   // See: InsertRangeTest above for the kinds of replacements possible.
2333   const APInt *C2;
2334   if (!match(Div->getOperand(1), m_APInt(C2)))
2335     return nullptr;
2336 
2337   // FIXME: If the operand types don't match the type of the divide
2338   // then don't attempt this transform. The code below doesn't have the
2339   // logic to deal with a signed divide and an unsigned compare (and
2340   // vice versa). This is because (x /s C2) <s C  produces different
2341   // results than (x /s C2) <u C or (x /u C2) <s C or even
2342   // (x /u C2) <u C.  Simply casting the operands and result won't
2343   // work. :(  The if statement below tests that condition and bails
2344   // if it finds it.
2345   bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2346   if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2347     return nullptr;
2348 
2349   // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2350   // INT_MIN will also fail if the divisor is 1. Although folds of all these
2351   // division-by-constant cases should be present, we cannot assert that they
2352   // have happened before we reach this icmp instruction.
2353   if (C2->isNullValue() || C2->isOneValue() ||
2354       (DivIsSigned && C2->isAllOnesValue()))
2355     return nullptr;
2356 
2357   // Compute Prod = C * C2. We are essentially solving an equation of
2358   // form X / C2 = C. We solve for X by multiplying C2 and C.
2359   // By solving for X, we can turn this into a range check instead of computing
2360   // a divide.
2361   APInt Prod = C * *C2;
2362 
2363   // Determine if the product overflows by seeing if the product is not equal to
2364   // the divide. Make sure we do the same kind of divide as in the LHS
2365   // instruction that we're folding.
2366   bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2367 
2368   ICmpInst::Predicate Pred = Cmp.getPredicate();
2369 
2370   // If the division is known to be exact, then there is no remainder from the
2371   // divide, so the covered range size is 1; otherwise it is the divisor.
2372   APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2373 
2374   // Figure out the interval that is being checked.  For example, a comparison
2375   // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2376   // Compute this interval based on the constants involved and the signedness of
2377   // the compare/divide.  This computes a half-open interval, keeping track of
2378   // whether either value in the interval overflows.  After analysis each
2379   // overflow variable is set to 0 if its corresponding bound variable is valid,
2380   // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2381   int LoOverflow = 0, HiOverflow = 0;
2382   APInt LoBound, HiBound;
2383 
2384   if (!DivIsSigned) {  // udiv
2385     // e.g. X/5 op 3  --> [15, 20)
2386     LoBound = Prod;
2387     HiOverflow = LoOverflow = ProdOV;
2388     if (!HiOverflow) {
2389       // If this is not an exact divide, then many values in the range collapse
2390       // to the same result value.
2391       HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2392     }
2393   } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2394     if (C.isNullValue()) {       // (X / pos) op 0
2395       // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
2396       LoBound = -(RangeSize - 1);
2397       HiBound = RangeSize;
2398     } else if (C.isStrictlyPositive()) {   // (X / pos) op pos
2399       LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
2400       HiOverflow = LoOverflow = ProdOV;
2401       if (!HiOverflow)
2402         HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2403     } else {                       // (X / pos) op neg
2404       // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
2405       HiBound = Prod + 1;
2406       LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2407       if (!LoOverflow) {
2408         APInt DivNeg = -RangeSize;
2409         LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2410       }
2411     }
2412   } else if (C2->isNegative()) { // Divisor is < 0.
2413     if (Div->isExact())
2414       RangeSize.negate();
2415     if (C.isNullValue()) { // (X / neg) op 0
2416       // e.g. X/-5 op 0  --> [-4, 5)
2417       LoBound = RangeSize + 1;
2418       HiBound = -RangeSize;
2419       if (HiBound == *C2) {        // -INTMIN = INTMIN
2420         HiOverflow = 1;            // [INTMIN+1, overflow)
2421         HiBound = APInt();         // e.g. X/INTMIN = 0 --> X > INTMIN
2422       }
2423     } else if (C.isStrictlyPositive()) {   // (X / neg) op pos
2424       // e.g. X/-5 op 3  --> [-19, -14)
2425       HiBound = Prod + 1;
2426       HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2427       if (!LoOverflow)
2428         LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
2429     } else {                       // (X / neg) op neg
2430       LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
2431       LoOverflow = HiOverflow = ProdOV;
2432       if (!HiOverflow)
2433         HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2434     }
2435 
2436     // Dividing by a negative swaps the condition.  LT <-> GT
2437     Pred = ICmpInst::getSwappedPredicate(Pred);
2438   }
2439 
2440   Value *X = Div->getOperand(0);
2441   switch (Pred) {
2442     default: llvm_unreachable("Unhandled icmp opcode!");
2443     case ICmpInst::ICMP_EQ:
2444       if (LoOverflow && HiOverflow)
2445         return replaceInstUsesWith(Cmp, Builder.getFalse());
2446       if (HiOverflow)
2447         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2448                             ICmpInst::ICMP_UGE, X,
2449                             ConstantInt::get(Div->getType(), LoBound));
2450       if (LoOverflow)
2451         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2452                             ICmpInst::ICMP_ULT, X,
2453                             ConstantInt::get(Div->getType(), HiBound));
2454       return replaceInstUsesWith(
2455           Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2456     case ICmpInst::ICMP_NE:
2457       if (LoOverflow && HiOverflow)
2458         return replaceInstUsesWith(Cmp, Builder.getTrue());
2459       if (HiOverflow)
2460         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2461                             ICmpInst::ICMP_ULT, X,
2462                             ConstantInt::get(Div->getType(), LoBound));
2463       if (LoOverflow)
2464         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2465                             ICmpInst::ICMP_UGE, X,
2466                             ConstantInt::get(Div->getType(), HiBound));
2467       return replaceInstUsesWith(Cmp,
2468                                  insertRangeTest(X, LoBound, HiBound,
2469                                                  DivIsSigned, false));
2470     case ICmpInst::ICMP_ULT:
2471     case ICmpInst::ICMP_SLT:
2472       if (LoOverflow == +1)   // Low bound is greater than input range.
2473         return replaceInstUsesWith(Cmp, Builder.getTrue());
2474       if (LoOverflow == -1)   // Low bound is less than input range.
2475         return replaceInstUsesWith(Cmp, Builder.getFalse());
2476       return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound));
2477     case ICmpInst::ICMP_UGT:
2478     case ICmpInst::ICMP_SGT:
2479       if (HiOverflow == +1)       // High bound greater than input range.
2480         return replaceInstUsesWith(Cmp, Builder.getFalse());
2481       if (HiOverflow == -1)       // High bound less than input range.
2482         return replaceInstUsesWith(Cmp, Builder.getTrue());
2483       if (Pred == ICmpInst::ICMP_UGT)
2484         return new ICmpInst(ICmpInst::ICMP_UGE, X,
2485                             ConstantInt::get(Div->getType(), HiBound));
2486       return new ICmpInst(ICmpInst::ICMP_SGE, X,
2487                           ConstantInt::get(Div->getType(), HiBound));
2488   }
2489 
2490   return nullptr;
2491 }
2492 
2493 /// Fold icmp (sub X, Y), C.
2494 Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
2495                                                BinaryOperator *Sub,
2496                                                const APInt &C) {
2497   Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2498   ICmpInst::Predicate Pred = Cmp.getPredicate();
2499   const APInt *C2;
2500   APInt SubResult;
2501 
2502   // icmp eq/ne (sub C, Y), C -> icmp eq/ne Y, 0
2503   if (match(X, m_APInt(C2)) && *C2 == C && Cmp.isEquality())
2504     return new ICmpInst(Cmp.getPredicate(), Y,
2505                         ConstantInt::get(Y->getType(), 0));
2506 
2507   // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2508   if (match(X, m_APInt(C2)) &&
2509       ((Cmp.isUnsigned() && Sub->hasNoUnsignedWrap()) ||
2510        (Cmp.isSigned() && Sub->hasNoSignedWrap())) &&
2511       !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2512     return new ICmpInst(Cmp.getSwappedPredicate(), Y,
2513                         ConstantInt::get(Y->getType(), SubResult));
2514 
2515   // The following transforms are only worth it if the only user of the subtract
2516   // is the icmp.
2517   if (!Sub->hasOneUse())
2518     return nullptr;
2519 
2520   if (Sub->hasNoSignedWrap()) {
2521     // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2522     if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue())
2523       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2524 
2525     // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2526     if (Pred == ICmpInst::ICMP_SGT && C.isNullValue())
2527       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2528 
2529     // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2530     if (Pred == ICmpInst::ICMP_SLT && C.isNullValue())
2531       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2532 
2533     // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2534     if (Pred == ICmpInst::ICMP_SLT && C.isOneValue())
2535       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
2536   }
2537 
2538   if (!match(X, m_APInt(C2)))
2539     return nullptr;
2540 
2541   // C2 - Y <u C -> (Y | (C - 1)) == C2
2542   //   iff (C2 & (C - 1)) == C - 1 and C is a power of 2
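  //   For example (illustrative values): with C2 = 7 and C = 4,
  //   "7 - Y <u 4" holds exactly for Y in [4, 7], which is "(Y | 3) == 7".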
2543   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
2544       (*C2 & (C - 1)) == (C - 1))
2545     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
2546 
2547   // C2 - Y >u C -> (Y | C) != C2
2548   //   iff C2 & C == C and C + 1 is a power of 2
2549   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
2550     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
2551 
2552   return nullptr;
2553 }
2554 
2555 /// Fold icmp (add X, Y), C.
2556 Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
2557                                                BinaryOperator *Add,
2558                                                const APInt &C) {
2559   Value *Y = Add->getOperand(1);
2560   const APInt *C2;
2561   if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
2562     return nullptr;
2563 
2564   // Fold icmp pred (add X, C2), C.
2565   Value *X = Add->getOperand(0);
2566   Type *Ty = Add->getType();
2567   CmpInst::Predicate Pred = Cmp.getPredicate();
2568 
2569   // If the add does not wrap, we can always adjust the compare by subtracting
2570   // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
2571   // are canonicalized to SGT/SLT/UGT/ULT.
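  // For example (illustrative constants): icmp sgt (add nsw X, 5), 20
  // becomes icmp sgt X, 15, since the nsw add cannot wrap.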
2572   if ((Add->hasNoSignedWrap() &&
2573        (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
2574       (Add->hasNoUnsignedWrap() &&
2575        (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
2576     bool Overflow;
2577     APInt NewC =
2578         Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
2579     // If there is overflow, the result must be true or false.
2580     // TODO: Can we assert there is no overflow because InstSimplify always
2581     // handles those cases?
2582     if (!Overflow)
2583       // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2584       return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2585   }
2586 
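  // Even without nsw/nuw, the exact icmp region may still be expressible as a
  // single unsigned/signed comparison. A concrete instance (illustrative only):
  // for i8, "icmp ult (add X, 5), 5" has the region [-5, 0), so it becomes
  // "icmp uge X, 251".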
2587   auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
2588   const APInt &Upper = CR.getUpper();
2589   const APInt &Lower = CR.getLower();
2590   if (Cmp.isSigned()) {
2591     if (Lower.isSignMask())
2592       return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2593     if (Upper.isSignMask())
2594       return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2595   } else {
2596     if (Lower.isMinValue())
2597       return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2598     if (Upper.isMinValue())
2599       return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
2600   }
2601 
2602   if (!Add->hasOneUse())
2603     return nullptr;
2604 
2605   // X+C2 <u C -> (X & -C) == -C2
2606   //   iff C2 & (C-1) == 0
2607   //       C is a power of 2
2608   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
2609     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
2610                         ConstantExpr::getNeg(cast<Constant>(Y)));
2611 
2612   // X+C2 >u C -> (X & ~C) != -C2
2613   //   iff C2 & C == 0
2614   //       C+1 is a power of 2
2615   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
2616     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
2617                         ConstantExpr::getNeg(cast<Constant>(Y)));
2618 
2619   return nullptr;
2620 }
2621 
2622 bool InstCombiner::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
2623                                            Value *&RHS, ConstantInt *&Less,
2624                                            ConstantInt *&Equal,
2625                                            ConstantInt *&Greater) {
2626   // TODO: Generalize this to work with other comparison idioms or ensure
2627   // they get canonicalized into this form.
2628 
2629   // select i1 (a == b),
2630   //        i32 Equal,
2631   //        i32 (select i1 (a < b), i32 Less, i32 Greater)
2632   // where Equal, Less and Greater are placeholders for any three constants.
2633   ICmpInst::Predicate PredA;
2634   if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
2635       !ICmpInst::isEquality(PredA))
2636     return false;
2637   Value *EqualVal = SI->getTrueValue();
2638   Value *UnequalVal = SI->getFalseValue();
2639   // We can still get a non-canonical predicate here, so canonicalize.
2640   if (PredA == ICmpInst::ICMP_NE)
2641     std::swap(EqualVal, UnequalVal);
2642   if (!match(EqualVal, m_ConstantInt(Equal)))
2643     return false;
2644   ICmpInst::Predicate PredB;
2645   Value *LHS2, *RHS2;
2646   if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
2647                                   m_ConstantInt(Less), m_ConstantInt(Greater))))
2648     return false;
2649   // We can get predicate mismatch here, so canonicalize if possible:
2650   // First, ensure that the 'LHS' operands match.
2651   if (LHS2 != LHS) {
2652     // x sgt y <--> y slt x
2653     std::swap(LHS2, RHS2);
2654     PredB = ICmpInst::getSwappedPredicate(PredB);
2655   }
2656   if (LHS2 != LHS)
2657     return false;
2658   // We also need to canonicalize 'RHS'.
2659   if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
2660     // x sgt C-1  <-->  x sge C  <-->  not(x slt C)
2661     auto FlippedStrictness =
2662         getFlippedStrictnessPredicateAndConstant(PredB, cast<Constant>(RHS2));
2663     if (!FlippedStrictness)
2664       return false;
2665     assert(FlippedStrictness->first == ICmpInst::ICMP_SGE && "Sanity check");
2666     RHS2 = FlippedStrictness->second;
2667     // And kind-of perform the result swap.
2668     std::swap(Less, Greater);
2669     PredB = ICmpInst::ICMP_SLT;
2670   }
2671   return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
2672 }
2673 
2674 Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp,
2675                                                   SelectInst *Select,
2676                                                   ConstantInt *C) {
2677 
2678   assert(C && "Cmp RHS should be a constant int!");
2679   // If we're testing a constant value against the result of a three way
2680   // comparison, the result can be expressed directly in terms of the
2681   // original values being compared.  Note: We could possibly be more
2682   // aggressive here and remove the hasOneUse test. The original select is
2683   // really likely to simplify or sink when we remove a test of the result.
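  // For illustration: if the select encodes a three-way compare of (a, b) with
  // constants (Less, Equal, Greater) = (-1, 0, 1), then "icmp sgt <select>, 0"
  // is satisfied only by the Greater arm, so it simplifies to "a s> b".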
2684   Value *OrigLHS, *OrigRHS;
2685   ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
2686   if (Cmp.hasOneUse() &&
2687       matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
2688                               C3GreaterThan)) {
2689     assert(C1LessThan && C2Equal && C3GreaterThan);
2690 
2691     bool TrueWhenLessThan =
2692         ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C)
2693             ->isAllOnesValue();
2694     bool TrueWhenEqual =
2695         ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C)
2696             ->isAllOnesValue();
2697     bool TrueWhenGreaterThan =
2698         ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C)
2699             ->isAllOnesValue();
2700 
2701     // This generates the new instruction that will replace the original Cmp
2702     // Instruction. Instead of enumerating the various combinations when
2703     // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
2704     // false, we rely on chaining of ORs and future passes of InstCombine to
2705     // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
2706 
2707     // When none of the three constants satisfy the predicate for the RHS (C),
2708     // the entire original Cmp can be simplified to a false.
2709     Value *Cond = Builder.getFalse();
2710     if (TrueWhenLessThan)
2711       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT,
2712                                                        OrigLHS, OrigRHS));
2713     if (TrueWhenEqual)
2714       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ,
2715                                                        OrigLHS, OrigRHS));
2716     if (TrueWhenGreaterThan)
2717       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT,
2718                                                        OrigLHS, OrigRHS));
2719 
2720     return replaceInstUsesWith(Cmp, Cond);
2721   }
2722   return nullptr;
2723 }
2724 
2725 static Instruction *foldICmpBitCast(ICmpInst &Cmp,
2726                                     InstCombiner::BuilderTy &Builder) {
2727   auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
2728   if (!Bitcast)
2729     return nullptr;
2730 
2731   ICmpInst::Predicate Pred = Cmp.getPredicate();
2732   Value *Op1 = Cmp.getOperand(1);
2733   Value *BCSrcOp = Bitcast->getOperand(0);
2734 
2735   // Make sure the bitcast doesn't change the number of vector elements.
2736   if (Bitcast->getSrcTy()->getScalarSizeInBits() ==
2737           Bitcast->getDestTy()->getScalarSizeInBits()) {
2738     // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
2739     Value *X;
2740     if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
2741       // icmp  eq (bitcast (sitofp X)), 0 --> icmp  eq X, 0
2742       // icmp  ne (bitcast (sitofp X)), 0 --> icmp  ne X, 0
2743       // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
2744       // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
2745       if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
2746            Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
2747           match(Op1, m_Zero()))
2748         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2749 
2750       // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
2751       if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
2752         return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
2753 
2754       // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
2755       if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
2756         return new ICmpInst(Pred, X,
2757                             ConstantInt::getAllOnesValue(X->getType()));
2758     }
2759 
2760     // Zero-equality checks are preserved through unsigned floating-point casts:
2761     // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
2762     // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
2763     if (match(BCSrcOp, m_UIToFP(m_Value(X))))
2764       if (Cmp.isEquality() && match(Op1, m_Zero()))
2765         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2766   }
2767 
2768   // Test to see if the operands of the icmp are casted versions of other
2769   // values. If the ptr->ptr cast can be stripped off both arguments, do so.
2770   if (Bitcast->getType()->isPointerTy() &&
2771       (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
2772     // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
2773     // so eliminate it as well.
2774     if (auto *BC2 = dyn_cast<BitCastInst>(Op1))
2775       Op1 = BC2->getOperand(0);
2776 
2777     Op1 = Builder.CreateBitCast(Op1, BCSrcOp->getType());
2778     return new ICmpInst(Pred, BCSrcOp, Op1);
2779   }
2780 
2781   // Folding: icmp <pred> iN X, C
2782   //  where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC) to iN
2783   //    and C is a splat of a K-bit pattern
2784   //    and SC is a constant vector = <C', C', C', ..., C'>
2785   // Into:
2786   //   %E = extractelement <M x iK> %vec, i32 C'
2787   //   icmp <pred> iK %E, trunc(C)
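  // A concrete instance (illustrative values): with %vec : <4 x i8>,
  // SC = <i32 1, i32 1, i32 1, i32 1> and C = 0x2A2A2A2A, this becomes
  //   %E = extractelement <4 x i8> %vec, i32 1
  //   icmp <pred> i8 %E, 42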
2788   const APInt *C;
2789   if (!match(Cmp.getOperand(1), m_APInt(C)) ||
2790       !Bitcast->getType()->isIntegerTy() ||
2791       !Bitcast->getSrcTy()->isIntOrIntVectorTy())
2792     return nullptr;
2793 
2794   Value *Vec;
2795   Constant *Mask;
2796   if (match(BCSrcOp,
2797             m_ShuffleVector(m_Value(Vec), m_Undef(), m_Constant(Mask)))) {
2798     // Check whether every element of Mask is the same constant
2799     if (auto *Elem = dyn_cast_or_null<ConstantInt>(Mask->getSplatValue())) {
2800       auto *VecTy = cast<VectorType>(BCSrcOp->getType());
2801       auto *EltTy = cast<IntegerType>(VecTy->getElementType());
2802       if (C->isSplat(EltTy->getBitWidth())) {
2803         // Fold the icmp based on the value of C
2804         // If C is M copies of an iK sized bit pattern,
2805         // then:
2806         //   =>  %E = extractelement <M x iK> %vec, i32 Elem
2807         //       icmp <pred> iK %E, <pattern>
2808         Value *Extract = Builder.CreateExtractElement(Vec, Elem);
2809         Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
2810         return new ICmpInst(Pred, Extract, NewC);
2811       }
2812     }
2813   }
2814   return nullptr;
2815 }
2816 
2817 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
2818 /// where X is some kind of instruction.
2819 Instruction *InstCombiner::foldICmpInstWithConstant(ICmpInst &Cmp) {
2820   const APInt *C;
2821   if (!match(Cmp.getOperand(1), m_APInt(C)))
2822     return nullptr;
2823 
2824   if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) {
2825     switch (BO->getOpcode()) {
2826     case Instruction::Xor:
2827       if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C))
2828         return I;
2829       break;
2830     case Instruction::And:
2831       if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C))
2832         return I;
2833       break;
2834     case Instruction::Or:
2835       if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C))
2836         return I;
2837       break;
2838     case Instruction::Mul:
2839       if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C))
2840         return I;
2841       break;
2842     case Instruction::Shl:
2843       if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C))
2844         return I;
2845       break;
2846     case Instruction::LShr:
2847     case Instruction::AShr:
2848       if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C))
2849         return I;
2850       break;
2851     case Instruction::SRem:
2852       if (Instruction *I = foldICmpSRemConstant(Cmp, BO, *C))
2853         return I;
2854       break;
2855     case Instruction::UDiv:
2856       if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C))
2857         return I;
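      // Deliberate fallthrough: the generic signed/unsigned div fold below
      // also applies to udiv.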
2858       LLVM_FALLTHROUGH;
2859     case Instruction::SDiv:
2860       if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C))
2861         return I;
2862       break;
2863     case Instruction::Sub:
2864       if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C))
2865         return I;
2866       break;
2867     case Instruction::Add:
2868       if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C))
2869         return I;
2870       break;
2871     default:
2872       break;
2873     }
2874     // TODO: These folds could be refactored to be part of the above calls.
2875     if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C))
2876       return I;
2877   }
2878 
2879   // Match against CmpInst LHS being instructions other than binary operators.
2880 
2881   if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) {
2882     // For now, we only support constant integers while folding the
2883     // ICMP(SELECT)) pattern. We can extend this to support vector of integers
2884     // similar to the cases handled by binary ops above.
2885     if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
2886       if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
2887         return I;
2888   }
2889 
2890   if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) {
2891     if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
2892       return I;
2893   }
2894 
2895   if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
2896     if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
2897       return I;
2898 
2899   return nullptr;
2900 }
2901 
2902 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
2903 /// icmp eq/ne BO, C.
2904 Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
2905                                                              BinaryOperator *BO,
2906                                                              const APInt &C) {
2907   // TODO: Some of these folds could work with arbitrary constants, but this
2908   // function is limited to scalar and vector splat constants.
2909   if (!Cmp.isEquality())
2910     return nullptr;
2911 
2912   ICmpInst::Predicate Pred = Cmp.getPredicate();
2913   bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
2914   Constant *RHS = cast<Constant>(Cmp.getOperand(1));
2915   Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
2916 
2917   switch (BO->getOpcode()) {
2918   case Instruction::SRem:
2919     // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
2920     if (C.isNullValue() && BO->hasOneUse()) {
2921       const APInt *BOC;
2922       if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
2923         Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
2924         return new ICmpInst(Pred, NewRem,
2925                             Constant::getNullValue(BO->getType()));
2926       }
2927     }
2928     break;
2929   case Instruction::Add: {
2930     // Replace ((add A, B) != C) with (A != C-B) if B & C are constants.
2931     const APInt *BOC;
2932     if (match(BOp1, m_APInt(BOC))) {
2933       if (BO->hasOneUse()) {
2934         Constant *SubC = ConstantExpr::getSub(RHS, cast<Constant>(BOp1));
2935         return new ICmpInst(Pred, BOp0, SubC);
2936       }
2937     } else if (C.isNullValue()) {
2938       // Replace ((add A, B) != 0) with (A != -B) if A or B is
2939       // efficiently invertible, or if the add has just this one use.
2940       if (Value *NegVal = dyn_castNegVal(BOp1))
2941         return new ICmpInst(Pred, BOp0, NegVal);
2942       if (Value *NegVal = dyn_castNegVal(BOp0))
2943         return new ICmpInst(Pred, NegVal, BOp1);
2944       if (BO->hasOneUse()) {
2945         Value *Neg = Builder.CreateNeg(BOp1);
2946         Neg->takeName(BO);
2947         return new ICmpInst(Pred, BOp0, Neg);
2948       }
2949     }
2950     break;
2951   }
2952   case Instruction::Xor:
2953     if (BO->hasOneUse()) {
2954       if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
2955         // For the xor case, we can xor two constants together, eliminating
2956         // the explicit xor.
2957         return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
2958       } else if (C.isNullValue()) {
2959         // Replace ((xor A, B) != 0) with (A != B)
2960         return new ICmpInst(Pred, BOp0, BOp1);
2961       }
2962     }
2963     break;
2964   case Instruction::Sub:
2965     if (BO->hasOneUse()) {
2966       const APInt *BOC;
2967       if (match(BOp0, m_APInt(BOC))) {
2968         // Replace ((sub BOC, B) != C) with (B != BOC-C).
2969         Constant *SubC = ConstantExpr::getSub(cast<Constant>(BOp0), RHS);
2970         return new ICmpInst(Pred, BOp1, SubC);
2971       } else if (C.isNullValue()) {
2972         // Replace ((sub A, B) != 0) with (A != B).
2973         return new ICmpInst(Pred, BOp0, BOp1);
2974       }
2975     }
2976     break;
2977   case Instruction::Or: {
2978     const APInt *BOC;
2979     if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
2980       // Comparing if all bits outside of a constant mask are set?
2981       // Replace (X | C) == -1 with (X & ~C) == ~C.
2982       // This removes the -1 constant.
2983       Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
2984       Value *And = Builder.CreateAnd(BOp0, NotBOC);
2985       return new ICmpInst(Pred, And, NotBOC);
2986     }
2987     break;
2988   }
2989   case Instruction::And: {
2990     const APInt *BOC;
2991     if (match(BOp1, m_APInt(BOC))) {
2992       // If we have ((X & C) == C) and C is a power of 2, turn it into ((X & C) != 0).
2993       if (C == *BOC && C.isPowerOf2())
2994         return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
2995                             BO, Constant::getNullValue(RHS->getType()));
2996     }
2997     break;
2998   }
2999   case Instruction::Mul:
3000     if (C.isNullValue() && BO->hasNoSignedWrap()) {
3001       const APInt *BOC;
3002       if (match(BOp1, m_APInt(BOC)) && !BOC->isNullValue()) {
3003         // The trivial case (mul X, 0) is handled by InstSimplify.
3004         // General case : (mul X, C) != 0 iff X != 0
3005         //                (mul X, C) == 0 iff X == 0
3006         return new ICmpInst(Pred, BOp0, Constant::getNullValue(RHS->getType()));
3007       }
3008     }
3009     break;
3010   case Instruction::UDiv:
3011     if (C.isNullValue()) {
3012       // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
3013       auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3014       return new ICmpInst(NewPred, BOp1, BOp0);
3015     }
3016     break;
3017   default:
3018     break;
3019   }
3020   return nullptr;
3021 }
3022 
3023 /// Fold an equality icmp with LLVM intrinsic and constant operand.
3024 Instruction *InstCombiner::foldICmpEqIntrinsicWithConstant(ICmpInst &Cmp,
3025                                                            IntrinsicInst *II,
3026                                                            const APInt &C) {
3027   Type *Ty = II->getType();
3028   unsigned BitWidth = C.getBitWidth();
3029   switch (II->getIntrinsicID()) {
3030   case Intrinsic::bswap:
3031     Worklist.Add(II);
3032     Cmp.setOperand(0, II->getArgOperand(0));
3033     Cmp.setOperand(1, ConstantInt::get(Ty, C.byteSwap()));
3034     return &Cmp;
3035 
3036   case Intrinsic::ctlz:
3037   case Intrinsic::cttz: {
3038     // ctz(A) == bitwidth(A)  ->  A == 0 and likewise for !=
3039     if (C == BitWidth) {
3040       Worklist.Add(II);
3041       Cmp.setOperand(0, II->getArgOperand(0));
3042       Cmp.setOperand(1, ConstantInt::getNullValue(Ty));
3043       return &Cmp;
3044     }
3045 
3046     // cttz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set and
3047     // Mask1 has bits 0..C set (the C+1 low bits). Similar for ctlz, high bits.
3048     // Limit to one use to ensure we don't increase instruction count.
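    // For example (i8, illustrative): cttz(A) == 3 -> (A & 0x0F) == 0x08,
    // and ctlz(A) == 3 -> (A & 0xF0) == 0x10.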
3049     unsigned Num = C.getLimitedValue(BitWidth);
3050     if (Num != BitWidth && II->hasOneUse()) {
3051       bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3052       APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3053                                : APInt::getHighBitsSet(BitWidth, Num + 1);
3054       APInt Mask2 = IsTrailing
3055         ? APInt::getOneBitSet(BitWidth, Num)
3056         : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3057       Cmp.setOperand(0, Builder.CreateAnd(II->getArgOperand(0), Mask1));
3058       Cmp.setOperand(1, ConstantInt::get(Ty, Mask2));
3059       Worklist.Add(II);
3060       return &Cmp;
3061     }
3062     break;
3063   }
3064 
3065   case Intrinsic::ctpop: {
3066     // popcount(A) == 0  ->  A == 0 and likewise for !=
3067     // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
3068     bool IsZero = C.isNullValue();
3069     if (IsZero || C == BitWidth) {
3070       Worklist.Add(II);
3071       Cmp.setOperand(0, II->getArgOperand(0));
3072       auto *NewOp =
3073           IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty);
3074       Cmp.setOperand(1, NewOp);
3075       return &Cmp;
3076     }
3077     break;
3078   }
3079 
3080   case Intrinsic::uadd_sat: {
3081     // uadd.sat(a, b) == 0  ->  (a | b) == 0
3082     if (C.isNullValue()) {
3083       Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3084       return replaceInstUsesWith(Cmp, Builder.CreateICmp(
3085           Cmp.getPredicate(), Or, Constant::getNullValue(Ty)));
3086 
3087     }
3088     break;
3089   }
3090 
3091   case Intrinsic::usub_sat: {
3092     // usub.sat(a, b) == 0  ->  a <= b
3093     if (C.isNullValue()) {
3094       ICmpInst::Predicate NewPred = Cmp.getPredicate() == ICmpInst::ICMP_EQ
3095           ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3096       return ICmpInst::Create(Instruction::ICmp, NewPred,
3097                               II->getArgOperand(0), II->getArgOperand(1));
3098     }
3099     break;
3100   }
3101   default:
3102     break;
3103   }
3104 
3105   return nullptr;
3106 }
3107 
3108 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
3109 Instruction *InstCombiner::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
3110                                                          IntrinsicInst *II,
3111                                                          const APInt &C) {
3112   if (Cmp.isEquality())
3113     return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
3114 
3115   Type *Ty = II->getType();
3116   unsigned BitWidth = C.getBitWidth();
3117   switch (II->getIntrinsicID()) {
3118   case Intrinsic::ctlz: {
3119     // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
3120     if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3121       unsigned Num = C.getLimitedValue();
3122       APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3123       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
3124                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3125     }
3126 
3127     // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
3128     if (Cmp.getPredicate() == ICmpInst::ICMP_ULT &&
3129         C.uge(1) && C.ule(BitWidth)) {
3130       unsigned Num = C.getLimitedValue();
3131       APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
3132       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
3133                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3134     }
3135     break;
3136   }
3137   case Intrinsic::cttz: {
3138     // Limit to one use to ensure we don't increase instruction count.
3139     if (!II->hasOneUse())
3140       return nullptr;
3141 
3142     // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
3143     if (Cmp.getPredicate() == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3144       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
3145       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
3146                              Builder.CreateAnd(II->getArgOperand(0), Mask),
3147                              ConstantInt::getNullValue(Ty));
3148     }
3149 
3150     // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
3151     if (Cmp.getPredicate() == ICmpInst::ICMP_ULT &&
3152         C.uge(1) && C.ule(BitWidth)) {
3153       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
3154       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
3155                              Builder.CreateAnd(II->getArgOperand(0), Mask),
3156                              ConstantInt::getNullValue(Ty));
3157     }
3158     break;
3159   }
3160   default:
3161     break;
3162   }
3163 
3164   return nullptr;
3165 }
3166 
3167 /// Handle icmp with constant (but not simple integer constant) RHS.
3168 Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
3169   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3170   Constant *RHSC = dyn_cast<Constant>(Op1);
3171   Instruction *LHSI = dyn_cast<Instruction>(Op0);
3172   if (!RHSC || !LHSI)
3173     return nullptr;
3174 
3175   switch (LHSI->getOpcode()) {
3176   case Instruction::GetElementPtr:
3177     // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
3178     if (RHSC->isNullValue() &&
3179         cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
3180       return new ICmpInst(
3181           I.getPredicate(), LHSI->getOperand(0),
3182           Constant::getNullValue(LHSI->getOperand(0)->getType()));
3183     break;
3184   case Instruction::PHI:
3185     // Only fold icmp into the PHI if the phi and icmp are in the same
3186     // block.  If in the same block, we're encouraging jump threading.  If
3187     // not, we are just pessimizing the code by making an i1 phi.
3188     if (LHSI->getParent() == I.getParent())
3189       if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
3190         return NV;
3191     break;
3192   case Instruction::Select: {
3193     // If either operand of the select is a constant, we can fold the
3194     // comparison into the select arms, which will cause one to be
3195     // constant folded and the select turned into a bitwise or.
3196     Value *Op1 = nullptr, *Op2 = nullptr;
3197     ConstantInt *CI = nullptr;
3198     if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
3199       Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3200       CI = dyn_cast<ConstantInt>(Op1);
3201     }
3202     if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
3203       Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3204       CI = dyn_cast<ConstantInt>(Op2);
3205     }
3206 
3207     // We only want to perform this transformation if it will not lead to
3208     // additional code. This is true if either both sides of the select
3209     // fold to a constant (in which case the icmp is replaced with a select
3210     // which will usually simplify) or this is the only user of the
3211     // select (in which case we are trading a select+icmp for a simpler
3212     // select+icmp) or all uses of the select can be replaced based on
3213     // dominance information ("Global cases").
3214     bool Transform = false;
3215     if (Op1 && Op2)
3216       Transform = true;
3217     else if (Op1 || Op2) {
3218       // Local case
3219       if (LHSI->hasOneUse())
3220         Transform = true;
3221       // Global cases
3222       else if (CI && !CI->isZero())
3223         // When Op1 is constant try replacing select with second operand.
3224         // Otherwise Op2 is constant and try replacing select with first
3225         // operand.
3226         Transform =
3227             replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
3228     }
3229     if (Transform) {
3230       if (!Op1)
3231         Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
3232                                  I.getName());
3233       if (!Op2)
3234         Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
3235                                  I.getName());
3236       return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
3237     }
3238     break;
3239   }
3240   case Instruction::IntToPtr:
3241     // icmp pred inttoptr(X), null -> icmp pred X, 0
3242     if (RHSC->isNullValue() &&
3243         DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
3244       return new ICmpInst(
3245           I.getPredicate(), LHSI->getOperand(0),
3246           Constant::getNullValue(LHSI->getOperand(0)->getType()));
3247     break;
3248 
3249   case Instruction::Load:
3250     // Try to optimize things like "A[i] > 4" to index computations.
3251     if (GetElementPtrInst *GEP =
3252             dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
3253       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
3254         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
3255             !cast<LoadInst>(LHSI)->isVolatile())
3256           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
3257             return Res;
3258     }
3259     break;
3260   }
3261 
3262   return nullptr;
3263 }
3264 
3265 /// Some comparisons can be simplified.
3266 /// In this case, we are looking for comparisons that look like
3267 /// a check for a lossy truncation.
3268 /// Folds:
3269 ///   icmp SrcPred (x & Mask), x    to    icmp DstPred x, Mask
3270 /// Where Mask is some pattern that produces all-ones in low bits:
3271 ///    (-1 >> y)
3272 ///    ((-1 << y) >> y)     <- non-canonical, has extra uses
3273 ///   ~(-1 << y)
3274 ///    ((1 << y) + (-1))    <- non-canonical, has extra uses
3275 /// The Mask can be a constant, too.
3276 /// For some predicates, the operands are commutative.
3277 /// For others, x can only be on a specific side.
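/// A simple constant-mask instance (for illustration):
///    (x & 15) == x    ->    x u<= 15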
3278 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
3279                                           InstCombiner::BuilderTy &Builder) {
3280   ICmpInst::Predicate SrcPred;
3281   Value *X, *M, *Y;
3282   auto m_VariableMask = m_CombineOr(
3283       m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
3284                   m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
3285       m_CombineOr(m_LShr(m_AllOnes(), m_Value()),
3286                   m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y))));
3287   auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask());
3288   if (!match(&I, m_c_ICmp(SrcPred,
3289                           m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)),
3290                           m_Deferred(X))))
3291     return nullptr;
3292 
3293   ICmpInst::Predicate DstPred;
3294   switch (SrcPred) {
3295   case ICmpInst::Predicate::ICMP_EQ:
3296     //  x & (-1 >> y) == x    ->    x u<= (-1 >> y)
3297     DstPred = ICmpInst::Predicate::ICMP_ULE;
3298     break;
3299   case ICmpInst::Predicate::ICMP_NE:
3300     //  x & (-1 >> y) != x    ->    x u> (-1 >> y)
3301     DstPred = ICmpInst::Predicate::ICMP_UGT;
3302     break;
3303   case ICmpInst::Predicate::ICMP_UGT:
3304     //  x u> x & (-1 >> y)    ->    x u> (-1 >> y)
3305     assert(X == I.getOperand(0) && "instsimplify took care of commut. variant");
3306     DstPred = ICmpInst::Predicate::ICMP_UGT;
3307     break;
3308   case ICmpInst::Predicate::ICMP_UGE:
3309     //  x & (-1 >> y) u>= x    ->    x u<= (-1 >> y)
3310     assert(X == I.getOperand(1) && "instsimplify took care of commut. variant");
3311     DstPred = ICmpInst::Predicate::ICMP_ULE;
3312     break;
3313   case ICmpInst::Predicate::ICMP_ULT:
3314     //  x & (-1 >> y) u< x    ->    x u> (-1 >> y)
3315     assert(X == I.getOperand(1) && "instsimplify took care of commut. variant");
3316     DstPred = ICmpInst::Predicate::ICMP_UGT;
3317     break;
3318   case ICmpInst::Predicate::ICMP_ULE:
3319     //  x u<= x & (-1 >> y)    ->    x u<= (-1 >> y)
3320     assert(X == I.getOperand(0) && "instsimplify took care of commut. variant");
3321     DstPred = ICmpInst::Predicate::ICMP_ULE;
3322     break;
3323   case ICmpInst::Predicate::ICMP_SGT:
3324     //  x s> x & (-1 >> y)    ->    x s> (-1 >> y)
3325     if (X != I.getOperand(0)) // X must be on LHS of comparison!
3326       return nullptr;         // Ignore the other case.
3327     if (!match(M, m_Constant())) // Can not do this fold with non-constant.
3328       return nullptr;
3329     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3330       return nullptr;
3331     DstPred = ICmpInst::Predicate::ICMP_SGT;
3332     break;
3333   case ICmpInst::Predicate::ICMP_SGE:
3334     //  x & (-1 >> y) s>= x    ->    x s<= (-1 >> y)
3335     if (X != I.getOperand(1)) // X must be on RHS of comparison!
3336       return nullptr;         // Ignore the other case.
3337     if (!match(M, m_Constant())) // Can not do this fold with non-constant.
3338       return nullptr;
3339     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3340       return nullptr;
3341     DstPred = ICmpInst::Predicate::ICMP_SLE;
3342     break;
3343   case ICmpInst::Predicate::ICMP_SLT:
3344     //  x & (-1 >> y) s< x    ->    x s> (-1 >> y)
3345     if (X != I.getOperand(1)) // X must be on RHS of comparison!
3346       return nullptr;         // Ignore the other case.
3347     if (!match(M, m_Constant())) // Can not do this fold with non-constant.
3348       return nullptr;
3349     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3350       return nullptr;
3351     DstPred = ICmpInst::Predicate::ICMP_SGT;
3352     break;
3353   case ICmpInst::Predicate::ICMP_SLE:
3354     //  x s<= x & (-1 >> y)    ->    x s<= (-1 >> y)
3355     if (X != I.getOperand(0)) // X must be on LHS of comparison!
3356       return nullptr;         // Ignore the other case.
3357     if (!match(M, m_Constant())) // Can not do this fold with non-constant.
3358       return nullptr;
3359     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3360       return nullptr;
3361     DstPred = ICmpInst::Predicate::ICMP_SLE;
3362     break;
3363   default:
3364     llvm_unreachable("All possible folds are handled.");
3365   }
3366 
3367   // The mask value may be a vector constant that has undefined elements. But it
3368   // may not be safe to propagate those undefs into the new compare, so replace
3369   // those elements by copying an existing, defined, and safe scalar constant.
3370   Type *OpTy = M->getType();
3371   auto *VecC = dyn_cast<Constant>(M);
3372   if (OpTy->isVectorTy() && VecC && VecC->containsUndefElement()) {
3373     Constant *SafeReplacementConstant = nullptr;
3374     for (unsigned i = 0, e = OpTy->getVectorNumElements(); i != e; ++i) {
3375       if (!isa<UndefValue>(VecC->getAggregateElement(i))) {
3376         SafeReplacementConstant = VecC->getAggregateElement(i);
3377         break;
3378       }
3379     }
3380     assert(SafeReplacementConstant && "Failed to find undef replacement");
3381     M = Constant::replaceUndefsWith(VecC, SafeReplacementConstant);
3382   }
3383 
3384   return Builder.CreateICmp(DstPred, X, M);
3385 }
3386 
3387 /// Some comparisons can be simplified.
3388 /// In this case, we are looking for comparisons that look like
3389 /// a check for a lossy signed truncation.
3390 /// Folds:   (MaskedBits is a constant.)
3391 ///   ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
3392 /// Into:
3393 ///   (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
3394 /// Where  KeptBits = bitwidth(%x) - MaskedBits
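/// For example (i8, MaskedBits = 4, KeptBits = 4, illustrative):
///   ((%x << 4) a>> 4) == %x   holds iff %x fits in 4 signed bits,
///   i.e. iff (add %x, 8) u< 16.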
3395 static Value *
3396 foldICmpWithTruncSignExtendedVal(ICmpInst &I,
3397                                  InstCombiner::BuilderTy &Builder) {
3398   ICmpInst::Predicate SrcPred;
3399   Value *X;
3400   const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
3401   // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
3402   if (!match(&I, m_c_ICmp(SrcPred,
3403                           m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
3404                                           m_APInt(C1))),
3405                           m_Deferred(X))))
3406     return nullptr;
3407 
3408   // Potential handling of non-splats: for each element:
3409   //  * if both are undef, replace with constant 0.
3410   //    Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
3411   //  * if both are not undef, and are different, bailout.
3412   //  * else, only one is undef, then pick the non-undef one.
3413 
3414   // The shift amount must be equal.
3415   if (*C0 != *C1)
3416     return nullptr;
3417   const APInt &MaskedBits = *C0;
3418   assert(MaskedBits != 0 && "shift by zero should be folded away already.");
3419 
3420   ICmpInst::Predicate DstPred;
3421   switch (SrcPred) {
3422   case ICmpInst::Predicate::ICMP_EQ:
3423     // ((%x << MaskedBits) a>> MaskedBits) == %x
3424     //   =>
3425     // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
3426     DstPred = ICmpInst::Predicate::ICMP_ULT;
3427     break;
3428   case ICmpInst::Predicate::ICMP_NE:
3429     // ((%x << MaskedBits) a>> MaskedBits) != %x
3430     //   =>
3431     // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
3432     DstPred = ICmpInst::Predicate::ICMP_UGE;
3433     break;
3434   // FIXME: are more folds possible?
3435   default:
3436     return nullptr;
3437   }
3438 
3439   auto *XType = X->getType();
3440   const unsigned XBitWidth = XType->getScalarSizeInBits();
3441   const APInt BitWidth = APInt(XBitWidth, XBitWidth);
3442   assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
3443 
3444   // KeptBits = bitwidth(%x) - MaskedBits
3445   const APInt KeptBits = BitWidth - MaskedBits;
3446   assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
3447   // ICmpCst = (1 << KeptBits)
3448   const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
3449   assert(ICmpCst.isPowerOf2());
3450   // AddCst = (1 << (KeptBits-1))
3451   const APInt AddCst = ICmpCst.lshr(1);
3452   assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
3453 
3454   // T0 = add %x, AddCst
3455   Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
3456   // T1 = T0 DstPred ICmpCst
3457   Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
3458 
3459   return T1;
3460 }
3461 
3462 // Given pattern:
3463 //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3464 // we should move shifts to the same hand of 'and', i.e. rewrite as
3465 //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3466 // We are only interested in opposite logical shifts here.
3467 // One of the shifts can be truncated.
3468 // If we can, we want to end up creating 'lshr' shift.
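// For example (illustrative): ((x << 1) & (y u>> 2)) == 0
//   -->  ((y u>> 3) & x) == 0,  since 1 + 2 u< bitwidth(x).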
3469 static Value *
3470 foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
3471                                            InstCombiner::BuilderTy &Builder) {
3472   if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
3473       !I.getOperand(0)->hasOneUse())
3474     return nullptr;
3475 
3476   auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
3477 
3478   // Look for an 'and' of two logical shifts, one of which may be truncated.
3479   // We use m_TruncOrSelf() on the RHS to correctly handle the commutative case.
3480   Instruction *XShift, *MaybeTruncation, *YShift;
3481   if (!match(
3482           I.getOperand(0),
3483           m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
3484                   m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
3485                                    m_AnyLogicalShift, m_Instruction(YShift))),
3486                                m_Instruction(MaybeTruncation)))))
3487     return nullptr;
3488 
3489   // We potentially looked past 'trunc', but only when matching YShift,
3490   // therefore YShift must have the widest type.
3491   Instruction *WidestShift = YShift;
3492   // Therefore XShift must have the shallowest type.
3493   // Or they both have identical types if there was no truncation.
3494   Instruction *NarrowestShift = XShift;
3495 
3496   Type *WidestTy = WidestShift->getType();
3497   assert(NarrowestShift->getType() == I.getOperand(0)->getType() &&
3498          "We did not look past any shifts while matching XShift though.");
3499   bool HadTrunc = WidestTy != I.getOperand(0)->getType();
3500 
3501   // If YShift is a 'lshr', swap the shifts around.
3502   if (match(YShift, m_LShr(m_Value(), m_Value())))
3503     std::swap(XShift, YShift);
3504 
3505   // The shifts must be in opposite directions.
3506   auto XShiftOpcode = XShift->getOpcode();
3507   if (XShiftOpcode == YShift->getOpcode())
3508     return nullptr; // Do not care about same-direction shifts here.
3509 
3510   Value *X, *XShAmt, *Y, *YShAmt;
3511   match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
3512   match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
3513 
3514   // If one of the values being shifted is a constant, then we will end with
3515   // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
3516   // however, we will need to ensure that we won't increase instruction count.
3517   if (!isa<Constant>(X) && !isa<Constant>(Y)) {
3518     // At least one of the hands of the 'and' should be one-use shift.
3519     if (!match(I.getOperand(0),
3520                m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
3521       return nullptr;
3522     if (HadTrunc) {
3523       // Due to the 'trunc', we will need to widen X. For that either the old
3524       // 'trunc' or the shift amt in the non-truncated shift should be one-use.
3525       if (!MaybeTruncation->hasOneUse() &&
3526           !NarrowestShift->getOperand(1)->hasOneUse())
3527         return nullptr;
3528     }
3529   }
3530 
3531   // We have two shift amounts from two different shifts. The types of those
3532   // shift amounts may not match. If that's the case let's bailout now.
3533   if (XShAmt->getType() != YShAmt->getType())
3534     return nullptr;
3535 
3536   // Can we fold (XShAmt+YShAmt) ?
3537   auto *NewShAmt = dyn_cast_or_null<Constant>(
3538       SimplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
3539                       /*isNUW=*/false, SQ.getWithInstruction(&I)));
3540   if (!NewShAmt)
3541     return nullptr;
3542   NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy);
3543   unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
3544 
3545   // Is the new shift amount smaller than the bit width?
3546   // FIXME: could also rely on ConstantRange.
3547   if (!match(NewShAmt,
3548              m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
3549                                 APInt(WidestBitWidth, WidestBitWidth))))
3550     return nullptr;
3551 
3552   // An extra legality check is needed if we had trunc-of-lshr.
3553   if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
3554     auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
3555                     WidestShift]() {
3556       // It isn't obvious whether it's worth it to analyze non-constants here.
3557       // Also, let's basically give up on non-splat cases, pessimizing vectors.
3558       // If *any* of these preconditions matches we can perform the fold.
3559       Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
3560                                     ? NewShAmt->getSplatValue()
3561                                     : NewShAmt;
3562       // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
3563       if (NewShAmtSplat &&
3564           (NewShAmtSplat->isNullValue() ||
3565            NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
3566         return true;
3567       // We consider *min* leading zeros so a single outlier
3568       // blocks the transform as opposed to allowing it.
3569       if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
3570         KnownBits Known = computeKnownBits(C, SQ.DL);
3571         unsigned MinLeadZero = Known.countMinLeadingZeros();
3572         // If the value being shifted has at most lowest bit set we can fold.
3573         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3574         if (MaxActiveBits <= 1)
3575           return true;
3576         // Precondition:  NewShAmt u<= countLeadingZeros(C)
3577         if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
3578           return true;
3579       }
3580       if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
3581         KnownBits Known = computeKnownBits(C, SQ.DL);
3582         unsigned MinLeadZero = Known.countMinLeadingZeros();
3583         // If the value being shifted has at most lowest bit set we can fold.
3584         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3585         if (MaxActiveBits <= 1)
3586           return true;
3587         // Precondition:  ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
3588         if (NewShAmtSplat) {
3589           APInt AdjNewShAmt =
3590               (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
3591           if (AdjNewShAmt.ule(MinLeadZero))
3592             return true;
3593         }
3594       }
3595       return false; // Can't tell if it's ok.
3596     };
3597     if (!CanFold())
3598       return nullptr;
3599   }
3600 
3601   // All good, we can do this fold.
3602   X = Builder.CreateZExt(X, WidestTy);
3603   Y = Builder.CreateZExt(Y, WidestTy);
3604   // The shift is the same that was for X.
3605   Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
3606                   ? Builder.CreateLShr(X, NewShAmt)
3607                   : Builder.CreateShl(X, NewShAmt);
3608   Value *T1 = Builder.CreateAnd(T0, Y);
3609   return Builder.CreateICmp(I.getPredicate(), T1,
3610                             Constant::getNullValue(WidestTy));
3611 }
3612 
3613 /// Fold
3614 ///   (-1 u/ x) u< y
3615 ///   ((x * y) u/ x) != y
3616 /// to
3617 ///   @llvm.umul.with.overflow(x, y) plus extraction of overflow bit
3618 /// Note that the comparison is commutative, while inverted (u>=, ==) predicate
3619 /// will mean that we are looking for the opposite answer.
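/// (For non-zero x, "x * y" overflows an unsigned multiply exactly when
/// y u> (-1 u/ x), which is the same as "(-1 u/ x) u< y".)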
3620 Value *InstCombiner::foldUnsignedMultiplicationOverflowCheck(ICmpInst &I) {
3621   ICmpInst::Predicate Pred;
3622   Value *X, *Y;
3623   Instruction *Mul;
3624   bool NeedNegation;
3625   // Look for: (-1 u/ x) u</u>= y
3626   if (!I.isEquality() &&
3627       match(&I, m_c_ICmp(Pred, m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
3628                          m_Value(Y)))) {
3629     Mul = nullptr;
3630     // Canonicalize as-if y was on RHS.
3631     if (I.getOperand(1) != Y)
3632       Pred = I.getSwappedPredicate();
3633 
3634     // Are we checking that overflow does not happen, or does happen?
3635     switch (Pred) {
3636     case ICmpInst::Predicate::ICMP_ULT:
3637       NeedNegation = false;
3638       break; // OK
3639     case ICmpInst::Predicate::ICMP_UGE:
3640       NeedNegation = true;
3641       break; // OK
3642     default:
3643       return nullptr; // Wrong predicate.
3644     }
3645   } else // Look for: ((x * y) u/ x) !=/== y
3646       if (I.isEquality() &&
3647           match(&I, m_c_ICmp(Pred, m_Value(Y),
3648                              m_OneUse(m_UDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
3649                                                                   m_Value(X)),
3650                                                           m_Instruction(Mul)),
3651                                              m_Deferred(X)))))) {
3652     NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
3653   } else
3654     return nullptr;
3655 
3656   BuilderTy::InsertPointGuard Guard(Builder);
3657   // If the pattern included (x * y), we'll want to insert new instructions
3658   // right before that original multiplication so that we can replace it.
3659   bool MulHadOtherUses = Mul && !Mul->hasOneUse();
3660   if (MulHadOtherUses)
3661     Builder.SetInsertPoint(Mul);
3662 
3663   Function *F = Intrinsic::getDeclaration(
3664       I.getModule(), Intrinsic::umul_with_overflow, X->getType());
3665   CallInst *Call = Builder.CreateCall(F, {X, Y}, "umul");
3666 
3667   // If the multiplication was used elsewhere, to ensure that we don't leave
3668   // "duplicate" instructions, replace uses of that original multiplication
3669   // with the multiplication result from the with.overflow intrinsic.
3670   if (MulHadOtherUses)
3671     replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "umul.val"));
3672 
3673   Value *Res = Builder.CreateExtractValue(Call, 1, "umul.ov");
3674   if (NeedNegation) // This technically increases instruction count.
3675     Res = Builder.CreateNot(Res, "umul.not.ov");
3676 
3677   return Res;
3678 }
3679 
3680 /// Try to fold icmp (binop), X or icmp X, (binop).
3681 /// TODO: A large part of this logic is duplicated in InstSimplify's
3682 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
3683 /// duplication.
3684 Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I, const SimplifyQuery &SQ) {
3685   const SimplifyQuery Q = SQ.getWithInstruction(&I);
3686   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3687 
3688   // Special logic for binary operators.
3689   BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
3690   BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
3691   if (!BO0 && !BO1)
3692     return nullptr;
3693 
3694   const CmpInst::Predicate Pred = I.getPredicate();
3695   Value *X;
3696 
3697   // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
3698   // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
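  // (The unsigned add wraps exactly when X u> ~Op1, i.e. X u> UINT_MAX - Op1,
  // so testing "(Op1 + X) u< Op1" is testing "~Op1 u< X".)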
3699   if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
3700       (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3701     return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
3702   // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
3703   if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
3704       (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3705     return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
3706 
3707   bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
3708   if (BO0 && isa<OverflowingBinaryOperator>(BO0))
3709     NoOp0WrapProblem =
3710         ICmpInst::isEquality(Pred) ||
3711         (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
3712         (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
3713   if (BO1 && isa<OverflowingBinaryOperator>(BO1))
3714     NoOp1WrapProblem =
3715         ICmpInst::isEquality(Pred) ||
3716         (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
3717         (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
3718 
3719   // Analyze the case when either Op0 or Op1 is an add instruction.
3720   // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
3721   Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3722   if (BO0 && BO0->getOpcode() == Instruction::Add) {
3723     A = BO0->getOperand(0);
3724     B = BO0->getOperand(1);
3725   }
3726   if (BO1 && BO1->getOpcode() == Instruction::Add) {
3727     C = BO1->getOperand(0);
3728     D = BO1->getOperand(1);
3729   }
3730 
3731   // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
3732   // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
3733   if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
3734     return new ICmpInst(Pred, A == Op1 ? B : A,
3735                         Constant::getNullValue(Op1->getType()));
3736 
3737   // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
3738   // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
3739   if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
3740     return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
3741                         C == Op0 ? D : C);
3742 
3743   // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
3744   if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
3745       NoOp1WrapProblem) {
3746     // Determine Y and Z in the form icmp (X+Y), (X+Z).
3747     Value *Y, *Z;
3748     if (A == C) {
3749       // C + B == C + D  ->  B == D
3750       Y = B;
3751       Z = D;
3752     } else if (A == D) {
3753       // D + B == C + D  ->  B == C
3754       Y = B;
3755       Z = C;
3756     } else if (B == C) {
3757       // A + C == C + D  ->  A == D
3758       Y = A;
3759       Z = D;
3760     } else {
3761       assert(B == D);
3762       // A + D == C + D  ->  A == C
3763       Y = A;
3764       Z = C;
3765     }
3766     return new ICmpInst(Pred, Y, Z);
3767   }
3768 
3769   // icmp slt (A + -1), Op1 -> icmp sle A, Op1
3770   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
3771       match(B, m_AllOnes()))
3772     return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
3773 
3774   // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
3775   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
3776       match(B, m_AllOnes()))
3777     return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
3778 
3779   // icmp sle (A + 1), Op1 -> icmp slt A, Op1
3780   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
3781     return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
3782 
3783   // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
3784   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
3785     return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
3786 
3787   // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
3788   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
3789       match(D, m_AllOnes()))
3790     return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
3791 
3792   // icmp sle Op0, (C + -1) -> icmp slt Op0, C
3793   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
3794       match(D, m_AllOnes()))
3795     return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
3796 
3797   // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
3798   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
3799     return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
3800 
3801   // icmp slt Op0, (C + 1) -> icmp sle Op0, C
3802   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
3803     return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
3804 
3805   // TODO: The subtraction-related identities shown below also hold, but
3806   // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
3807   // wouldn't happen even if they were implemented.
3808   //
3809   // icmp ult (A - 1), Op1 -> icmp ule A, Op1
3810   // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
3811   // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
3812   // icmp ule Op0, (C - 1) -> icmp ult Op0, C
3813 
3814   // icmp ule (A + 1), Op1 -> icmp ult A, Op1
3815   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
3816     return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
3817 
3818   // icmp ugt (A + 1), Op1 -> icmp uge A, Op1
3819   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
3820     return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
3821 
3822   // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
3823   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
3824     return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
3825 
3826   // icmp ult Op0, (C + 1) -> icmp ule Op0, C
3827   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
3828     return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
3829 
3830   // if C1 has greater magnitude than C2:
3831   //  icmp (A + C1), (C + C2) -> icmp (A + C3), C
3832   //  s.t. C3 = C1 - C2
3833   //
3834   // if C2 has greater magnitude than C1:
3835   //  icmp (A + C1), (C + C2) -> icmp A, (C + C3)
3836   //  s.t. C3 = C2 - C1
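       // E.g. (a sketch with small same-sign constants): with nsw on both adds,
       //   icmp slt (add nsw i32 %a, 7), (add nsw i32 %c, 3)
       // becomes
       //   icmp slt (add nsw i32 %a, 4), %c
       // since |C1| = 7 >= |C2| = 3 and C3 = 7 - 3 = 4.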
3837   if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
3838       (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
3839     if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
3840       if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
3841         const APInt &AP1 = C1->getValue();
3842         const APInt &AP2 = C2->getValue();
3843         if (AP1.isNegative() == AP2.isNegative()) {
3844           APInt AP1Abs = C1->getValue().abs();
3845           APInt AP2Abs = C2->getValue().abs();
3846           if (AP1Abs.uge(AP2Abs)) {
3847             ConstantInt *C3 = Builder.getInt(AP1 - AP2);
3848             Value *NewAdd = Builder.CreateNSWAdd(A, C3);
3849             return new ICmpInst(Pred, NewAdd, C);
3850           } else {
3851             ConstantInt *C3 = Builder.getInt(AP2 - AP1);
3852             Value *NewAdd = Builder.CreateNSWAdd(C, C3);
3853             return new ICmpInst(Pred, A, NewAdd);
3854           }
3855         }
3856       }
3857 
3858   // Analyze the case when either Op0 or Op1 is a sub instruction.
3859   // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
3860   A = nullptr;
3861   B = nullptr;
3862   C = nullptr;
3863   D = nullptr;
3864   if (BO0 && BO0->getOpcode() == Instruction::Sub) {
3865     A = BO0->getOperand(0);
3866     B = BO0->getOperand(1);
3867   }
3868   if (BO1 && BO1->getOpcode() == Instruction::Sub) {
3869     C = BO1->getOperand(0);
3870     D = BO1->getOperand(1);
3871   }
3872 
3873   // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
3874   if (A == Op1 && NoOp0WrapProblem)
3875     return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
3876   // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
3877   if (C == Op0 && NoOp1WrapProblem)
3878     return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
3879 
3880   // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
3881   // (A - B) u>/u<= A --> B u>/u<= A
3882   if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3883     return new ICmpInst(Pred, B, A);
3884   // C u</u>= (C - D) --> C u</u>= D
3885   if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3886     return new ICmpInst(Pred, C, D);
3887   // (A - B) u>=/u< A --> B u>/u<= A  iff B != 0
3888   if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
3889       isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
3890     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
3891   // C u<=/u> (C - D) --> C u</u>= D  iff D != 0
3892   if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
3893       isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
3894     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
3895 
3896   // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
3897   if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
3898     return new ICmpInst(Pred, A, C);
3899 
3900   // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
3901   if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
3902     return new ICmpInst(Pred, D, B);
3903 
3904   // icmp (0-X) < cst --> X > -cst
3905   if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
3906     Value *X;
3907     if (match(BO0, m_Neg(m_Value(X))))
3908       if (Constant *RHSC = dyn_cast<Constant>(Op1))
3909         if (RHSC->isNotMinSignedValue())
3910           return new ICmpInst(I.getSwappedPredicate(), X,
3911                               ConstantExpr::getNeg(RHSC));
3912   }
3913 
3914   BinaryOperator *SRem = nullptr;
3915   // icmp (srem X, Y), Y
3916   if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
3917     SRem = BO0;
3918   // icmp Y, (srem X, Y)
3919   else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
3920            Op0 == BO1->getOperand(1))
3921     SRem = BO1;
3922   if (SRem) {
3923     // We don't check hasOneUse to avoid increasing register pressure because
3924     // the value we use is the same value this instruction was already using.
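         // Rationale: for nonzero Y, |X srem Y| < |Y| and the remainder takes the
         // sign of X, so "Y s>/s>= (X srem Y)" holds exactly when Y is positive and
         // "Y s</s<= (X srem Y)" exactly when Y is negative; equality can never
         // hold. Y == 0 is immediate UB, so any result is acceptable there.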
3925     switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
3926     default:
3927       break;
3928     case ICmpInst::ICMP_EQ:
3929       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
3930     case ICmpInst::ICMP_NE:
3931       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
3932     case ICmpInst::ICMP_SGT:
3933     case ICmpInst::ICMP_SGE:
3934       return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
3935                           Constant::getAllOnesValue(SRem->getType()));
3936     case ICmpInst::ICMP_SLT:
3937     case ICmpInst::ICMP_SLE:
3938       return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
3939                           Constant::getNullValue(SRem->getType()));
3940     }
3941   }
3942 
3943   if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
3944       BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
3945     switch (BO0->getOpcode()) {
3946     default:
3947       break;
3948     case Instruction::Add:
3949     case Instruction::Sub:
3950     case Instruction::Xor: {
3951       if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
3952         return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3953 
3954       const APInt *C;
3955       if (match(BO0->getOperand(1), m_APInt(C))) {
3956         // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
3957         if (C->isSignMask()) {
3958           ICmpInst::Predicate NewPred =
3959               I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3960           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3961         }
3962 
3963         // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
3964         if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
3965           ICmpInst::Predicate NewPred =
3966               I.isSigned() ? I.getUnsignedPredicate() : I.getSignedPredicate();
3967           NewPred = I.getSwappedPredicate(NewPred);
3968           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
3969         }
3970       }
3971       break;
3972     }
3973     case Instruction::Mul: {
3974       if (!I.isEquality())
3975         break;
3976 
3977       const APInt *C;
3978       if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() &&
3979           !C->isOneValue()) {
3980         // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
3981         // Mask = -1 >> count-trailing-zeros(C).
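             // E.g. (sketch): with i8 operands and C == 24 (three trailing zeros),
             //   icmp eq (mul i8 %x, 24), (mul i8 %y, 24)
             // becomes
             //   icmp eq (and i8 %x, 31), (and i8 %y, 31)
             // because x*24 == y*24 (mod 256) exactly when the low 5 bits of x and y
             // agree.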
3982         if (unsigned TZs = C->countTrailingZeros()) {
3983           Constant *Mask = ConstantInt::get(
3984               BO0->getType(),
3985               APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
3986           Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
3987           Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
3988           return new ICmpInst(Pred, And1, And2);
3989         }
3990         // If there are no trailing zeros in the multiplier, just eliminate
3991         // the multiplies (no masking is needed):
3992         // icmp eq/ne (X * C), (Y * C) --> icmp eq/ne X, Y
3993         return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
3994       }
3995       break;
3996     }
3997     case Instruction::UDiv:
3998     case Instruction::LShr:
3999       if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
4000         break;
4001       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4002 
4003     case Instruction::SDiv:
4004       if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
4005         break;
4006       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4007 
4008     case Instruction::AShr:
4009       if (!BO0->isExact() || !BO1->isExact())
4010         break;
4011       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4012 
4013     case Instruction::Shl: {
4014       bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
4015       bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
4016       if (!NUW && !NSW)
4017         break;
4018       if (!NSW && I.isSigned())
4019         break;
4020       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4021     }
4022     }
4023   }
4024 
4025   if (BO0) {
4026     // Transform  A & (L - 1) `ult` L --> L != 0
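         // Rationale: if L == 0, nothing is u< 0, so the compare is false; if
         // L != 0, then (A & (L - 1)) u<= L - 1 u< L, so it is true. Either way
         // the result is exactly "L != 0".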
4027     auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
4028     auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
4029 
4030     if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
4031       auto *Zero = Constant::getNullValue(BO0->getType());
4032       return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
4033     }
4034   }
4035 
4036   if (Value *V = foldUnsignedMultiplicationOverflowCheck(I))
4037     return replaceInstUsesWith(I, V);
4038 
4039   if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder))
4040     return replaceInstUsesWith(I, V);
4041 
4042   if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
4043     return replaceInstUsesWith(I, V);
4044 
4045   if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
4046     return replaceInstUsesWith(I, V);
4047 
4048   return nullptr;
4049 }
4050 
4051 /// Fold icmp Pred min|max(X, Y), X.
4052 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
4053   ICmpInst::Predicate Pred = Cmp.getPredicate();
4054   Value *Op0 = Cmp.getOperand(0);
4055   Value *X = Cmp.getOperand(1);
4056 
4057   // Canonicalize minimum or maximum operand to LHS of the icmp.
4058   if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
4059       match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
4060       match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
4061       match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
4062     std::swap(Op0, X);
4063     Pred = Cmp.getSwappedPredicate();
4064   }
4065 
4066   Value *Y;
4067   if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
4068     // smin(X, Y)  == X --> X s<= Y
4069     // smin(X, Y) s>= X --> X s<= Y
4070     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
4071       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
4072 
4073     // smin(X, Y) != X --> X s> Y
4074     // smin(X, Y) s< X --> X s> Y
4075     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
4076       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
4077 
4078     // These cases should be handled in InstSimplify:
4079     // smin(X, Y) s<= X --> true
4080     // smin(X, Y) s> X --> false
4081     return nullptr;
4082   }
4083 
4084   if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
4085     // smax(X, Y)  == X --> X s>= Y
4086     // smax(X, Y) s<= X --> X s>= Y
4087     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
4088       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
4089 
4090     // smax(X, Y) != X --> X s< Y
4091     // smax(X, Y) s> X --> X s< Y
4092     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
4093       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
4094 
4095     // These cases should be handled in InstSimplify:
4096     // smax(X, Y) s>= X --> true
4097     // smax(X, Y) s< X --> false
4098     return nullptr;
4099   }
4100 
4101   if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
4102     // umin(X, Y)  == X --> X u<= Y
4103     // umin(X, Y) u>= X --> X u<= Y
4104     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
4105       return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
4106 
4107     // umin(X, Y) != X --> X u> Y
4108     // umin(X, Y) u< X --> X u> Y
4109     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
4110       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
4111 
4112     // These cases should be handled in InstSimplify:
4113     // umin(X, Y) u<= X --> true
4114     // umin(X, Y) u> X --> false
4115     return nullptr;
4116   }
4117 
4118   if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
4119     // umax(X, Y)  == X --> X u>= Y
4120     // umax(X, Y) u<= X --> X u>= Y
4121     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
4122       return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
4123 
4124     // umax(X, Y) != X --> X u< Y
4125     // umax(X, Y) u> X --> X u< Y
4126     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
4127       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
4128 
4129     // These cases should be handled in InstSimplify:
4130     // umax(X, Y) u>= X --> true
4131     // umax(X, Y) u< X --> false
4132     return nullptr;
4133   }
4134 
4135   return nullptr;
4136 }
4137 
4138 Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
4139   if (!I.isEquality())
4140     return nullptr;
4141 
4142   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4143   const CmpInst::Predicate Pred = I.getPredicate();
4144   Value *A, *B, *C, *D;
4145   if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4146     if (A == Op1 || B == Op1) { // (A^B) == A  ->  B == 0
4147       Value *OtherVal = A == Op1 ? B : A;
4148       return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4149     }
4150 
4151     if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
4152       // A^c1 == C^c2 --> A == C^(c1^c2)
4153       ConstantInt *C1, *C2;
4154       if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
4155           Op1->hasOneUse()) {
4156         Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
4157         Value *Xor = Builder.CreateXor(C, NC);
4158         return new ICmpInst(Pred, A, Xor);
4159       }
4160 
4161       // A^B == A^D -> B == D
4162       if (A == C)
4163         return new ICmpInst(Pred, B, D);
4164       if (A == D)
4165         return new ICmpInst(Pred, B, C);
4166       if (B == C)
4167         return new ICmpInst(Pred, A, D);
4168       if (B == D)
4169         return new ICmpInst(Pred, A, C);
4170     }
4171   }
4172 
4173   if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
4174     // A == (A^B)  ->  B == 0
4175     Value *OtherVal = A == Op0 ? B : A;
4176     return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4177   }
4178 
4179   // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
4180   if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
4181       match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
4182     Value *X = nullptr, *Y = nullptr, *Z = nullptr;
4183 
4184     if (A == C) {
4185       X = B;
4186       Y = D;
4187       Z = A;
4188     } else if (A == D) {
4189       X = B;
4190       Y = C;
4191       Z = A;
4192     } else if (B == C) {
4193       X = A;
4194       Y = D;
4195       Z = B;
4196     } else if (B == D) {
4197       X = A;
4198       Y = C;
4199       Z = B;
4200     }
4201 
4202     if (X) { // Build (X^Y) & Z
4203       Op1 = Builder.CreateXor(X, Y);
4204       Op1 = Builder.CreateAnd(Op1, Z);
4205       I.setOperand(0, Op1);
4206       I.setOperand(1, Constant::getNullValue(Op1->getType()));
4207       return &I;
4208     }
4209   }
4210 
4211   // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
4212   // and       (B & (1<<X)-1) == (zext A) --> A == (trunc B)
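       // E.g. (sketch): with %a : i8 and %b : i32,
       //   icmp eq (zext i8 %a to i32), (and i32 %b, 255)
       // becomes
       //   icmp eq i8 %a, (trunc i32 %b to i8)
       // since 255 + 1 is a power of two whose log2 equals the width of %a.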
4213   ConstantInt *Cst1;
4214   if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
4215        match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
4216       (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
4217        match(Op1, m_ZExt(m_Value(A))))) {
4218     APInt Pow2 = Cst1->getValue() + 1;
4219     if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
4220         Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
4221       return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
4222   }
4223 
4224   // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
4225   // For lshr and ashr pairs.
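       // E.g. (sketch): for i32 values and a shift amount of 5,
       //   icmp eq (lshr i32 %a, 5), (lshr i32 %b, 5)
       // becomes
       //   icmp ult (xor i32 %a, %b), 32
       // i.e. the shifted values match exactly when a^b has no bit set at or
       // above bit 5.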
4226   if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4227        match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
4228       (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4229        match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
4230     unsigned TypeBits = Cst1->getBitWidth();
4231     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4232     if (ShAmt < TypeBits && ShAmt != 0) {
4233       ICmpInst::Predicate NewPred =
4234           Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
4235       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4236       APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
4237       return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
4238     }
4239   }
4240 
4241   // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
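       // E.g. (sketch): for i32 values shifted left by 8,
       //   icmp eq (shl i32 %a, 8), (shl i32 %b, 8)
       // becomes
       //   icmp eq (and (xor i32 %a, %b), 16777215), 0
       // since only the low 32 - 8 = 24 bits survive the shift.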
4242   if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
4243       match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
4244     unsigned TypeBits = Cst1->getBitWidth();
4245     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4246     if (ShAmt < TypeBits && ShAmt != 0) {
4247       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4248       APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
4249       Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
4250                                       I.getName() + ".mask");
4251       return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
4252     }
4253   }
4254 
4255   // Transform "icmp eq (trunc (lshr(X, cst1)), cst" to
4256   // "icmp (and X, mask), cst"
4257   uint64_t ShAmt = 0;
4258   if (Op0->hasOneUse() &&
4259       match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
4260       match(Op1, m_ConstantInt(Cst1)) &&
4261       // Only do this when A has multiple uses.  This is most important to do
4262       // when it exposes other optimizations.
4263       !A->hasOneUse()) {
4264     unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
4265 
4266     if (ShAmt < ASize) {
4267       APInt MaskV =
4268           APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
4269       MaskV <<= ShAmt;
4270 
4271       APInt CmpV = Cst1->getValue().zext(ASize);
4272       CmpV <<= ShAmt;
4273 
4274       Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
4275       return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
4276     }
4277   }
4278 
4279   // If both operands are byte-swapped or bit-reversed, just compare the
4280   // original values.
4281   // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant()
4282   // and handle more intrinsics.
4283   if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) ||
4284       (match(Op0, m_BitReverse(m_Value(A))) &&
4285        match(Op1, m_BitReverse(m_Value(B)))))
4286     return new ICmpInst(Pred, A, B);
4287 
4288   // Canonicalize checking for a power-of-2-or-zero value:
4289   // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
4290   // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
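       // Rationale: A & (A - 1) clears the lowest set bit of A, so it is zero
       // exactly when A has at most one bit set, i.e. ctpop(A) u< 2.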
4291   if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
4292                                    m_Deferred(A)))) ||
4293       !match(Op1, m_ZeroInt()))
4294     A = nullptr;
4295 
4296   // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
4297   // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
4298   if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
4299     A = Op1;
4300   else if (match(Op1,
4301                  m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
4302     A = Op0;
4303 
4304   if (A) {
4305     Type *Ty = A->getType();
4306     CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
4307     return Pred == ICmpInst::ICMP_EQ
4308         ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop, ConstantInt::get(Ty, 2))
4309         : new ICmpInst(ICmpInst::ICMP_UGT, CtPop, ConstantInt::get(Ty, 1));
4310   }
4311 
4312   return nullptr;
4313 }
4314 
4315 static Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp,
4316                                            InstCombiner::BuilderTy &Builder) {
4317   assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
4318   auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
4319   Value *X;
4320   if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
4321     return nullptr;
4322 
4323   bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
4324   bool IsSignedCmp = ICmp.isSigned();
4325   if (auto *CastOp1 = dyn_cast<CastInst>(ICmp.getOperand(1))) {
4326     // If the signedness of the two casts doesn't agree (i.e. one is a sext
4327     // and the other is a zext), then we can't handle this.
4328     // TODO: This is too strict. We can handle some predicates (equality?).
4329     if (CastOp0->getOpcode() != CastOp1->getOpcode())
4330       return nullptr;
4331 
4332     // Not an extension from the same type?
4333     Value *Y = CastOp1->getOperand(0);
4334     Type *XTy = X->getType(), *YTy = Y->getType();
4335     if (XTy != YTy) {
4336       // One of the casts must have one use because we are creating a new cast.
4337       if (!CastOp0->hasOneUse() && !CastOp1->hasOneUse())
4338         return nullptr;
4339       // Extend the narrower operand to the type of the wider operand.
4340       if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
4341         X = Builder.CreateCast(CastOp0->getOpcode(), X, YTy);
4342       else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
4343         Y = Builder.CreateCast(CastOp0->getOpcode(), Y, XTy);
4344       else
4345         return nullptr;
4346     }
4347 
4348     // (zext X) == (zext Y) --> X == Y
4349     // (sext X) == (sext Y) --> X == Y
4350     if (ICmp.isEquality())
4351       return new ICmpInst(ICmp.getPredicate(), X, Y);
4352 
4353     // A signed comparison of sign extended values simplifies into a
4354     // signed comparison.
4355     if (IsSignedCmp && IsSignedExt)
4356       return new ICmpInst(ICmp.getPredicate(), X, Y);
4357 
4358     // The other three cases all fold into an unsigned comparison.
4359     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
4360   }
4361 
4362   // Below here, we are only folding a compare with constant.
4363   auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
4364   if (!C)
4365     return nullptr;
4366 
4367   // Compute the constant that would happen if we truncated to SrcTy then
4368   // re-extended to DestTy.
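       // E.g. (sketch): for "icmp slt (sext i8 %x to i32), 100", truncating 100 to
       // i8 and sign-extending it back still yields 100, so the compare can become
       // "icmp slt i8 %x, 100"; a constant such as 1000 would not round-trip.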
4369   Type *SrcTy = CastOp0->getSrcTy();
4370   Type *DestTy = CastOp0->getDestTy();
4371   Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
4372   Constant *Res2 = ConstantExpr::getCast(CastOp0->getOpcode(), Res1, DestTy);
4373 
4374   // If the re-extended constant didn't change...
4375   if (Res2 == C) {
4376     if (ICmp.isEquality())
4377       return new ICmpInst(ICmp.getPredicate(), X, Res1);
4378 
4379     // A signed comparison of sign extended values simplifies into a
4380     // signed comparison.
4381     if (IsSignedExt && IsSignedCmp)
4382       return new ICmpInst(ICmp.getPredicate(), X, Res1);
4383 
4384     // The other three cases all fold into an unsigned comparison.
4385     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res1);
4386   }
4387 
4388   // The re-extended constant changed, partly changed (in the case of a vector),
4389   // or could not be determined to be equal (in the case of a constant
4390   // expression), so the constant cannot be represented in the shorter type.
4391   // All the cases that fold to true or false will have already been handled
4392   // by SimplifyICmpInst, so only deal with the tricky case.
4393   if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
4394     return nullptr;
4395 
4396   // Is source op positive?
4397   // icmp ult (sext X), C --> icmp sgt X, -1
4398   if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
4399     return new ICmpInst(CmpInst::ICMP_SGT, X, Constant::getAllOnesValue(SrcTy));
4400 
4401   // Is source op negative?
4402   // icmp ugt (sext X), C --> icmp slt X, 0
4403   assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
4404   return new ICmpInst(CmpInst::ICMP_SLT, X, Constant::getNullValue(SrcTy));
4405 }
4406 
4407 /// Handle icmp (cast x), (cast or constant).
4408 Instruction *InstCombiner::foldICmpWithCastOp(ICmpInst &ICmp) {
4409   auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
4410   if (!CastOp0)
4411     return nullptr;
4412   if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
4413     return nullptr;
4414 
4415   Value *Op0Src = CastOp0->getOperand(0);
4416   Type *SrcTy = CastOp0->getSrcTy();
4417   Type *DestTy = CastOp0->getDestTy();
4418 
4419   // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
4420   // integer type is the same size as the pointer type.
4421   auto CompatibleSizes = [&](Type *SrcTy, Type *DestTy) {
4422     if (isa<VectorType>(SrcTy)) {
4423       SrcTy = cast<VectorType>(SrcTy)->getElementType();
4424       DestTy = cast<VectorType>(DestTy)->getElementType();
4425     }
4426     return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth();
4427   };
4428   if (CastOp0->getOpcode() == Instruction::PtrToInt &&
4429       CompatibleSizes(SrcTy, DestTy)) {
4430     Value *NewOp1 = nullptr;
4431     if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
4432       Value *PtrSrc = PtrToIntOp1->getOperand(0);
4433       if (PtrSrc->getType()->getPointerAddressSpace() ==
4434           Op0Src->getType()->getPointerAddressSpace()) {
4435         NewOp1 = PtrToIntOp1->getOperand(0);
4436         // If the pointer types don't match, insert a bitcast.
4437         if (Op0Src->getType() != NewOp1->getType())
4438           NewOp1 = Builder.CreateBitCast(NewOp1, Op0Src->getType());
4439       }
4440     } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
4441       NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
4442     }
4443 
4444     if (NewOp1)
4445       return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
4446   }
4447 
4448   return foldICmpWithZextOrSext(ICmp, Builder);
4449 }
4450 
4451 static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS) {
4452   switch (BinaryOp) {
4453     default:
4454       llvm_unreachable("Unsupported binary op");
4455     case Instruction::Add:
4456     case Instruction::Sub:
4457       return match(RHS, m_Zero());
4458     case Instruction::Mul:
4459       return match(RHS, m_One());
4460   }
4461 }
4462 
4463 OverflowResult InstCombiner::computeOverflow(
4464     Instruction::BinaryOps BinaryOp, bool IsSigned,
4465     Value *LHS, Value *RHS, Instruction *CxtI) const {
4466   switch (BinaryOp) {
4467     default:
4468       llvm_unreachable("Unsupported binary op");
4469     case Instruction::Add:
4470       if (IsSigned)
4471         return computeOverflowForSignedAdd(LHS, RHS, CxtI);
4472       else
4473         return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
4474     case Instruction::Sub:
4475       if (IsSigned)
4476         return computeOverflowForSignedSub(LHS, RHS, CxtI);
4477       else
4478         return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
4479     case Instruction::Mul:
4480       if (IsSigned)
4481         return computeOverflowForSignedMul(LHS, RHS, CxtI);
4482       else
4483         return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
4484   }
4485 }
4486 
4487 bool InstCombiner::OptimizeOverflowCheck(
4488     Instruction::BinaryOps BinaryOp, bool IsSigned, Value *LHS, Value *RHS,
4489     Instruction &OrigI, Value *&Result, Constant *&Overflow) {
4490   if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
4491     std::swap(LHS, RHS);
4492 
4493   // If the overflow check was an add followed by a compare, the insertion point
4494   // may be pointing to the compare.  We want to insert the new instructions
4495   // before the add in case there are uses of the add between the add and the
4496   // compare.
4497   Builder.SetInsertPoint(&OrigI);
4498 
4499   if (isNeutralValue(BinaryOp, RHS)) {
4500     Result = LHS;
4501     Overflow = Builder.getFalse();
4502     return true;
4503   }
4504 
4505   switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
4506     case OverflowResult::MayOverflow:
4507       return false;
4508     case OverflowResult::AlwaysOverflowsLow:
4509     case OverflowResult::AlwaysOverflowsHigh:
4510       Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4511       Result->takeName(&OrigI);
4512       Overflow = Builder.getTrue();
4513       return true;
4514     case OverflowResult::NeverOverflows:
4515       Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4516       Result->takeName(&OrigI);
4517       Overflow = Builder.getFalse();
4518       if (auto *Inst = dyn_cast<Instruction>(Result)) {
4519         if (IsSigned)
4520           Inst->setHasNoSignedWrap();
4521         else
4522           Inst->setHasNoUnsignedWrap();
4523       }
4524       return true;
4525   }
4526 
4527   llvm_unreachable("Unexpected overflow result");
4528 }
4529 
4530 /// Recognize and process idiom involving test for multiplication
4531 /// overflow.
4532 ///
4533 /// The caller has matched a pattern of the form:
4534 ///   I = cmp u (mul(zext A, zext B)), V
4535 /// The function checks if this is a test for overflow and if so replaces
4536 /// multiplication with call to 'mul.with.overflow' intrinsic.
4537 ///
4538 /// \param I Compare instruction.
4539 /// \param MulVal Result of 'mul' instruction.  It is one of the arguments of
4540 ///               the compare instruction.  Must be of integer type.
4541 /// \param OtherVal The other argument of compare instruction.
4542 /// \returns Instruction which must replace the compare instruction, NULL if no
4543 ///          replacement required.
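     ///
     /// E.g. (an illustrative sketch; value names are placeholders):
     ///   %za  = zext i32 %a to i64
     ///   %zb  = zext i32 %b to i64
     ///   %mul = mul i64 %za, %zb
     ///   %cmp = icmp ugt i64 %mul, 4294967295      ; does a * b overflow i32?
     /// is rewritten (roughly) into
     ///   %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
     ///   %cmp = extractvalue { i32, i1 } %res, 1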
4544 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
4545                                          Value *OtherVal, InstCombiner &IC) {
4546   // Don't bother doing this transformation for pointers, don't do it for
4547   // vectors.
4548   if (!isa<IntegerType>(MulVal->getType()))
4549     return nullptr;
4550 
4551   assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
4552   assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
4553   auto *MulInstr = dyn_cast<Instruction>(MulVal);
4554   if (!MulInstr)
4555     return nullptr;
4556   assert(MulInstr->getOpcode() == Instruction::Mul);
4557 
4558   auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
4559        *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
4560   assert(LHS->getOpcode() == Instruction::ZExt);
4561   assert(RHS->getOpcode() == Instruction::ZExt);
4562   Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
4563 
4564   // Calculate type and width of the result produced by mul.with.overflow.
4565   Type *TyA = A->getType(), *TyB = B->getType();
4566   unsigned WidthA = TyA->getPrimitiveSizeInBits(),
4567            WidthB = TyB->getPrimitiveSizeInBits();
4568   unsigned MulWidth;
4569   Type *MulType;
4570   if (WidthB > WidthA) {
4571     MulWidth = WidthB;
4572     MulType = TyB;
4573   } else {
4574     MulWidth = WidthA;
4575     MulType = TyA;
4576   }
4577 
4578   // In order to replace the original mul with a narrower mul.with.overflow,
4579   // all uses must ignore upper bits of the product.  The number of used low
4580   // bits must not exceed the width of mul.with.overflow.
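       // E.g. a "trunc i64 %mul to i32" or an "and i64 %mul, 4294967295" reads only
       // the low 32 bits, so (for a 32-bit MulWidth) it can be rewritten in terms of
       // the narrower mul.with.overflow result; a full 64-bit use could not.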
4581   if (MulVal->hasNUsesOrMore(2))
4582     for (User *U : MulVal->users()) {
4583       if (U == &I)
4584         continue;
4585       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4586         // Check if truncation ignores bits above MulWidth.
4587         unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
4588         if (TruncWidth > MulWidth)
4589           return nullptr;
4590       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4591         // Check if AND ignores bits above MulWidth.
4592         if (BO->getOpcode() != Instruction::And)
4593           return nullptr;
4594         if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
4595           const APInt &CVal = CI->getValue();
4596           if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
4597             return nullptr;
4598         } else {
4599           // In this case we could have the operand of the binary operation
4600           // being defined in another block, and performing the replacement
4601           // could break the dominance relation.
4602           return nullptr;
4603         }
4604       } else {
4605         // Other uses prohibit this transformation.
4606         return nullptr;
4607       }
4608     }
4609 
4610   // Recognize patterns
4611   switch (I.getPredicate()) {
4612   case ICmpInst::ICMP_EQ:
4613   case ICmpInst::ICMP_NE:
4614     // Recognize pattern:
4615     //   mulval = mul(zext A, zext B)
4616     //   cmp eq/ne mulval, zext trunc mulval
4617     if (ZExtInst *Zext = dyn_cast<ZExtInst>(OtherVal))
4618       if (Zext->hasOneUse()) {
4619         Value *ZextArg = Zext->getOperand(0);
4620         if (TruncInst *Trunc = dyn_cast<TruncInst>(ZextArg))
4621           if (Trunc->getType()->getPrimitiveSizeInBits() == MulWidth)
4622             break; //Recognized
4623       }
4624 
4625     // Recognize pattern:
4626     //   mulval = mul(zext A, zext B)
4627     //   cmp eq/ne mulval, and(mulval, mask); mask selects the low MulWidth bits.
4628     ConstantInt *CI;
4629     Value *ValToMask;
4630     if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
4631       if (ValToMask != MulVal)
4632         return nullptr;
4633       const APInt &CVal = CI->getValue() + 1;
4634       if (CVal.isPowerOf2()) {
4635         unsigned MaskWidth = CVal.logBase2();
4636         if (MaskWidth == MulWidth)
4637           break; // Recognized
4638       }
4639     }
4640     return nullptr;
4641 
4642   case ICmpInst::ICMP_UGT:
4643     // Recognize pattern:
4644     //   mulval = mul(zext A, zext B)
4645     //   cmp ugt mulval, max
4646     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4647       APInt MaxVal = APInt::getMaxValue(MulWidth);
4648       MaxVal = MaxVal.zext(CI->getBitWidth());
4649       if (MaxVal.eq(CI->getValue()))
4650         break; // Recognized
4651     }
4652     return nullptr;
4653 
4654   case ICmpInst::ICMP_UGE:
4655     // Recognize pattern:
4656     //   mulval = mul(zext A, zext B)
4657     //   cmp uge mulval, max+1
4658     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4659       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4660       if (MaxVal.eq(CI->getValue()))
4661         break; // Recognized
4662     }
4663     return nullptr;
4664 
4665   case ICmpInst::ICMP_ULE:
4666     // Recognize pattern:
4667     //   mulval = mul(zext A, zext B)
4668     //   cmp ule mulval, max
4669     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4670       APInt MaxVal = APInt::getMaxValue(MulWidth);
4671       MaxVal = MaxVal.zext(CI->getBitWidth());
4672       if (MaxVal.eq(CI->getValue()))
4673         break; // Recognized
4674     }
4675     return nullptr;
4676 
4677   case ICmpInst::ICMP_ULT:
4678     // Recognize pattern:
4679     //   mulval = mul(zext A, zext B)
4680     //   cmp ult mulval, max + 1
4681     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4682       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4683       if (MaxVal.eq(CI->getValue()))
4684         break; // Recognized
4685     }
4686     return nullptr;
4687 
4688   default:
4689     return nullptr;
4690   }
4691 
4692   InstCombiner::BuilderTy &Builder = IC.Builder;
4693   Builder.SetInsertPoint(MulInstr);
4694 
4695   // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
4696   Value *MulA = A, *MulB = B;
4697   if (WidthA < MulWidth)
4698     MulA = Builder.CreateZExt(A, MulType);
4699   if (WidthB < MulWidth)
4700     MulB = Builder.CreateZExt(B, MulType);
4701   Function *F = Intrinsic::getDeclaration(
4702       I.getModule(), Intrinsic::umul_with_overflow, MulType);
4703   CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
4704   IC.Worklist.Add(MulInstr);
4705 
4706   // If there are uses of the mul result other than the comparison, we know
4707   // that they are truncations or binary ANDs. Change them to use the result
4708   // of mul.with.overflow and adjust the mask/size accordingly.
4709   if (MulVal->hasNUsesOrMore(2)) {
4710     Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
4711     for (auto UI = MulVal->user_begin(), UE = MulVal->user_end(); UI != UE;) {
4712       User *U = *UI++;
4713       if (U == &I || U == OtherVal)
4714         continue;
4715       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4716         if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
4717           IC.replaceInstUsesWith(*TI, Mul);
4718         else
4719           TI->setOperand(0, Mul);
4720       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4721         assert(BO->getOpcode() == Instruction::And);
4722         // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
4723         ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
4724         APInt ShortMask = CI->getValue().trunc(MulWidth);
4725         Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
4726         Instruction *Zext =
4727             cast<Instruction>(Builder.CreateZExt(ShortAnd, BO->getType()));
4728         IC.Worklist.Add(Zext);
4729         IC.replaceInstUsesWith(*BO, Zext);
4730       } else {
4731         llvm_unreachable("Unexpected Binary operation");
4732       }
4733       IC.Worklist.Add(cast<Instruction>(U));
4734     }
4735   }
4736   if (isa<Instruction>(OtherVal))
4737     IC.Worklist.Add(cast<Instruction>(OtherVal));
4738 
4739   // The original icmp gets replaced with the overflow value, maybe inverted
4740   // depending on predicate.
4741   bool Inverse = false;
4742   switch (I.getPredicate()) {
4743   case ICmpInst::ICMP_NE:
4744     break;
4745   case ICmpInst::ICMP_EQ:
4746     Inverse = true;
4747     break;
4748   case ICmpInst::ICMP_UGT:
4749   case ICmpInst::ICMP_UGE:
4750     if (I.getOperand(0) == MulVal)
4751       break;
4752     Inverse = true;
4753     break;
4754   case ICmpInst::ICMP_ULT:
4755   case ICmpInst::ICMP_ULE:
4756     if (I.getOperand(1) == MulVal)
4757       break;
4758     Inverse = true;
4759     break;
4760   default:
4761     llvm_unreachable("Unexpected predicate");
4762   }
4763   if (Inverse) {
4764     Value *Res = Builder.CreateExtractValue(Call, 1);
4765     return BinaryOperator::CreateNot(Res);
4766   }
4767 
4768   return ExtractValueInst::Create(Call, 1);
4769 }
4770 
4771 /// When performing a comparison against a constant, it is possible that not all
4772 /// the bits in the LHS are demanded. This helper method computes the mask that
4773 /// IS demanded.
4774 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
4775   const APInt *RHS;
4776   if (!match(I.getOperand(1), m_APInt(RHS)))
4777     return APInt::getAllOnesValue(BitWidth);
4778 
4779   // If this is a normal comparison, it demands all bits. If it is a sign bit
4780   // comparison, it only demands the sign bit.
4781   bool UnusedBit;
4782   if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
4783     return APInt::getSignMask(BitWidth);
4784 
4785   switch (I.getPredicate()) {
4786   // For a UGT comparison, we don't care about any bits that
4787   // correspond to the trailing ones of the comparand.  The value of these
4788   // bits doesn't impact the outcome of the comparison, because any value
4789   // greater than the RHS must differ in a bit higher than these due to carry.
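       // E.g. for "icmp ugt i8 %x, 7" only bits 3..7 of %x matter: %x u> 7 exactly
       // when at least one of those bits is set.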
4790   case ICmpInst::ICMP_UGT:
4791     return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes());
4792 
4793   // Similarly, for a ULT comparison, we don't care about the trailing zeros.
4794   // Any value less than the RHS must differ in a higher bit because of carries.
4795   case ICmpInst::ICMP_ULT:
4796     return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros());
4797 
4798   default:
4799     return APInt::getAllOnesValue(BitWidth);
4800   }
4801 }
4802 
4803 /// Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst
4804 /// should be swapped.
4805 /// The decision is based on how many times these two operands are reused
4806 /// as subtract operands and their positions in those instructions.
4807 /// The rationale is that several architectures use the same instruction for
4808 /// both subtract and cmp. Thus, it is better if the order of those operands
4809 /// match.
4810 /// \return true if Op0 and Op1 should be swapped.
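     ///
     /// E.g. (a sketch): if the function also computes "sub i32 %b, %a", then
     /// rewriting "icmp sgt i32 %a, %b" as "icmp slt i32 %b, %a" lets a backend
     /// that implements compare as a subtract reuse that subtraction.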
4811 static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) {
4812   // Filter out pointer values as those cannot appear directly in subtract.
4813   // FIXME: we may want to go through inttoptrs or bitcasts.
4814   if (Op0->getType()->isPointerTy())
4815     return false;
4816   // If a subtract already has the same operands as a compare, swapping would be
4817   // bad. If a subtract has the same operands as a compare but in reverse order,
4818   // then swapping is good.
4819   int GoodToSwap = 0;
4820   for (const User *U : Op0->users()) {
4821     if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
4822       GoodToSwap++;
4823     else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
4824       GoodToSwap--;
4825   }
4826   return GoodToSwap > 0;
4827 }
4828 
4829 /// Check that one use is in the same block as the definition and all
4830 /// other uses are in blocks dominated by a given block.
4831 ///
4832 /// \param DI Definition
4833 /// \param UI Use
4834 /// \param DB Block that must dominate all uses of \p DI outside
4835 ///           the parent block
4836 /// \return true when \p UI is the only use of \p DI in the parent block
4837 /// and all other uses of \p DI are in blocks dominated by \p DB.
4838 ///
4839 bool InstCombiner::dominatesAllUses(const Instruction *DI,
4840                                     const Instruction *UI,
4841                                     const BasicBlock *DB) const {
4842   assert(DI && UI && "Instruction not defined\n");
4843   // Ignore incomplete definitions.
4844   if (!DI->getParent())
4845     return false;
4846   // DI and UI must be in the same block.
4847   if (DI->getParent() != UI->getParent())
4848     return false;
4849   // Protect from self-referencing blocks.
4850   if (DI->getParent() == DB)
4851     return false;
4852   for (const User *U : DI->users()) {
4853     auto *Usr = cast<Instruction>(U);
4854     if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
4855       return false;
4856   }
4857   return true;
4858 }
4859 
4860 /// Return true when the instruction sequence within a block is select-cmp-br.
4861 static bool isChainSelectCmpBranch(const SelectInst *SI) {
4862   const BasicBlock *BB = SI->getParent();
4863   if (!BB)
4864     return false;
4865   auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
4866   if (!BI || BI->getNumSuccessors() != 2)
4867     return false;
4868   auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
4869   if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
4870     return false;
4871   return true;
4872 }
4873 
4874 /// True when a select result is replaced by one of its operands
4875 /// in select-icmp sequence. This will eventually result in the elimination
4876 /// of the select.
4877 ///
4878 /// \param SI    Select instruction
4879 /// \param Icmp  Compare instruction
4880 /// \param SIOpd Operand that replaces the select
4881 ///
4882 /// Notes:
4883 /// - The replacement is global and requires dominator information
4884 /// - The caller is responsible for the actual replacement
4885 ///
4886 /// Example:
4887 ///
4888 /// entry:
4889 ///  %4 = select i1 %3, %C* %0, %C* null
4890 ///  %5 = icmp eq %C* %4, null
4891 ///  br i1 %5, label %9, label %7
4892 ///  ...
4893 ///  ; <label>:7                                       ; preds = %entry
4894 ///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
4895 ///  ...
4896 ///
4897 /// can be transformed to
4898 ///
4899 ///  %5 = icmp eq %C* %0, null
4900 ///  %6 = select i1 %3, i1 %5, i1 true
4901 ///  br i1 %6, label %9, label %7
4902 ///  ...
4903 ///  ; <label>:7                                       ; preds = %entry
4904 ///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  // replace by %0!
4905 ///
4906 /// Similar when the first operand of the select is a constant or/and
4907 /// the compare is for not equal rather than equal.
4908 ///
4909 /// NOTE: The function is only called when the select and compare constants
4910 /// are equal, and the optimization can work only for EQ predicates. This is
4911 /// not a major restriction since a NE compare should be 'normalized' to an
4912 /// equal compare, which usually happens in the combiner; the test case
4913 /// select-cmp-br.ll checks for it.
4914 bool InstCombiner::replacedSelectWithOperand(SelectInst *SI,
4915                                              const ICmpInst *Icmp,
4916                                              const unsigned SIOpd) {
4917   assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
4918   if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
4919     BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
4920     // The check for the single predecessor is not the best that can be
4921     // done. But it protects efficiently against cases like when SI's
4922     // home block has two successors, Succ and Succ1, and Succ1 is a
4923     // predecessor of Succ. Then SI can't be replaced by SIOpd because the
4924     // use that gets replaced can be reached on either path. So the
4925     // uniqueness check guarantees that the path holding all uses of SI
4926     // (outside SI's parent) is disjoint from all other paths out of SI.
4927     // But that information is more expensive to compute, and the trade-off
4928     // here is in favor of compile time. Note also that we check for a
4929     // single predecessor and not merely a unique one; this handles the
4930     // situation when Succ and Succ1 point to the same basic block.
4931     if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
4932       NumSel++;
4933       SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
4934       return true;
4935     }
4936   }
4937   return false;
4938 }
4939 
4940 /// Try to fold the comparison based on range information we can get by checking
4941 /// whether bits are known to be zero or one in the inputs.
4942 Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
4943   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4944   Type *Ty = Op0->getType();
4945   ICmpInst::Predicate Pred = I.getPredicate();
4946 
4947   // Get scalar or pointer size.
4948   unsigned BitWidth = Ty->isIntOrIntVectorTy()
4949                           ? Ty->getScalarSizeInBits()
4950                           : DL.getPointerTypeSizeInBits(Ty->getScalarType());
4951 
4952   if (!BitWidth)
4953     return nullptr;
4954 
4955   KnownBits Op0Known(BitWidth);
4956   KnownBits Op1Known(BitWidth);
4957 
4958   if (SimplifyDemandedBits(&I, 0,
4959                            getDemandedBitsLHSMask(I, BitWidth),
4960                            Op0Known, 0))
4961     return &I;
4962 
4963   if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
4964                            Op1Known, 0))
4965     return &I;
4966 
4967   // Given the known and unknown bits, compute a range that the LHS could be
4968   // in.  Compute the Min, Max and RHS values based on the known bits. For the
4969   // EQ and NE we use unsigned values.
4970   APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
4971   APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
4972   if (I.isSigned()) {
4973     computeSignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4974     computeSignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4975   } else {
4976     computeUnsignedMinMaxValuesFromKnownBits(Op0Known, Op0Min, Op0Max);
4977     computeUnsignedMinMaxValuesFromKnownBits(Op1Known, Op1Min, Op1Max);
4978   }
4979 
4980   // If Min and Max are known to be the same, then SimplifyDemandedBits figured
4981   // out that the LHS or RHS is a constant. Constant fold this now, so that
4982   // code below can assume that Min != Max.
4983   if (!isa<Constant>(Op0) && Op0Min == Op0Max)
4984     return new ICmpInst(Pred, ConstantExpr::getIntegerValue(Ty, Op0Min), Op1);
4985   if (!isa<Constant>(Op1) && Op1Min == Op1Max)
4986     return new ICmpInst(Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Min));
4987 
4988   // Based on the range information we know about the LHS, see if we can
4989   // simplify this comparison.  For example, (x&4) < 8 is always true.
4990   switch (Pred) {
4991   default:
4992     llvm_unreachable("Unknown icmp opcode!");
4993   case ICmpInst::ICMP_EQ:
4994   case ICmpInst::ICMP_NE: {
4995     if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max)) {
4996       return Pred == CmpInst::ICMP_EQ
4997                  ? replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()))
4998                  : replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4999     }
5000 
5001     // If all bits are known zero except for one, then we know at most one bit
5002     // is set. If the comparison is against zero, then this is a check to see if
5003     // *that* bit is set.
5004     APInt Op0KnownZeroInverted = ~Op0Known.Zero;
5005     if (Op1Known.isZero()) {
5006       // If the LHS is an AND with the same constant, look through it.
5007       Value *LHS = nullptr;
5008       const APInt *LHSC;
5009       if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
5010           *LHSC != Op0KnownZeroInverted)
5011         LHS = Op0;
5012 
5013       Value *X;
5014       if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
5015         APInt ValToCheck = Op0KnownZeroInverted;
5016         Type *XTy = X->getType();
5017         if (ValToCheck.isPowerOf2()) {
5018           // ((1 << X) & 8) == 0 -> X != 3
5019           // ((1 << X) & 8) != 0 -> X == 3
5020           auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5021           auto NewPred = ICmpInst::getInversePredicate(Pred);
5022           return new ICmpInst(NewPred, X, CmpC);
5023         } else if ((++ValToCheck).isPowerOf2()) {
5024           // ((1 << X) & 7) == 0 -> X >= 3
5025           // ((1 << X) & 7) != 0 -> X  < 3
5026           auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5027           auto NewPred =
5028               Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
5029           return new ICmpInst(NewPred, X, CmpC);
5030         }
5031       }
5032 
5033       // Check if the LHS is '8 >>u X' and only bit 0 of the result can be set.
5034       const APInt *CI;
5035       if (Op0KnownZeroInverted.isOneValue() &&
5036           match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
5037         // ((8 >>u X) & 1) == 0 -> X != 3
5038         // ((8 >>u X) & 1) != 0 -> X == 3
5039         unsigned CmpVal = CI->countTrailingZeros();
5040         auto NewPred = ICmpInst::getInversePredicate(Pred);
5041         return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
5042       }
5043     }
5044     break;
5045   }
5046   case ICmpInst::ICMP_ULT: {
5047     if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
5048       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5049     if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
5050       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5051     if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
5052       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5053 
5054     const APInt *CmpC;
5055     if (match(Op1, m_APInt(CmpC))) {
5056       // A <u C -> A == C-1 if min(A)+1 == C
5057       if (*CmpC == Op0Min + 1)
5058         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5059                             ConstantInt::get(Op1->getType(), *CmpC - 1));
5060       // X <u C --> X == 0, if the number of zero bits in the bottom of X
5061       // is at least ceil(log2(C)).
5062       if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
5063         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5064                             Constant::getNullValue(Op1->getType()));
5065     }
5066     break;
5067   }
5068   case ICmpInst::ICMP_UGT: {
5069     if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
5070       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5071     if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
5072       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5073     if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
5074       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5075 
5076     const APInt *CmpC;
5077     if (match(Op1, m_APInt(CmpC))) {
5078       // A >u C -> A == C+1 if max(A)-1 == C
5079       if (*CmpC == Op0Max - 1)
5080         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5081                             ConstantInt::get(Op1->getType(), *CmpC + 1));
5082       // X >u C --> X != 0, if the number of zero bits in the bottom of X
5083       // exceeds the log2 of C.
5084       if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
5085         return new ICmpInst(ICmpInst::ICMP_NE, Op0,
5086                             Constant::getNullValue(Op1->getType()));
5087     }
5088     break;
5089   }
5090   case ICmpInst::ICMP_SLT: {
5091     if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
5092       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5093     if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
5094       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5095     if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
5096       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5097     const APInt *CmpC;
5098     if (match(Op1, m_APInt(CmpC))) {
5099       if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
5100         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5101                             ConstantInt::get(Op1->getType(), *CmpC - 1));
5102     }
5103     break;
5104   }
5105   case ICmpInst::ICMP_SGT: {
5106     if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
5107       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5108     if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
5109       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5110     if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
5111       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5112     const APInt *CmpC;
5113     if (match(Op1, m_APInt(CmpC))) {
5114       if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
5115         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5116                             ConstantInt::get(Op1->getType(), *CmpC + 1));
5117     }
5118     break;
5119   }
5120   case ICmpInst::ICMP_SGE:
5121     assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
5122     if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
5123       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5124     if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
5125       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5126     if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
5127       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5128     break;
5129   case ICmpInst::ICMP_SLE:
5130     assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
5131     if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
5132       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5133     if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
5134       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5135     if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
5136       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5137     break;
5138   case ICmpInst::ICMP_UGE:
5139     assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
5140     if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
5141       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5142     if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
5143       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5144     if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
5145       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5146     break;
5147   case ICmpInst::ICMP_ULE:
5148     assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
5149     if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
5150       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5151     if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
5152       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5153     if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
5154       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5155     break;
5156   }
5157 
5158   // Turn a signed comparison into an unsigned one if both operands are known to
5159   // have the same sign.
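       // For example, if the sign bit of both operands is known to be clear
       // (both values are non-negative), 'icmp slt %a, %b' can be rewritten as
       // 'icmp ult %a, %b'; the same holds when both sign bits are known set.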
5160   if (I.isSigned() &&
5161       ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
5162        (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
5163     return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
5164 
5165   return nullptr;
5166 }
5167 
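     /// Given a relational integer predicate and a constant RHS, return the
     /// equivalent comparison with flipped strictness and an adjusted constant,
     /// e.g. (illustrative):
     ///   icmp sgt X, C  <->  icmp sge X, C+1
     ///   icmp ule X, C  <->  icmp ult X, C+1
     /// Returns None when the constant cannot be safely adjusted, e.g. because
     /// it is already the signed/unsigned min/max value for its type.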
5168 llvm::Optional<std::pair<CmpInst::Predicate, Constant *>>
5169 llvm::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
5170                                                Constant *C) {
5171   assert(ICmpInst::isRelational(Pred) && ICmpInst::isIntPredicate(Pred) &&
5172          "Only for relational integer predicates.");
5173 
5174   Type *Type = C->getType();
5175   bool IsSigned = ICmpInst::isSigned(Pred);
5176 
5177   CmpInst::Predicate UnsignedPred = ICmpInst::getUnsignedPredicate(Pred);
5178   bool WillIncrement =
5179       UnsignedPred == ICmpInst::ICMP_ULE || UnsignedPred == ICmpInst::ICMP_UGT;
5180 
5181   // Check if the constant operand can be safely incremented/decremented
5182   // without overflowing/underflowing.
5183   auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
5184     return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
5185   };
5186 
5187   Constant *SafeReplacementConstant = nullptr;
5188   if (auto *CI = dyn_cast<ConstantInt>(C)) {
5189     // Bail out if the constant can't be safely incremented/decremented.
5190     if (!ConstantIsOk(CI))
5191       return llvm::None;
5192   } else if (Type->isVectorTy()) {
5193     unsigned NumElts = Type->getVectorNumElements();
5194     for (unsigned i = 0; i != NumElts; ++i) {
5195       Constant *Elt = C->getAggregateElement(i);
5196       if (!Elt)
5197         return llvm::None;
5198 
5199       if (isa<UndefValue>(Elt))
5200         continue;
5201 
5202       // Bail out if we can't determine if this constant is min/max or if we
5203       // know that this constant is min/max.
5204       auto *CI = dyn_cast<ConstantInt>(Elt);
5205       if (!CI || !ConstantIsOk(CI))
5206         return llvm::None;
5207 
5208       if (!SafeReplacementConstant)
5209         SafeReplacementConstant = CI;
5210     }
5211   } else {
5212     // ConstantExpr?
5213     return llvm::None;
5214   }
5215 
5216   // It may not be safe to change a compare predicate in the presence of
5217   // undefined elements, so replace those elements with the first safe constant
5218   // that we found.
5219   if (C->containsUndefElement()) {
5220     assert(SafeReplacementConstant && "Replacement constant not set");
5221     C = Constant::replaceUndefsWith(C, SafeReplacementConstant);
5222   }
5223 
5224   CmpInst::Predicate NewPred = CmpInst::getFlippedStrictnessPredicate(Pred);
5225 
5226   // Increment or decrement the constant.
5227   Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
5228   Constant *NewC = ConstantExpr::getAdd(C, OneOrNegOne);
5229 
5230   return std::make_pair(NewPred, NewC);
5231 }
5232 
5233 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
5234 /// it into the appropriate icmp lt or icmp gt instruction. This transform
5235 /// allows them to be folded in visitICmpInst.
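     /// For example (illustrative): 'icmp sle i32 %x, 7' becomes
     /// 'icmp slt i32 %x, 8', and 'icmp uge i32 %x, 5' becomes
     /// 'icmp ugt i32 %x, 4'.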
5236 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
5237   ICmpInst::Predicate Pred = I.getPredicate();
5238   if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
5239       isCanonicalPredicate(Pred))
5240     return nullptr;
5241 
5242   Value *Op0 = I.getOperand(0);
5243   Value *Op1 = I.getOperand(1);
5244   auto *Op1C = dyn_cast<Constant>(Op1);
5245   if (!Op1C)
5246     return nullptr;
5247 
5248   auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
5249   if (!FlippedStrictness)
5250     return nullptr;
5251 
5252   return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
5253 }
5254 
5255 /// Integer compare with boolean values can always be turned into bitwise ops.
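     /// For example, 'icmp ult i1 %a, %b' is true only for %a == 0 and %b == 1,
     /// so it becomes 'and i1 (xor i1 %a, true), %b'.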
5256 static Instruction *canonicalizeICmpBool(ICmpInst &I,
5257                                          InstCombiner::BuilderTy &Builder) {
5258   Value *A = I.getOperand(0), *B = I.getOperand(1);
5259   assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
5260 
5261   // A boolean compared to true/false can be simplified to Op0/true/false in
5262   // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
5263   // Cases not handled by InstSimplify are always 'not' of Op0.
5264   if (match(B, m_Zero())) {
5265     switch (I.getPredicate()) {
5266       case CmpInst::ICMP_EQ:  // A ==   0 -> !A
5267       case CmpInst::ICMP_ULE: // A <=u  0 -> !A
5268       case CmpInst::ICMP_SGE: // A >=s  0 -> !A
5269         return BinaryOperator::CreateNot(A);
5270       default:
5271         llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5272     }
5273   } else if (match(B, m_One())) {
5274     switch (I.getPredicate()) {
5275       case CmpInst::ICMP_NE:  // A !=  1 -> !A
5276       case CmpInst::ICMP_ULT: // A <u  1 -> !A
5277       case CmpInst::ICMP_SGT: // A >s -1 -> !A
5278         return BinaryOperator::CreateNot(A);
5279       default:
5280         llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5281     }
5282   }
5283 
5284   switch (I.getPredicate()) {
5285   default:
5286     llvm_unreachable("Invalid icmp instruction!");
5287   case ICmpInst::ICMP_EQ:
5288     // icmp eq i1 A, B -> ~(A ^ B)
5289     return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
5290 
5291   case ICmpInst::ICMP_NE:
5292     // icmp ne i1 A, B -> A ^ B
5293     return BinaryOperator::CreateXor(A, B);
5294 
5295   case ICmpInst::ICMP_UGT:
5296     // icmp ugt -> icmp ult
5297     std::swap(A, B);
5298     LLVM_FALLTHROUGH;
5299   case ICmpInst::ICMP_ULT:
5300     // icmp ult i1 A, B -> ~A & B
5301     return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
5302 
5303   case ICmpInst::ICMP_SGT:
5304     // icmp sgt -> icmp slt
5305     std::swap(A, B);
5306     LLVM_FALLTHROUGH;
5307   case ICmpInst::ICMP_SLT:
5308     // icmp slt i1 A, B -> A & ~B
5309     return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
5310 
5311   case ICmpInst::ICMP_UGE:
5312     // icmp uge -> icmp ule
5313     std::swap(A, B);
5314     LLVM_FALLTHROUGH;
5315   case ICmpInst::ICMP_ULE:
5316     // icmp ule i1 A, B -> ~A | B
5317     return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
5318 
5319   case ICmpInst::ICMP_SGE:
5320     // icmp sge -> icmp sle
5321     std::swap(A, B);
5322     LLVM_FALLTHROUGH;
5323   case ICmpInst::ICMP_SLE:
5324     // icmp sle i1 A, B -> A | ~B
5325     return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
5326   }
5327 }
5328 
5329 // Transform pattern like:
5330 //   (1 << Y) u<= X  or  ~(-1 << Y) u<  X  or  ((1 << Y)+(-1)) u<  X
5331 //   (1 << Y) u>  X  or  ~(-1 << Y) u>= X  or  ((1 << Y)+(-1)) u>= X
5332 // Into:
5333 //   (X l>> Y) != 0
5334 //   (X l>> Y) == 0
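     // For example (one concrete instance of the first pattern):
     //   %m = shl i32 1, %y
     //   %c = icmp ule i32 %m, %x
     // becomes
     //   %x.highbits = lshr i32 %x, %y
     //   %c = icmp ne i32 %x.highbits, 0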
5335 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
5336                                             InstCombiner::BuilderTy &Builder) {
5337   ICmpInst::Predicate Pred, NewPred;
5338   Value *X, *Y;
5339   if (match(&Cmp,
5340             m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
5341     // We want X to be the icmp's second operand, so swap predicate if it isn't.
5342     if (Cmp.getOperand(0) == X)
5343       Pred = Cmp.getSwappedPredicate();
5344 
5345     switch (Pred) {
5346     case ICmpInst::ICMP_ULE:
5347       NewPred = ICmpInst::ICMP_NE;
5348       break;
5349     case ICmpInst::ICMP_UGT:
5350       NewPred = ICmpInst::ICMP_EQ;
5351       break;
5352     default:
5353       return nullptr;
5354     }
5355   } else if (match(&Cmp, m_c_ICmp(Pred,
5356                                   m_OneUse(m_CombineOr(
5357                                       m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
5358                                       m_Add(m_Shl(m_One(), m_Value(Y)),
5359                                             m_AllOnes()))),
5360                                   m_Value(X)))) {
5361     // The variant with 'add' is not canonical (the variant with 'not' is);
5362     // we only get it here because it has extra uses and can't be canonicalized.
5363 
5364     // We want X to be the icmp's second operand, so swap predicate if it isn't.
5365     if (Cmp.getOperand(0) == X)
5366       Pred = Cmp.getSwappedPredicate();
5367 
5368     switch (Pred) {
5369     case ICmpInst::ICMP_ULT:
5370       NewPred = ICmpInst::ICMP_NE;
5371       break;
5372     case ICmpInst::ICMP_UGE:
5373       NewPred = ICmpInst::ICMP_EQ;
5374       break;
5375     default:
5376       return nullptr;
5377     }
5378   } else
5379     return nullptr;
5380 
5381   Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
5382   Constant *Zero = Constant::getNullValue(NewX->getType());
5383   return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
5384 }
5385 
5386 static Instruction *foldVectorCmp(CmpInst &Cmp,
5387                                   InstCombiner::BuilderTy &Builder) {
5388   // If both arguments of the cmp are shuffles that use the same mask and
5389   // shuffle within a single vector, move the shuffle after the cmp.
5390   Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
5391   Value *V1, *V2;
5392   Constant *M;
5393   if (match(LHS, m_ShuffleVector(m_Value(V1), m_Undef(), m_Constant(M))) &&
5394       match(RHS, m_ShuffleVector(m_Value(V2), m_Undef(), m_Specific(M))) &&
5395       V1->getType() == V2->getType() &&
5396       (LHS->hasOneUse() || RHS->hasOneUse())) {
5397     // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
5398     CmpInst::Predicate P = Cmp.getPredicate();
5399     Value *NewCmp = isa<ICmpInst>(Cmp) ? Builder.CreateICmp(P, V1, V2)
5400                                        : Builder.CreateFCmp(P, V1, V2);
5401     return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()), M);
5402   }
5403   return nullptr;
5404 }
5405 
5406 // extract(uadd.with.overflow(A, B), 0) ult A
5407 //  -> extract(uadd.with.overflow(A, B), 1)
5408 static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
5409   CmpInst::Predicate Pred = I.getPredicate();
5410   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5411 
5412   Value *UAddOv;
5413   Value *A, *B;
5414   auto UAddOvResultPat = m_ExtractValue<0>(
5415       m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
5416   if (match(Op0, UAddOvResultPat) &&
5417       ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
5418        (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
5419         (match(A, m_One()) || match(B, m_One()))) ||
5420        (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
5421         (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
5422     // extract(uadd.with.overflow(A, B), 0) < A
5423     // extract(uadd.with.overflow(A, 1), 0) == 0
5424     // extract(uadd.with.overflow(A, -1), 0) != -1
5425     UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
5426   else if (match(Op1, UAddOvResultPat) &&
5427            Pred == ICmpInst::ICMP_UGT && (Op0 == A || Op0 == B))
5428     // A > extract(uadd.with.overflow(A, B), 0)
5429     UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
5430   else
5431     return nullptr;
5432 
5433   return ExtractValueInst::Create(UAddOv, 1);
5434 }
5435 
5436 Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
5437   bool Changed = false;
5438   const SimplifyQuery Q = SQ.getWithInstruction(&I);
5439   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5440   unsigned Op0Cplxity = getComplexity(Op0);
5441   unsigned Op1Cplxity = getComplexity(Op1);
5442 
5443   /// Orders the operands of the compare so that they are listed from most
5444   /// complex to least complex. This moves constants to the RHS, after unary
5445   /// operators, which in turn come after binary operators.
5446   if (Op0Cplxity < Op1Cplxity ||
5447       (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
5448     I.swapOperands();
5449     std::swap(Op0, Op1);
5450     Changed = true;
5451   }
5452 
5453   if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
5454     return replaceInstUsesWith(I, V);
5455 
5456   // Comparing -val or val against zero for inequality is the same as just
5457   // comparing val, i.e. abs(val) != 0 -> val != 0.
5458   if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
5459     Value *Cond, *SelectTrue, *SelectFalse;
5460     if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
5461                             m_Value(SelectFalse)))) {
5462       if (Value *V = dyn_castNegVal(SelectTrue)) {
5463         if (V == SelectFalse)
5464           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5465       }
5466       else if (Value *V = dyn_castNegVal(SelectFalse)) {
5467         if (V == SelectTrue)
5468           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5469       }
5470     }
5471   }
5472 
5473   if (Op0->getType()->isIntOrIntVectorTy(1))
5474     if (Instruction *Res = canonicalizeICmpBool(I, Builder))
5475       return Res;
5476 
5477   if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
5478     return NewICmp;
5479 
5480   if (Instruction *Res = foldICmpWithConstant(I))
5481     return Res;
5482 
5483   if (Instruction *Res = foldICmpWithDominatingICmp(I))
5484     return Res;
5485 
5486   if (Instruction *Res = foldICmpBinOp(I, Q))
5487     return Res;
5488 
5489   if (Instruction *Res = foldICmpUsingKnownBits(I))
5490     return Res;
5491 
5492   // Test if the ICmpInst instruction is used exclusively by a select as
5493   // part of a minimum or maximum operation. If so, refrain from doing
5494   // any other folding. This helps out other analyses which understand
5495   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
5496   // and CodeGen. And in this case, at least one of the comparison
5497   // operands has at least one user besides the compare (the select),
5498   // which would often largely negate the benefit of folding anyway.
5499   //
5500   // Do the same for the other patterns recognized by matchSelectPattern.
5501   if (I.hasOneUse())
5502     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
5503       Value *A, *B;
5504       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
5505       if (SPR.Flavor != SPF_UNKNOWN)
5506         return nullptr;
5507     }
5508 
5509   // Do this after checking for min/max to prevent infinite looping.
5510   if (Instruction *Res = foldICmpWithZero(I))
5511     return Res;
5512 
5513   // FIXME: We only do this after checking for min/max to prevent infinite
5514   // looping caused by a reverse canonicalization of these patterns for min/max.
5515   // FIXME: The organization of folds is a mess. These would naturally go into
5516   // canonicalizeCmpWithConstant(), but we can't move all of the above folds
5517   // down here after the min/max restriction.
5518   ICmpInst::Predicate Pred = I.getPredicate();
5519   const APInt *C;
5520   if (match(Op1, m_APInt(C))) {
5521     // For i32: x >u 2147483647 -> x <s 0  -> true if sign bit set
5522     if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
5523       Constant *Zero = Constant::getNullValue(Op0->getType());
5524       return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
5525     }
5526 
5527     // For i32: x <u 2147483648 -> x >s -1  -> true if sign bit clear
5528     if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
5529       Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
5530       return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
5531     }
5532   }
5533 
5534   if (Instruction *Res = foldICmpInstWithConstant(I))
5535     return Res;
5536 
5537   // Try to match comparison as a sign bit test. Intentionally do this after
5538   // foldICmpInstWithConstant() to potentially let other folds to happen first.
5539   if (Instruction *New = foldSignBitTest(I))
5540     return New;
5541 
5542   if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
5543     return Res;
5544 
5545   // If we can optimize an 'icmp GEP, P' or 'icmp P, GEP', do so now.
5546   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
5547     if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
5548       return NI;
5549   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
5550     if (Instruction *NI = foldGEPICmp(GEP, Op0,
5551                            ICmpInst::getSwappedPredicate(I.getPredicate()), I))
5552       return NI;
5553 
5554   // Try to optimize equality comparisons against alloca-based pointers.
5555   if (Op0->getType()->isPointerTy() && I.isEquality()) {
5556     assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
5557     if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op0, DL)))
5558       if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
5559         return New;
5560     if (auto *Alloca = dyn_cast<AllocaInst>(GetUnderlyingObject(Op1, DL)))
5561       if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
5562         return New;
5563   }
5564 
5565   if (Instruction *Res = foldICmpBitCast(I, Builder))
5566     return Res;
5567 
5568   if (Instruction *R = foldICmpWithCastOp(I))
5569     return R;
5570 
5571   if (Instruction *Res = foldICmpWithMinMax(I))
5572     return Res;
5573 
5574   {
5575     Value *A, *B;
5576     // Transform (A & ~B) == 0 --> (A & B) != 0
5577     // and       (A & ~B) != 0 --> (A & B) == 0
5578     // if A is a power of 2.
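         // (A power of 2 has exactly one bit set, so A & ~B is zero exactly
         // when that bit is set in B, i.e. exactly when A & B is non-zero.)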
5579     if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
5580         match(Op1, m_Zero()) &&
5581         isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
5582       return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
5583                           Op1);
5584 
5585     // ~X < ~Y --> Y < X
5586     // ~X < C -->  X > ~C
5587     if (match(Op0, m_Not(m_Value(A)))) {
5588       if (match(Op1, m_Not(m_Value(B))))
5589         return new ICmpInst(I.getPredicate(), B, A);
5590 
5591       const APInt *C;
5592       if (match(Op1, m_APInt(C)))
5593         return new ICmpInst(I.getSwappedPredicate(), A,
5594                             ConstantInt::get(Op1->getType(), ~(*C)));
5595     }
5596 
5597     Instruction *AddI = nullptr;
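         // An unsigned add-overflow check written as, e.g.:
         //   %s  = add i32 %a, %b
         //   %ov = icmp ult i32 %s, %a
         // is matched here and rewritten to use llvm.uadd.with.overflow when
         // OptimizeOverflowCheck succeeds.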
5598     if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
5599                                      m_Instruction(AddI))) &&
5600         isa<IntegerType>(A->getType())) {
5601       Value *Result;
5602       Constant *Overflow;
5603       if (OptimizeOverflowCheck(Instruction::Add, /*Signed*/false, A, B,
5604                                 *AddI, Result, Overflow)) {
5605         replaceInstUsesWith(*AddI, Result);
5606         return replaceInstUsesWith(I, Overflow);
5607       }
5608     }
5609 
5610     // (zext a) * (zext b)  --> llvm.umul.with.overflow.
5611     if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5612       if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
5613         return R;
5614     }
5615     if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5616       if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
5617         return R;
5618     }
5619   }
5620 
5621   if (Instruction *Res = foldICmpEquality(I))
5622     return Res;
5623 
5624   if (Instruction *Res = foldICmpOfUAddOv(I))
5625     return Res;
5626 
5627   // The 'cmpxchg' instruction returns an aggregate containing the old value and
5628   // an i1 which indicates whether or not we successfully did the swap.
5629   //
5630   // Replace comparisons between the old value and the expected value with the
5631   // indicator that 'cmpxchg' returns.
5632   //
5633   // N.B.  This transform is only valid when the 'cmpxchg' is not permitted to
5634   // spuriously fail.  In those cases, the old value may equal the expected
5635   // value but it is possible for the swap to not occur.
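       // For example, given a strong cmpxchg:
       //   %pair = cmpxchg i32* %p, i32 %expected, i32 %new seq_cst seq_cst
       //   %old  = extractvalue { i32, i1 } %pair, 0
       //   %eq   = icmp eq i32 %old, %expected
       // the compare can be replaced by
       //   %eq   = extractvalue { i32, i1 } %pair, 1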
5636   if (I.getPredicate() == ICmpInst::ICMP_EQ)
5637     if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
5638       if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
5639         if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
5640             !ACXI->isWeak())
5641           return ExtractValueInst::Create(ACXI, 1);
5642 
5643   {
5644     Value *X;
5645     const APInt *C;
5646     // icmp X+Cst, X
5647     if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
5648       return foldICmpAddOpConst(X, *C, I.getPredicate());
5649 
5650     // icmp X, X+Cst
5651     if (match(Op1, m_Add(m_Value(X), m_APInt(C))) && Op0 == X)
5652       return foldICmpAddOpConst(X, *C, I.getSwappedPredicate());
5653   }
5654 
5655   if (Instruction *Res = foldICmpWithHighBitMask(I, Builder))
5656     return Res;
5657 
5658   if (I.getType()->isVectorTy())
5659     if (Instruction *Res = foldVectorCmp(I, Builder))
5660       return Res;
5661 
5662   return Changed ? &I : nullptr;
5663 }
5664 
5665 /// Fold fcmp ([us]itofp x, cst) if possible.
5666 Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
5667                                                 Constant *RHSC) {
5668   if (!isa<ConstantFP>(RHSC)) return nullptr;
5669   const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5670 
5671   // Get the width of the mantissa.  We don't want to hack on conversions that
5672   // might lose information from the integer, e.g. "i64 -> float"
5673   int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5674   if (MantissaWidth == -1) return nullptr;  // Unknown.
5675 
5676   IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5677 
5678   bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5679 
5680   if (I.isEquality()) {
5681     FCmpInst::Predicate P = I.getPredicate();
5682     bool IsExact = false;
5683     APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
5684     RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
5685 
5686     // If the floating point constant isn't an integer value, we know the
5687     // result of an equality / inequality comparison against it.
5688     if (!IsExact) {
5689       // TODO: Can never be -0.0 and other non-representable values
5690       APFloat RHSRoundInt(RHS);
5691       RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
5692       if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
5693         if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
5694           return replaceInstUsesWith(I, Builder.getFalse());
5695 
5696         assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
5697         return replaceInstUsesWith(I, Builder.getTrue());
5698       }
5699     }
5700 
5701     // TODO: If the constant is exactly representable, is it always OK to do
5702     // equality compares as integer?
5703   }
5704 
5705   // Check that the input is converted from an integer type small enough that
5706   // all of its bits are preserved.  TODO: check here for "known" sign bits.
5707   // This would allow us to handle (fptosi (x >>s 62) to float) if x is i64 f.e.
5708   unsigned InputSize = IntTy->getScalarSizeInBits();
5709 
5710   // The following test does NOT adjust InputSize downwards for signed inputs,
5711   // because the most negative value still requires all the mantissa bits
5712   // to distinguish it from one less than that value.
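       // For example, for (sitofp i32 %x to float): float's mantissa holds 24
       // bits but i32 has 32, so the conversion may round, and the checks below
       // decide whether that rounding can change the comparison result.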
5713   if ((int)InputSize > MantissaWidth) {
5714     // Conversion would lose accuracy. Check if loss can impact comparison.
5715     int Exp = ilogb(RHS);
5716     if (Exp == APFloat::IEK_Inf) {
5717       int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
5718       if (MaxExponent < (int)InputSize - !LHSUnsigned)
5719         // Conversion could create infinity.
5720         return nullptr;
5721     } else {
5722       // Note that if RHS is zero or NaN, then Exp is negative
5723       // and the first condition is trivially false.
5724       if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
5725         // Conversion could affect comparison.
5726         return nullptr;
5727     }
5728   }
5729 
5730   // Otherwise, we can potentially simplify the comparison.  We know that it
5731   // will always come through as an integer value and we know the constant is
5732   // not a NAN (it would have been previously simplified).
5733   assert(!RHS.isNaN() && "NaN comparison not already folded!");
5734 
5735   ICmpInst::Predicate Pred;
5736   switch (I.getPredicate()) {
5737   default: llvm_unreachable("Unexpected predicate!");
5738   case FCmpInst::FCMP_UEQ:
5739   case FCmpInst::FCMP_OEQ:
5740     Pred = ICmpInst::ICMP_EQ;
5741     break;
5742   case FCmpInst::FCMP_UGT:
5743   case FCmpInst::FCMP_OGT:
5744     Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5745     break;
5746   case FCmpInst::FCMP_UGE:
5747   case FCmpInst::FCMP_OGE:
5748     Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5749     break;
5750   case FCmpInst::FCMP_ULT:
5751   case FCmpInst::FCMP_OLT:
5752     Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5753     break;
5754   case FCmpInst::FCMP_ULE:
5755   case FCmpInst::FCMP_OLE:
5756     Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5757     break;
5758   case FCmpInst::FCMP_UNE:
5759   case FCmpInst::FCMP_ONE:
5760     Pred = ICmpInst::ICMP_NE;
5761     break;
5762   case FCmpInst::FCMP_ORD:
5763     return replaceInstUsesWith(I, Builder.getTrue());
5764   case FCmpInst::FCMP_UNO:
5765     return replaceInstUsesWith(I, Builder.getFalse());
5766   }
5767 
5768   // Now we know that the APFloat is a normal number, zero or inf.
5769 
5770   // See if the FP constant is too large for the integer.  For example,
5771   // comparing an i8 to 300.0.
5772   unsigned IntWidth = IntTy->getScalarSizeInBits();
5773 
5774   if (!LHSUnsigned) {
5775     // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
5776     // and large values.
5777     APFloat SMax(RHS.getSemantics());
5778     SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5779                           APFloat::rmNearestTiesToEven);
5780     if (SMax.compare(RHS) == APFloat::cmpLessThan) {  // smax < 13123.0
5781       if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_SLT ||
5782           Pred == ICmpInst::ICMP_SLE)
5783         return replaceInstUsesWith(I, Builder.getTrue());
5784       return replaceInstUsesWith(I, Builder.getFalse());
5785     }
5786   } else {
5787     // If the RHS value is > UnsignedMax, fold the comparison. This handles
5788     // +INF and large values.
5789     APFloat UMax(RHS.getSemantics());
5790     UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5791                           APFloat::rmNearestTiesToEven);
5792     if (UMax.compare(RHS) == APFloat::cmpLessThan) {  // umax < 13123.0
5793       if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_ULT ||
5794           Pred == ICmpInst::ICMP_ULE)
5795         return replaceInstUsesWith(I, Builder.getTrue());
5796       return replaceInstUsesWith(I, Builder.getFalse());
5797     }
5798   }
5799 
5800   if (!LHSUnsigned) {
5801     // See if the RHS value is < SignedMin.
5802     APFloat SMin(RHS.getSemantics());
5803     SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5804                           APFloat::rmNearestTiesToEven);
5805     if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
5806       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
5807           Pred == ICmpInst::ICMP_SGE)
5808         return replaceInstUsesWith(I, Builder.getTrue());
5809       return replaceInstUsesWith(I, Builder.getFalse());
5810     }
5811   } else {
5812     // See if the RHS value is < UnsignedMin.
5813     APFloat SMin(RHS.getSemantics());
5814     SMin.convertFromAPInt(APInt::getMinValue(IntWidth), true,
5815                           APFloat::rmNearestTiesToEven);
5816     if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
5817       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
5818           Pred == ICmpInst::ICMP_UGE)
5819         return replaceInstUsesWith(I, Builder.getTrue());
5820       return replaceInstUsesWith(I, Builder.getFalse());
5821     }
5822   }
5823 
5824   // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
5825   // [0, UMAX], but it may still be fractional.  See if it is fractional by
5826   // casting the FP value to the integer value and back, checking for equality.
5827   // Don't do this for zero, because -0.0 is not fractional.
5828   Constant *RHSInt = LHSUnsigned
5829     ? ConstantExpr::getFPToUI(RHSC, IntTy)
5830     : ConstantExpr::getFPToSI(RHSC, IntTy);
5831   if (!RHS.isZero()) {
5832     bool Equal = LHSUnsigned
5833       ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
5834       : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
5835     if (!Equal) {
5836       // If we had a comparison against a fractional value, we have to adjust
5837       // the compare predicate and sometimes the value.  RHSC is rounded towards
5838       // zero at this point.
5839       switch (Pred) {
5840       default: llvm_unreachable("Unexpected integer comparison!");
5841       case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
5842         return replaceInstUsesWith(I, Builder.getTrue());
5843       case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
5844         return replaceInstUsesWith(I, Builder.getFalse());
5845       case ICmpInst::ICMP_ULE:
5846         // (float)int <= 4.4   --> int <= 4
5847         // (float)int <= -4.4  --> false
5848         if (RHS.isNegative())
5849           return replaceInstUsesWith(I, Builder.getFalse());
5850         break;
5851       case ICmpInst::ICMP_SLE:
5852         // (float)int <= 4.4   --> int <= 4
5853         // (float)int <= -4.4  --> int < -4
5854         if (RHS.isNegative())
5855           Pred = ICmpInst::ICMP_SLT;
5856         break;
5857       case ICmpInst::ICMP_ULT:
5858         // (float)int < -4.4   --> false
5859         // (float)int < 4.4    --> int <= 4
5860         if (RHS.isNegative())
5861           return replaceInstUsesWith(I, Builder.getFalse());
5862         Pred = ICmpInst::ICMP_ULE;
5863         break;
5864       case ICmpInst::ICMP_SLT:
5865         // (float)int < -4.4   --> int < -4
5866         // (float)int < 4.4    --> int <= 4
5867         if (!RHS.isNegative())
5868           Pred = ICmpInst::ICMP_SLE;
5869         break;
5870       case ICmpInst::ICMP_UGT:
5871         // (float)int > 4.4    --> int > 4
5872         // (float)int > -4.4   --> true
5873         if (RHS.isNegative())
5874           return replaceInstUsesWith(I, Builder.getTrue());
5875         break;
5876       case ICmpInst::ICMP_SGT:
5877         // (float)int > 4.4    --> int > 4
5878         // (float)int > -4.4   --> int >= -4
5879         if (RHS.isNegative())
5880           Pred = ICmpInst::ICMP_SGE;
5881         break;
5882       case ICmpInst::ICMP_UGE:
5883         // (float)int >= -4.4   --> true
5884         // (float)int >= 4.4    --> int > 4
5885         if (RHS.isNegative())
5886           return replaceInstUsesWith(I, Builder.getTrue());
5887         Pred = ICmpInst::ICMP_UGT;
5888         break;
5889       case ICmpInst::ICMP_SGE:
5890         // (float)int >= -4.4   --> int >= -4
5891         // (float)int >= 4.4    --> int > 4
5892         if (!RHS.isNegative())
5893           Pred = ICmpInst::ICMP_SGT;
5894         break;
5895       }
5896     }
5897   }
5898 
5899   // Lower this FP comparison into an appropriate integer version of the
5900   // comparison.
5901   return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
5902 }
5903 
5904 /// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
5905 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
5906                                               Constant *RHSC) {
5907   // When C is not 0.0 and infinities are not allowed:
5908   // (C / X) < 0.0 is a sign-bit test of X
5909   // (C / X) < 0.0 --> X < 0.0 (if C is positive)
5910   // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
5911   //
5912   // Proof:
5913   // Multiply (C / X) < 0.0 by X * X / C.
5914   // - X is non zero, if it is the flag 'ninf' is violated.
5915   // - C defines the sign of X * X * C. Thus it also defines whether to swap
5916   //   the predicate. C is also non zero by definition.
5917   //
5918   // Thus X * X / C is non zero and the transformation is valid. [qed]
5919 
5920   FCmpInst::Predicate Pred = I.getPredicate();
5921 
5922   // Check that predicates are valid.
5923   if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
5924       (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
5925     return nullptr;
5926 
5927   // Check that RHS operand is zero.
5928   if (!match(RHSC, m_AnyZeroFP()))
5929     return nullptr;
5930 
5931   // Check fastmath flags ('ninf').
5932   if (!LHSI->hasNoInfs() || !I.hasNoInfs())
5933     return nullptr;
5934 
5935   // Check the properties of the dividend. It must not be zero to avoid a
5936   // division by zero (see Proof).
5937   const APFloat *C;
5938   if (!match(LHSI->getOperand(0), m_APFloat(C)))
5939     return nullptr;
5940 
5941   if (C->isZero())
5942     return nullptr;
5943 
5944   // Get swapped predicate if necessary.
5945   if (C->isNegative())
5946     Pred = I.getSwappedPredicate();
5947 
5948   return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
5949 }
5950 
5951 /// Optimize fabs(X) compared with zero.
5952 static Instruction *foldFabsWithFcmpZero(FCmpInst &I) {
5953   Value *X;
5954   if (!match(I.getOperand(0), m_Intrinsic<Intrinsic::fabs>(m_Value(X))) ||
5955       !match(I.getOperand(1), m_PosZeroFP()))
5956     return nullptr;
5957 
5958   auto replacePredAndOp0 = [](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
5959     I->setPredicate(P);
5960     I->setOperand(0, X);
5961     return I;
5962   };
5963 
5964   switch (I.getPredicate()) {
5965   case FCmpInst::FCMP_UGE:
5966   case FCmpInst::FCMP_OLT:
5967     // fabs(X) >= 0.0 --> true
5968     // fabs(X) <  0.0 --> false
5969     llvm_unreachable("fcmp should have simplified");
5970 
5971   case FCmpInst::FCMP_OGT:
5972     // fabs(X) > 0.0 --> X != 0.0
5973     return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
5974 
5975   case FCmpInst::FCMP_UGT:
5976     // fabs(X) u> 0.0 --> X u!= 0.0
5977     return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
5978 
5979   case FCmpInst::FCMP_OLE:
5980     // fabs(X) <= 0.0 --> X == 0.0
5981     return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
5982 
5983   case FCmpInst::FCMP_ULE:
5984     // fabs(X) u<= 0.0 --> X u== 0.0
5985     return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
5986 
5987   case FCmpInst::FCMP_OGE:
5988     // fabs(X) >= 0.0 --> !isnan(X)
5989     assert(!I.hasNoNaNs() && "fcmp should have simplified");
5990     return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
5991 
5992   case FCmpInst::FCMP_ULT:
5993     // fabs(X) u< 0.0 --> isnan(X)
5994     assert(!I.hasNoNaNs() && "fcmp should have simplified");
5995     return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
5996 
5997   case FCmpInst::FCMP_OEQ:
5998   case FCmpInst::FCMP_UEQ:
5999   case FCmpInst::FCMP_ONE:
6000   case FCmpInst::FCMP_UNE:
6001   case FCmpInst::FCMP_ORD:
6002   case FCmpInst::FCMP_UNO:
6003     // Look through the fabs() because it doesn't change anything but the sign.
6004     // fabs(X) == 0.0 --> X == 0.0,
6005     // fabs(X) != 0.0 --> X != 0.0
6006     // isnan(fabs(X)) --> isnan(X)
6007     // !isnan(fabs(X)) --> !isnan(X)
6008     return replacePredAndOp0(&I, I.getPredicate(), X);
6009 
6010   default:
6011     return nullptr;
6012   }
6013 }
6014 
6015 Instruction *InstCombiner::visitFCmpInst(FCmpInst &I) {
6016   bool Changed = false;
6017 
6018   /// Orders the operands of the compare so that they are listed from most
6019   /// complex to least complex. This moves constants to the RHS, after unary
6020   /// operators, which in turn come after binary operators.
6021   if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6022     I.swapOperands();
6023     Changed = true;
6024   }
6025 
6026   const CmpInst::Predicate Pred = I.getPredicate();
6027   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6028   if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
6029                                   SQ.getWithInstruction(&I)))
6030     return replaceInstUsesWith(I, V);
6031 
6032   // Simplify 'fcmp pred X, X'
6033   Type *OpType = Op0->getType();
6034   assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
6035   if (Op0 == Op1) {
6036     switch (Pred) {
6037       default: break;
6038     case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
6039     case FCmpInst::FCMP_ULT:    // True if unordered or less than
6040     case FCmpInst::FCMP_UGT:    // True if unordered or greater than
6041     case FCmpInst::FCMP_UNE:    // True if unordered or not equal
6042       // Canonicalize these to be 'fcmp uno %X, 0.0'.
6043       I.setPredicate(FCmpInst::FCMP_UNO);
6044       I.setOperand(1, Constant::getNullValue(OpType));
6045       return &I;
6046 
6047     case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
6048     case FCmpInst::FCMP_OEQ:    // True if ordered and equal
6049     case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
6050     case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
6051       // Canonicalize these to be 'fcmp ord %X, 0.0'.
6052       I.setPredicate(FCmpInst::FCMP_ORD);
6053       I.setOperand(1, Constant::getNullValue(OpType));
6054       return &I;
6055     }
6056   }
6057 
6058   // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
6059   // then canonicalize the operand to 0.0.
6060   if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
6061     if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI)) {
6062       I.setOperand(0, ConstantFP::getNullValue(OpType));
6063       return &I;
6064     }
6065     if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI)) {
6066       I.setOperand(1, ConstantFP::getNullValue(OpType));
6067       return &I;
6068     }
6069   }
6070 
6071   // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
6072   Value *X, *Y;
6073   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
6074     return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
6075 
6076   // Test if the FCmpInst instruction is used exclusively by a select as
6077   // part of a minimum or maximum operation. If so, refrain from doing
6078   // any other folding. This helps out other analyses which understand
6079   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6080   // and CodeGen. And in this case, at least one of the comparison
6081   // operands has at least one user besides the compare (the select),
6082   // which would often largely negate the benefit of folding anyway.
6083   if (I.hasOneUse())
6084     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
6085       Value *A, *B;
6086       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
6087       if (SPR.Flavor != SPF_UNKNOWN)
6088         return nullptr;
6089     }
6090 
6091   // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
6092   // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
6093   if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP())) {
6094     I.setOperand(1, ConstantFP::getNullValue(OpType));
6095     return &I;
6096   }
6097 
6098   // Handle fcmp with instruction LHS and constant RHS.
6099   Instruction *LHSI;
6100   Constant *RHSC;
6101   if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
6102     switch (LHSI->getOpcode()) {
6103     case Instruction::PHI:
6104       // Only fold fcmp into the PHI if the phi and fcmp are in the same
6105       // block.  If in the same block, we're encouraging jump threading.  If
6106       // not, we are just pessimizing the code by making an i1 phi.
6107       if (LHSI->getParent() == I.getParent())
6108         if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
6109           return NV;
6110       break;
6111     case Instruction::SIToFP:
6112     case Instruction::UIToFP:
6113       if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
6114         return NV;
6115       break;
6116     case Instruction::FDiv:
6117       if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
6118         return NV;
6119       break;
6120     case Instruction::Load:
6121       if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
6122         if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
6123           if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
6124               !cast<LoadInst>(LHSI)->isVolatile())
6125             if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
6126               return Res;
6127       break;
6128     }
6129   }
6130 
6131   if (Instruction *R = foldFabsWithFcmpZero(I))
6132     return R;
6133 
6134   if (match(Op0, m_FNeg(m_Value(X)))) {
6135     // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
6136     Constant *C;
6137     if (match(Op1, m_Constant(C))) {
6138       Constant *NegC = ConstantExpr::getFNeg(C);
6139       return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
6140     }
6141   }
6142 
6143   if (match(Op0, m_FPExt(m_Value(X)))) {
6144     // fcmp (fpext X), (fpext Y) -> fcmp X, Y
6145     if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
6146       return new FCmpInst(Pred, X, Y, "", &I);
6147 
6148     // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
6149     const APFloat *C;
6150     if (match(Op1, m_APFloat(C))) {
6151       const fltSemantics &FPSem =
6152           X->getType()->getScalarType()->getFltSemantics();
6153       bool Lossy;
6154       APFloat TruncC = *C;
6155       TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
6156 
6157       // Avoid lossy conversions and denormals.
6158       // Zero is a special case that's OK to convert.
6159       APFloat Fabs = TruncC;
6160       Fabs.clearSign();
6161       if (!Lossy &&
6162           ((Fabs.compare(APFloat::getSmallestNormalized(FPSem)) !=
6163             APFloat::cmpLessThan) || Fabs.isZero())) {
6164         Constant *NewC = ConstantFP::get(X->getType(), TruncC);
6165         return new FCmpInst(Pred, X, NewC, "", &I);
6166       }
6167     }
6168   }
6169 
6170   if (I.getType()->isVectorTy())
6171     if (Instruction *Res = foldVectorCmp(I, Builder))
6172       return Res;
6173 
6174   return Changed ? &I : nullptr;
6175 }
6176