//===- InstCombineCompares.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitICmp and visitFCmp functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

// How many times is a select replaced by one of its operands?
STATISTIC(NumSel, "Number of select opts");


/// Compute Result = In1+In2, returning true if the result overflowed for this
/// type.
static bool addWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.sadd_ov(In2, Overflow);
  else
    Result = In1.uadd_ov(In2, Overflow);

  return Overflow;
}

/// Compute Result = In1-In2, returning true if the result overflowed for this
/// type.
static bool subWithOverflow(APInt &Result, const APInt &In1,
                            const APInt &In2, bool IsSigned = false) {
  bool Overflow;
  if (IsSigned)
    Result = In1.ssub_ov(In2, Overflow);
  else
    Result = In1.usub_ov(In2, Overflow);

  return Overflow;
}

/// Given an icmp instruction, return true if any use of this comparison is a
/// branch instruction.
static bool hasBranchUse(ICmpInst &I) {
  for (auto *U : I.users())
    if (isa<BranchInst>(U))
      return true;
  return false;
}

/// Returns true if the exploded icmp can be expressed as a signed comparison
/// to zero and updates the predicate accordingly.
/// The signedness of the comparison is preserved.
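/// For example, "icmp slt X, 1" becomes "icmp sle X, 0", and "icmp sgt X, -1"
/// becomes "icmp sge X, 0".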
/// TODO: Refactor with decomposeBitTestICmp()?
static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
  if (!ICmpInst::isSigned(Pred))
    return false;

  if (C.isNullValue())
    return ICmpInst::isRelational(Pred);

  if (C.isOneValue()) {
    if (Pred == ICmpInst::ICMP_SLT) {
      Pred = ICmpInst::ICMP_SLE;
      return true;
    }
  } else if (C.isAllOnesValue()) {
    if (Pred == ICmpInst::ICMP_SGT) {
      Pred = ICmpInst::ICMP_SGE;
      return true;
    }
  }

  return false;
}

/// This is called when we see this pattern:
///   cmp pred (load (gep GV, ...)), cmpcst
/// where GV is a global variable with a constant initializer. Try to simplify
/// this into some simple computation that does not need the load. For example,
/// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
///
/// If AndCst is non-null, then the loaded value is masked with that constant
/// before doing the comparison. This handles cases like "A[i]&4 == 0".
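///
/// Illustrative IR for the example above (assuming @foo = [4 x i8] c"foo\00"):
///   %gep = getelementptr inbounds [4 x i8], [4 x i8]* @foo, i64 0, i64 %i
///   %ld  = load i8, i8* %gep
///   %cmp = icmp eq i8 %ld, 0
/// Only element 3 of the initializer is zero, so %cmp folds to
/// "icmp eq i64 %i, 3".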
Instruction *
InstCombinerImpl::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
                                               GlobalVariable *GV, CmpInst &ICI,
                                               ConstantInt *AndCst) {
  Constant *Init = GV->getInitializer();
  if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
    return nullptr;

  uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
  // Don't blow up on huge arrays.
  if (ArrayElementCount > MaxArraySizeForCombine)
    return nullptr;

  // There are many forms of this optimization we can handle; for now, just do
  // the simple index into a single-dimensional array.
  //
  // Require: GEP GV, 0, i {{, constant indices}}
  if (GEP->getNumOperands() < 3 ||
      !isa<ConstantInt>(GEP->getOperand(1)) ||
      !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
      isa<Constant>(GEP->getOperand(2)))
    return nullptr;

  // Check that indices after the variable are constants and in-range for the
  // type they index.  Collect the indices.  This is typically for arrays of
  // structs.
  SmallVector<unsigned, 4> LaterIndices;

  Type *EltTy = Init->getType()->getArrayElementType();
  for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
    ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!Idx) return nullptr;  // Variable index.

    uint64_t IdxVal = Idx->getZExtValue();
    if ((unsigned)IdxVal != IdxVal) return nullptr; // Too large array index.

    if (StructType *STy = dyn_cast<StructType>(EltTy))
      EltTy = STy->getElementType(IdxVal);
    else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
      if (IdxVal >= ATy->getNumElements()) return nullptr;
      EltTy = ATy->getElementType();
    } else {
      return nullptr; // Unknown type.
    }

    LaterIndices.push_back(IdxVal);
  }

  enum { Overdefined = -3, Undefined = -2 };

  // Variables for our state machines.

  // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
  // "i == 47 | i == 87", where 47 is the first index the condition is true for,
  // and 87 is the second (and last) index.  FirstTrueElement is -2 when
  // undefined, otherwise set to the first true element.  SecondTrueElement is
  // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
  int FirstTrueElement = Undefined, SecondTrueElement = Undefined;

  // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
  // form "i != 47 & i != 87".  Same state transitions as for true elements.
  int FirstFalseElement = Undefined, SecondFalseElement = Undefined;

  /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
  /// define a state machine that triggers for ranges of values that the index
  /// is true or false for.  This triggers on things like "abbbbc"[i] == 'b'.
  /// This is -2 when undefined, -3 when overdefined, and otherwise the last
  /// index in the range (inclusive).  We use -2 for undefined here because we
  /// use relative comparisons and don't want 0-1 to match -1.
  int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;

  // MagicBitvector - This is a magic bitvector where we set a bit if the
  // comparison is true for element 'i'.  If the array has 64 or fewer
  // elements, this fully represents all the comparison results.
  uint64_t MagicBitvector = 0;

  // Scan the array and see if one of our patterns matches.
  Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
  for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
    Constant *Elt = Init->getAggregateElement(i);
    if (!Elt) return nullptr;

    // If this is indexing an array of structures, get the structure element.
    if (!LaterIndices.empty())
      Elt = ConstantExpr::getExtractValue(Elt, LaterIndices);

    // If the element is masked, handle it.
    if (AndCst) Elt = ConstantExpr::getAnd(Elt, AndCst);

    // Find out if the comparison would be true or false for the i'th element.
    Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
                                                  CompareRHS, DL, &TLI);
    // If the result is undef for this element, ignore it.
    if (isa<UndefValue>(C)) {
      // Extend range state machines to cover this element in case there is an
      // undef in the middle of the range.
      if (TrueRangeEnd == (int)i-1)
        TrueRangeEnd = i;
      if (FalseRangeEnd == (int)i-1)
        FalseRangeEnd = i;
      continue;
    }

    // If we can't compute the result for any of the elements, we have to give
    // up evaluating the entire conditional.
    if (!isa<ConstantInt>(C)) return nullptr;

    // Otherwise, we know if the comparison is true or false for this element;
    // update our state machines.
    bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();

    // State machine for single/double/range index comparison.
    if (IsTrueForElt) {
      // Update the TrueElement state machine.
      if (FirstTrueElement == Undefined)
        FirstTrueElement = TrueRangeEnd = i;  // First true element.
      else {
        // Update double-compare state machine.
        if (SecondTrueElement == Undefined)
          SecondTrueElement = i;
        else
          SecondTrueElement = Overdefined;

        // Update range state machine.
        if (TrueRangeEnd == (int)i-1)
          TrueRangeEnd = i;
        else
          TrueRangeEnd = Overdefined;
      }
    } else {
      // Update the FalseElement state machine.
      if (FirstFalseElement == Undefined)
        FirstFalseElement = FalseRangeEnd = i; // First false element.
      else {
        // Update double-compare state machine.
        if (SecondFalseElement == Undefined)
          SecondFalseElement = i;
        else
          SecondFalseElement = Overdefined;

        // Update range state machine.
        if (FalseRangeEnd == (int)i-1)
          FalseRangeEnd = i;
        else
          FalseRangeEnd = Overdefined;
      }
    }

    // If this element is in range, update our magic bitvector.
    if (i < 64 && IsTrueForElt)
      MagicBitvector |= 1ULL << i;

    // If all of our states become overdefined, bail out early.  Since the
    // predicate is expensive, only check it every 8 elements.  This is only
    // really useful for very large arrays.
    if ((i & 7) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
        SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
        FalseRangeEnd == Overdefined)
      return nullptr;
  }

  // Now that we've scanned the entire array, emit our new comparison(s).  We
  // order the state machines in complexity of the generated code.
  Value *Idx = GEP->getOperand(2);

  // If the index is larger than the pointer size of the target, truncate the
  // index down like the GEP would do implicitly.  We don't have to do this for
  // an inbounds GEP because the index can't be out of range.
  if (!GEP->isInBounds()) {
    Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
    unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
    if (Idx->getType()->getPrimitiveSizeInBits().getFixedSize() > PtrSize)
      Idx = Builder.CreateTrunc(Idx, IntPtrTy);
  }

  // If the comparison is only true for one or two elements, emit direct
  // comparisons.
  if (SecondTrueElement != Overdefined) {
    // None true -> false.
    if (FirstTrueElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getFalse());

    Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);

    // True for one element -> 'i == 47'.
    if (SecondTrueElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);

    // True for two elements -> 'i == 47 | i == 72'.
    Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
    Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
    Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
    return BinaryOperator::CreateOr(C1, C2);
  }

  // If the comparison is only false for one or two elements, emit direct
  // comparisons.
  if (SecondFalseElement != Overdefined) {
    // None false -> true.
    if (FirstFalseElement == Undefined)
      return replaceInstUsesWith(ICI, Builder.getTrue());

    Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);

    // False for one element -> 'i != 47'.
    if (SecondFalseElement == Undefined)
      return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);

    // False for two elements -> 'i != 47 & i != 72'.
    Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
    Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
    Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
    return BinaryOperator::CreateAnd(C1, C2);
  }

  // If the comparison can be replaced with a range comparison for the elements
  // where it is true, emit the range check.
  if (TrueRangeEnd != Overdefined) {
    assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");

    // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
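    // For example, if the comparison is true exactly for i in [2, 5], this
    // emits (i-2) <u 4.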
    if (FirstTrueElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  TrueRangeEnd-FirstTrueElement+1);
    return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
  }

  // False range check.
  if (FalseRangeEnd != Overdefined) {
    assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
    // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
    if (FirstFalseElement) {
      Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
      Idx = Builder.CreateAdd(Idx, Offs);
    }

    Value *End = ConstantInt::get(Idx->getType(),
                                  FalseRangeEnd-FirstFalseElement);
    return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
  }

  // If a magic bitvector captures the entire comparison state
  // of this load, replace it with computation that does:
  //   ((magic_cst >> i) & 1) != 0
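  // For example, if the comparison is true only for elements 0 and 2, the
  // magic constant is 0b101 and the load becomes ((5 >> i) & 1) != 0.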
  {
    Type *Ty = nullptr;

    // Look for an appropriate type:
    // - The type of Idx if the magic fits
    // - The smallest fitting legal type
    if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
      Ty = Idx->getType();
    else
      Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);

    if (Ty) {
      Value *V = Builder.CreateIntCast(Idx, Ty, false);
      V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
      V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
      return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
    }
  }

  return nullptr;
}

/// Return a value that can be used to compare the *offset* implied by a GEP to
/// zero. For example, if we have &A[i], we want to return 'i' for
/// "icmp ne i, 0". Note that, in general, indices can be complex, and scales
/// are involved. The above expression would also be legal to codegen as
/// "icmp ne (i*4), 0" (assuming A is a pointer to i32).
/// This latter form is less amenable to optimization though, and we are allowed
/// to generate the first by knowing that pointer arithmetic doesn't overflow.
///
/// If we can't emit an optimized form for this expression, this returns null.
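///
/// For example, a GEP implying the offset "12 + i*4" (say, &A[3+i] with A a
/// pointer to i32) yields "i + 3", since both expressions cross zero together.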
///
static Value *evaluateGEPOffsetExpression(User *GEP, InstCombinerImpl &IC,
                                          const DataLayout &DL) {
  gep_type_iterator GTI = gep_type_begin(GEP);

  // Check to see if this gep only has a single variable index.  If so, and if
  // any constant indices are a multiple of its scale, then we can compute this
  // in terms of the scale of the variable index.  For example, if the GEP
  // implies an offset of "12 + i*4", then we can codegen this as "3 + i",
  // because the expression will cross zero at the same point.
  unsigned i, e = GEP->getNumOperands();
  int64_t Offset = 0;
  for (i = 1; i != e; ++i, ++GTI) {
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      // Compute the aggregate offset of constant indices.
      if (CI->isZero()) continue;

      // Handle a struct index, which adds its field offset to the pointer.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
      } else {
        uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
        Offset += Size*CI->getSExtValue();
      }
    } else {
      // Found our variable index.
      break;
    }
  }

  // If there are no variable indices, we must have a constant offset; just
  // evaluate it the general way.
  if (i == e) return nullptr;

  Value *VariableIdx = GEP->getOperand(i);
  // Determine the scale factor of the variable element.  For example, this is
  // 4 if the variable index is into an array of i32.
  uint64_t VariableScale = DL.getTypeAllocSize(GTI.getIndexedType());

  // Verify that there are no other variable indices; if there are, emit the
  // hard way.
  for (++i, ++GTI; i != e; ++i, ++GTI) {
    ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!CI) return nullptr;

    // Compute the aggregate offset of constant indices.
    if (CI->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(CI->getZExtValue());
    } else {
      uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
      Offset += Size*CI->getSExtValue();
    }
  }

  // Okay, we know we have a single variable index, which must be a
  // pointer/array/vector index.  If there is no offset, life is simple:
  // return the index.
  Type *IntPtrTy = DL.getIntPtrType(GEP->getOperand(0)->getType());
  unsigned IntPtrWidth = IntPtrTy->getIntegerBitWidth();
  if (Offset == 0) {
    // Cast to IntPtrTy in case a truncation occurs.  If an extension is
    // needed, we don't need to bother extending: the extension won't affect
    // where the computation crosses zero.
    if (VariableIdx->getType()->getPrimitiveSizeInBits().getFixedSize() >
        IntPtrWidth) {
      VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
    }
    return VariableIdx;
  }

  // Otherwise, there is a non-zero constant offset.  The computation we will
  // do will be modulo the pointer size.
  Offset = SignExtend64(Offset, IntPtrWidth);
  VariableScale = SignExtend64(VariableScale, IntPtrWidth);

  // To do this transformation, any constant index must be a multiple of the
  // variable scale factor.  For example, we can evaluate "12 + 4*i" as "3 + i",
  // but we can't evaluate "10 + 3*i" in terms of i.  Check that the offset is a
  // multiple of the variable scale.
  int64_t NewOffs = Offset / (int64_t)VariableScale;
  if (Offset != NewOffs*(int64_t)VariableScale)
    return nullptr;

  // Okay, we can do this evaluation.  Start by converting the index to intptr.
  if (VariableIdx->getType() != IntPtrTy)
    VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
                                            true /*Signed*/);
  Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
  return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
}

/// Returns true if we can rewrite Start as a GEP with pointer Base
/// and some integer offset. The nodes that need to be re-written
/// for this transformation will be added to Explored.
static bool canRewriteGEPAsOffset(Value *Start, Value *Base,
                                  const DataLayout &DL,
                                  SetVector<Value *> &Explored) {
  SmallVector<Value *, 16> WorkList(1, Start);
  Explored.insert(Base);

  // The following traversal gives us an order which can be used
  // when doing the final transformation. Since in the final
  // transformation we create the PHI replacement instructions first,
  // we don't have to get them in any particular order.
  //
  // However, for other instructions we will have to traverse the
  // operands of an instruction first, which means that we have to
  // do a post-order traversal.
  while (!WorkList.empty()) {
    SetVector<PHINode *> PHIs;

    while (!WorkList.empty()) {
      if (Explored.size() >= 100)
        return false;

      Value *V = WorkList.back();

      if (Explored.contains(V)) {
        WorkList.pop_back();
        continue;
      }

      if (!isa<IntToPtrInst>(V) && !isa<PtrToIntInst>(V) &&
          !isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
        // We've found some value that we can't explore which is different from
        // the base. Therefore we can't do this transformation.
        return false;

      if (isa<IntToPtrInst>(V) || isa<PtrToIntInst>(V)) {
        auto *CI = cast<CastInst>(V);
        if (!CI->isNoopCast(DL))
          return false;

        if (Explored.count(CI->getOperand(0)) == 0)
          WorkList.push_back(CI->getOperand(0));
      }

      if (auto *GEP = dyn_cast<GEPOperator>(V)) {
        // We're limiting the GEP to having one index. This will preserve
        // the original pointer type. We could handle more cases in the
        // future.
        if (GEP->getNumIndices() != 1 || !GEP->isInBounds() ||
            GEP->getType() != Start->getType())
          return false;

        if (Explored.count(GEP->getOperand(0)) == 0)
          WorkList.push_back(GEP->getOperand(0));
      }

      if (WorkList.back() == V) {
        WorkList.pop_back();
        // We've finished visiting this node; mark it as such.
        Explored.insert(V);
      }

      if (auto *PN = dyn_cast<PHINode>(V)) {
        // We cannot transform PHIs on unsplittable basic blocks.
        if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
          return false;
        Explored.insert(PN);
        PHIs.insert(PN);
      }
    }

    // Explore the PHI nodes further.
    for (auto *PN : PHIs)
      for (Value *Op : PN->incoming_values())
        if (Explored.count(Op) == 0)
          WorkList.push_back(Op);
  }

  // Make sure that we can do this. Since we can't insert GEPs in a basic
  // block before a PHI node, we can't easily do this transformation if
  // we have PHI node users of transformed instructions.
  for (Value *Val : Explored) {
    for (User *U : Val->users()) {
      auto *PHI = dyn_cast<PHINode>(U);
      auto *Inst = dyn_cast<Instruction>(Val);

      if (Inst == Base || Inst == PHI || !Inst || !PHI ||
          Explored.count(PHI) == 0)
        continue;

      if (PHI->getParent() == Inst->getParent())
        return false;
    }
  }
  return true;
}

// Sets the appropriate insert point on Builder where we can add
// a replacement Instruction for V (if that is possible).
static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
                              bool Before = true) {
  if (auto *PHI = dyn_cast<PHINode>(V)) {
    Builder.SetInsertPoint(&*PHI->getParent()->getFirstInsertionPt());
    return;
  }
  if (auto *I = dyn_cast<Instruction>(V)) {
    if (!Before)
      I = &*std::next(I->getIterator());
    Builder.SetInsertPoint(I);
    return;
  }
  if (auto *A = dyn_cast<Argument>(V)) {
    // Set the insertion point in the entry block.
    BasicBlock &Entry = A->getParent()->getEntryBlock();
    Builder.SetInsertPoint(&*Entry.getFirstInsertionPt());
    return;
  }
  // Otherwise, this is a constant and we don't need to set a new
  // insertion point.
  assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
}

/// Returns a re-written value of Start as an indexed GEP using Base as a
/// pointer.
static Value *rewriteGEPAsOffset(Value *Start, Value *Base,
                                 const DataLayout &DL,
                                 SetVector<Value *> &Explored) {
  // Perform all the substitutions. This is a bit tricky because we can
  // have cycles in our use-def chains.
  // 1. Create the PHI nodes without any incoming values.
  // 2. Create all the other values.
  // 3. Add the edges for the PHI nodes.
  // 4. Emit GEPs to get the original pointers.
  // 5. Remove the original instructions.
  Type *IndexType = IntegerType::get(
      Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));

  DenseMap<Value *, Value *> NewInsts;
  NewInsts[Base] = ConstantInt::getNullValue(IndexType);

  // Create the new PHI nodes, without adding any incoming values.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // Create empty phi nodes. This avoids cyclic dependencies when creating
    // the remaining instructions.
    if (auto *PHI = dyn_cast<PHINode>(Val))
      NewInsts[PHI] = PHINode::Create(IndexType, PHI->getNumIncomingValues(),
                                      PHI->getName() + ".idx", PHI);
  }
  IRBuilder<> Builder(Base->getContext());

  // Create all the other instructions.
  for (Value *Val : Explored) {

    if (NewInsts.find(Val) != NewInsts.end())
      continue;

    if (auto *CI = dyn_cast<CastInst>(Val)) {
      // Don't get rid of the intermediate variable here; the store can grow
      // the map which will invalidate the reference to the input value.
      Value *V = NewInsts[CI->getOperand(0)];
      NewInsts[CI] = V;
      continue;
    }
    if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
      Value *Index = NewInsts[GEP->getOperand(1)] ? NewInsts[GEP->getOperand(1)]
                                                  : GEP->getOperand(1);
      setInsertionPoint(Builder, GEP);
      // Indices might need to be sign extended. GEPs will magically do
      // this, but we need to do it ourselves here.
      if (Index->getType()->getScalarSizeInBits() !=
          NewInsts[GEP->getOperand(0)]->getType()->getScalarSizeInBits()) {
        Index = Builder.CreateSExtOrTrunc(
            Index, NewInsts[GEP->getOperand(0)]->getType(),
            GEP->getOperand(0)->getName() + ".sext");
      }

      auto *Op = NewInsts[GEP->getOperand(0)];
      if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
        NewInsts[GEP] = Index;
      else
        NewInsts[GEP] = Builder.CreateNSWAdd(
            Op, Index, GEP->getOperand(0)->getName() + ".add");
      continue;
    }
    if (isa<PHINode>(Val))
      continue;

    llvm_unreachable("Unexpected instruction type");
  }

  // Add the incoming values to the PHI nodes.
  for (Value *Val : Explored) {
    if (Val == Base)
      continue;
    // All the instructions have been created; we can now add edges to the
    // phi nodes.
    if (auto *PHI = dyn_cast<PHINode>(Val)) {
      PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
      for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
        Value *NewIncoming = PHI->getIncomingValue(I);

        if (NewInsts.find(NewIncoming) != NewInsts.end())
          NewIncoming = NewInsts[NewIncoming];

        NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
      }
    }
  }

  for (Value *Val : Explored) {
    if (Val == Base)
      continue;

    // Depending on the type, for external users we have to emit
    // a GEP or a GEP + ptrtoint.
    setInsertionPoint(Builder, Val, false);

    // If required, create an inttoptr instruction for Base.
    Value *NewBase = Base;
    if (!Base->getType()->isPointerTy())
      NewBase = Builder.CreateBitOrPointerCast(Base, Start->getType(),
                                               Start->getName() + "to.ptr");

    Value *GEP = Builder.CreateInBoundsGEP(
        Start->getType()->getPointerElementType(), NewBase,
        makeArrayRef(NewInsts[Val]), Val->getName() + ".ptr");

    if (!Val->getType()->isPointerTy()) {
      Value *Cast = Builder.CreatePointerCast(GEP, Val->getType(),
                                              Val->getName() + ".conv");
      GEP = Cast;
    }
    Val->replaceAllUsesWith(GEP);
  }

  return NewInsts[Start];
}

/// Looks through GEPs, IntToPtrInsts and PtrToIntInsts in order to express
/// the input Value as a constant indexed GEP. Returns a pair containing
/// the GEP's pointer and index.
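/// For example (illustrative), given "gep inbounds (gep inbounds P, 4), 3"
/// this returns {P, 7}, accumulating the constant indices into one index.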
static std::pair<Value *, Value *>
getAsConstantIndexedAddress(Value *V, const DataLayout &DL) {
  Type *IndexType = IntegerType::get(V->getContext(),
                                     DL.getIndexTypeSizeInBits(V->getType()));

  Constant *Index = ConstantInt::getNullValue(IndexType);
  while (true) {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      // We accept only inbounds GEPs here to exclude the possibility of
      // overflow.
      if (!GEP->isInBounds())
        break;
      if (GEP->hasAllConstantIndices() && GEP->getNumIndices() == 1 &&
          GEP->getType() == V->getType()) {
        V = GEP->getOperand(0);
        Constant *GEPIndex = static_cast<Constant *>(GEP->getOperand(1));
        Index = ConstantExpr::getAdd(
            Index, ConstantExpr::getSExtOrBitCast(GEPIndex, IndexType));
        continue;
      }
      break;
    }
    if (auto *CI = dyn_cast<IntToPtrInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    if (auto *CI = dyn_cast<PtrToIntInst>(V)) {
      if (!CI->isNoopCast(DL))
        break;
      V = CI->getOperand(0);
      continue;
    }
    break;
  }
  return {V, Index};
}

/// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
/// We can look through PHIs, GEPs and casts in order to determine a common base
/// between GEPLHS and RHS.
static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
                                              ICmpInst::Predicate Cond,
                                              const DataLayout &DL) {
  // FIXME: Support vector of pointers.
  if (GEPLHS->getType()->isVectorTy())
    return nullptr;

  if (!GEPLHS->hasAllConstantIndices())
    return nullptr;

  // Make sure the pointers have the same type.
  if (GEPLHS->getType() != RHS->getType())
    return nullptr;

  Value *PtrBase, *Index;
  std::tie(PtrBase, Index) = getAsConstantIndexedAddress(GEPLHS, DL);

  // The set of nodes that will take part in this transformation.
  SetVector<Value *> Nodes;

  if (!canRewriteGEPAsOffset(RHS, PtrBase, DL, Nodes))
    return nullptr;

  // We know we can re-write this as
  //   ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
  // Since we've only looked through inbounds GEPs we know that we
  // can't have overflow on either side. We can therefore re-write
  // this as:
  //   OFFSET1 cmp OFFSET2
  Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, DL, Nodes);

  // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
  // GEP having PtrBase as the pointer base, and has returned in NewRHS the
  // offset. Since Index is the offset of LHS to the base pointer, we will now
  // compare the offsets instead of comparing the pointers.
  return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Index, NewRHS);
}

/// Fold comparisons between a GEP instruction and something else. At this point
/// we know that the GEP is on the LHS of the comparison.
Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
                                           ICmpInst::Predicate Cond,
                                           Instruction &I) {
  // Don't transform signed compares of GEPs into index compares. Even if the
  // GEP is inbounds, the final add of the base pointer can have signed overflow
  // and would change the result of the icmp.
  // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
  // the maximum signed value for the pointer type.
  if (ICmpInst::isSigned(Cond))
    return nullptr;

  // Look through bitcasts and addrspacecasts. We do not however want to remove
  // 0 GEPs.
  if (!isa<GetElementPtrInst>(RHS))
    RHS = RHS->stripPointerCasts();

  Value *PtrBase = GEPLHS->getOperand(0);
  // FIXME: Support vector pointer GEPs.
  if (PtrBase == RHS && GEPLHS->isInBounds() &&
      !GEPLHS->getType()->isVectorTy()) {
    // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
    // This transformation (ignoring the base and scales) is valid because we
    // know pointers can't overflow since the gep is inbounds.  See if we can
    // output an optimized form.
    Value *Offset = evaluateGEPOffsetExpression(GEPLHS, *this, DL);

    // If not, synthesize the offset the hard way.
    if (!Offset)
      Offset = EmitGEPOffset(GEPLHS);
    return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Offset,
                        Constant::getNullValue(Offset->getType()));
  }

  if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
      isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
      !NullPointerIsDefined(I.getFunction(),
                            RHS->getType()->getPointerAddressSpace())) {
    // For most address spaces, an allocation can't be placed at null, but null
    // itself is treated as a zero-size allocation in the inbounds rules.  Thus,
    // the only valid inbounds address derived from null is null itself.
    // Thus, we have four cases to consider:
    // 1) Base == nullptr, Offset == 0 -> inbounds, null
    // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
    // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
    // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
    //
    // (Note if we're indexing a type of size 0, that simply collapses into one
    //  of the buckets above.)
    //
    // In general, we're allowed to make values less poison (i.e. remove
    //   sources of full UB), so in this case, we just select between the two
    //   non-poison cases (1 and 4 above).
    //
    // For vectors, we apply the same reasoning on a per-lane basis.
    auto *Base = GEPLHS->getPointerOperand();
    if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
      auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount();
      Base = Builder.CreateVectorSplat(EC, Base);
    }
    return new ICmpInst(Cond, Base,
                        ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                            cast<Constant>(RHS), Base->getType()));
  } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
    // If the base pointers are different, but the indices are the same, just
    // compare the base pointer.
    if (PtrBase != GEPRHS->getOperand(0)) {
      bool IndicesTheSame = GEPLHS->getNumOperands()==GEPRHS->getNumOperands();
      IndicesTheSame &= GEPLHS->getOperand(0)->getType() ==
                        GEPRHS->getOperand(0)->getType();
      if (IndicesTheSame)
        for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
          if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
            IndicesTheSame = false;
            break;
          }

      // If all indices are the same, just compare the base pointers.
      Type *BaseType = GEPLHS->getOperand(0)->getType();
      if (IndicesTheSame && CmpInst::makeCmpResultType(BaseType) == I.getType())
        return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));

      // If we're comparing GEPs with two base pointers that only differ in type
      // and both GEPs have only constant indices or just one use, then fold
      // the compare with the adjusted indices.
      // FIXME: Support vector of pointers.
      if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
          (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
          (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
          PtrBase->stripPointerCasts() ==
              GEPRHS->getOperand(0)->stripPointerCasts() &&
          !GEPLHS->getType()->isVectorTy()) {
        Value *LOffset = EmitGEPOffset(GEPLHS);
        Value *ROffset = EmitGEPOffset(GEPRHS);

        // If we looked through an addrspacecast between different sized address
        // spaces, the LHS and RHS pointers are different sized
        // integers. Truncate to the smaller one.
        Type *LHSIndexTy = LOffset->getType();
        Type *RHSIndexTy = ROffset->getType();
        if (LHSIndexTy != RHSIndexTy) {
          if (LHSIndexTy->getPrimitiveSizeInBits().getFixedSize() <
              RHSIndexTy->getPrimitiveSizeInBits().getFixedSize()) {
            ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
          } else
            LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
        }

        Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
                                        LOffset, ROffset);
        return replaceInstUsesWith(I, Cmp);
      }

      // Otherwise, the base pointers are different and the indices are
      // different. Try to convert this to an indexed compare by looking
      // through PHIs/casts.
      return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
    }

    // If one of the GEPs has all zero indices, recurse.
    // FIXME: Handle vector of pointers.
    if (!GEPLHS->getType()->isVectorTy() && GEPLHS->hasAllZeroIndices())
      return foldGEPICmp(GEPRHS, GEPLHS->getOperand(0),
                         ICmpInst::getSwappedPredicate(Cond), I);

    // If the other GEP has all zero indices, recurse.
    // FIXME: Handle vector of pointers.
    if (!GEPRHS->getType()->isVectorTy() && GEPRHS->hasAllZeroIndices())
      return foldGEPICmp(GEPLHS, GEPRHS->getOperand(0), Cond, I);

    bool GEPsInBounds = GEPLHS->isInBounds() && GEPRHS->isInBounds();
    if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands()) {
      // If the GEPs only differ by one index, compare it.
      unsigned NumDifferences = 0;  // Keep track of # differences.
      unsigned DiffOperand = 0;     // The operand that differs.
      for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
        if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
          Type *LHSType = GEPLHS->getOperand(i)->getType();
          Type *RHSType = GEPRHS->getOperand(i)->getType();
          // FIXME: Better support for vector of pointers.
          if (LHSType->getPrimitiveSizeInBits() !=
                   RHSType->getPrimitiveSizeInBits() ||
              (GEPLHS->getType()->isVectorTy() &&
               (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
            // Irreconcilable differences.
            NumDifferences = 2;
            break;
          }

          if (NumDifferences++) break;
          DiffOperand = i;
        }

      if (NumDifferences == 0)   // SAME GEP?
        return replaceInstUsesWith(I, // No comparison is needed here.
          ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));

      else if (NumDifferences == 1 && GEPsInBounds) {
        Value *LHSV = GEPLHS->getOperand(DiffOperand);
        Value *RHSV = GEPRHS->getOperand(DiffOperand);
        // Make sure we do a signed comparison here.
        return new ICmpInst(ICmpInst::getSignedPredicate(Cond), LHSV, RHSV);
      }
    }

    // Only lower this if the icmp is the only user of the GEP or if we expect
    // the result to fold to a constant!
    if (GEPsInBounds && (isa<ConstantExpr>(GEPLHS) || GEPLHS->hasOneUse()) &&
        (isa<ConstantExpr>(GEPRHS) || GEPRHS->hasOneUse())) {
      // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2)) ---> (OFFSET1 cmp OFFSET2)
      Value *L = EmitGEPOffset(GEPLHS);
      Value *R = EmitGEPOffset(GEPRHS);
      return new ICmpInst(ICmpInst::getSignedPredicate(Cond), L, R);
    }
  }

  // Try to convert this to an indexed compare by looking through PHIs/casts as
  // a last resort.
  return transformToIndexedCompare(GEPLHS, RHS, Cond, DL);
}

Instruction *InstCombinerImpl::foldAllocaCmp(ICmpInst &ICI,
                                             const AllocaInst *Alloca,
                                             const Value *Other) {
  assert(ICI.isEquality() && "Cannot fold non-equality comparison.");

  // It would be tempting to fold away comparisons between allocas and any
  // pointer not based on that alloca (e.g. an argument). However, even
  // though such pointers cannot alias, they can still compare equal.
  //
  // But LLVM doesn't specify where allocas get their memory, so if the alloca
  // doesn't escape we can argue that it's impossible to guess its value, and we
  // can therefore act as if any such guesses are wrong.
  //
  // The code below checks that the alloca doesn't escape, and that it's only
  // used in a comparison once (the current instruction). The
  // single-comparison-use condition ensures that we're trivially folding all
  // comparisons against the alloca consistently, and avoids the risk of
  // erroneously folding a comparison of the pointer with itself.

  unsigned MaxIter = 32; // Break cycles and bound to constant-time.

  SmallVector<const Use *, 32> Worklist;
  for (const Use &U : Alloca->uses()) {
    if (Worklist.size() >= MaxIter)
      return nullptr;
    Worklist.push_back(&U);
  }

  unsigned NumCmps = 0;
  while (!Worklist.empty()) {
    assert(Worklist.size() <= MaxIter);
    const Use *U = Worklist.pop_back_val();
    const Value *V = U->getUser();
    --MaxIter;

    if (isa<BitCastInst>(V) || isa<GetElementPtrInst>(V) || isa<PHINode>(V) ||
        isa<SelectInst>(V)) {
      // Track the uses.
    } else if (isa<LoadInst>(V)) {
      // Loading from the pointer doesn't escape it.
      continue;
    } else if (const auto *SI = dyn_cast<StoreInst>(V)) {
      // Storing *to* the pointer is fine, but storing the pointer escapes it.
      if (SI->getValueOperand() == U->get())
        return nullptr;
      continue;
    } else if (isa<ICmpInst>(V)) {
      if (NumCmps++)
        return nullptr; // Found more than one cmp.
      continue;
    } else if (const auto *Intrin = dyn_cast<IntrinsicInst>(V)) {
      switch (Intrin->getIntrinsicID()) {
        // These intrinsics don't escape or compare the pointer. Memset is safe
        // because we don't allow ptrtoint. Memcpy and memmove are safe because
        // we don't allow stores, so src cannot point to V.
        case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
        case Intrinsic::memcpy: case Intrinsic::memmove: case Intrinsic::memset:
          continue;
        default:
          return nullptr;
      }
    } else {
      return nullptr;
    }
    for (const Use &U : V->uses()) {
      if (Worklist.size() >= MaxIter)
        return nullptr;
      Worklist.push_back(&U);
    }
  }

  Type *CmpTy = CmpInst::makeCmpResultType(Other->getType());
  return replaceInstUsesWith(
      ICI,
      ConstantInt::get(CmpTy, !CmpInst::isTrueWhenEqual(ICI.getPredicate())));
}

/// Fold "icmp pred (X+C), X".
Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C,
                                                  ICmpInst::Predicate Pred) {
  // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
  // so the values can never be equal.  Similarly for all other "or equals"
  // operators.
  assert(!!C && "C should not be zero!");

  // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
  // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
  // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
  if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
    Constant *R = ConstantInt::get(X->getType(),
                                   APInt::getMaxValue(C.getBitWidth()) - C);
    return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
  }

  // (X+1) >u X        --> X <u (0-1)        --> X != 255
  // (X+2) >u X        --> X <u (0-2)        --> X <u 254
  // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
    return new ICmpInst(ICmpInst::ICMP_ULT, X,
                        ConstantInt::get(X->getType(), -C));

  APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());

  // (X+ 1) <s X       --> X >s (MAXSINT-1)          --> X == 127
  // (X+ 2) <s X       --> X >s (MAXSINT-2)          --> X >s 125
  // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)    --> X >s 0
  // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)    --> X >s -1
  // (X+ -2) <s X      --> X >s (MAXSINT- -2)        --> X >s 126
  // (X+ -1) <s X      --> X >s (MAXSINT- -1)        --> X != 127
  if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
    return new ICmpInst(ICmpInst::ICMP_SGT, X,
                        ConstantInt::get(X->getType(), SMax - C));

  // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
  // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
  // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
  // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
  // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
  // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128

  assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
  return new ICmpInst(ICmpInst::ICMP_SLT, X,
                      ConstantInt::get(X->getType(), SMax - (C - 1)));
}

1112 /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
1113 /// (icmp eq/ne A, Log2(AP2/AP1)) ->
1114 /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
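/// For example, "icmp eq (lshr i32 8, %A), 2" folds to "icmp eq i32 %A, 2",
/// since Log2(8) - Log2(2) == 2.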
Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A,
                                                     const APInt &AP1,
                                                     const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  bool IsAShr = isa<AShrOperator>(I.getOperand(0));
  if (IsAShr) {
    if (AP2.isAllOnesValue())
      return nullptr;
    if (AP2.isNegative() != AP1.isNegative())
      return nullptr;
    if (AP2.sgt(AP1))
      return nullptr;
  }

  if (!AP1)
    // 'A' must be large enough to shift out the highest set bit.
    return getICmp(I.ICMP_UGT, A,
                   ConstantInt::get(A->getType(), AP2.logBase2()));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  int Shift;
  if (IsAShr && AP1.isNegative())
    Shift = AP1.countLeadingOnes() - AP2.countLeadingOnes();
  else
    Shift = AP1.countLeadingZeros() - AP2.countLeadingZeros();

  if (Shift > 0) {
    if (IsAShr && AP1 == AP2.ashr(Shift)) {
      // There are multiple solutions if we are comparing against -1 and the LHS
      // of the ashr is not a power of two.
      if (AP1.isAllOnesValue() && !AP2.isPowerOf2())
        return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    } else if (AP1 == AP2.lshr(Shift)) {
      return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
    }
  }

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
/// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
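/// For example, "icmp eq (shl i32 2, %A), 16" folds to "icmp eq i32 %A, 3",
/// since TrailingZeros(16) - TrailingZeros(2) == 3.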
Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A,
                                                     const APInt &AP1,
                                                     const APInt &AP2) {
  assert(I.isEquality() && "Cannot fold icmp gt/lt");

  auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
    if (I.getPredicate() == I.ICMP_NE)
      Pred = CmpInst::getInversePredicate(Pred);
    return new ICmpInst(Pred, LHS, RHS);
  };

  // Don't bother doing any work for cases which InstSimplify handles.
  if (AP2.isNullValue())
    return nullptr;

  unsigned AP2TrailingZeros = AP2.countTrailingZeros();

  if (!AP1 && AP2TrailingZeros != 0)
    return getICmp(
        I.ICMP_UGE, A,
        ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));

  if (AP1 == AP2)
    return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));

  // Get the distance between the lowest bits that are set.
  int Shift = AP1.countTrailingZeros() - AP2TrailingZeros;

  if (Shift > 0 && AP2.shl(Shift) == AP1)
    return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));

  // Shifting const2 will never be equal to const1.
  // FIXME: This should always be handled by InstSimplify?
  auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
  return replaceInstUsesWith(I, TorF);
}

/// The caller has matched a pattern of the form:
///   I = icmp ugt (add (add A, B), CI2), CI1
/// If this is of the form:
///   sum = a + b
///   if (sum+128 >u 255)
/// Then replace it with llvm.sadd.with.overflow.i8.
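///
/// Illustrative IR (assuming %a and %b are i32 values sign-extended from i8):
///   %sum = add i32 %a, %b
///   %off = add i32 %sum, 128
///   %cmp = icmp ugt i32 %off, 255
/// becomes
///   %s   = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 %ta, i8 %tb)
///   %cmp = extractvalue { i8, i1 } %s, 1
/// where %ta and %tb are truncations of %a and %b to i8.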
///
static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
                                          ConstantInt *CI2, ConstantInt *CI1,
                                          InstCombinerImpl &IC) {
  // The transformation we're trying to do here is to transform this into an
  // llvm.sadd.with.overflow.  To do this, we have to replace the original add
  // with a narrower add, and discard the add-with-constant that is part of the
  // range check (if we can't eliminate it, this isn't profitable).

  // In order to eliminate the add-with-constant, the compare must be its only
  // use.
  Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
  if (!AddWithCst->hasOneUse())
    return nullptr;

  // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
  if (!CI2->getValue().isPowerOf2())
    return nullptr;
  unsigned NewWidth = CI2->getValue().countTrailingZeros();
  if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
    return nullptr;

  // The width of the new add formed is 1 more than the bias.
  ++NewWidth;

  // Check to see that CI1 is an all-ones value with NewWidth bits.
  if (CI1->getBitWidth() == NewWidth ||
      CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
    return nullptr;

  // This is only really a signed overflow check if the inputs have been
  // sign-extended; check for that condition. For example, if CI2 is 2^31 and
  // the operands of the add are 64 bits wide, we need at least 33 sign bits.
  unsigned NeededSignBits = CI1->getBitWidth() - NewWidth + 1;
  if (IC.ComputeNumSignBits(A, 0, &I) < NeededSignBits ||
      IC.ComputeNumSignBits(B, 0, &I) < NeededSignBits)
    return nullptr;

  // In order to replace the original add with a narrower
  // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
  // and truncates that discard the high bits of the add.  Verify that this is
  // the case.
  Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
  for (User *U : OrigAdd->users()) {
    if (U == AddWithCst)
      continue;

    // Only accept truncates for now.  We would really like a nice recursive
    // predicate like SimplifyDemandedBits, but one that goes down the use-def
    // chain to see which bits of a value are actually demanded.  If the
    // original add had another add which was then immediately truncated, we
    // could still do the transformation.
1269     TruncInst *TI = dyn_cast<TruncInst>(U);
1270     if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1271       return nullptr;
1272   }
1273 
1274   // If the pattern matches, truncate the inputs to the narrower type and
1275   // use the sadd_with_overflow intrinsic to efficiently compute both the
1276   // result and the overflow bit.
1277   Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1278   Function *F = Intrinsic::getDeclaration(
1279       I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1280 
1281   InstCombiner::BuilderTy &Builder = IC.Builder;
1282 
1283   // Put the new code above the original add, in case there are any uses of the
1284   // add between the add and the compare.
1285   Builder.SetInsertPoint(OrigAdd);
1286 
1287   Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
1288   Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
1289   CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
1290   Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
1291   Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
1292 
  // The original wide add computed the same value as the narrow add, zero
  // extended to the wider type.  Replace it with the result computed by the
  // intrinsic; all remaining uses are truncates, so only the low bits are
  // demanded.
1295   IC.replaceInstUsesWith(*OrigAdd, ZExt);
1296   IC.eraseInstFromFunction(*OrigAdd);
1297 
1298   // The original icmp gets replaced with the overflow value.
1299   return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1300 }
1301 
1302 /// If we have:
1303 ///   icmp eq/ne (urem/srem %x, %y), 0
/// and %y is known to be a power of two, we can replace this with a bit test:
1305 ///   icmp eq/ne (and %x, (add %y, -1)), 0
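///
/// For example, with a constant power-of-two divisor the mask folds away:
///   icmp eq (urem i32 %x, 16), 0  -->  icmp eq (and i32 %x, 15), 0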
1306 Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
1307   // This fold is only valid for equality predicates.
1308   if (!I.isEquality())
1309     return nullptr;
1310   ICmpInst::Predicate Pred;
1311   Value *X, *Y, *Zero;
1312   if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
1313                         m_CombineAnd(m_Zero(), m_Value(Zero)))))
1314     return nullptr;
1315   if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, 0, &I))
1316     return nullptr;
  // This may increase instruction count since we don't enforce that Y is a
  // constant.
1318   Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
1319   Value *Masked = Builder.CreateAnd(X, Mask);
1320   return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
1321 }
1322 
1323 /// Fold equality-comparison between zero and any (maybe truncated) right-shift
1324 /// by one-less-than-bitwidth into a sign test on the original value.
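///
/// For example, on i32 (the shift may be lshr or ashr):
///   icmp eq (lshr i32 %x, 31), 0  -->  icmp sge i32 %x, 0
///   icmp ne (ashr i32 %x, 31), 0  -->  icmp slt i32 %x, 0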
1325 Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) {
1326   Instruction *Val;
1327   ICmpInst::Predicate Pred;
1328   if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
1329     return nullptr;
1330 
1331   Value *X;
1332   Type *XTy;
1333 
1334   Constant *C;
1335   if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
1336     XTy = X->getType();
1337     unsigned XBitWidth = XTy->getScalarSizeInBits();
1338     if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1339                                      APInt(XBitWidth, XBitWidth - 1))))
1340       return nullptr;
1341   } else if (isa<BinaryOperator>(Val) &&
1342              (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
1343                   cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
1344                   /*AnalyzeForSignBitExtraction=*/true))) {
1345     XTy = X->getType();
1346   } else
1347     return nullptr;
1348 
1349   return ICmpInst::Create(Instruction::ICmp,
1350                           Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
1351                                                     : ICmpInst::ICMP_SLT,
1352                           X, ConstantInt::getNullValue(XTy));
1353 }
1354 
// Handle icmp pred X, 0
1356 Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
1357   CmpInst::Predicate Pred = Cmp.getPredicate();
1358   if (!match(Cmp.getOperand(1), m_Zero()))
1359     return nullptr;
1360 
  // (icmp sgt smin(PosA, B), 0) -> (icmp sgt B, 0)
1362   if (Pred == ICmpInst::ICMP_SGT) {
1363     Value *A, *B;
1364     SelectPatternResult SPR = matchSelectPattern(Cmp.getOperand(0), A, B);
1365     if (SPR.Flavor == SPF_SMIN) {
1366       if (isKnownPositive(A, DL, 0, &AC, &Cmp, &DT))
1367         return new ICmpInst(Pred, B, Cmp.getOperand(1));
1368       if (isKnownPositive(B, DL, 0, &AC, &Cmp, &DT))
1369         return new ICmpInst(Pred, A, Cmp.getOperand(1));
1370     }
1371   }
1372 
1373   if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
1374     return New;
1375 
1376   // Given:
1377   //   icmp eq/ne (urem %x, %y), 0
1378   // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
1379   //   icmp eq/ne %x, 0
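  // This holds because such an %x is either 0 or a single power of two, and
  // a %y with at least 2 bits set can never divide a power of two evenly.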
1380   Value *X, *Y;
1381   if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
1382       ICmpInst::isEquality(Pred)) {
1383     KnownBits XKnown = computeKnownBits(X, 0, &Cmp);
1384     KnownBits YKnown = computeKnownBits(Y, 0, &Cmp);
1385     if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
1386       return new ICmpInst(Pred, X, Cmp.getOperand(1));
1387   }
1388 
1389   return nullptr;
1390 }
1391 
1392 /// Fold icmp Pred X, C.
/// TODO: This code structure does not make sense. The add-with-overflow fold
/// should be moved to some other helper and extended as noted below (it is
/// also possible that this code has been made unnecessary: do we canonicalize
/// IR to overflow/saturating intrinsics or not?).
1397 Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) {
  // Match the following pattern, which is a common idiom when writing
  // overflow-safe integer arithmetic functions. The source performs an
  // addition in a wider type and explicitly checks for overflow using
  // comparisons against INT_MIN and INT_MAX. Simplify by using the
  // sadd_with_overflow intrinsic.
1402   //
1403   // TODO: This could probably be generalized to handle other overflow-safe
1404   // operations if we worked out the formulas to compute the appropriate magic
1405   // constants.
1406   //
1407   // sum = a + b
1408   // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
1409   CmpInst::Predicate Pred = Cmp.getPredicate();
1410   Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1411   Value *A, *B;
1412   ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1413   if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
1414       match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1415     if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
1416       return Res;
1417 
1418   // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...).
1419   Constant *C = dyn_cast<Constant>(Op1);
1420   if (!C)
1421     return nullptr;
1422 
1423   if (auto *Phi = dyn_cast<PHINode>(Op0))
1424     if (all_of(Phi->operands(), [](Value *V) { return isa<Constant>(V); })) {
1425       Type *Ty = Cmp.getType();
1426       Builder.SetInsertPoint(Phi);
      PHINode *NewPhi = Builder.CreatePHI(Ty, Phi->getNumOperands());
1429       for (BasicBlock *Predecessor : predecessors(Phi->getParent())) {
1430         auto *Input =
1431             cast<Constant>(Phi->getIncomingValueForBlock(Predecessor));
1432         auto *BoolInput = ConstantExpr::getCompare(Pred, Input, C);
1433         NewPhi->addIncoming(BoolInput, Predecessor);
1434       }
1435       NewPhi->takeName(&Cmp);
1436       return replaceInstUsesWith(Cmp, NewPhi);
1437     }
1438 
1439   return nullptr;
1440 }
1441 
1442 /// Canonicalize icmp instructions based on dominating conditions.
1443 Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
1444   // This is a cheap/incomplete check for dominance - just match a single
1445   // predecessor with a conditional branch.
1446   BasicBlock *CmpBB = Cmp.getParent();
1447   BasicBlock *DomBB = CmpBB->getSinglePredecessor();
1448   if (!DomBB)
1449     return nullptr;
1450 
1451   Value *DomCond;
1452   BasicBlock *TrueBB, *FalseBB;
1453   if (!match(DomBB->getTerminator(), m_Br(m_Value(DomCond), TrueBB, FalseBB)))
1454     return nullptr;
1455 
1456   assert((TrueBB == CmpBB || FalseBB == CmpBB) &&
1457          "Predecessor block does not point to successor?");
1458 
1459   // The branch should get simplified. Don't bother simplifying this condition.
1460   if (TrueBB == FalseBB)
1461     return nullptr;
1462 
1463   // Try to simplify this compare to T/F based on the dominating condition.
1464   Optional<bool> Imp = isImpliedCondition(DomCond, &Cmp, DL, TrueBB == CmpBB);
1465   if (Imp)
1466     return replaceInstUsesWith(Cmp, ConstantInt::get(Cmp.getType(), *Imp));
1467 
1468   CmpInst::Predicate Pred = Cmp.getPredicate();
1469   Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
1470   ICmpInst::Predicate DomPred;
1471   const APInt *C, *DomC;
1472   if (match(DomCond, m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))) &&
1473       match(Y, m_APInt(C))) {
1474     // We have 2 compares of a variable with constants. Calculate the constant
1475     // ranges of those compares to see if we can transform the 2nd compare:
1476     // DomBB:
1477     //   DomCond = icmp DomPred X, DomC
1478     //   br DomCond, CmpBB, FalseBB
1479     // CmpBB:
1480     //   Cmp = icmp Pred X, C
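    // For example (illustrative constants), if DomCond is (icmp ult X, 2)
    // and CmpBB is its true successor, then for Cmp (icmp ugt X, 0) the
    // intersection of [0, 2) and [1, UINT_MAX] is the single value 1, so Cmp
    // becomes (icmp eq X, 1).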
1481     ConstantRange CR = ConstantRange::makeAllowedICmpRegion(Pred, *C);
1482     ConstantRange DominatingCR =
1483         (CmpBB == TrueBB) ? ConstantRange::makeExactICmpRegion(DomPred, *DomC)
1484                           : ConstantRange::makeExactICmpRegion(
1485                                 CmpInst::getInversePredicate(DomPred), *DomC);
1486     ConstantRange Intersection = DominatingCR.intersectWith(CR);
1487     ConstantRange Difference = DominatingCR.difference(CR);
1488     if (Intersection.isEmptySet())
1489       return replaceInstUsesWith(Cmp, Builder.getFalse());
1490     if (Difference.isEmptySet())
1491       return replaceInstUsesWith(Cmp, Builder.getTrue());
1492 
    // Canonicalizing a sign bit comparison that gets used in a branch
    // pessimizes codegen by generating a branch-on-zero instruction instead
    // of a test-and-branch. So we avoid canonicalizing in such situations
    // because a test-and-branch instruction has better branch displacement
    // than a compare-and-branch instruction.
1498     bool UnusedBit;
1499     bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
1500     if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
1501       return nullptr;
1502 
1503     if (const APInt *EqC = Intersection.getSingleElement())
1504       return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
1505     if (const APInt *NeC = Difference.getSingleElement())
1506       return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
1507   }
1508 
1509   return nullptr;
1510 }
1511 
/// Fold icmp (trunc X), C.
1513 Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
1514                                                      TruncInst *Trunc,
1515                                                      const APInt &C) {
1516   ICmpInst::Predicate Pred = Cmp.getPredicate();
1517   Value *X = Trunc->getOperand(0);
1518   if (C.isOneValue() && C.getBitWidth() > 1) {
    // icmp slt trunc(signum(V)), 1 --> icmp slt V, 1
1520     Value *V = nullptr;
1521     if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1522       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1523                           ConstantInt::get(V->getType(), 1));
1524   }
1525 
1526   if (Cmp.isEquality() && Trunc->hasOneUse()) {
1527     // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1528     // of the high bits truncated out of x are known.
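    // For example (illustrative constants): if the 24 truncated-out bits of
    // i32 %x are all known and equal 0x123456, then
    //   icmp eq (trunc i32 %x to i8), 42  -->  icmp eq i32 %x, 0x1234562A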
1529     unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1530              SrcBits = X->getType()->getScalarSizeInBits();
1531     KnownBits Known = computeKnownBits(X, 0, &Cmp);
1532 
1533     // If all the high bits are known, we can do this xform.
1534     if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
1535       // Pull in the high bits from known-ones set.
1536       APInt NewRHS = C.zext(SrcBits);
1537       NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1538       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), NewRHS));
1539     }
1540   }
1541 
1542   return nullptr;
1543 }
1544 
1545 /// Fold icmp (xor X, Y), C.
1546 Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
1547                                                    BinaryOperator *Xor,
1548                                                    const APInt &C) {
1549   Value *X = Xor->getOperand(0);
1550   Value *Y = Xor->getOperand(1);
1551   const APInt *XorC;
1552   if (!match(Y, m_APInt(XorC)))
1553     return nullptr;
1554 
  // If this is a comparison that tests the sign bit (X < 0) or (X > -1),
  // fold the xor.
1557   ICmpInst::Predicate Pred = Cmp.getPredicate();
1558   bool TrueIfSigned = false;
1559   if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1560 
1561     // If the sign bit of the XorCst is not set, there is no change to
1562     // the operation, just stop using the Xor.
1563     if (!XorC->isNegative())
1564       return replaceOperand(Cmp, 0, X);
1565 
1566     // Emit the opposite comparison.
1567     if (TrueIfSigned)
1568       return new ICmpInst(ICmpInst::ICMP_SGT, X,
1569                           ConstantInt::getAllOnesValue(X->getType()));
1570     else
1571       return new ICmpInst(ICmpInst::ICMP_SLT, X,
1572                           ConstantInt::getNullValue(X->getType()));
1573   }
1574 
1575   if (Xor->hasOneUse()) {
    // (icmp u/s (xor X, SignMask), C) -> (icmp s/u X, (xor C, SignMask))
1577     if (!Cmp.isEquality() && XorC->isSignMask()) {
1578       Pred = Cmp.getFlippedSignednessPredicate();
1579       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1580     }
1581 
    // (icmp u/s (xor X, ~SignMask), C) -> (icmp s/u X, (xor C, ~SignMask))
1583     if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1584       Pred = Cmp.getFlippedSignednessPredicate();
1585       Pred = Cmp.getSwappedPredicate(Pred);
1586       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1587     }
1588   }
1589 
1590   // Mask constant magic can eliminate an 'xor' with unsigned compares.
1591   if (Pred == ICmpInst::ICMP_UGT) {
1592     // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1593     if (*XorC == ~C && (C + 1).isPowerOf2())
1594       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1595     // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1596     if (*XorC == C && (C + 1).isPowerOf2())
1597       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1598   }
1599   if (Pred == ICmpInst::ICMP_ULT) {
1600     // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1601     if (*XorC == -C && C.isPowerOf2())
1602       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1603                           ConstantInt::get(X->getType(), ~C));
1604     // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1605     if (*XorC == C && (-C).isPowerOf2())
1606       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1607                           ConstantInt::get(X->getType(), ~C));
1608   }
1609   return nullptr;
1610 }
1611 
1612 /// Fold icmp (and (sh X, Y), C2), C1.
1613 Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
1614                                                 BinaryOperator *And,
1615                                                 const APInt &C1,
1616                                                 const APInt &C2) {
1617   BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1618   if (!Shift || !Shift->isShift())
1619     return nullptr;
1620 
  // If this is: (X >> C3) & C2 != C1 (where the shift may be any shift
  // opcode and the compare may be any predicate), turn it into
  // (X & (C2 << C3)) != (C1 << C3). This happens a LOT in code produced by
  // the clang front-end for bitfield access.
1624   // This seemingly simple opportunity to fold away a shift turns out to be
1625   // rather complicated. See PR17827 for details.
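  // For example (illustrative constants, unsigned case):
  //   ((X >> 3) & 0xF) == 0x5  -->  (X & 0x78) == 0x28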
1626   unsigned ShiftOpcode = Shift->getOpcode();
1627   bool IsShl = ShiftOpcode == Instruction::Shl;
1628   const APInt *C3;
1629   if (match(Shift->getOperand(1), m_APInt(C3))) {
1630     APInt NewAndCst, NewCmpCst;
1631     bool AnyCmpCstBitsShiftedOut;
1632     if (ShiftOpcode == Instruction::Shl) {
1633       // For a left shift, we can fold if the comparison is not signed. We can
1634       // also fold a signed comparison if the mask value and comparison value
1635       // are not negative. These constraints may not be obvious, but we can
1636       // prove that they are correct using an SMT solver.
1637       if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative()))
1638         return nullptr;
1639 
1640       NewCmpCst = C1.lshr(*C3);
1641       NewAndCst = C2.lshr(*C3);
1642       AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1;
1643     } else if (ShiftOpcode == Instruction::LShr) {
1644       // For a logical right shift, we can fold if the comparison is not signed.
1645       // We can also fold a signed comparison if the shifted mask value and the
1646       // shifted comparison value are not negative. These constraints may not be
1647       // obvious, but we can prove that they are correct using an SMT solver.
1648       NewCmpCst = C1.shl(*C3);
1649       NewAndCst = C2.shl(*C3);
1650       AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
1651       if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
1652         return nullptr;
1653     } else {
1654       // For an arithmetic shift, check that both constants don't use (in a
1655       // signed sense) the top bits being shifted out.
1656       assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
1657       NewCmpCst = C1.shl(*C3);
1658       NewAndCst = C2.shl(*C3);
1659       AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
1660       if (NewAndCst.ashr(*C3) != C2)
1661         return nullptr;
1662     }
1663 
1664     if (AnyCmpCstBitsShiftedOut) {
1665       // If we shifted bits out, the fold is not going to work out. As a
1666       // special case, check to see if this means that the result is always
1667       // true or false now.
1668       if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1669         return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1670       if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1671         return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1672     } else {
1673       Value *NewAnd = Builder.CreateAnd(
1674           Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
1675       return new ICmpInst(Cmp.getPredicate(),
1676           NewAnd, ConstantInt::get(And->getType(), NewCmpCst));
1677     }
1678   }
1679 
1680   // Turn ((X >> Y) & C2) == 0  into  (X & (C2 << Y)) == 0.  The latter is
1681   // preferable because it allows the C2 << Y expression to be hoisted out of a
1682   // loop if Y is invariant and X is not.
1683   if (Shift->hasOneUse() && C1.isNullValue() && Cmp.isEquality() &&
1684       !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
    // Compute C2 << Y (or C2 >> Y when the original shift is a shl).
1686     Value *NewShift =
1687         IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1688               : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1689 
1690     // Compute X & (C2 << Y).
1691     Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1692     return replaceOperand(Cmp, 0, NewAnd);
1693   }
1694 
1695   return nullptr;
1696 }
1697 
1698 /// Fold icmp (and X, C2), C1.
1699 Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
1700                                                      BinaryOperator *And,
1701                                                      const APInt &C1) {
1702   bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1703 
1704   // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1705   // TODO: We canonicalize to the longer form for scalars because we have
1706   // better analysis/folds for icmp, and codegen may be better with icmp.
1707   if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isNullValue() &&
1708       match(And->getOperand(1), m_One()))
1709     return new TruncInst(And->getOperand(0), Cmp.getType());
1710 
1711   const APInt *C2;
1712   Value *X;
1713   if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1714     return nullptr;
1715 
  // Don't perform the following transforms if the AND has multiple uses.
1717   if (!And->hasOneUse())
1718     return nullptr;
1719 
1720   if (Cmp.isEquality() && C1.isNullValue()) {
1721     // Restrict this fold to single-use 'and' (PR10267).
1722     // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1723     if (C2->isSignMask()) {
1724       Constant *Zero = Constant::getNullValue(X->getType());
1725       auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1726       return new ICmpInst(NewPred, X, Zero);
1727     }
1728 
    // Restrict this fold to single-use 'and' (PR10267).
    // ((%x & C) == 0) --> %x u< (-C)  iff (-C) is a power of two.
1731     if ((~(*C2) + 1).isPowerOf2()) {
1732       Constant *NegBOC =
1733           ConstantExpr::getNeg(cast<Constant>(And->getOperand(1)));
1734       auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1735       return new ICmpInst(NewPred, X, NegBOC);
1736     }
1737   }
1738 
1739   // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1740   // the input width without changing the value produced, eliminate the cast:
1741   //
1742   // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1743   //
1744   // We can do this transformation if the constants do not have their sign bits
1745   // set or if it is an equality comparison. Extending a relational comparison
1746   // when we're checking the sign bit would not work.
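  // For example (illustrative constants):
  //   icmp ult (and (trunc i32 %w to i8), 15), 4
  //     -->  icmp ult (and i32 %w, 15), 4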
1747   Value *W;
1748   if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1749       (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1750     // TODO: Is this a good transform for vectors? Wider types may reduce
1751     // throughput. Should this transform be limited (even for scalars) by using
1752     // shouldChangeType()?
1753     if (!Cmp.getType()->isVectorTy()) {
1754       Type *WideType = W->getType();
1755       unsigned WideScalarBits = WideType->getScalarSizeInBits();
1756       Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1757       Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1758       Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1759       return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1760     }
1761   }
1762 
1763   if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1764     return I;
1765 
  // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
  // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1768   //
1769   // iff pred isn't signed
1770   if (!Cmp.isSigned() && C1.isNullValue() && And->getOperand(0)->hasOneUse() &&
1771       match(And->getOperand(1), m_One())) {
1772     Constant *One = cast<Constant>(And->getOperand(1));
1773     Value *Or = And->getOperand(0);
1774     Value *A, *B, *LShr;
1775     if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1776         match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1777       unsigned UsesRemoved = 0;
1778       if (And->hasOneUse())
1779         ++UsesRemoved;
1780       if (Or->hasOneUse())
1781         ++UsesRemoved;
1782       if (LShr->hasOneUse())
1783         ++UsesRemoved;
1784 
1785       // Compute A & ((1 << B) | 1)
1786       Value *NewOr = nullptr;
1787       if (auto *C = dyn_cast<Constant>(B)) {
1788         if (UsesRemoved >= 1)
1789           NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
1790       } else {
1791         if (UsesRemoved >= 3)
1792           NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1793                                                      /*HasNUW=*/true),
1794                                    One, Or->getName());
1795       }
1796       if (NewOr) {
1797         Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1798         return replaceOperand(Cmp, 0, NewAnd);
1799       }
1800     }
1801   }
1802 
1803   return nullptr;
1804 }
1805 
1806 /// Fold icmp (and X, Y), C.
1807 Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
1808                                                    BinaryOperator *And,
1809                                                    const APInt &C) {
1810   if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1811     return I;
1812 
1813   // TODO: These all require that Y is constant too, so refactor with the above.
1814 
1815   // Try to optimize things like "A[i] & 42 == 0" to index computations.
1816   Value *X = And->getOperand(0);
1817   Value *Y = And->getOperand(1);
1818   if (auto *LI = dyn_cast<LoadInst>(X))
1819     if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1820       if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1821         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
1822             !LI->isVolatile() && isa<ConstantInt>(Y)) {
1823           ConstantInt *C2 = cast<ConstantInt>(Y);
1824           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, Cmp, C2))
1825             return Res;
1826         }
1827 
1828   if (!Cmp.isEquality())
1829     return nullptr;
1830 
  // X & -C == -C -> X >u ~C
  // X & -C != -C -> X <=u ~C
  //   iff C is a power of 2
1834   if (Cmp.getOperand(1) == Y && (-C).isPowerOf2()) {
1835     auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT
1836                                                           : CmpInst::ICMP_ULE;
1837     return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1838   }
1839 
1840   // (X & C2) == 0 -> (trunc X) >= 0
1841   // (X & C2) != 0 -> (trunc X) <  0
1842   //   iff C2 is a power of 2 and it masks the sign bit of a legal integer type.
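  // For example, on targets where i8 is a legal integer type:
  //   icmp eq (and i32 %x, 128), 0  -->  icmp sge (trunc i32 %x to i8), 0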
1843   const APInt *C2;
1844   if (And->hasOneUse() && C.isNullValue() && match(Y, m_APInt(C2))) {
1845     int32_t ExactLogBase2 = C2->exactLogBase2();
1846     if (ExactLogBase2 != -1 && DL.isLegalInteger(ExactLogBase2 + 1)) {
1847       Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
1848       if (auto *AndVTy = dyn_cast<VectorType>(And->getType()))
1849         NTy = VectorType::get(NTy, AndVTy->getElementCount());
1850       Value *Trunc = Builder.CreateTrunc(X, NTy);
1851       auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
1852                                                             : CmpInst::ICMP_SLT;
1853       return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
1854     }
1855   }
1856 
1857   return nullptr;
1858 }
1859 
1860 /// Fold icmp (or X, Y), C.
1861 Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp,
1862                                                   BinaryOperator *Or,
1863                                                   const APInt &C) {
1864   ICmpInst::Predicate Pred = Cmp.getPredicate();
1865   if (C.isOneValue()) {
    // icmp slt signum(V), 1 --> icmp slt V, 1
1867     Value *V = nullptr;
1868     if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
1869       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1870                           ConstantInt::get(V->getType(), 1));
1871   }
1872 
1873   Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
1874   const APInt *MaskC;
1875   if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) {
1876     if (*MaskC == C && (C + 1).isPowerOf2()) {
1877       // X | C == C --> X <=u C
1878       // X | C != C --> X  >u C
1879       //   iff C+1 is a power of 2 (C is a bitmask of the low bits)
1880       Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
1881       return new ICmpInst(Pred, OrOp0, OrOp1);
1882     }
1883 
1884     // More general: canonicalize 'equality with set bits mask' to
1885     // 'equality with clear bits mask'.
1886     // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC
1887     // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC
1888     if (Or->hasOneUse()) {
1889       Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC));
1890       Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC));
1891       return new ICmpInst(Pred, And, NewC);
1892     }
1893   }
1894 
1895   if (!Cmp.isEquality() || !C.isNullValue() || !Or->hasOneUse())
1896     return nullptr;
1897 
1898   Value *P, *Q;
1899   if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
1900     // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
1901     // -> and (icmp eq P, null), (icmp eq Q, null).
1902     Value *CmpP =
1903         Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
1904     Value *CmpQ =
1905         Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
1906     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1907     return BinaryOperator::Create(BOpc, CmpP, CmpQ);
1908   }
1909 
1910   // Are we using xors to bitwise check for a pair of (in)equalities? Convert to
1911   // a shorter form that has more potential to be folded even further.
1912   Value *X1, *X2, *X3, *X4;
1913   if (match(OrOp0, m_OneUse(m_Xor(m_Value(X1), m_Value(X2)))) &&
1914       match(OrOp1, m_OneUse(m_Xor(m_Value(X3), m_Value(X4))))) {
    // ((X1 ^ X2) | (X3 ^ X4)) == 0 --> (X1 == X2) && (X3 == X4)
    // ((X1 ^ X2) | (X3 ^ X4)) != 0 --> (X1 != X2) || (X3 != X4)
1917     Value *Cmp12 = Builder.CreateICmp(Pred, X1, X2);
1918     Value *Cmp34 = Builder.CreateICmp(Pred, X3, X4);
1919     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
1920     return BinaryOperator::Create(BOpc, Cmp12, Cmp34);
1921   }
1922 
1923   return nullptr;
1924 }
1925 
1926 /// Fold icmp (mul X, Y), C.
1927 Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp,
1928                                                    BinaryOperator *Mul,
1929                                                    const APInt &C) {
1930   const APInt *MulC;
1931   if (!match(Mul->getOperand(1), m_APInt(MulC)))
1932     return nullptr;
1933 
1934   // If this is a test of the sign bit and the multiply is sign-preserving with
1935   // a constant operand, use the multiply LHS operand instead.
1936   ICmpInst::Predicate Pred = Cmp.getPredicate();
1937   if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
1938     if (MulC->isNegative())
1939       Pred = ICmpInst::getSwappedPredicate(Pred);
1940     return new ICmpInst(Pred, Mul->getOperand(0),
1941                         Constant::getNullValue(Mul->getType()));
1942   }
1943 
1944   // If the multiply does not wrap, try to divide the compare constant by the
1945   // multiplication factor.
1946   if (Cmp.isEquality() && !MulC->isNullValue()) {
1947     // (mul nsw X, MulC) == C --> X == C /s MulC
1948     if (Mul->hasNoSignedWrap() && C.srem(*MulC).isNullValue()) {
1949       Constant *NewC = ConstantInt::get(Mul->getType(), C.sdiv(*MulC));
1950       return new ICmpInst(Pred, Mul->getOperand(0), NewC);
1951     }
1952     // (mul nuw X, MulC) == C --> X == C /u MulC
1953     if (Mul->hasNoUnsignedWrap() && C.urem(*MulC).isNullValue()) {
1954       Constant *NewC = ConstantInt::get(Mul->getType(), C.udiv(*MulC));
1955       return new ICmpInst(Pred, Mul->getOperand(0), NewC);
1956     }
1957   }
1958 
1959   return nullptr;
1960 }
1961 
1962 /// Fold icmp (shl 1, Y), C.
1963 static Instruction *foldICmpShlOne(ICmpInst &Cmp, Instruction *Shl,
1964                                    const APInt &C) {
1965   Value *Y;
1966   if (!match(Shl, m_Shl(m_One(), m_Value(Y))))
1967     return nullptr;
1968 
1969   Type *ShiftType = Shl->getType();
1970   unsigned TypeBits = C.getBitWidth();
1971   bool CIsPowerOf2 = C.isPowerOf2();
1972   ICmpInst::Predicate Pred = Cmp.getPredicate();
1973   if (Cmp.isUnsigned()) {
1974     // (1 << Y) pred C -> Y pred Log2(C)
1975     if (!CIsPowerOf2) {
1976       // (1 << Y) <  30 -> Y <= 4
1977       // (1 << Y) <= 30 -> Y <= 4
1978       // (1 << Y) >= 30 -> Y >  4
1979       // (1 << Y) >  30 -> Y >  4
1980       if (Pred == ICmpInst::ICMP_ULT)
1981         Pred = ICmpInst::ICMP_ULE;
1982       else if (Pred == ICmpInst::ICMP_UGE)
1983         Pred = ICmpInst::ICMP_UGT;
1984     }
1985 
1986     // (1 << Y) >= 2147483648 -> Y >= 31 -> Y == 31
1987     // (1 << Y) <  2147483648 -> Y <  31 -> Y != 31
1988     unsigned CLog2 = C.logBase2();
1989     if (CLog2 == TypeBits - 1) {
1990       if (Pred == ICmpInst::ICMP_UGE)
1991         Pred = ICmpInst::ICMP_EQ;
1992       else if (Pred == ICmpInst::ICMP_ULT)
1993         Pred = ICmpInst::ICMP_NE;
1994     }
1995     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
1996   } else if (Cmp.isSigned()) {
1997     Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
1998     if (C.isAllOnesValue()) {
1999       // (1 << Y) <= -1 -> Y == 31
2000       if (Pred == ICmpInst::ICMP_SLE)
2001         return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2002 
2003       // (1 << Y) >  -1 -> Y != 31
2004       if (Pred == ICmpInst::ICMP_SGT)
2005         return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
    } else if (C.isNullValue()) {
2007       // (1 << Y) <  0 -> Y == 31
2008       // (1 << Y) <= 0 -> Y == 31
2009       if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
2010         return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2011 
2012       // (1 << Y) >= 0 -> Y != 31
2013       // (1 << Y) >  0 -> Y != 31
2014       if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE)
2015         return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2016     }
2017   } else if (Cmp.isEquality() && CIsPowerOf2) {
2018     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, C.logBase2()));
2019   }
2020 
2021   return nullptr;
2022 }
2023 
2024 /// Fold icmp (shl X, Y), C.
2025 Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
2026                                                    BinaryOperator *Shl,
2027                                                    const APInt &C) {
2028   const APInt *ShiftVal;
2029   if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2030     return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2031 
2032   const APInt *ShiftAmt;
2033   if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2034     return foldICmpShlOne(Cmp, Shl, C);
2035 
2036   // Check that the shift amount is in range. If not, don't perform undefined
2037   // shifts. When the shift is visited, it will be simplified.
2038   unsigned TypeBits = C.getBitWidth();
2039   if (ShiftAmt->uge(TypeBits))
2040     return nullptr;
2041 
2042   ICmpInst::Predicate Pred = Cmp.getPredicate();
2043   Value *X = Shl->getOperand(0);
2044   Type *ShType = Shl->getType();
2045 
2046   // NSW guarantees that we are only shifting out sign bits from the high bits,
2047   // so we can ASHR the compare constant without needing a mask and eliminate
2048   // the shift.
2049   if (Shl->hasNoSignedWrap()) {
2050     if (Pred == ICmpInst::ICMP_SGT) {
2051       // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2052       APInt ShiftedC = C.ashr(*ShiftAmt);
2053       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2054     }
2055     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2056         C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2057       APInt ShiftedC = C.ashr(*ShiftAmt);
2058       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2059     }
2060     if (Pred == ICmpInst::ICMP_SLT) {
2061       // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2062       // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2063       // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2064       // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2065       assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2066       APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2067       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2068     }
2069     // If this is a signed comparison to 0 and the shift is sign preserving,
2070     // use the shift LHS operand instead; isSignTest may change 'Pred', so only
2071     // do that if we're sure to not continue on in this function.
2072     if (isSignTest(Pred, C))
2073       return new ICmpInst(Pred, X, Constant::getNullValue(ShType));
2074   }
2075 
2076   // NUW guarantees that we are only shifting out zero bits from the high bits,
2077   // so we can LSHR the compare constant without needing a mask and eliminate
2078   // the shift.
2079   if (Shl->hasNoUnsignedWrap()) {
2080     if (Pred == ICmpInst::ICMP_UGT) {
2081       // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2082       APInt ShiftedC = C.lshr(*ShiftAmt);
2083       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2084     }
2085     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2086         C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2087       APInt ShiftedC = C.lshr(*ShiftAmt);
2088       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2089     }
2090     if (Pred == ICmpInst::ICMP_ULT) {
2091       // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2092       // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2093       // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2094       // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2095       assert(C.ugt(0) && "ult 0 should have been eliminated");
2096       APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2097       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2098     }
2099   }
2100 
2101   if (Cmp.isEquality() && Shl->hasOneUse()) {
2102     // Strength-reduce the shift into an 'and'.
2103     Constant *Mask = ConstantInt::get(
2104         ShType,
2105         APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2106     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2107     Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2108     return new ICmpInst(Pred, And, LShrC);
2109   }
2110 
2111   // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2112   bool TrueIfSigned = false;
2113   if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2114     // (X << 31) <s 0  --> (X & 1) != 0
2115     Constant *Mask = ConstantInt::get(
2116         ShType,
2117         APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2118     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2119     return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2120                         And, Constant::getNullValue(ShType));
2121   }
2122 
2123   // Simplify 'shl' inequality test into 'and' equality test.
2124   if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2125     // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2126     if ((C + 1).isPowerOf2() &&
2127         (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2128       Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2129       return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2130                                                      : ICmpInst::ICMP_NE,
2131                           And, Constant::getNullValue(ShType));
2132     }
2133     // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2134     if (C.isPowerOf2() &&
2135         (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2136       Value *And =
2137           Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2138       return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2139                                                      : ICmpInst::ICMP_NE,
2140                           And, Constant::getNullValue(ShType));
2141     }
2142   }
2143 
  // Transform (icmp pred iM (shl iM %v, N), C)
  // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N)))
  // Transform the shl to a trunc if (trunc (C>>N)) has no loss and i(M-N) is
  // a legal integer type.
2147   // This enables us to get rid of the shift in favor of a trunc that may be
2148   // free on the target. It has the additional benefit of comparing to a
2149   // smaller constant that may be more target-friendly.
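  // For example (illustrative constants; assumes the shl has no other uses,
  // carries no nsw/nuw flags, and i16 is legal):
  //   icmp ult (shl i32 %v, 16), 0x450000
  //     -->  icmp ult (trunc i32 %v to i16), 0x45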
2150   unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2151   if (Shl->hasOneUse() && Amt != 0 && C.countTrailingZeros() >= Amt &&
2152       DL.isLegalInteger(TypeBits - Amt)) {
2153     Type *TruncTy = IntegerType::get(Cmp.getContext(), TypeBits - Amt);
2154     if (auto *ShVTy = dyn_cast<VectorType>(ShType))
2155       TruncTy = VectorType::get(TruncTy, ShVTy->getElementCount());
2156     Constant *NewC =
2157         ConstantInt::get(TruncTy, C.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2158     return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
2159   }
2160 
2161   return nullptr;
2162 }
2163 
2164 /// Fold icmp ({al}shr X, Y), C.
2165 Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
2166                                                    BinaryOperator *Shr,
2167                                                    const APInt &C) {
2168   // An exact shr only shifts out zero bits, so:
2169   // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2170   Value *X = Shr->getOperand(0);
2171   CmpInst::Predicate Pred = Cmp.getPredicate();
2172   if (Cmp.isEquality() && Shr->isExact() && Shr->hasOneUse() &&
2173       C.isNullValue())
2174     return new ICmpInst(Pred, X, Cmp.getOperand(1));
2175 
2176   const APInt *ShiftVal;
2177   if (Cmp.isEquality() && match(Shr->getOperand(0), m_APInt(ShiftVal)))
2178     return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftVal);
2179 
2180   const APInt *ShiftAmt;
2181   if (!match(Shr->getOperand(1), m_APInt(ShiftAmt)))
2182     return nullptr;
2183 
2184   // Check that the shift amount is in range. If not, don't perform undefined
  // shifts. When the shift is visited, it will be simplified.
2186   unsigned TypeBits = C.getBitWidth();
2187   unsigned ShAmtVal = ShiftAmt->getLimitedValue(TypeBits);
2188   if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2189     return nullptr;
2190 
2191   bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2192   bool IsExact = Shr->isExact();
2193   Type *ShrTy = Shr->getType();
2194   // TODO: If we could guarantee that InstSimplify would handle all of the
2195   // constant-value-based preconditions in the folds below, then we could assert
2196   // those conditions rather than checking them. This is difficult because of
2197   // undef/poison (PR34838).
2198   if (IsAShr) {
2199     if (Pred == CmpInst::ICMP_SLT || (Pred == CmpInst::ICMP_SGT && IsExact)) {
2200       // icmp slt (ashr X, ShAmtC), C --> icmp slt X, (C << ShAmtC)
2201       // icmp sgt (ashr exact X, ShAmtC), C --> icmp sgt X, (C << ShAmtC)
2202       APInt ShiftedC = C.shl(ShAmtVal);
2203       if (ShiftedC.ashr(ShAmtVal) == C)
2204         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2205     }
2206     if (Pred == CmpInst::ICMP_SGT) {
2207       // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
2208       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2209       if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2210           (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2211         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2212     }
2213 
2214     // If the compare constant has significant bits above the lowest sign-bit,
2215     // then convert an unsigned cmp to a test of the sign-bit:
2216     // (ashr X, ShiftC) u> C --> X s< 0
2217     // (ashr X, ShiftC) u< C --> X s> -1
2218     if (C.getBitWidth() > 2 && C.getNumSignBits() <= ShAmtVal) {
2219       if (Pred == CmpInst::ICMP_UGT) {
2220         return new ICmpInst(CmpInst::ICMP_SLT, X,
2221                             ConstantInt::getNullValue(ShrTy));
2222       }
2223       if (Pred == CmpInst::ICMP_ULT) {
2224         return new ICmpInst(CmpInst::ICMP_SGT, X,
2225                             ConstantInt::getAllOnesValue(ShrTy));
2226       }
2227     }
2228   } else {
2229     if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2230       // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2231       // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2232       APInt ShiftedC = C.shl(ShAmtVal);
2233       if (ShiftedC.lshr(ShAmtVal) == C)
2234         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2235     }
2236     if (Pred == CmpInst::ICMP_UGT) {
2237       // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2238       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2239       if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2240         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2241     }
2242   }
2243 
2244   if (!Cmp.isEquality())
2245     return nullptr;
2246 
2247   // Handle equality comparisons of shift-by-constant.
2248 
2249   // If the comparison constant changes with the shift, the comparison cannot
2250   // succeed (bits of the comparison constant cannot match the shifted value).
2251   // This should be known by InstSimplify and already be folded to true/false.
2252   assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2253           (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2254          "Expected icmp+shr simplify did not occur.");
2255 
2256   // If the bits shifted out are known zero, compare the unshifted value:
2257   //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
2258   if (Shr->isExact())
2259     return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2260 
2261   if (Shr->hasOneUse()) {
2262     // Canonicalize the shift into an 'and':
2263     // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2264     APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2265     Constant *Mask = ConstantInt::get(ShrTy, Val);
2266     Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2267     return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2268   }
2269 
2270   return nullptr;
2271 }
2272 
2273 Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp,
2274                                                     BinaryOperator *SRem,
2275                                                     const APInt &C) {
2276   // Match an 'is positive' or 'is negative' comparison of remainder by a
2277   // constant power-of-2 value:
2278   // (X % pow2C) sgt/slt 0
2279   const ICmpInst::Predicate Pred = Cmp.getPredicate();
2280   if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT)
2281     return nullptr;
2282 
  // TODO: The one-use check is standard because we do not typically want to
  //       create longer instruction sequences, but this might be a special
  //       case because srem is not good for analysis or codegen.
2286   if (!SRem->hasOneUse())
2287     return nullptr;
2288 
2289   const APInt *DivisorC;
2290   if (!C.isNullValue() || !match(SRem->getOperand(1), m_Power2(DivisorC)))
2291     return nullptr;
2292 
2293   // Mask off the sign bit and the modulo bits (low-bits).
2294   Type *Ty = SRem->getType();
2295   APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2296   Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2297   Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2298 
2299   // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2300   // bit is set. Example:
2301   // (i8 X % 32) s> 0 --> (X & 159) s> 0
2302   if (Pred == ICmpInst::ICMP_SGT)
2303     return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty));
2304 
2305   // For 'is negative?' check that the sign-bit is set and at least 1 masked
2306   // bit is set. Example:
2307   // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2308   return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2309 }
2310 
2311 /// Fold icmp (udiv X, Y), C.
2312 Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp,
2313                                                     BinaryOperator *UDiv,
2314                                                     const APInt &C) {
2315   const APInt *C2;
2316   if (!match(UDiv->getOperand(0), m_APInt(C2)))
2317     return nullptr;
2318 
2319   assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2320 
2321   // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
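  // For example (illustrative constants):
  //   icmp ugt (udiv i32 100, %y), 9  -->  icmp ule i32 %y, 10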
2322   Value *Y = UDiv->getOperand(1);
2323   if (Cmp.getPredicate() == ICmpInst::ICMP_UGT) {
2324     assert(!C.isMaxValue() &&
2325            "icmp ugt X, UINT_MAX should have been simplified already.");
2326     return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2327                         ConstantInt::get(Y->getType(), C2->udiv(C + 1)));
2328   }
2329 
2330   // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2331   if (Cmp.getPredicate() == ICmpInst::ICMP_ULT) {
2332     assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2333     return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2334                         ConstantInt::get(Y->getType(), C2->udiv(C)));
2335   }
2336 
2337   return nullptr;
2338 }
2339 
2340 /// Fold icmp ({su}div X, Y), C.
2341 Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
2342                                                    BinaryOperator *Div,
2343                                                    const APInt &C) {
2344   // Fold: icmp pred ([us]div X, C2), C -> range test
2345   // Fold this div into the comparison, producing a range check.
  // Determine, based on the divide type, what range is being
  // checked.  If there is an overflow on the low or high side, remember
  // it; otherwise compute the range [low, hi) bounding the new value.
2349   // See: InsertRangeTest above for the kinds of replacements possible.
2350   const APInt *C2;
2351   if (!match(Div->getOperand(1), m_APInt(C2)))
2352     return nullptr;
2353 
2354   // FIXME: If the operand types don't match the type of the divide
2355   // then don't attempt this transform. The code below doesn't have the
2356   // logic to deal with a signed divide and an unsigned compare (and
2357   // vice versa). This is because (x /s C2) <s C  produces different
2358   // results than (x /s C2) <u C or (x /u C2) <s C or even
2359   // (x /u C2) <u C.  Simply casting the operands and result won't
2360   // work. :(  The if statement below tests that condition and bails
2361   // if it finds it.
2362   bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2363   if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2364     return nullptr;
2365 
2366   // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2367   // INT_MIN will also fail if the divisor is 1. Although folds of all these
2368   // division-by-constant cases should be present, we can not assert that they
2369   // have happened before we reach this icmp instruction.
2370   if (C2->isNullValue() || C2->isOneValue() ||
2371       (DivIsSigned && C2->isAllOnesValue()))
2372     return nullptr;
2373 
2374   // Compute Prod = C * C2. We are essentially solving an equation of
2375   // form X / C2 = C. We solve for X by multiplying C2 and C.
2376   // By solving for X, we can turn this into a range check instead of computing
2377   // a divide.
2378   APInt Prod = C * *C2;
2379 
2380   // Determine if the product overflows by seeing if the product is not equal to
2381   // the divide. Make sure we do the same kind of divide as in the LHS
2382   // instruction that we're folding.
2383   bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2384 
2385   ICmpInst::Predicate Pred = Cmp.getPredicate();
2386 
  // If the division is known to be exact, then there is no remainder from the
  // divide, so the covered range has unit size; otherwise it is the divisor.
2389   APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2390 
2391   // Figure out the interval that is being checked.  For example, a comparison
2392   // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2393   // Compute this interval based on the constants involved and the signedness of
2394   // the compare/divide.  This computes a half-open interval, keeping track of
  // whether either value in the interval overflows.  After analysis, each
  // overflow variable is set to 0 if its corresponding bound variable is valid,
  // -1 if overflowed off the bottom end, or +1 if overflowed off the top end.
2398   int LoOverflow = 0, HiOverflow = 0;
2399   APInt LoBound, HiBound;
2400 
2401   if (!DivIsSigned) {  // udiv
2402     // e.g. X/5 op 3  --> [15, 20)
2403     LoBound = Prod;
2404     HiOverflow = LoOverflow = ProdOV;
2405     if (!HiOverflow) {
2406       // If this is not an exact divide, then many values in the range collapse
2407       // to the same result value.
2408       HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2409     }
2410   } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2411     if (C.isNullValue()) {       // (X / pos) op 0
2412       // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
2413       LoBound = -(RangeSize - 1);
2414       HiBound = RangeSize;
2415     } else if (C.isStrictlyPositive()) {   // (X / pos) op pos
2416       LoBound = Prod;     // e.g.   X/5 op 3 --> [15, 20)
2417       HiOverflow = LoOverflow = ProdOV;
2418       if (!HiOverflow)
2419         HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2420     } else {                       // (X / pos) op neg
2421       // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
2422       HiBound = Prod + 1;
2423       LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2424       if (!LoOverflow) {
2425         APInt DivNeg = -RangeSize;
2426         LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2427       }
2428     }
2429   } else if (C2->isNegative()) { // Divisor is < 0.
2430     if (Div->isExact())
2431       RangeSize.negate();
2432     if (C.isNullValue()) { // (X / neg) op 0
2433       // e.g. X/-5 op 0  --> [-4, 5)
2434       LoBound = RangeSize + 1;
2435       HiBound = -RangeSize;
2436       if (HiBound == *C2) {        // -INTMIN = INTMIN
2437         HiOverflow = 1;            // [INTMIN+1, overflow)
2438         HiBound = APInt();         // e.g. X/INTMIN = 0 --> X > INTMIN
2439       }
2440     } else if (C.isStrictlyPositive()) {   // (X / neg) op pos
2441       // e.g. X/-5 op 3  --> [-19, -14)
2442       HiBound = Prod + 1;
2443       HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2444       if (!LoOverflow)
2445         LoOverflow = addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1:0;
2446     } else {                       // (X / neg) op neg
2447       LoBound = Prod;       // e.g. X/-5 op -3  --> [15, 20)
2448       LoOverflow = HiOverflow = ProdOV;
2449       if (!HiOverflow)
2450         HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2451     }
2452 
2453     // Dividing by a negative swaps the condition.  LT <-> GT
2454     Pred = ICmpInst::getSwappedPredicate(Pred);
2455   }
2456 
2457   Value *X = Div->getOperand(0);
2458   switch (Pred) {
2459     default: llvm_unreachable("Unhandled icmp opcode!");
2460     case ICmpInst::ICMP_EQ:
2461       if (LoOverflow && HiOverflow)
2462         return replaceInstUsesWith(Cmp, Builder.getFalse());
2463       if (HiOverflow)
2464         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2465                             ICmpInst::ICMP_UGE, X,
2466                             ConstantInt::get(Div->getType(), LoBound));
2467       if (LoOverflow)
2468         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2469                             ICmpInst::ICMP_ULT, X,
2470                             ConstantInt::get(Div->getType(), HiBound));
2471       return replaceInstUsesWith(
2472           Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2473     case ICmpInst::ICMP_NE:
2474       if (LoOverflow && HiOverflow)
2475         return replaceInstUsesWith(Cmp, Builder.getTrue());
2476       if (HiOverflow)
2477         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
2478                             ICmpInst::ICMP_ULT, X,
2479                             ConstantInt::get(Div->getType(), LoBound));
2480       if (LoOverflow)
2481         return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
2482                             ICmpInst::ICMP_UGE, X,
2483                             ConstantInt::get(Div->getType(), HiBound));
2484       return replaceInstUsesWith(Cmp,
2485                                  insertRangeTest(X, LoBound, HiBound,
2486                                                  DivIsSigned, false));
2487     case ICmpInst::ICMP_ULT:
2488     case ICmpInst::ICMP_SLT:
2489       if (LoOverflow == +1)   // Low bound is greater than input range.
2490         return replaceInstUsesWith(Cmp, Builder.getTrue());
2491       if (LoOverflow == -1)   // Low bound is less than input range.
2492         return replaceInstUsesWith(Cmp, Builder.getFalse());
2493       return new ICmpInst(Pred, X, ConstantInt::get(Div->getType(), LoBound));
2494     case ICmpInst::ICMP_UGT:
2495     case ICmpInst::ICMP_SGT:
2496       if (HiOverflow == +1)       // High bound greater than input range.
2497         return replaceInstUsesWith(Cmp, Builder.getFalse());
2498       if (HiOverflow == -1)       // High bound less than input range.
2499         return replaceInstUsesWith(Cmp, Builder.getTrue());
2500       if (Pred == ICmpInst::ICMP_UGT)
2501         return new ICmpInst(ICmpInst::ICMP_UGE, X,
2502                             ConstantInt::get(Div->getType(), HiBound));
2503       return new ICmpInst(ICmpInst::ICMP_SGE, X,
2504                           ConstantInt::get(Div->getType(), HiBound));
2505   }
2506 
2507   return nullptr;
2508 }
2509 
2510 /// Fold icmp (sub X, Y), C.
2511 Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp,
2512                                                    BinaryOperator *Sub,
2513                                                    const APInt &C) {
2514   Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2515   ICmpInst::Predicate Pred = Cmp.getPredicate();
2516   const APInt *C2;
2517   APInt SubResult;
2518 
2519   // icmp eq/ne (sub C, Y), C -> icmp eq/ne Y, 0
2520   if (match(X, m_APInt(C2)) && *C2 == C && Cmp.isEquality())
2521     return new ICmpInst(Cmp.getPredicate(), Y,
2522                         ConstantInt::get(Y->getType(), 0));
2523 
2524   // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2525   if (match(X, m_APInt(C2)) &&
2526       ((Cmp.isUnsigned() && Sub->hasNoUnsignedWrap()) ||
2527        (Cmp.isSigned() && Sub->hasNoSignedWrap())) &&
2528       !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2529     return new ICmpInst(Cmp.getSwappedPredicate(), Y,
2530                         ConstantInt::get(Y->getType(), SubResult));
2531 
2532   // The following transforms are only worth it if the only user of the subtract
2533   // is the icmp.
2534   if (!Sub->hasOneUse())
2535     return nullptr;
2536 
2537   if (Sub->hasNoSignedWrap()) {
2538     // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
2539     if (Pred == ICmpInst::ICMP_SGT && C.isAllOnesValue())
2540       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
2541 
2542     // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
2543     if (Pred == ICmpInst::ICMP_SGT && C.isNullValue())
2544       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
2545 
2546     // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
2547     if (Pred == ICmpInst::ICMP_SLT && C.isNullValue())
2548       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
2549 
2550     // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
2551     if (Pred == ICmpInst::ICMP_SLT && C.isOneValue())
2552       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
2553   }
2554 
2555   if (!match(X, m_APInt(C2)))
2556     return nullptr;
2557 
2558   // C2 - Y <u C -> (Y | (C - 1)) == C2
2559   //   iff (C2 & (C - 1)) == C - 1 and C is a power of 2
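     // e.g. (illustrative, i8): 7 - Y u< 4 --> (Y | 3) == 7; both sides hold
     // exactly for Y in [4, 7].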
2560   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
2561       (*C2 & (C - 1)) == (C - 1))
2562     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
2563 
2564   // C2 - Y >u C -> (Y | C) != C2
2565   //   iff C2 & C == C and C + 1 is a power of 2
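     // e.g. (illustrative, i8): 7 - Y u> 3 --> (Y | 3) != 7; both sides are
     // false exactly for Y in [4, 7].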
2566   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
2567     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
2568 
2569   return nullptr;
2570 }
2571 
2572 /// Fold icmp (add X, Y), C.
2573 Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
2574                                                    BinaryOperator *Add,
2575                                                    const APInt &C) {
2576   Value *Y = Add->getOperand(1);
2577   const APInt *C2;
2578   if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
2579     return nullptr;
2580 
2581   // Fold icmp pred (add X, C2), C.
2582   Value *X = Add->getOperand(0);
2583   Type *Ty = Add->getType();
2584   CmpInst::Predicate Pred = Cmp.getPredicate();
2585 
2586   // If the add does not wrap, we can always adjust the compare by subtracting
2587   // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
2588   // are canonicalized to SGT/SLT/UGT/ULT.
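     // e.g. (illustrative): icmp sgt (add nsw i8 %x, 5), 20 --> icmp sgt i8 %x, 15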
2589   if ((Add->hasNoSignedWrap() &&
2590        (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
2591       (Add->hasNoUnsignedWrap() &&
2592        (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
2593     bool Overflow;
2594     APInt NewC =
2595         Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
2596     // If there is overflow, the result must be true or false.
2597     // TODO: Can we assert there is no overflow because InstSimplify always
2598     // handles those cases?
2599     if (!Overflow)
2600       // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
2601       return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
2602   }
2603 
2604   auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
2605   const APInt &Upper = CR.getUpper();
2606   const APInt &Lower = CR.getLower();
2607   if (Cmp.isSigned()) {
2608     if (Lower.isSignMask())
2609       return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
2610     if (Upper.isSignMask())
2611       return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
2612   } else {
2613     if (Lower.isMinValue())
2614       return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
2615     if (Upper.isMinValue())
2616       return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
2617   }
2618 
2619   if (!Add->hasOneUse())
2620     return nullptr;
2621 
2622   // X+C2 <u C -> (X & -C) == -C2
2623   //   iff C2 & (C-1) == 0
2624   //       C is a power of 2
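     // e.g. (illustrative, i8): (X + 4) u< 4 --> (X & -4) == -4; both sides
     // hold exactly for X in [-4, -1].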
2625   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
2626     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
2627                         ConstantExpr::getNeg(cast<Constant>(Y)));
2628 
2629   // X+C2 >u C -> (X & ~C) != -C2
2630   //   iff C2 & C == 0
2631   //       C+1 is a power of 2
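     // e.g. (illustrative, i8): (X + 4) u> 3 --> (X & ~3) != -4; both sides
     // are false exactly for X in [-4, -1].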
2632   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
2633     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
2634                         ConstantExpr::getNeg(cast<Constant>(Y)));
2635 
2636   return nullptr;
2637 }
2638 
2639 bool InstCombinerImpl::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
2640                                                Value *&RHS, ConstantInt *&Less,
2641                                                ConstantInt *&Equal,
2642                                                ConstantInt *&Greater) {
2643   // TODO: Generalize this to work with other comparison idioms or ensure
2644   // they get canonicalized into this form.
2645 
2646   // select i1 (a == b),
2647   //        i32 Equal,
2648   //        i32 (select i1 (a < b), i32 Less, i32 Greater)
2649   // where Equal, Less and Greater are placeholders for any three constants.
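     // e.g. (illustrative IR, assuming i32 operands and results -1/0/1):
     //   %lt    = icmp slt i32 %a, %b
     //   %inner = select i1 %lt, i32 -1, i32 1
     //   %eq    = icmp eq i32 %a, %b
     //   %cmp3  = select i1 %eq, i32 0, i32 %inner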
2650   ICmpInst::Predicate PredA;
2651   if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
2652       !ICmpInst::isEquality(PredA))
2653     return false;
2654   Value *EqualVal = SI->getTrueValue();
2655   Value *UnequalVal = SI->getFalseValue();
2656   // We can still get a non-canonical predicate here, so canonicalize.
2657   if (PredA == ICmpInst::ICMP_NE)
2658     std::swap(EqualVal, UnequalVal);
2659   if (!match(EqualVal, m_ConstantInt(Equal)))
2660     return false;
2661   ICmpInst::Predicate PredB;
2662   Value *LHS2, *RHS2;
2663   if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
2664                                   m_ConstantInt(Less), m_ConstantInt(Greater))))
2665     return false;
2666   // We may get a predicate mismatch here, so canonicalize if possible:
2667   // First, ensure that 'LHS' matches.
2668   if (LHS2 != LHS) {
2669     // x sgt y <--> y slt x
2670     std::swap(LHS2, RHS2);
2671     PredB = ICmpInst::getSwappedPredicate(PredB);
2672   }
2673   if (LHS2 != LHS)
2674     return false;
2675   // We also need to canonicalize 'RHS'.
2676   if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
2677     // x sgt C-1  <-->  x sge C  <-->  not(x slt C)
2678     auto FlippedStrictness =
2679         InstCombiner::getFlippedStrictnessPredicateAndConstant(
2680             PredB, cast<Constant>(RHS2));
2681     if (!FlippedStrictness)
2682       return false;
2683     assert(FlippedStrictness->first == ICmpInst::ICMP_SGE && "Sanity check");
2684     RHS2 = FlippedStrictness->second;
2685     // And kind-of perform the result swap.
2686     std::swap(Less, Greater);
2687     PredB = ICmpInst::ICMP_SLT;
2688   }
2689   return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
2690 }
2691 
2692 Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
2693                                                       SelectInst *Select,
2694                                                       ConstantInt *C) {
2695 
2696   assert(C && "Cmp RHS should be a constant int!");
2697   // If we're testing a constant value against the result of a three way
2698   // comparison, the result can be expressed directly in terms of the
2699   // original values being compared.  Note: We could possibly be more
2700   // aggressive here and remove the hasOneUse test. The original select is
2701   // really likely to simplify or sink when we remove a test of the result.
2702   Value *OrigLHS, *OrigRHS;
2703   ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
2704   if (Cmp.hasOneUse() &&
2705       matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
2706                               C3GreaterThan)) {
2707     assert(C1LessThan && C2Equal && C3GreaterThan);
2708 
2709     bool TrueWhenLessThan =
2710         ConstantExpr::getCompare(Cmp.getPredicate(), C1LessThan, C)
2711             ->isAllOnesValue();
2712     bool TrueWhenEqual =
2713         ConstantExpr::getCompare(Cmp.getPredicate(), C2Equal, C)
2714             ->isAllOnesValue();
2715     bool TrueWhenGreaterThan =
2716         ConstantExpr::getCompare(Cmp.getPredicate(), C3GreaterThan, C)
2717             ->isAllOnesValue();
2718 
2719     // This generates the new instruction that will replace the original Cmp
2720     // Instruction. Instead of enumerating the various combinations when
2721     // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
2722     // false, we rely on chaining of ORs and future passes of InstCombine to
2723     // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
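     // e.g. (illustrative): with (Less, Equal, Greater) = (-1, 0, 1) and the
     // test 'icmp sgt %sel, -1', only the Equal and Greater arms satisfy the
     // predicate, so this builds (a == b) || (a s> b), which later folds to
     // a s>= b.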
2724 
2725     // When none of the three constants satisfy the predicate for the RHS (C),
2726     // the entire original Cmp can be simplified to false.
2727     Value *Cond = Builder.getFalse();
2728     if (TrueWhenLessThan)
2729       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT,
2730                                                        OrigLHS, OrigRHS));
2731     if (TrueWhenEqual)
2732       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ,
2733                                                        OrigLHS, OrigRHS));
2734     if (TrueWhenGreaterThan)
2735       Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT,
2736                                                        OrigLHS, OrigRHS));
2737 
2738     return replaceInstUsesWith(Cmp, Cond);
2739   }
2740   return nullptr;
2741 }
2742 
2743 static Instruction *foldICmpBitCast(ICmpInst &Cmp,
2744                                     InstCombiner::BuilderTy &Builder) {
2745   auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
2746   if (!Bitcast)
2747     return nullptr;
2748 
2749   ICmpInst::Predicate Pred = Cmp.getPredicate();
2750   Value *Op1 = Cmp.getOperand(1);
2751   Value *BCSrcOp = Bitcast->getOperand(0);
2752 
2753   // Make sure the bitcast doesn't change the number of vector elements.
2754   if (Bitcast->getSrcTy()->getScalarSizeInBits() ==
2755           Bitcast->getDestTy()->getScalarSizeInBits()) {
2756     // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
2757     Value *X;
2758     if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
2759       // icmp  eq (bitcast (sitofp X)), 0 --> icmp  eq X, 0
2760       // icmp  ne (bitcast (sitofp X)), 0 --> icmp  ne X, 0
2761       // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
2762       // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
2763       if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
2764            Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
2765           match(Op1, m_Zero()))
2766         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2767 
2768       // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
2769       if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
2770         return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
2771 
2772       // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
2773       if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
2774         return new ICmpInst(Pred, X,
2775                             ConstantInt::getAllOnesValue(X->getType()));
2776     }
2777 
2778     // Zero-equality checks are preserved through unsigned floating-point casts:
2779     // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
2780     // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
2781     if (match(BCSrcOp, m_UIToFP(m_Value(X))))
2782       if (Cmp.isEquality() && match(Op1, m_Zero()))
2783         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2784 
2785     // If this is a sign-bit test of a bitcast of a casted FP value, eliminate
2786     // the FP extend/truncate because that cast does not change the sign-bit.
2787     // This is true for all standard IEEE-754 types and the X86 80-bit type.
2788     // The sign-bit is always the most significant bit in those types.
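     // e.g. (illustrative):
     //   icmp slt (bitcast (fpext float %f to double) to i64), 0
     //     --> icmp slt (bitcast float %f to i32), 0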
2789     const APInt *C;
2790     bool TrueIfSigned;
2791     if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse() &&
2792         InstCombiner::isSignBitCheck(Pred, *C, TrueIfSigned)) {
2793       if (match(BCSrcOp, m_FPExt(m_Value(X))) ||
2794           match(BCSrcOp, m_FPTrunc(m_Value(X)))) {
2795         // (bitcast (fpext/fptrunc X)) to iX) < 0 --> (bitcast X to iY) < 0
2796         // (bitcast (fpext/fptrunc X)) to iX) > -1 --> (bitcast X to iY) > -1
2797         Type *XType = X->getType();
2798 
2799         // We can't currently handle Power style floating point operations here.
2800         if (!(XType->isPPC_FP128Ty() || BCSrcOp->getType()->isPPC_FP128Ty())) {
2801 
2802           Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
2803           if (auto *XVTy = dyn_cast<VectorType>(XType))
2804             NewType = VectorType::get(NewType, XVTy->getElementCount());
2805           Value *NewBitcast = Builder.CreateBitCast(X, NewType);
2806           if (TrueIfSigned)
2807             return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
2808                                 ConstantInt::getNullValue(NewType));
2809           else
2810             return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast,
2811                                 ConstantInt::getAllOnesValue(NewType));
2812         }
2813       }
2814     }
2815   }
2816 
2817   // Test to see if the operands of the icmp are casted versions of other
2818   // values. If the ptr->ptr cast can be stripped off both arguments, do so.
2819   if (Bitcast->getType()->isPointerTy() &&
2820       (isa<Constant>(Op1) || isa<BitCastInst>(Op1))) {
2821     // If operand #1 is a bitcast instruction, it must also be a ptr->ptr cast
2822     // so eliminate it as well.
2823     if (auto *BC2 = dyn_cast<BitCastInst>(Op1))
2824       Op1 = BC2->getOperand(0);
2825 
2826     Op1 = Builder.CreateBitCast(Op1, BCSrcOp->getType());
2827     return new ICmpInst(Pred, BCSrcOp, Op1);
2828   }
2829 
2830   // Folding: icmp <pred> iN X, C
2831   //  where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC)) to iN
2832   //    and C is a splat of a K-bit pattern
2833   //    and SC is a constant vector = <C', C', C', ..., C'>
2834   // Into:
2835   //   %E = extractelement <M x iK> %vec, i32 C'
2836   //   icmp <pred> iK %E, trunc(C)
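     // e.g. (illustrative, M = 4, K = 8):
     //   %s = shufflevector <4 x i8> %vec, <4 x i8> undef,
     //                      <4 x i32> <i32 1, i32 1, i32 1, i32 1>
     //   %b = bitcast <4 x i8> %s to i32
     //   icmp eq i32 %b, 0x2A2A2A2A   ; a splat of the 8-bit pattern 0x2A
     // becomes:
     //   %e = extractelement <4 x i8> %vec, i32 1
     //   icmp eq i8 %e, 42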
2837   const APInt *C;
2838   if (!match(Cmp.getOperand(1), m_APInt(C)) ||
2839       !Bitcast->getType()->isIntegerTy() ||
2840       !Bitcast->getSrcTy()->isIntOrIntVectorTy())
2841     return nullptr;
2842 
2843   Value *Vec;
2844   ArrayRef<int> Mask;
2845   if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
2846     // Check whether every element of Mask is the same constant
2847     if (is_splat(Mask)) {
2848       auto *VecTy = cast<VectorType>(BCSrcOp->getType());
2849       auto *EltTy = cast<IntegerType>(VecTy->getElementType());
2850       if (C->isSplat(EltTy->getBitWidth())) {
2851         // Fold the icmp based on the value of C.
2852         // If C is M copies of an iK sized bit pattern,
2853         // then:
2854         //   =>  %E = extractelement <M x iK> %vec, i32 Elem
2855         //       icmp <pred> iK %E, <pattern>
2856         Value *Elem = Builder.getInt32(Mask[0]);
2857         Value *Extract = Builder.CreateExtractElement(Vec, Elem);
2858         Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
2859         return new ICmpInst(Pred, Extract, NewC);
2860       }
2861     }
2862   }
2863   return nullptr;
2864 }
2865 
2866 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
2867 /// where X is some kind of instruction.
2868 Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
2869   const APInt *C;
2870   if (!match(Cmp.getOperand(1), m_APInt(C)))
2871     return nullptr;
2872 
2873   if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0))) {
2874     switch (BO->getOpcode()) {
2875     case Instruction::Xor:
2876       if (Instruction *I = foldICmpXorConstant(Cmp, BO, *C))
2877         return I;
2878       break;
2879     case Instruction::And:
2880       if (Instruction *I = foldICmpAndConstant(Cmp, BO, *C))
2881         return I;
2882       break;
2883     case Instruction::Or:
2884       if (Instruction *I = foldICmpOrConstant(Cmp, BO, *C))
2885         return I;
2886       break;
2887     case Instruction::Mul:
2888       if (Instruction *I = foldICmpMulConstant(Cmp, BO, *C))
2889         return I;
2890       break;
2891     case Instruction::Shl:
2892       if (Instruction *I = foldICmpShlConstant(Cmp, BO, *C))
2893         return I;
2894       break;
2895     case Instruction::LShr:
2896     case Instruction::AShr:
2897       if (Instruction *I = foldICmpShrConstant(Cmp, BO, *C))
2898         return I;
2899       break;
2900     case Instruction::SRem:
2901       if (Instruction *I = foldICmpSRemConstant(Cmp, BO, *C))
2902         return I;
2903       break;
2904     case Instruction::UDiv:
2905       if (Instruction *I = foldICmpUDivConstant(Cmp, BO, *C))
2906         return I;
2907       LLVM_FALLTHROUGH;
2908     case Instruction::SDiv:
2909       if (Instruction *I = foldICmpDivConstant(Cmp, BO, *C))
2910         return I;
2911       break;
2912     case Instruction::Sub:
2913       if (Instruction *I = foldICmpSubConstant(Cmp, BO, *C))
2914         return I;
2915       break;
2916     case Instruction::Add:
2917       if (Instruction *I = foldICmpAddConstant(Cmp, BO, *C))
2918         return I;
2919       break;
2920     default:
2921       break;
2922     }
2923     // TODO: These folds could be refactored to be part of the above calls.
2924     if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, *C))
2925       return I;
2926   }
2927 
2928   // Match against CmpInst LHS being instructions other than binary operators.
2929 
2930   if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0))) {
2931     // For now, we only support constant integers while folding the
2932     // ICMP(SELECT) pattern. We can extend this to support vectors of integers
2933     // similar to the cases handled by binary ops above.
2934     if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
2935       if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
2936         return I;
2937   }
2938 
2939   if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0))) {
2940     if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
2941       return I;
2942   }
2943 
2944   if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
2945     if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
2946       return I;
2947 
2948   return nullptr;
2949 }
2950 
2951 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
2952 /// icmp eq/ne BO, C.
2953 Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
2954     ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
2955   // TODO: Some of these folds could work with arbitrary constants, but this
2956   // function is limited to scalar and vector splat constants.
2957   if (!Cmp.isEquality())
2958     return nullptr;
2959 
2960   ICmpInst::Predicate Pred = Cmp.getPredicate();
2961   bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
2962   Constant *RHS = cast<Constant>(Cmp.getOperand(1));
2963   Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
2964 
2965   switch (BO->getOpcode()) {
2966   case Instruction::SRem:
2967     // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
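     // e.g. (illustrative): (srem i32 %x, 8) == 0 --> (urem i32 %x, 8) == 0.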
2968     if (C.isNullValue() && BO->hasOneUse()) {
2969       const APInt *BOC;
2970       if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
2971         Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
2972         return new ICmpInst(Pred, NewRem,
2973                             Constant::getNullValue(BO->getType()));
2974       }
2975     }
2976     break;
2977   case Instruction::Add: {
2978     // Replace ((add A, B) != C) with (A != C-B) if B and C are constants.
2979     if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
2980       if (BO->hasOneUse())
2981         return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, BOC));
2982     } else if (C.isNullValue()) {
2983       // Replace ((add A, B) != 0) with (A != -B) if A or B is
2984       // efficiently invertible, or if the add has just this one use.
2985       if (Value *NegVal = dyn_castNegVal(BOp1))
2986         return new ICmpInst(Pred, BOp0, NegVal);
2987       if (Value *NegVal = dyn_castNegVal(BOp0))
2988         return new ICmpInst(Pred, NegVal, BOp1);
2989       if (BO->hasOneUse()) {
2990         Value *Neg = Builder.CreateNeg(BOp1);
2991         Neg->takeName(BO);
2992         return new ICmpInst(Pred, BOp0, Neg);
2993       }
2994     }
2995     break;
2996   }
2997   case Instruction::Xor:
2998     if (BO->hasOneUse()) {
2999       if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3000         // For the xor case, we can xor two constants together, eliminating
3001         // the explicit xor.
3002         return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
3003       } else if (C.isNullValue()) {
3004         // Replace ((xor A, B) != 0) with (A != B)
3005         return new ICmpInst(Pred, BOp0, BOp1);
3006       }
3007     }
3008     break;
3009   case Instruction::Sub:
3010     if (BO->hasOneUse()) {
3011       // Only check for constant LHS here, as constant RHS will be canonicalized
3012       // to add and use the fold above.
3013       if (Constant *BOC = dyn_cast<Constant>(BOp0)) {
3014         // Replace ((sub BOC, B) != C) with (B != BOC-C).
3015         return new ICmpInst(Pred, BOp1, ConstantExpr::getSub(BOC, RHS));
3016       } else if (C.isNullValue()) {
3017         // Replace ((sub A, B) != 0) with (A != B).
3018         return new ICmpInst(Pred, BOp0, BOp1);
3019       }
3020     }
3021     break;
3022   case Instruction::Or: {
3023     const APInt *BOC;
3024     if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
3025       // Comparing if all bits outside of a constant mask are set?
3026       // Replace (X | C) == -1 with (X & ~C) == ~C.
3027       // This removes the -1 constant.
3028       Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
3029       Value *And = Builder.CreateAnd(BOp0, NotBOC);
3030       return new ICmpInst(Pred, And, NotBOC);
3031     }
3032     break;
3033   }
3034   case Instruction::And: {
3035     const APInt *BOC;
3036     if (match(BOp1, m_APInt(BOC))) {
3037       // If we have ((X & C) == C), turn it into ((X & C) != 0).
3038       if (C == *BOC && C.isPowerOf2())
3039         return new ICmpInst(isICMP_NE ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE,
3040                             BO, Constant::getNullValue(RHS->getType()));
3041     }
3042     break;
3043   }
3044   case Instruction::UDiv:
3045     if (C.isNullValue()) {
3046       // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule B, A)
3047       auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3048       return new ICmpInst(NewPred, BOp1, BOp0);
3049     }
3050     break;
3051   default:
3052     break;
3053   }
3054   return nullptr;
3055 }
3056 
3057 /// Fold an equality icmp with LLVM intrinsic and constant operand.
3058 Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
3059     ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) {
3060   Type *Ty = II->getType();
3061   unsigned BitWidth = C.getBitWidth();
3062   switch (II->getIntrinsicID()) {
3063   case Intrinsic::abs:
3064     // abs(A) == 0  ->  A == 0
3065     // abs(A) == INT_MIN  ->  A == INT_MIN
3066     if (C.isNullValue() || C.isMinSignedValue())
3067       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3068                           ConstantInt::get(Ty, C));
3069     break;
3070 
3071   case Intrinsic::bswap:
3072     // bswap(A) == C  ->  A == bswap(C)
3073     return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3074                         ConstantInt::get(Ty, C.byteSwap()));
3075 
3076   case Intrinsic::ctlz:
3077   case Intrinsic::cttz: {
3078     // ctz(A) == bitwidth(A)  ->  A == 0 and likewise for !=
3079     if (C == BitWidth)
3080       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3081                           ConstantInt::getNullValue(Ty));
3082 
3083     // cttz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
3084     // and Mask1 has bits 0..C set. Similar for ctlz, but for high bits.
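     // e.g. (illustrative, i8): cttz(A) == 2 --> (A & 0b00000111) == 0b00000100
     //                          ctlz(A) == 2 --> (A & 0b11100000) == 0b00100000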
3085     // Limit to one use to ensure we don't increase instruction count.
3086     unsigned Num = C.getLimitedValue(BitWidth);
3087     if (Num != BitWidth && II->hasOneUse()) {
3088       bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3089       APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3090                                : APInt::getHighBitsSet(BitWidth, Num + 1);
3091       APInt Mask2 = IsTrailing
3092         ? APInt::getOneBitSet(BitWidth, Num)
3093         : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3094       return new ICmpInst(Cmp.getPredicate(),
3095           Builder.CreateAnd(II->getArgOperand(0), Mask1),
3096           ConstantInt::get(Ty, Mask2));
3097     }
3098     break;
3099   }
3100 
3101   case Intrinsic::ctpop: {
3102     // popcount(A) == 0  ->  A == 0 and likewise for !=
3103     // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
3104     bool IsZero = C.isNullValue();
3105     if (IsZero || C == BitWidth)
3106       return new ICmpInst(Cmp.getPredicate(), II->getArgOperand(0),
3107           IsZero ? Constant::getNullValue(Ty) : Constant::getAllOnesValue(Ty));
3108 
3109     break;
3110   }
3111 
3112   case Intrinsic::uadd_sat: {
3113     // uadd.sat(a, b) == 0  ->  (a | b) == 0
3114     if (C.isNullValue()) {
3115       Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3116       return new ICmpInst(Cmp.getPredicate(), Or, Constant::getNullValue(Ty));
3117     }
3118     break;
3119   }
3120 
3121   case Intrinsic::usub_sat: {
3122     // usub.sat(a, b) == 0  ->  a <= b
3123     if (C.isNullValue()) {
3124       ICmpInst::Predicate NewPred = Cmp.getPredicate() == ICmpInst::ICMP_EQ
3125           ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3126       return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1));
3127     }
3128     break;
3129   }
3130   default:
3131     break;
3132   }
3133 
3134   return nullptr;
3135 }
3136 
3137 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
3138 Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
3139                                                              IntrinsicInst *II,
3140                                                              const APInt &C) {
3141   if (Cmp.isEquality())
3142     return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
3143 
3144   Type *Ty = II->getType();
3145   unsigned BitWidth = C.getBitWidth();
3146   ICmpInst::Predicate Pred = Cmp.getPredicate();
3147   switch (II->getIntrinsicID()) {
3148   case Intrinsic::ctpop: {
3149     // (ctpop X > BitWidth - 1) --> X == -1
3150     Value *X = II->getArgOperand(0);
3151     if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT)
3152       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X,
3153                              ConstantInt::getAllOnesValue(Ty));
3154     // (ctpop X < BitWidth) --> X != -1
3155     if (C == BitWidth && Pred == ICmpInst::ICMP_ULT)
3156       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X,
3157                              ConstantInt::getAllOnesValue(Ty));
3158     break;
3159   }
3160   case Intrinsic::ctlz: {
3161     // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
3162     if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3163       unsigned Num = C.getLimitedValue();
3164       APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3165       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
3166                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3167     }
3168 
3169     // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
3170     if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
3171       unsigned Num = C.getLimitedValue();
3172       APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
3173       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
3174                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
3175     }
3176     break;
3177   }
3178   case Intrinsic::cttz: {
3179     // Limit to one use to ensure we don't increase instruction count.
3180     if (!II->hasOneUse())
3181       return nullptr;
3182 
3183     // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
3184     if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
3185       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
3186       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
3187                              Builder.CreateAnd(II->getArgOperand(0), Mask),
3188                              ConstantInt::getNullValue(Ty));
3189     }
3190 
3191     // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
3192     if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
3193       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
3194       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
3195                              Builder.CreateAnd(II->getArgOperand(0), Mask),
3196                              ConstantInt::getNullValue(Ty));
3197     }
3198     break;
3199   }
3200   default:
3201     break;
3202   }
3203 
3204   return nullptr;
3205 }
3206 
3207 /// Handle icmp with constant (but not simple integer constant) RHS.
3208 Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) {
3209   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3210   Constant *RHSC = dyn_cast<Constant>(Op1);
3211   Instruction *LHSI = dyn_cast<Instruction>(Op0);
3212   if (!RHSC || !LHSI)
3213     return nullptr;
3214 
3215   switch (LHSI->getOpcode()) {
3216   case Instruction::GetElementPtr:
3217     // icmp pred GEP (P, int 0, int 0, int 0), null -> icmp pred P, null
3218     if (RHSC->isNullValue() &&
3219         cast<GetElementPtrInst>(LHSI)->hasAllZeroIndices())
3220       return new ICmpInst(
3221           I.getPredicate(), LHSI->getOperand(0),
3222           Constant::getNullValue(LHSI->getOperand(0)->getType()));
3223     break;
3224   case Instruction::PHI:
3225     // Only fold icmp into the PHI if the phi and icmp are in the same
3226     // block.  If in the same block, we're encouraging jump threading.  If
3227     // not, we are just pessimizing the code by making an i1 phi.
3228     if (LHSI->getParent() == I.getParent())
3229       if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
3230         return NV;
3231     break;
3232   case Instruction::Select: {
3233     // If either operand of the select is a constant, we can fold the
3234     // comparison into the select arms, which will cause one to be
3235     // constant folded and the select turned into a bitwise or.
3236     Value *Op1 = nullptr, *Op2 = nullptr;
3237     ConstantInt *CI = nullptr;
3238     if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(1))) {
3239       Op1 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3240       CI = dyn_cast<ConstantInt>(Op1);
3241     }
3242     if (Constant *C = dyn_cast<Constant>(LHSI->getOperand(2))) {
3243       Op2 = ConstantExpr::getICmp(I.getPredicate(), C, RHSC);
3244       CI = dyn_cast<ConstantInt>(Op2);
3245     }
3246 
3247     // We only want to perform this transformation if it will not lead to
3248     // additional code. This is true if either both sides of the select
3249     // fold to a constant (in which case the icmp is replaced with a select
3250     // which will usually simplify) or this is the only user of the
3251     // select (in which case we are trading a select+icmp for a simpler
3252     // select+icmp) or all uses of the select can be replaced based on
3253     // dominance information ("Global cases").
3254     bool Transform = false;
3255     if (Op1 && Op2)
3256       Transform = true;
3257     else if (Op1 || Op2) {
3258       // Local case
3259       if (LHSI->hasOneUse())
3260         Transform = true;
3261       // Global cases
3262       else if (CI && !CI->isZero())
3263         // When Op1 is constant try replacing select with second operand.
3264         // Otherwise Op2 is constant and try replacing select with first
3265         // operand.
3266         Transform =
3267             replacedSelectWithOperand(cast<SelectInst>(LHSI), &I, Op1 ? 2 : 1);
3268     }
3269     if (Transform) {
3270       if (!Op1)
3271         Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
3272                                  I.getName());
3273       if (!Op2)
3274         Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
3275                                  I.getName());
3276       return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
3277     }
3278     break;
3279   }
3280   case Instruction::IntToPtr:
3281     // icmp pred inttoptr(X), null -> icmp pred X, 0
3282     if (RHSC->isNullValue() &&
3283         DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
3284       return new ICmpInst(
3285           I.getPredicate(), LHSI->getOperand(0),
3286           Constant::getNullValue(LHSI->getOperand(0)->getType()));
3287     break;
3288 
3289   case Instruction::Load:
3290     // Try to optimize things like "A[i] > 4" to index computations.
3291     if (GetElementPtrInst *GEP =
3292             dyn_cast<GetElementPtrInst>(LHSI->getOperand(0))) {
3293       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
3294         if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
3295             !cast<LoadInst>(LHSI)->isVolatile())
3296           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
3297             return Res;
3298     }
3299     break;
3300   }
3301 
3302   return nullptr;
3303 }
3304 
3305 /// Some comparisons can be simplified.
3306 /// In this case, we are looking for comparisons that look like
3307 /// a check for a lossy truncation.
3308 /// Folds:
3309 ///   icmp SrcPred (x & Mask), x    to    icmp DstPred x, Mask
3310 /// Where Mask is some pattern that produces all-ones in low bits:
3311 ///    (-1 >> y)
3312 ///    ((-1 << y) >> y)     <- non-canonical, has extra uses
3313 ///   ~(-1 << y)
3314 ///    ((1 << y) + (-1))    <- non-canonical, has extra uses
3315 /// The Mask can be a constant, too.
3316 /// For some predicates, the operands are commutative.
3317 /// For others, x can only be on a specific side.
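     /// e.g. (illustrative, i8): (x & 15) == x   -->   x u<= 15
     ///                          (x & 15) s< x   -->   x s> 15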
3318 static Value *foldICmpWithLowBitMaskedVal(ICmpInst &I,
3319                                           InstCombiner::BuilderTy &Builder) {
3320   ICmpInst::Predicate SrcPred;
3321   Value *X, *M, *Y;
3322   auto m_VariableMask = m_CombineOr(
3323       m_CombineOr(m_Not(m_Shl(m_AllOnes(), m_Value())),
3324                   m_Add(m_Shl(m_One(), m_Value()), m_AllOnes())),
3325       m_CombineOr(m_LShr(m_AllOnes(), m_Value()),
3326                   m_LShr(m_Shl(m_AllOnes(), m_Value(Y)), m_Deferred(Y))));
3327   auto m_Mask = m_CombineOr(m_VariableMask, m_LowBitMask());
3328   if (!match(&I, m_c_ICmp(SrcPred,
3329                           m_c_And(m_CombineAnd(m_Mask, m_Value(M)), m_Value(X)),
3330                           m_Deferred(X))))
3331     return nullptr;
3332 
3333   ICmpInst::Predicate DstPred;
3334   switch (SrcPred) {
3335   case ICmpInst::Predicate::ICMP_EQ:
3336     //  x & (-1 >> y) == x    ->    x u<= (-1 >> y)
3337     DstPred = ICmpInst::Predicate::ICMP_ULE;
3338     break;
3339   case ICmpInst::Predicate::ICMP_NE:
3340     //  x & (-1 >> y) != x    ->    x u> (-1 >> y)
3341     DstPred = ICmpInst::Predicate::ICMP_UGT;
3342     break;
3343   case ICmpInst::Predicate::ICMP_ULT:
3344     //  x & (-1 >> y) u< x    ->    x u> (-1 >> y)
3345     //  x u> x & (-1 >> y)    ->    x u> (-1 >> y)
3346     DstPred = ICmpInst::Predicate::ICMP_UGT;
3347     break;
3348   case ICmpInst::Predicate::ICMP_UGE:
3349     //  x & (-1 >> y) u>= x    ->    x u<= (-1 >> y)
3350     //  x u<= x & (-1 >> y)    ->    x u<= (-1 >> y)
3351     DstPred = ICmpInst::Predicate::ICMP_ULE;
3352     break;
3353   case ICmpInst::Predicate::ICMP_SLT:
3354     //  x & (-1 >> y) s< x    ->    x s> (-1 >> y)
3355     //  x s> x & (-1 >> y)    ->    x s> (-1 >> y)
3356     if (!match(M, m_Constant())) // Cannot do this fold with a non-constant.
3357       return nullptr;
3358     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3359       return nullptr;
3360     DstPred = ICmpInst::Predicate::ICMP_SGT;
3361     break;
3362   case ICmpInst::Predicate::ICMP_SGE:
3363     //  x & (-1 >> y) s>= x    ->    x s<= (-1 >> y)
3364     //  x s<= x & (-1 >> y)    ->    x s<= (-1 >> y)
3365     if (!match(M, m_Constant())) // Cannot do this fold with a non-constant.
3366       return nullptr;
3367     if (!match(M, m_NonNegative())) // Must not have any -1 vector elements.
3368       return nullptr;
3369     DstPred = ICmpInst::Predicate::ICMP_SLE;
3370     break;
3371   case ICmpInst::Predicate::ICMP_SGT:
3372   case ICmpInst::Predicate::ICMP_SLE:
3373     return nullptr;
3374   case ICmpInst::Predicate::ICMP_UGT:
3375   case ICmpInst::Predicate::ICMP_ULE:
3376     llvm_unreachable("Instsimplify took care of commut. variant");
3377     break;
3378   default:
3379     llvm_unreachable("All possible folds are handled.");
3380   }
3381 
3382   // The mask value may be a vector constant that has undefined elements. But it
3383   // may not be safe to propagate those undefs into the new compare, so replace
3384   // those elements by copying an existing, defined, and safe scalar constant.
3385   Type *OpTy = M->getType();
3386   auto *VecC = dyn_cast<Constant>(M);
3387   auto *OpVTy = dyn_cast<FixedVectorType>(OpTy);
3388   if (OpVTy && VecC && VecC->containsUndefOrPoisonElement()) {
3389     Constant *SafeReplacementConstant = nullptr;
3390     for (unsigned i = 0, e = OpVTy->getNumElements(); i != e; ++i) {
3391       if (!isa<UndefValue>(VecC->getAggregateElement(i))) {
3392         SafeReplacementConstant = VecC->getAggregateElement(i);
3393         break;
3394       }
3395     }
3396     assert(SafeReplacementConstant && "Failed to find undef replacement");
3397     M = Constant::replaceUndefsWith(VecC, SafeReplacementConstant);
3398   }
3399 
3400   return Builder.CreateICmp(DstPred, X, M);
3401 }
3402 
3403 /// Some comparisons can be simplified.
3404 /// In this case, we are looking for comparisons that look like
3405 /// a check for a lossy signed truncation.
3406 /// Folds:   (MaskedBits is a constant.)
3407 ///   ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
3408 /// Into:
3409 ///   (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
3410 /// Where  KeptBits = bitwidth(%x) - MaskedBits
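     /// e.g. (illustrative, i8, MaskedBits = 4, KeptBits = 4):
     ///   ((%x << 4) a>> 4) == %x   -->   (add %x, 8) u< 16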
3411 static Value *
3412 foldICmpWithTruncSignExtendedVal(ICmpInst &I,
3413                                  InstCombiner::BuilderTy &Builder) {
3414   ICmpInst::Predicate SrcPred;
3415   Value *X;
3416   const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
3417   // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
3418   if (!match(&I, m_c_ICmp(SrcPred,
3419                           m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
3420                                           m_APInt(C1))),
3421                           m_Deferred(X))))
3422     return nullptr;
3423 
3424   // Potential handling of non-splats: for each element:
3425   //  * if both are undef, replace with constant 0.
3426   //    Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
3427   //  * if both are not undef, and are different, bail out.
3428   //  * otherwise, exactly one is undef; pick the non-undef one.
3429 
3430   // The shift amount must be equal.
3431   if (*C0 != *C1)
3432     return nullptr;
3433   const APInt &MaskedBits = *C0;
3434   assert(MaskedBits != 0 && "shift by zero should be folded away already.");
3435 
3436   ICmpInst::Predicate DstPred;
3437   switch (SrcPred) {
3438   case ICmpInst::Predicate::ICMP_EQ:
3439     // ((%x << MaskedBits) a>> MaskedBits) == %x
3440     //   =>
3441     // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
3442     DstPred = ICmpInst::Predicate::ICMP_ULT;
3443     break;
3444   case ICmpInst::Predicate::ICMP_NE:
3445     // ((%x << MaskedBits) a>> MaskedBits) != %x
3446     //   =>
3447     // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
3448     DstPred = ICmpInst::Predicate::ICMP_UGE;
3449     break;
3450   // FIXME: are more folds possible?
3451   default:
3452     return nullptr;
3453   }
3454 
3455   auto *XType = X->getType();
3456   const unsigned XBitWidth = XType->getScalarSizeInBits();
3457   const APInt BitWidth = APInt(XBitWidth, XBitWidth);
3458   assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
3459 
3460   // KeptBits = bitwidth(%x) - MaskedBits
3461   const APInt KeptBits = BitWidth - MaskedBits;
3462   assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
3463   // ICmpCst = (1 << KeptBits)
3464   const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
3465   assert(ICmpCst.isPowerOf2());
3466   // AddCst = (1 << (KeptBits-1))
3467   const APInt AddCst = ICmpCst.lshr(1);
3468   assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
3469 
3470   // T0 = add %x, AddCst
3471   Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
3472   // T1 = T0 DstPred ICmpCst
3473   Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
3474 
3475   return T1;
3476 }
3477 
3478 // Given pattern:
3479 //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3480 // we should move shifts to the same hand of 'and', i.e. rewrite as
3481 //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3482 // We are only interested in opposite logical shifts here.
3483 // One of the shifts can be truncated.
3484 // If we can, we want to end up creating 'lshr' shift.
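     // e.g. (illustrative, i8):
     //   icmp ne (and (shl %x, 1), (lshr %y, 3)), 0
     //     --> icmp ne (and (lshr %y, 4), %x), 0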
3485 static Value *
3486 foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
3487                                            InstCombiner::BuilderTy &Builder) {
3488   if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
3489       !I.getOperand(0)->hasOneUse())
3490     return nullptr;
3491 
3492   auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
3493 
3494   // Look for an 'and' of two logical shifts, one of which may be truncated.
3495   // We use m_TruncOrSelf() on the RHS to correctly handle commutative case.
3496   Instruction *XShift, *MaybeTruncation, *YShift;
3497   if (!match(
3498           I.getOperand(0),
3499           m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
3500                   m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
3501                                    m_AnyLogicalShift, m_Instruction(YShift))),
3502                                m_Instruction(MaybeTruncation)))))
3503     return nullptr;
3504 
3505   // We potentially looked past 'trunc', but only when matching YShift,
3506   // therefore YShift must have the widest type.
3507   Instruction *WidestShift = YShift;
3508   // Therefore XShift must have the shallowest type.
3509   // Or they both have identical types if there was no truncation.
3510   Instruction *NarrowestShift = XShift;
3511 
3512   Type *WidestTy = WidestShift->getType();
3513   Type *NarrowestTy = NarrowestShift->getType();
3514   assert(NarrowestTy == I.getOperand(0)->getType() &&
3515          "We did not look past any shifts while matching XShift though.");
3516   bool HadTrunc = WidestTy != I.getOperand(0)->getType();
3517 
3518   // If YShift is a 'lshr', swap the shifts around.
3519   if (match(YShift, m_LShr(m_Value(), m_Value())))
3520     std::swap(XShift, YShift);
3521 
3522   // The shifts must be in opposite directions.
3523   auto XShiftOpcode = XShift->getOpcode();
3524   if (XShiftOpcode == YShift->getOpcode())
3525     return nullptr; // Do not care about same-direction shifts here.
3526 
3527   Value *X, *XShAmt, *Y, *YShAmt;
3528   match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
3529   match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
3530 
3531   // If one of the values being shifted is a constant, then we will end with
3532   // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
3533   // however, we will need to ensure that we won't increase instruction count.
3534   if (!isa<Constant>(X) && !isa<Constant>(Y)) {
3535     // At least one of the hands of the 'and' should be one-use shift.
3536     if (!match(I.getOperand(0),
3537                m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
3538       return nullptr;
3539     if (HadTrunc) {
3540       // Due to the 'trunc', we will need to widen X. For that either the old
3541       // 'trunc' or the shift amt in the non-truncated shift should be one-use.
3542       if (!MaybeTruncation->hasOneUse() &&
3543           !NarrowestShift->getOperand(1)->hasOneUse())
3544         return nullptr;
3545     }
3546   }
3547 
3548   // We have two shift amounts from two different shifts. The types of those
3549   // shift amounts may not match. If that's the case let's bailout now.
3550   if (XShAmt->getType() != YShAmt->getType())
3551     return nullptr;
3552 
3553   // As input, we have the following pattern:
3554   //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
3555   // We want to rewrite that as:
3556   //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
3557   // While we know that originally (Q+K) would not overflow
3558   // (because  2 * (N-1) u<= iN -1), we have looked past extensions of
3559   // shift amounts, so it may now overflow in the smaller bit width.
3560   // To ensure that does not happen, we need to ensure that the total maximal
3561   // shift amount is still representable in that smaller bit width.
3562   unsigned MaximalPossibleTotalShiftAmount =
3563       (WidestTy->getScalarSizeInBits() - 1) +
3564       (NarrowestTy->getScalarSizeInBits() - 1);
3565   APInt MaximalRepresentableShiftAmount =
3566       APInt::getAllOnesValue(XShAmt->getType()->getScalarSizeInBits());
3567   if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
3568     return nullptr;
3569 
3570   // Can we fold (XShAmt+YShAmt) ?
3571   auto *NewShAmt = dyn_cast_or_null<Constant>(
3572       SimplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
3573                       /*isNUW=*/false, SQ.getWithInstruction(&I)));
3574   if (!NewShAmt)
3575     return nullptr;
3576   NewShAmt = ConstantExpr::getZExtOrBitCast(NewShAmt, WidestTy);
3577   unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
3578 
3579   // Is the new shift amount smaller than the bit width?
3580   // FIXME: could also rely on ConstantRange.
3581   if (!match(NewShAmt,
3582              m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
3583                                 APInt(WidestBitWidth, WidestBitWidth))))
3584     return nullptr;
3585 
3586   // An extra legality check is needed if we had trunc-of-lshr.
3587   if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
3588     auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
3589                     WidestShift]() {
3590       // It isn't obvious whether it's worth it to analyze non-constants here.
3591       // Also, let's basically give up on non-splat cases, pessimizing vectors.
3592       // If *any* of these preconditions matches we can perform the fold.
3593       Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
3594                                     ? NewShAmt->getSplatValue()
3595                                     : NewShAmt;
3596       // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
3597       if (NewShAmtSplat &&
3598           (NewShAmtSplat->isNullValue() ||
3599            NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
3600         return true;
3601       // We consider *min* leading zeros so a single outlier
3602       // blocks the transform as opposed to allowing it.
3603       if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
3604         KnownBits Known = computeKnownBits(C, SQ.DL);
3605         unsigned MinLeadZero = Known.countMinLeadingZeros();
3606         // If the value being shifted has at most lowest bit set we can fold.
3607         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3608         if (MaxActiveBits <= 1)
3609           return true;
3610         // Precondition:  NewShAmt u<= countLeadingZeros(C)
3611         if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
3612           return true;
3613       }
3614       if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
3615         KnownBits Known = computeKnownBits(C, SQ.DL);
3616         unsigned MinLeadZero = Known.countMinLeadingZeros();
3617         // If the value being shifted has at most lowest bit set we can fold.
3618         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
3619         if (MaxActiveBits <= 1)
3620           return true;
3621         // Precondition:  ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
3622         if (NewShAmtSplat) {
3623           APInt AdjNewShAmt =
3624               (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
3625           if (AdjNewShAmt.ule(MinLeadZero))
3626             return true;
3627         }
3628       }
3629       return false; // Can't tell if it's ok.
3630     };
3631     if (!CanFold())
3632       return nullptr;
3633   }
3634 
3635   // All good, we can do this fold.
3636   X = Builder.CreateZExt(X, WidestTy);
3637   Y = Builder.CreateZExt(Y, WidestTy);
3638   // The shift is the same that was for X.
3639   Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
3640                   ? Builder.CreateLShr(X, NewShAmt)
3641                   : Builder.CreateShl(X, NewShAmt);
3642   Value *T1 = Builder.CreateAnd(T0, Y);
3643   return Builder.CreateICmp(I.getPredicate(), T1,
3644                             Constant::getNullValue(WidestTy));
3645 }
3646 
3647 /// Fold
3648 ///   (-1 u/ x) u< y
3649 ///   ((x * y) u/ x) != y
3650 /// to
3651 ///   @llvm.umul.with.overflow(x, y) plus extraction of overflow bit
3652 /// Note that the comparison is commutative, while an inverted (u>=, ==)
3653 /// predicate means that we are looking for the opposite answer.
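     /// e.g. (illustrative): (-1 u/ %x) u< %y becomes
     ///   %m  = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %x, i32 %y)
     ///   %ov = extractvalue { i32, i1 } %m, 1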
3654 Value *InstCombinerImpl::foldUnsignedMultiplicationOverflowCheck(ICmpInst &I) {
3655   ICmpInst::Predicate Pred;
3656   Value *X, *Y;
3657   Instruction *Mul;
3658   bool NeedNegation;
3659   // Look for: (-1 u/ x) u</u>= y
3660   if (!I.isEquality() &&
3661       match(&I, m_c_ICmp(Pred, m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
3662                          m_Value(Y)))) {
3663     Mul = nullptr;
3664 
3665     // Are we checking that overflow does not happen, or does happen?
3666     switch (Pred) {
3667     case ICmpInst::Predicate::ICMP_ULT:
3668       NeedNegation = false;
3669       break; // OK
3670     case ICmpInst::Predicate::ICMP_UGE:
3671       NeedNegation = true;
3672       break; // OK
3673     default:
3674       return nullptr; // Wrong predicate.
3675     }
3676   } else // Look for: ((x * y) u/ x) !=/== y
3677       if (I.isEquality() &&
3678           match(&I, m_c_ICmp(Pred, m_Value(Y),
3679                              m_OneUse(m_UDiv(m_CombineAnd(m_c_Mul(m_Deferred(Y),
3680                                                                   m_Value(X)),
3681                                                           m_Instruction(Mul)),
3682                                              m_Deferred(X)))))) {
3683     NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
3684   } else
3685     return nullptr;
3686 
3687   BuilderTy::InsertPointGuard Guard(Builder);
3688   // If the pattern included (x * y), we'll want to insert new instructions
3689   // right before that original multiplication so that we can replace it.
3690   bool MulHadOtherUses = Mul && !Mul->hasOneUse();
3691   if (MulHadOtherUses)
3692     Builder.SetInsertPoint(Mul);
3693 
3694   Function *F = Intrinsic::getDeclaration(
3695       I.getModule(), Intrinsic::umul_with_overflow, X->getType());
3696   CallInst *Call = Builder.CreateCall(F, {X, Y}, "umul");
3697 
3698   // If the multiplication was used elsewhere, to ensure that we don't leave
3699   // "duplicate" instructions, replace uses of that original multiplication
3700   // with the multiplication result from the with.overflow intrinsic.
3701   if (MulHadOtherUses)
3702     replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "umul.val"));
3703 
3704   Value *Res = Builder.CreateExtractValue(Call, 1, "umul.ov");
3705   if (NeedNegation) // This technically increases instruction count.
3706     Res = Builder.CreateNot(Res, "umul.not.ov");
3707 
3708   // If we replaced the mul, erase it. Do this after all uses of Builder,
3709   // as the mul is used as insertion point.
3710   if (MulHadOtherUses)
3711     eraseInstFromFunction(*Mul);
3712 
3713   return Res;
3714 }
3715 
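     /// Fold a comparison of X against its nsw negation into a comparison of X
     /// with zero, e.g. (illustrative):
     ///   icmp slt (sub nsw 0, %x), %x  -->  icmp sgt %x, 0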
3716 static Instruction *foldICmpXNegX(ICmpInst &I) {
3717   CmpInst::Predicate Pred;
3718   Value *X;
3719   if (!match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X))))
3720     return nullptr;
3721 
3722   if (ICmpInst::isSigned(Pred))
3723     Pred = ICmpInst::getSwappedPredicate(Pred);
3724   else if (ICmpInst::isUnsigned(Pred))
3725     Pred = ICmpInst::getSignedPredicate(Pred);
3726   // else for equality-comparisons just keep the predicate.
3727 
3728   return ICmpInst::Create(Instruction::ICmp, Pred, X,
3729                           Constant::getNullValue(X->getType()), I.getName());
3730 }
3731 
3732 /// Try to fold icmp (binop), X or icmp X, (binop).
3733 /// TODO: A large part of this logic is duplicated in InstSimplify's
3734 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
3735 /// duplication.
3736 Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
3737                                              const SimplifyQuery &SQ) {
3738   const SimplifyQuery Q = SQ.getWithInstruction(&I);
3739   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3740 
3741   // Special logic for binary operators.
3742   BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
3743   BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
3744   if (!BO0 && !BO1)
3745     return nullptr;
3746 
3747   if (Instruction *NewICmp = foldICmpXNegX(I))
3748     return NewICmp;
3749 
3750   const CmpInst::Predicate Pred = I.getPredicate();
3751   Value *X;
3752 
3753   // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
3754   // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
3755   if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
3756       (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3757     return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
3758   // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
3759   if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
3760       (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3761     return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
3762 
3763   bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
3764   if (BO0 && isa<OverflowingBinaryOperator>(BO0))
3765     NoOp0WrapProblem =
3766         ICmpInst::isEquality(Pred) ||
3767         (CmpInst::isUnsigned(Pred) && BO0->hasNoUnsignedWrap()) ||
3768         (CmpInst::isSigned(Pred) && BO0->hasNoSignedWrap());
3769   if (BO1 && isa<OverflowingBinaryOperator>(BO1))
3770     NoOp1WrapProblem =
3771         ICmpInst::isEquality(Pred) ||
3772         (CmpInst::isUnsigned(Pred) && BO1->hasNoUnsignedWrap()) ||
3773         (CmpInst::isSigned(Pred) && BO1->hasNoSignedWrap());
3774 
3775   // Analyze the case when either Op0 or Op1 is an add instruction.
3776   // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
3777   Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3778   if (BO0 && BO0->getOpcode() == Instruction::Add) {
3779     A = BO0->getOperand(0);
3780     B = BO0->getOperand(1);
3781   }
3782   if (BO1 && BO1->getOpcode() == Instruction::Add) {
3783     C = BO1->getOperand(0);
3784     D = BO1->getOperand(1);
3785   }
3786 
3787   // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
3788   // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
3789   if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
3790     return new ICmpInst(Pred, A == Op1 ? B : A,
3791                         Constant::getNullValue(Op1->getType()));
3792 
3793   // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
3794   // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
3795   if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
3796     return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
3797                         C == Op0 ? D : C);
3798 
3799   // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
3800   if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
3801       NoOp1WrapProblem) {
3802     // Determine Y and Z in the form icmp (X+Y), (X+Z).
3803     Value *Y, *Z;
3804     if (A == C) {
3805       // C + B == C + D  ->  B == D
3806       Y = B;
3807       Z = D;
3808     } else if (A == D) {
3809       // D + B == C + D  ->  B == C
3810       Y = B;
3811       Z = C;
3812     } else if (B == C) {
3813       // A + C == C + D  ->  A == D
3814       Y = A;
3815       Z = D;
3816     } else {
3817       assert(B == D);
3818       // A + D == C + D  ->  A == C
3819       Y = A;
3820       Z = C;
3821     }
3822     return new ICmpInst(Pred, Y, Z);
3823   }
3824 
3825   // icmp slt (A + -1), Op1 -> icmp sle A, Op1
3826   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLT &&
3827       match(B, m_AllOnes()))
3828     return new ICmpInst(CmpInst::ICMP_SLE, A, Op1);
3829 
3830   // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
3831   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGE &&
3832       match(B, m_AllOnes()))
3833     return new ICmpInst(CmpInst::ICMP_SGT, A, Op1);
3834 
3835   // icmp sle (A + 1), Op1 -> icmp slt A, Op1
3836   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SLE && match(B, m_One()))
3837     return new ICmpInst(CmpInst::ICMP_SLT, A, Op1);
3838 
3839   // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
3840   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_SGT && match(B, m_One()))
3841     return new ICmpInst(CmpInst::ICMP_SGE, A, Op1);
3842 
3843   // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
3844   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGT &&
3845       match(D, m_AllOnes()))
3846     return new ICmpInst(CmpInst::ICMP_SGE, Op0, C);
3847 
3848   // icmp sle Op0, (C + -1) -> icmp slt Op0, C
3849   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLE &&
3850       match(D, m_AllOnes()))
3851     return new ICmpInst(CmpInst::ICMP_SLT, Op0, C);
3852 
3853   // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
3854   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SGE && match(D, m_One()))
3855     return new ICmpInst(CmpInst::ICMP_SGT, Op0, C);
3856 
3857   // icmp slt Op0, (C + 1) -> icmp sle Op0, C
3858   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_SLT && match(D, m_One()))
3859     return new ICmpInst(CmpInst::ICMP_SLE, Op0, C);
3860 
3861   // TODO: The subtraction-related identities shown below also hold, but
3862   // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
3863   // wouldn't happen even if they were implemented.
3864   //
3865   // icmp ult (A - 1), Op1 -> icmp ule A, Op1
3866   // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
3867   // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
3868   // icmp ule Op0, (C - 1) -> icmp ult Op0, C
3869 
3870   // icmp ule (A + 1), Op1 -> icmp ult A, Op1
3871   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_ULE && match(B, m_One()))
3872     return new ICmpInst(CmpInst::ICMP_ULT, A, Op1);
3873 
3874   // icmp ugt (A + 1), Op1 -> icmp uge A, Op1
3875   if (A && NoOp0WrapProblem && Pred == CmpInst::ICMP_UGT && match(B, m_One()))
3876     return new ICmpInst(CmpInst::ICMP_UGE, A, Op1);
3877 
3878   // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
3879   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_UGE && match(D, m_One()))
3880     return new ICmpInst(CmpInst::ICMP_UGT, Op0, C);
3881 
3882   // icmp ult Op0, (C + 1) -> icmp ule Op0, C
3883   if (C && NoOp1WrapProblem && Pred == CmpInst::ICMP_ULT && match(D, m_One()))
3884     return new ICmpInst(CmpInst::ICMP_ULE, Op0, C);
3885 
3886   // if C1 has greater magnitude than C2:
3887   //  icmp (A + C1), (C + C2) -> icmp (A + C3), C
3888   //  s.t. C3 = C1 - C2
3889   //
3890   // if C2 has greater magnitude than C1:
3891   //  icmp (A + C1), (C + C2) -> icmp A, (C + C3)
3892   //  s.t. C3 = C2 - C1
3893   if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
3894       (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned())
3895     if (ConstantInt *C1 = dyn_cast<ConstantInt>(B))
3896       if (ConstantInt *C2 = dyn_cast<ConstantInt>(D)) {
3897         const APInt &AP1 = C1->getValue();
3898         const APInt &AP2 = C2->getValue();
3899         if (AP1.isNegative() == AP2.isNegative()) {
3900           APInt AP1Abs = C1->getValue().abs();
3901           APInt AP2Abs = C2->getValue().abs();
3902           if (AP1Abs.uge(AP2Abs)) {
3903             ConstantInt *C3 = Builder.getInt(AP1 - AP2);
3904             Value *NewAdd = Builder.CreateNSWAdd(A, C3);
3905             return new ICmpInst(Pred, NewAdd, C);
3906           } else {
3907             ConstantInt *C3 = Builder.getInt(AP2 - AP1);
3908             Value *NewAdd = Builder.CreateNSWAdd(C, C3);
3909             return new ICmpInst(Pred, A, NewAdd);
3910           }
3911         }
3912       }
3913 
3914   // Analyze the case when either Op0 or Op1 is a sub instruction.
3915   // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
3916   A = nullptr;
3917   B = nullptr;
3918   C = nullptr;
3919   D = nullptr;
3920   if (BO0 && BO0->getOpcode() == Instruction::Sub) {
3921     A = BO0->getOperand(0);
3922     B = BO0->getOperand(1);
3923   }
3924   if (BO1 && BO1->getOpcode() == Instruction::Sub) {
3925     C = BO1->getOperand(0);
3926     D = BO1->getOperand(1);
3927   }
3928 
3929   // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
3930   if (A == Op1 && NoOp0WrapProblem)
3931     return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
3932   // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
3933   if (C == Op0 && NoOp1WrapProblem)
3934     return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
3935 
3936   // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
3937   // (A - B) u>/u<= A --> B u>/u<= A
3938   if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
3939     return new ICmpInst(Pred, B, A);
3940   // C u</u>= (C - D) --> C u</u>= D
3941   if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
3942     return new ICmpInst(Pred, C, D);
3943   // (A - B) u>=/u< A --> B u>/u<= A  iff B != 0
3944   if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
3945       isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
3946     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
3947   // C u<=/u> (C - D) --> C u</u>= D  iff D != 0
3948   if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
3949       isKnownNonZero(D, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
3950     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
3951 
3952   // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
3953   if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
3954     return new ICmpInst(Pred, A, C);
3955 
3956   // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
3957   if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
3958     return new ICmpInst(Pred, D, B);
3959 
3960   // icmp (0-X) < cst --> X > -cst
3961   if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
3962     Value *X;
3963     if (match(BO0, m_Neg(m_Value(X))))
3964       if (Constant *RHSC = dyn_cast<Constant>(Op1))
3965         if (RHSC->isNotMinSignedValue())
3966           return new ICmpInst(I.getSwappedPredicate(), X,
3967                               ConstantExpr::getNeg(RHSC));
3968   }
3969 
3970   {
3971     // Try to remove shared constant multiplier from equality comparison:
3972     // X * C == Y * C (with no overflowing/aliasing) --> X == Y
3973     Value *X, *Y;
3974     const APInt *C;
3975     if (match(Op0, m_Mul(m_Value(X), m_APInt(C))) && *C != 0 &&
3976         match(Op1, m_Mul(m_Value(Y), m_SpecificInt(*C))) && I.isEquality())
3977       if (!C->countTrailingZeros() ||
3978           (BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap()) ||
3979           (BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap()))
3980         return new ICmpInst(Pred, X, Y);
3981   }
3982 
3983   BinaryOperator *SRem = nullptr;
3984   // icmp (srem X, Y), Y
3985   if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
3986     SRem = BO0;
3987   // icmp Y, (srem X, Y)
3988   else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
3989            Op0 == BO1->getOperand(1))
3990     SRem = BO1;
3991   if (SRem) {
3992     // We don't check hasOneUse to avoid increasing register pressure because
3993     // the value we use is the same value this instruction was already using.
3994     switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
3995     default:
3996       break;
3997     case ICmpInst::ICMP_EQ:
3998       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
3999     case ICmpInst::ICMP_NE:
4000       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
4001     case ICmpInst::ICMP_SGT:
4002     case ICmpInst::ICMP_SGE:
4003       return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
4004                           Constant::getAllOnesValue(SRem->getType()));
4005     case ICmpInst::ICMP_SLT:
4006     case ICmpInst::ICMP_SLE:
4007       return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
4008                           Constant::getNullValue(SRem->getType()));
4009     }
4010   }
4011 
4012   if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() && BO0->hasOneUse() &&
4013       BO1->hasOneUse() && BO0->getOperand(1) == BO1->getOperand(1)) {
4014     switch (BO0->getOpcode()) {
4015     default:
4016       break;
4017     case Instruction::Add:
4018     case Instruction::Sub:
4019     case Instruction::Xor: {
4020       if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
4021         return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4022 
4023       const APInt *C;
4024       if (match(BO0->getOperand(1), m_APInt(C))) {
4025         // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
4026         if (C->isSignMask()) {
4027           ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
4028           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
4029         }
4030 
4031         // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
4032         if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
4033           ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
4034           NewPred = I.getSwappedPredicate(NewPred);
4035           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
4036         }
4037       }
4038       break;
4039     }
4040     case Instruction::Mul: {
4041       if (!I.isEquality())
4042         break;
4043 
4044       const APInt *C;
4045       if (match(BO0->getOperand(1), m_APInt(C)) && !C->isNullValue() &&
4046           !C->isOneValue()) {
4047         // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
4048         // Mask = -1 >> count-trailing-zeros(C).
4049         if (unsigned TZs = C->countTrailingZeros()) {
4050           Constant *Mask = ConstantInt::get(
4051               BO0->getType(),
4052               APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
4053           Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
4054           Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
4055           return new ICmpInst(Pred, And1, And2);
4056         }
4057       }
4058       break;
4059     }
4060     case Instruction::UDiv:
4061     case Instruction::LShr:
4062       if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
4063         break;
4064       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4065 
4066     case Instruction::SDiv:
4067       if (!I.isEquality() || !BO0->isExact() || !BO1->isExact())
4068         break;
4069       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4070 
4071     case Instruction::AShr:
4072       if (!BO0->isExact() || !BO1->isExact())
4073         break;
4074       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4075 
4076     case Instruction::Shl: {
4077       bool NUW = BO0->hasNoUnsignedWrap() && BO1->hasNoUnsignedWrap();
4078       bool NSW = BO0->hasNoSignedWrap() && BO1->hasNoSignedWrap();
4079       if (!NUW && !NSW)
4080         break;
4081       if (!NSW && I.isSigned())
4082         break;
4083       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
4084     }
4085     }
4086   }
4087 
4088   if (BO0) {
4089     // Transform  A & (L - 1) `ult` L --> L != 0
4090     auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
4091     auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
4092 
4093     if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
4094       auto *Zero = Constant::getNullValue(BO0->getType());
4095       return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
4096     }
4097   }
4098 
4099   if (Value *V = foldUnsignedMultiplicationOverflowCheck(I))
4100     return replaceInstUsesWith(I, V);
4101 
4102   if (Value *V = foldICmpWithLowBitMaskedVal(I, Builder))
4103     return replaceInstUsesWith(I, V);
4104 
4105   if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
4106     return replaceInstUsesWith(I, V);
4107 
4108   if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
4109     return replaceInstUsesWith(I, V);
4110 
4111   return nullptr;
4112 }
4113 
4114 /// Fold icmp Pred min|max(X, Y), X.
4115 static Instruction *foldICmpWithMinMax(ICmpInst &Cmp) {
4116   ICmpInst::Predicate Pred = Cmp.getPredicate();
4117   Value *Op0 = Cmp.getOperand(0);
4118   Value *X = Cmp.getOperand(1);
4119 
4120   // Canonicalize minimum or maximum operand to LHS of the icmp.
4121   if (match(X, m_c_SMin(m_Specific(Op0), m_Value())) ||
4122       match(X, m_c_SMax(m_Specific(Op0), m_Value())) ||
4123       match(X, m_c_UMin(m_Specific(Op0), m_Value())) ||
4124       match(X, m_c_UMax(m_Specific(Op0), m_Value()))) {
4125     std::swap(Op0, X);
4126     Pred = Cmp.getSwappedPredicate();
4127   }
4128 
4129   Value *Y;
4130   if (match(Op0, m_c_SMin(m_Specific(X), m_Value(Y)))) {
4131     // smin(X, Y)  == X --> X s<= Y
4132     // smin(X, Y) s>= X --> X s<= Y
4133     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SGE)
4134       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
4135 
4136     // smin(X, Y) != X --> X s> Y
4137     // smin(X, Y) s< X --> X s> Y
4138     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SLT)
4139       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
4140 
4141     // These cases should be handled in InstSimplify:
4142     // smin(X, Y) s<= X --> true
4143     // smin(X, Y) s> X --> false
4144     return nullptr;
4145   }
4146 
4147   if (match(Op0, m_c_SMax(m_Specific(X), m_Value(Y)))) {
4148     // smax(X, Y)  == X --> X s>= Y
4149     // smax(X, Y) s<= X --> X s>= Y
4150     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_SLE)
4151       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
4152 
4153     // smax(X, Y) != X --> X s< Y
4154     // smax(X, Y) s> X --> X s< Y
4155     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_SGT)
4156       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
4157 
4158     // These cases should be handled in InstSimplify:
4159     // smax(X, Y) s>= X --> true
4160     // smax(X, Y) s< X --> false
4161     return nullptr;
4162   }
4163 
4164   if (match(Op0, m_c_UMin(m_Specific(X), m_Value(Y)))) {
4165     // umin(X, Y)  == X --> X u<= Y
4166     // umin(X, Y) u>= X --> X u<= Y
4167     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_UGE)
4168       return new ICmpInst(ICmpInst::ICMP_ULE, X, Y);
4169 
4170     // umin(X, Y) != X --> X u> Y
4171     // umin(X, Y) u< X --> X u> Y
4172     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_ULT)
4173       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
4174 
4175     // These cases should be handled in InstSimplify:
4176     // umin(X, Y) u<= X --> true
4177     // umin(X, Y) u> X --> false
4178     return nullptr;
4179   }
4180 
4181   if (match(Op0, m_c_UMax(m_Specific(X), m_Value(Y)))) {
4182     // umax(X, Y)  == X --> X u>= Y
4183     // umax(X, Y) u<= X --> X u>= Y
4184     if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_ULE)
4185       return new ICmpInst(ICmpInst::ICMP_UGE, X, Y);
4186 
4187     // umax(X, Y) != X --> X u< Y
4188     // umax(X, Y) u> X --> X u< Y
4189     if (Pred == CmpInst::ICMP_NE || Pred == CmpInst::ICMP_UGT)
4190       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
4191 
4192     // These cases should be handled in InstSimplify:
4193     // umax(X, Y) u>= X --> true
4194     // umax(X, Y) u< X --> false
4195     return nullptr;
4196   }
4197 
4198   return nullptr;
4199 }
4200 
4201 Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
4202   if (!I.isEquality())
4203     return nullptr;
4204 
4205   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4206   const CmpInst::Predicate Pred = I.getPredicate();
4207   Value *A, *B, *C, *D;
4208   if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
4209     if (A == Op1 || B == Op1) { // (A^B) == A  ->  B == 0
4210       Value *OtherVal = A == Op1 ? B : A;
4211       return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4212     }
4213 
4214     if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
4215       // A^c1 == C^c2 --> A == C^(c1^c2)
4216       ConstantInt *C1, *C2;
4217       if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
4218           Op1->hasOneUse()) {
4219         Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
4220         Value *Xor = Builder.CreateXor(C, NC);
4221         return new ICmpInst(Pred, A, Xor);
4222       }
4223 
4224       // A^B == A^D -> B == D
4225       if (A == C)
4226         return new ICmpInst(Pred, B, D);
4227       if (A == D)
4228         return new ICmpInst(Pred, B, C);
4229       if (B == C)
4230         return new ICmpInst(Pred, A, D);
4231       if (B == D)
4232         return new ICmpInst(Pred, A, C);
4233     }
4234   }
4235 
4236   if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
4237     // A == (A^B)  ->  B == 0
4238     Value *OtherVal = A == Op0 ? B : A;
4239     return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
4240   }
4241 
4242   // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
4243   if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B)))) &&
4244       match(Op1, m_OneUse(m_And(m_Value(C), m_Value(D))))) {
4245     Value *X = nullptr, *Y = nullptr, *Z = nullptr;
4246 
4247     if (A == C) {
4248       X = B;
4249       Y = D;
4250       Z = A;
4251     } else if (A == D) {
4252       X = B;
4253       Y = C;
4254       Z = A;
4255     } else if (B == C) {
4256       X = A;
4257       Y = D;
4258       Z = B;
4259     } else if (B == D) {
4260       X = A;
4261       Y = C;
4262       Z = B;
4263     }
4264 
4265     if (X) { // Build (X^Y) & Z
4266       Op1 = Builder.CreateXor(X, Y);
4267       Op1 = Builder.CreateAnd(Op1, Z);
4268       return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
4269     }
4270   }
4271 
4272   // Transform (zext A) == (B & (1<<X)-1) --> A == (trunc B)
4273   // and       (B & (1<<X)-1) == (zext A) --> A == (trunc B)
4274   ConstantInt *Cst1;
4275   if ((Op0->hasOneUse() && match(Op0, m_ZExt(m_Value(A))) &&
4276        match(Op1, m_And(m_Value(B), m_ConstantInt(Cst1)))) ||
4277       (Op1->hasOneUse() && match(Op0, m_And(m_Value(B), m_ConstantInt(Cst1))) &&
4278        match(Op1, m_ZExt(m_Value(A))))) {
4279     APInt Pow2 = Cst1->getValue() + 1;
4280     if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
4281         Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
4282       return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
4283   }
4284 
4285   // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
4286   // For lshr and ashr pairs.
4287   if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4288        match(Op1, m_OneUse(m_LShr(m_Value(B), m_Specific(Cst1))))) ||
4289       (match(Op0, m_OneUse(m_AShr(m_Value(A), m_ConstantInt(Cst1)))) &&
4290        match(Op1, m_OneUse(m_AShr(m_Value(B), m_Specific(Cst1)))))) {
4291     unsigned TypeBits = Cst1->getBitWidth();
4292     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4293     if (ShAmt < TypeBits && ShAmt != 0) {
4294       ICmpInst::Predicate NewPred =
4295           Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
4296       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4297       APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
4298       return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
4299     }
4300   }
4301 
4302   // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
4303   if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
4304       match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
4305     unsigned TypeBits = Cst1->getBitWidth();
4306     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
4307     if (ShAmt < TypeBits && ShAmt != 0) {
4308       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
4309       APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
4310       Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
4311                                       I.getName() + ".mask");
4312       return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
4313     }
4314   }
4315 
4316   // Transform "icmp eq (trunc (lshr X, cst1)), cst" to
4317   // "icmp eq (and X, mask), cst"
4318   uint64_t ShAmt = 0;
4319   if (Op0->hasOneUse() &&
4320       match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
4321       match(Op1, m_ConstantInt(Cst1)) &&
4322       // Only do this when A has multiple uses.  This is most important to do
4323       // when it exposes other optimizations.
4324       !A->hasOneUse()) {
4325     unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
4326 
4327     if (ShAmt < ASize) {
4328       APInt MaskV =
4329           APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
4330       MaskV <<= ShAmt;
4331 
4332       APInt CmpV = Cst1->getValue().zext(ASize);
4333       CmpV <<= ShAmt;
4334 
4335       Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
4336       return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
4337     }
4338   }
4339 
4340   // If both operands are byte-swapped or bit-reversed, just compare the
4341   // original values.
4342   // TODO: Move this to a function similar to foldICmpIntrinsicWithConstant()
4343   // and handle more intrinsics.
4344   if ((match(Op0, m_BSwap(m_Value(A))) && match(Op1, m_BSwap(m_Value(B)))) ||
4345       (match(Op0, m_BitReverse(m_Value(A))) &&
4346        match(Op1, m_BitReverse(m_Value(B)))))
4347     return new ICmpInst(Pred, A, B);
4348 
4349   // Canonicalize checking for a power-of-2-or-zero value:
4350   // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
4351   // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
4352   if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
4353                                    m_Deferred(A)))) ||
4354       !match(Op1, m_ZeroInt()))
4355     A = nullptr;
4356 
4357   // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
4358   // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
4359   if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
4360     A = Op1;
4361   else if (match(Op1,
4362                  m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
4363     A = Op0;
4364 
4365   if (A) {
4366     Type *Ty = A->getType();
4367     CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
4368     return Pred == ICmpInst::ICMP_EQ
4369         ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop, ConstantInt::get(Ty, 2))
4370         : new ICmpInst(ICmpInst::ICMP_UGT, CtPop, ConstantInt::get(Ty, 1));
4371   }
4372 
4373   return nullptr;
4374 }
4375 
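     /// Fold icmp (zext/sext X), (zext/sext Y or constant) to a comparison of
     /// the narrower source values when it is safe to do so. For example
     /// (illustrative):
     ///   icmp slt (sext i8 %x to i32), (sext i8 %y to i32)
     ///     --> icmp slt i8 %x, %y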
4376 static Instruction *foldICmpWithZextOrSext(ICmpInst &ICmp,
4377                                            InstCombiner::BuilderTy &Builder) {
4378   assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
4379   auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
4380   Value *X;
4381   if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
4382     return nullptr;
4383 
4384   bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
4385   bool IsSignedCmp = ICmp.isSigned();
4386   if (auto *CastOp1 = dyn_cast<CastInst>(ICmp.getOperand(1))) {
4387     // If the signedness of the two casts doesn't agree (i.e. one is a sext
4388     // and the other is a zext), then we can't handle this.
4389     // TODO: This is too strict. We can handle some predicates (equality?).
4390     if (CastOp0->getOpcode() != CastOp1->getOpcode())
4391       return nullptr;
4392 
4393     // Not an extension from the same type?
4394     Value *Y = CastOp1->getOperand(0);
4395     Type *XTy = X->getType(), *YTy = Y->getType();
4396     if (XTy != YTy) {
4397       // One of the casts must have one use because we are creating a new cast.
4398       if (!CastOp0->hasOneUse() && !CastOp1->hasOneUse())
4399         return nullptr;
4400       // Extend the narrower operand to the type of the wider operand.
4401       if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
4402         X = Builder.CreateCast(CastOp0->getOpcode(), X, YTy);
4403       else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
4404         Y = Builder.CreateCast(CastOp0->getOpcode(), Y, XTy);
4405       else
4406         return nullptr;
4407     }
4408 
4409     // (zext X) == (zext Y) --> X == Y
4410     // (sext X) == (sext Y) --> X == Y
4411     if (ICmp.isEquality())
4412       return new ICmpInst(ICmp.getPredicate(), X, Y);
4413 
4414     // A signed comparison of sign extended values simplifies into a
4415     // signed comparison.
4416     if (IsSignedCmp && IsSignedExt)
4417       return new ICmpInst(ICmp.getPredicate(), X, Y);
4418 
4419     // The other three cases all fold into an unsigned comparison.
4420     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
4421   }
4422 
4423   // Below here, we are only folding a compare with constant.
4424   auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
4425   if (!C)
4426     return nullptr;
4427 
4428   // Compute the constant that would result if we truncated to SrcTy and then
4429   // re-extended to DestTy.
4430   Type *SrcTy = CastOp0->getSrcTy();
4431   Type *DestTy = CastOp0->getDestTy();
4432   Constant *Res1 = ConstantExpr::getTrunc(C, SrcTy);
4433   Constant *Res2 = ConstantExpr::getCast(CastOp0->getOpcode(), Res1, DestTy);
4434 
4435   // If the re-extended constant didn't change...
4436   if (Res2 == C) {
4437     if (ICmp.isEquality())
4438       return new ICmpInst(ICmp.getPredicate(), X, Res1);
4439 
4440     // A signed comparison of sign extended values simplifies into a
4441     // signed comparison.
4442     if (IsSignedExt && IsSignedCmp)
4443       return new ICmpInst(ICmp.getPredicate(), X, Res1);
4444 
4445     // The other three cases all fold into an unsigned comparison.
4446     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res1);
4447   }
4448 
4449   // The re-extended constant changed, partly changed (in the case of a vector),
4450   // or could not be determined to be equal (in the case of a constant
4451   // expression), so the constant cannot be represented in the shorter type.
4452   // All the cases that fold to true or false will have already been handled
4453   // by SimplifyICmpInst, so only deal with the tricky case.
4454   if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
4455     return nullptr;
4456 
4457   // Is source op positive?
4458   // icmp ult (sext X), C --> icmp sgt X, -1
4459   if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
4460     return new ICmpInst(CmpInst::ICMP_SGT, X, Constant::getAllOnesValue(SrcTy));
4461 
4462   // Is source op negative?
4463   // icmp ugt (sext X), C --> icmp slt X, 0
4464   assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
4465   return new ICmpInst(CmpInst::ICMP_SLT, X, Constant::getNullValue(SrcTy));
4466 }
4467 
4468 /// Handle icmp (cast x), (cast or constant).
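     /// For example (illustrative), when the integer width matches the pointer
     /// width and the address spaces agree:
     ///   icmp eq (ptrtoint %p), (ptrtoint %q) --> icmp eq %p, %q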
4469 Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
4470   auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
4471   if (!CastOp0)
4472     return nullptr;
4473   if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
4474     return nullptr;
4475 
4476   Value *Op0Src = CastOp0->getOperand(0);
4477   Type *SrcTy = CastOp0->getSrcTy();
4478   Type *DestTy = CastOp0->getDestTy();
4479 
4480   // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
4481   // integer type is the same size as the pointer type.
4482   auto CompatibleSizes = [&](Type *SrcTy, Type *DestTy) {
4483     if (isa<VectorType>(SrcTy)) {
4484       SrcTy = cast<VectorType>(SrcTy)->getElementType();
4485       DestTy = cast<VectorType>(DestTy)->getElementType();
4486     }
4487     return DL.getPointerTypeSizeInBits(SrcTy) == DestTy->getIntegerBitWidth();
4488   };
4489   if (CastOp0->getOpcode() == Instruction::PtrToInt &&
4490       CompatibleSizes(SrcTy, DestTy)) {
4491     Value *NewOp1 = nullptr;
4492     if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
4493       Value *PtrSrc = PtrToIntOp1->getOperand(0);
4494       if (PtrSrc->getType()->getPointerAddressSpace() ==
4495           Op0Src->getType()->getPointerAddressSpace()) {
4496         NewOp1 = PtrToIntOp1->getOperand(0);
4497         // If the pointer types don't match, insert a bitcast.
4498         if (Op0Src->getType() != NewOp1->getType())
4499           NewOp1 = Builder.CreateBitCast(NewOp1, Op0Src->getType());
4500       }
4501     } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
4502       NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
4503     }
4504 
4505     if (NewOp1)
4506       return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
4507   }
4508 
4509   return foldICmpWithZextOrSext(ICmp, Builder);
4510 }
4511 
4512 static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS) {
4513   switch (BinaryOp) {
4514     default:
4515       llvm_unreachable("Unsupported binary op");
4516     case Instruction::Add:
4517     case Instruction::Sub:
4518       return match(RHS, m_Zero());
4519     case Instruction::Mul:
4520       return match(RHS, m_One());
4521   }
4522 }
4523 
4524 OverflowResult
4525 InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
4526                                   bool IsSigned, Value *LHS, Value *RHS,
4527                                   Instruction *CxtI) const {
4528   switch (BinaryOp) {
4529     default:
4530       llvm_unreachable("Unsupported binary op");
4531     case Instruction::Add:
4532       if (IsSigned)
4533         return computeOverflowForSignedAdd(LHS, RHS, CxtI);
4534       else
4535         return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
4536     case Instruction::Sub:
4537       if (IsSigned)
4538         return computeOverflowForSignedSub(LHS, RHS, CxtI);
4539       else
4540         return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
4541     case Instruction::Mul:
4542       if (IsSigned)
4543         return computeOverflowForSignedMul(LHS, RHS, CxtI);
4544       else
4545         return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
4546   }
4547 }
4548 
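     /// Try to decide statically whether BinaryOp(LHS, RHS) overflows. Returns
     /// true on success, setting \p Result to the computed value and
     /// \p Overflow to a constant i1 (or vector of i1); returns false if the
     /// overflow behavior cannot be determined.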
4549 bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
4550                                              bool IsSigned, Value *LHS,
4551                                              Value *RHS, Instruction &OrigI,
4552                                              Value *&Result,
4553                                              Constant *&Overflow) {
4554   if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
4555     std::swap(LHS, RHS);
4556 
4557   // If the overflow check was an add followed by a compare, the insertion point
4558   // may be pointing to the compare.  We want to insert the new instructions
4559   // before the add in case there are uses of the add between the add and the
4560   // compare.
4561   Builder.SetInsertPoint(&OrigI);
4562 
4563   Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
4564   if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
4565     OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
4566 
4567   if (isNeutralValue(BinaryOp, RHS)) {
4568     Result = LHS;
4569     Overflow = ConstantInt::getFalse(OverflowTy);
4570     return true;
4571   }
4572 
4573   switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
4574     case OverflowResult::MayOverflow:
4575       return false;
4576     case OverflowResult::AlwaysOverflowsLow:
4577     case OverflowResult::AlwaysOverflowsHigh:
4578       Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4579       Result->takeName(&OrigI);
4580       Overflow = ConstantInt::getTrue(OverflowTy);
4581       return true;
4582     case OverflowResult::NeverOverflows:
4583       Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
4584       Result->takeName(&OrigI);
4585       Overflow = ConstantInt::getFalse(OverflowTy);
4586       if (auto *Inst = dyn_cast<Instruction>(Result)) {
4587         if (IsSigned)
4588           Inst->setHasNoSignedWrap();
4589         else
4590           Inst->setHasNoUnsignedWrap();
4591       }
4592       return true;
4593   }
4594 
4595   llvm_unreachable("Unexpected overflow result");
4596 }
4597 
4598 /// Recognize and process an idiom involving a test for multiplication
4599 /// overflow.
4600 ///
4601 /// The caller has matched a pattern of the form:
4602 ///   I = cmp u (mul(zext A, zext B)), V
4603 /// The function checks if this is a test for overflow and if so replaces
4604 /// the multiplication with a call to the 'mul.with.overflow' intrinsic.
4605 ///
4606 /// \param I Compare instruction.
4607 /// \param MulVal Result of the 'mul' instruction.  It is one of the arguments of
4608 ///               the compare instruction.  Must be of integer type.
4609 /// \param OtherVal The other argument of compare instruction.
4610 /// \returns Instruction which must replace the compare instruction, NULL if no
4611 ///          replacement required.
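     /// For example (illustrative), with 32-bit operands zero-extended to i64:
     ///   %a = zext i32 %A to i64
     ///   %b = zext i32 %B to i64
     ///   %mul = mul i64 %a, %b
     ///   %cmp = icmp ugt i64 %mul, 4294967295
     /// is recognized as an overflow test for the product of %A and %B.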
4612 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
4613                                          Value *OtherVal,
4614                                          InstCombinerImpl &IC) {
4615   // Don't bother doing this transformation for pointers, and don't do it
4616   // for vectors.
4617   if (!isa<IntegerType>(MulVal->getType()))
4618     return nullptr;
4619 
4620   assert(I.getOperand(0) == MulVal || I.getOperand(1) == MulVal);
4621   assert(I.getOperand(0) == OtherVal || I.getOperand(1) == OtherVal);
4622   auto *MulInstr = dyn_cast<Instruction>(MulVal);
4623   if (!MulInstr)
4624     return nullptr;
4625   assert(MulInstr->getOpcode() == Instruction::Mul);
4626 
4627   auto *LHS = cast<ZExtOperator>(MulInstr->getOperand(0)),
4628        *RHS = cast<ZExtOperator>(MulInstr->getOperand(1));
4629   assert(LHS->getOpcode() == Instruction::ZExt);
4630   assert(RHS->getOpcode() == Instruction::ZExt);
4631   Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
4632 
4633   // Calculate type and width of the result produced by mul.with.overflow.
4634   Type *TyA = A->getType(), *TyB = B->getType();
4635   unsigned WidthA = TyA->getPrimitiveSizeInBits(),
4636            WidthB = TyB->getPrimitiveSizeInBits();
4637   unsigned MulWidth;
4638   Type *MulType;
4639   if (WidthB > WidthA) {
4640     MulWidth = WidthB;
4641     MulType = TyB;
4642   } else {
4643     MulWidth = WidthA;
4644     MulType = TyA;
4645   }
4646 
4647   // In order to replace the original mul with a narrower mul.with.overflow,
4648   // all uses must ignore upper bits of the product.  The number of used low
4649   // bits must not be greater than the width of mul.with.overflow.
4650   if (MulVal->hasNUsesOrMore(2))
4651     for (User *U : MulVal->users()) {
4652       if (U == &I)
4653         continue;
4654       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4655         // Check if truncation ignores bits above MulWidth.
4656         unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
4657         if (TruncWidth > MulWidth)
4658           return nullptr;
4659       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4660         // Check if AND ignores bits above MulWidth.
4661         if (BO->getOpcode() != Instruction::And)
4662           return nullptr;
4663         if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
4664           const APInt &CVal = CI->getValue();
4665           if (CVal.getBitWidth() - CVal.countLeadingZeros() > MulWidth)
4666             return nullptr;
4667         } else {
4668           // In this case we could have the operand of the binary operation
4669           // being defined in another block, and performing the replacement
4670           // could break the dominance relation.
4671           return nullptr;
4672         }
4673       } else {
4674         // Other uses prohibit this transformation.
4675         return nullptr;
4676       }
4677     }
4678 
4679   // Recognize patterns
4680   switch (I.getPredicate()) {
4681   case ICmpInst::ICMP_EQ:
4682   case ICmpInst::ICMP_NE:
4683     // Recognize pattern:
4684     //   mulval = mul(zext A, zext B)
4685     //   cmp eq/ne mulval, and(mulval, mask), where mask selects low MulWidth bits.
4686     ConstantInt *CI;
4687     Value *ValToMask;
4688     if (match(OtherVal, m_And(m_Value(ValToMask), m_ConstantInt(CI)))) {
4689       if (ValToMask != MulVal)
4690         return nullptr;
4691       const APInt &CVal = CI->getValue() + 1;
4692       if (CVal.isPowerOf2()) {
4693         unsigned MaskWidth = CVal.logBase2();
4694         if (MaskWidth == MulWidth)
4695           break; // Recognized
4696       }
4697     }
4698     return nullptr;
4699 
4700   case ICmpInst::ICMP_UGT:
4701     // Recognize pattern:
4702     //   mulval = mul(zext A, zext B)
4703     //   cmp ugt mulval, max
4704     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4705       APInt MaxVal = APInt::getMaxValue(MulWidth);
4706       MaxVal = MaxVal.zext(CI->getBitWidth());
4707       if (MaxVal.eq(CI->getValue()))
4708         break; // Recognized
4709     }
4710     return nullptr;
4711 
4712   case ICmpInst::ICMP_UGE:
4713     // Recognize pattern:
4714     //   mulval = mul(zext A, zext B)
4715     //   cmp uge mulval, max+1
4716     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4717       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4718       if (MaxVal.eq(CI->getValue()))
4719         break; // Recognized
4720     }
4721     return nullptr;
4722 
4723   case ICmpInst::ICMP_ULE:
4724     // Recognize pattern:
4725     //   mulval = mul(zext A, zext B)
4726     //   cmp ule mulval, max
4727     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4728       APInt MaxVal = APInt::getMaxValue(MulWidth);
4729       MaxVal = MaxVal.zext(CI->getBitWidth());
4730       if (MaxVal.eq(CI->getValue()))
4731         break; // Recognized
4732     }
4733     return nullptr;
4734 
4735   case ICmpInst::ICMP_ULT:
4736     // Recognize pattern:
4737     //   mulval = mul(zext A, zext B)
4738     //   cmp ult mulval, max + 1
4739     if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) {
4740       APInt MaxVal = APInt::getOneBitSet(CI->getBitWidth(), MulWidth);
4741       if (MaxVal.eq(CI->getValue()))
4742         break; // Recognized
4743     }
4744     return nullptr;
4745 
4746   default:
4747     return nullptr;
4748   }
4749 
4750   InstCombiner::BuilderTy &Builder = IC.Builder;
4751   Builder.SetInsertPoint(MulInstr);
4752 
4753   // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
4754   Value *MulA = A, *MulB = B;
4755   if (WidthA < MulWidth)
4756     MulA = Builder.CreateZExt(A, MulType);
4757   if (WidthB < MulWidth)
4758     MulB = Builder.CreateZExt(B, MulType);
4759   Function *F = Intrinsic::getDeclaration(
4760       I.getModule(), Intrinsic::umul_with_overflow, MulType);
4761   CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
4762   IC.addToWorklist(MulInstr);
4763 
4764   // If there are uses of the mul result other than the comparison, we know
4765   // that they are truncations or binary ANDs. Change them to use the result
4766   // of mul.with.overflow and adjust the mask/size accordingly.
4767   if (MulVal->hasNUsesOrMore(2)) {
4768     Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
4769     for (User *U : make_early_inc_range(MulVal->users())) {
4770       if (U == &I || U == OtherVal)
4771         continue;
4772       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
4773         if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
4774           IC.replaceInstUsesWith(*TI, Mul);
4775         else
4776           TI->setOperand(0, Mul);
4777       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
4778         assert(BO->getOpcode() == Instruction::And);
4779         // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
4780         ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
4781         APInt ShortMask = CI->getValue().trunc(MulWidth);
4782         Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
4783         Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
4784         IC.replaceInstUsesWith(*BO, Zext);
4785       } else {
4786         llvm_unreachable("Unexpected Binary operation");
4787       }
4788       IC.addToWorklist(cast<Instruction>(U));
4789     }
4790   }
4791   if (isa<Instruction>(OtherVal))
4792     IC.addToWorklist(cast<Instruction>(OtherVal));
4793 
4794   // The original icmp gets replaced with the overflow value, maybe inverted
4795   // depending on predicate.
4796   bool Inverse = false;
4797   switch (I.getPredicate()) {
4798   case ICmpInst::ICMP_NE:
4799     break;
4800   case ICmpInst::ICMP_EQ:
4801     Inverse = true;
4802     break;
4803   case ICmpInst::ICMP_UGT:
4804   case ICmpInst::ICMP_UGE:
4805     if (I.getOperand(0) == MulVal)
4806       break;
4807     Inverse = true;
4808     break;
4809   case ICmpInst::ICMP_ULT:
4810   case ICmpInst::ICMP_ULE:
4811     if (I.getOperand(1) == MulVal)
4812       break;
4813     Inverse = true;
4814     break;
4815   default:
4816     llvm_unreachable("Unexpected predicate");
4817   }
4818   if (Inverse) {
4819     Value *Res = Builder.CreateExtractValue(Call, 1);
4820     return BinaryOperator::CreateNot(Res);
4821   }
4822 
4823   return ExtractValueInst::Create(Call, 1);
4824 }
4825 
4826 /// When performing a comparison against a constant, it is possible that not all
4827 /// the bits in the LHS are demanded. This helper method computes the mask that
4828 /// IS demanded.
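     /// For example (illustrative): in (icmp ugt i8 %x, 7) the low three bits
     /// of %x are not demanded, because any value greater than 7 must have a
     /// bit set above bit 2.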
4829 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
4830   const APInt *RHS;
4831   if (!match(I.getOperand(1), m_APInt(RHS)))
4832     return APInt::getAllOnesValue(BitWidth);
4833 
4834   // If this is a normal comparison, it demands all bits. If it is a sign bit
4835   // comparison, it only demands the sign bit.
4836   bool UnusedBit;
4837   if (InstCombiner::isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
4838     return APInt::getSignMask(BitWidth);
4839 
4840   switch (I.getPredicate()) {
4841   // For a UGT comparison, we don't care about any bits that
4842   // correspond to the trailing ones of the comparand.  The value of these
4843   // bits doesn't impact the outcome of the comparison, because any value
4844   // greater than the RHS must differ in a bit higher than these due to carry.
4845   case ICmpInst::ICMP_UGT:
4846     return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingOnes());
4847 
4848   // Similarly, for a ULT comparison, we don't care about the trailing zeros.
4849   // Any value less than the RHS must differ in a higher bit because of carries.
4850   case ICmpInst::ICMP_ULT:
4851     return APInt::getBitsSetFrom(BitWidth, RHS->countTrailingZeros());
4852 
4853   default:
4854     return APInt::getAllOnesValue(BitWidth);
4855   }
4856 }
4857 
4858 /// Check if the order of \p Op0 and \p Op1 as operands in an ICmpInst
4859 /// should be swapped.
4860 /// The decision is based on how many times these two operands are reused
4861 /// as subtract operands and their positions in those instructions.
4862 /// The rationale is that several architectures use the same instruction for
4863 /// both subtract and cmp. Thus, it is better if the order of those operands
4864 /// matches.
4865 /// \return true if Op0 and Op1 should be swapped.
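     /// For example (illustrative): if 'sub %b, %a' exists elsewhere, rewriting
     /// 'icmp ult %a, %b' as 'icmp ugt %b, %a' lets the compare and the
     /// subtract share operands in the same order.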
4866 static bool swapMayExposeCSEOpportunities(const Value *Op0, const Value *Op1) {
4867   // Filter out pointer values as those cannot appear directly in subtract.
4868   // FIXME: we may want to go through inttoptrs or bitcasts.
4869   if (Op0->getType()->isPointerTy())
4870     return false;
4871   // If a subtract already has the same operands as a compare, swapping would be
4872   // bad. If a subtract has the same operands as a compare but in reverse order,
4873   // then swapping is good.
4874   int GoodToSwap = 0;
4875   for (const User *U : Op0->users()) {
4876     if (match(U, m_Sub(m_Specific(Op1), m_Specific(Op0))))
4877       GoodToSwap++;
4878     else if (match(U, m_Sub(m_Specific(Op0), m_Specific(Op1))))
4879       GoodToSwap--;
4880   }
4881   return GoodToSwap > 0;
4882 }
4883 
4884 /// Check that one use is in the same block as the definition and all
4885 /// other uses are in blocks dominated by a given block.
4886 ///
4887 /// \param DI Definition
4888 /// \param UI Use
4889 /// \param DB Block that must dominate all uses of \p DI outside
4890 ///           the parent block
4891 /// \return true when \p UI is the only use of \p DI in the parent block
4892 /// and all other uses of \p DI are in blocks dominated by \p DB.
4893 ///
4894 bool InstCombinerImpl::dominatesAllUses(const Instruction *DI,
4895                                         const Instruction *UI,
4896                                         const BasicBlock *DB) const {
4897   assert(DI && UI && "Instruction not defined\n");
4898   // Ignore incomplete definitions.
4899   if (!DI->getParent())
4900     return false;
4901   // DI and UI must be in the same block.
4902   if (DI->getParent() != UI->getParent())
4903     return false;
4904   // Protect from self-referencing blocks.
4905   if (DI->getParent() == DB)
4906     return false;
4907   for (const User *U : DI->users()) {
4908     auto *Usr = cast<Instruction>(U);
4909     if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
4910       return false;
4911   }
4912   return true;
4913 }
4914 
4915 /// Return true when the instruction sequence within a block is select-cmp-br.
4916 static bool isChainSelectCmpBranch(const SelectInst *SI) {
4917   const BasicBlock *BB = SI->getParent();
4918   if (!BB)
4919     return false;
4920   auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
4921   if (!BI || BI->getNumSuccessors() != 2)
4922     return false;
4923   auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
4924   if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
4925     return false;
4926   return true;
4927 }
4928 
4929 /// True when a select result is replaced by one of its operands
4930 /// in a select-icmp sequence. This will eventually result in the elimination
4931 /// of the select.
4932 ///
4933 /// \param SI    Select instruction
4934 /// \param Icmp  Compare instruction
4935 /// \param SIOpd Operand that replaces the select
4936 ///
4937 /// Notes:
4938 /// - The replacement is global and requires dominator information
4939 /// - The caller is responsible for the actual replacement
4940 ///
4941 /// Example:
4942 ///
4943 /// entry:
4944 ///  %4 = select i1 %3, %C* %0, %C* null
4945 ///  %5 = icmp eq %C* %4, null
4946 ///  br i1 %5, label %9, label %7
4947 ///  ...
4948 ///  ; <label>:7                                       ; preds = %entry
4949 ///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
4950 ///  ...
4951 ///
4952 /// can be transformed to
4953 ///
4954 ///  %5 = icmp eq %C* %0, null
4955 ///  %6 = select i1 %3, i1 %5, i1 true
4956 ///  br i1 %6, label %9, label %7
4957 ///  ...
4958 ///  ; <label>:7                                       ; preds = %entry
4959 ///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  // replace by %0!
4960 ///
4961 /// Similarly when the first operand of the select is a constant and/or
4962 /// the compare is for not equal rather than equal.
4963 ///
4964 /// NOTE: The function is only called when the select and compare constants
4965 /// are equal, so the optimization can work only for EQ predicates. This is not a
4966 /// major restriction since a NE compare should be 'normalized' to an equal
4967 /// compare, which usually happens in the combiner and test case
4968 /// select-cmp-br.ll checks for it.
4969 bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI,
4970                                                  const ICmpInst *Icmp,
4971                                                  const unsigned SIOpd) {
4972   assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
4973   if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
4974     BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
4975     // The check for the single predecessor is not the best that can be
4976     // done. But it protects efficiently against cases like when SI's
4977     // home block has two successors, Succ and Succ1, and Succ1 is a
4978     // predecessor of Succ. Then SI can't be replaced by SIOpd because the
4979     // use that gets replaced can be reached on either path. So the
4980     // uniqueness check guarantees that the path containing all uses of SI
4981     // (outside SI's parent) is disjoint from all other paths out of SI.
4982     // But that information is more expensive to compute, and the trade-off
4983     // here is in favor of compile time. It should also be noted that we
4984     // check for a single predecessor and not only uniqueness; this handles
4985     // the situation when Succ and Succ1 point to the same basic block.
4986     if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
4987       NumSel++;
4988       SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
4989       return true;
4990     }
4991   }
4992   return false;
4993 }
4994 
4995 /// Try to fold the comparison based on range information we can get by checking
4996 /// whether bits are known to be zero or one in the inputs.
4997 Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
4998   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4999   Type *Ty = Op0->getType();
5000   ICmpInst::Predicate Pred = I.getPredicate();
5001 
5002   // Get scalar or pointer size.
5003   unsigned BitWidth = Ty->isIntOrIntVectorTy()
5004                           ? Ty->getScalarSizeInBits()
5005                           : DL.getPointerTypeSizeInBits(Ty->getScalarType());
5006 
5007   if (!BitWidth)
5008     return nullptr;
5009 
5010   KnownBits Op0Known(BitWidth);
5011   KnownBits Op1Known(BitWidth);
5012 
5013   if (SimplifyDemandedBits(&I, 0,
5014                            getDemandedBitsLHSMask(I, BitWidth),
5015                            Op0Known, 0))
5016     return &I;
5017 
5018   if (SimplifyDemandedBits(&I, 1, APInt::getAllOnesValue(BitWidth),
5019                            Op1Known, 0))
5020     return &I;
5021 
5022   // Given the known and unknown bits, compute a range that the LHS could be
5023   // in.  Compute the Min, Max and RHS values based on the known bits. For the
5024   // EQ and NE predicates we use unsigned values.
5025   APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
5026   APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
5027   if (I.isSigned()) {
5028     Op0Min = Op0Known.getSignedMinValue();
5029     Op0Max = Op0Known.getSignedMaxValue();
5030     Op1Min = Op1Known.getSignedMinValue();
5031     Op1Max = Op1Known.getSignedMaxValue();
5032   } else {
5033     Op0Min = Op0Known.getMinValue();
5034     Op0Max = Op0Known.getMaxValue();
5035     Op1Min = Op1Known.getMinValue();
5036     Op1Max = Op1Known.getMaxValue();
5037   }
5038 
5039   // If Min and Max are known to be the same, then SimplifyDemandedBits figured
5040   // out that the LHS or RHS is a constant. Constant fold this now, so that
5041   // code below can assume that Min != Max.
5042   if (!isa<Constant>(Op0) && Op0Min == Op0Max)
5043     return new ICmpInst(Pred, ConstantExpr::getIntegerValue(Ty, Op0Min), Op1);
5044   if (!isa<Constant>(Op1) && Op1Min == Op1Max)
5045     return new ICmpInst(Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Min));
5046 
5047   // Based on the range information we know about the LHS, see if we can
5048   // simplify this comparison.  For example, (x&4) < 8 is always true.
5049   switch (Pred) {
5050   default:
5051     llvm_unreachable("Unknown icmp opcode!");
5052   case ICmpInst::ICMP_EQ:
5053   case ICmpInst::ICMP_NE: {
5054     if (Op0Max.ult(Op1Min) || Op0Min.ugt(Op1Max))
5055       return replaceInstUsesWith(
5056           I, ConstantInt::getBool(I.getType(), Pred == CmpInst::ICMP_NE));
5057 
5058     // If all bits are known zero except for one, then we know at most one bit
5059     // is set. If the comparison is against zero, then this is a check to see if
5060     // *that* bit is set.
5061     APInt Op0KnownZeroInverted = ~Op0Known.Zero;
5062     if (Op1Known.isZero()) {
5063       // If the LHS is an AND with the same constant, look through it.
5064       Value *LHS = nullptr;
5065       const APInt *LHSC;
5066       if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
5067           *LHSC != Op0KnownZeroInverted)
5068         LHS = Op0;
5069 
5070       Value *X;
5071       if (match(LHS, m_Shl(m_One(), m_Value(X)))) {
5072         APInt ValToCheck = Op0KnownZeroInverted;
5073         Type *XTy = X->getType();
5074         if (ValToCheck.isPowerOf2()) {
5075           // ((1 << X) & 8) == 0 -> X != 3
5076           // ((1 << X) & 8) != 0 -> X == 3
5077           auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5078           auto NewPred = ICmpInst::getInversePredicate(Pred);
5079           return new ICmpInst(NewPred, X, CmpC);
5080         } else if ((++ValToCheck).isPowerOf2()) {
5081           // ((1 << X) & 7) == 0 -> X >= 3
5082           // ((1 << X) & 7) != 0 -> X  < 3
5083           auto *CmpC = ConstantInt::get(XTy, ValToCheck.countTrailingZeros());
5084           auto NewPred =
5085               Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
5086           return new ICmpInst(NewPred, X, CmpC);
5087         }
5088       }
5089 
5090       // Check if the LHS is (power-of-2 >>u X) and only bit 0 may be set.
5091       const APInt *CI;
5092       if (Op0KnownZeroInverted.isOneValue() &&
5093           match(LHS, m_LShr(m_Power2(CI), m_Value(X)))) {
5094         // ((8 >>u X) & 1) == 0 -> X != 3
5095         // ((8 >>u X) & 1) != 0 -> X == 3
5096         unsigned CmpVal = CI->countTrailingZeros();
5097         auto NewPred = ICmpInst::getInversePredicate(Pred);
5098         return new ICmpInst(NewPred, X, ConstantInt::get(X->getType(), CmpVal));
5099       }
5100     }
5101     break;
5102   }
5103   case ICmpInst::ICMP_ULT: {
5104     if (Op0Max.ult(Op1Min)) // A <u B -> true if max(A) < min(B)
5105       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5106     if (Op0Min.uge(Op1Max)) // A <u B -> false if min(A) >= max(B)
5107       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5108     if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
5109       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5110 
5111     const APInt *CmpC;
5112     if (match(Op1, m_APInt(CmpC))) {
5113       // A <u C -> A == C-1 if min(A)+1 == C
5114       if (*CmpC == Op0Min + 1)
5115         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5116                             ConstantInt::get(Op1->getType(), *CmpC - 1));
5117       // X <u C --> X == 0, if the number of known trailing zero bits in X
5118       // is at least ceil(log2(C)).
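           // Worked example (illustrative): if X = Y << 4, the low 4 bits of X
           // are known zero, so 'X <u 16' can only be true when X == 0.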
5119       if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
5120         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5121                             Constant::getNullValue(Op1->getType()));
5122     }
5123     break;
5124   }
5125   case ICmpInst::ICMP_UGT: {
5126     if (Op0Min.ugt(Op1Max)) // A >u B -> true if min(A) > max(B)
5127       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5128     if (Op0Max.ule(Op1Min)) // A >u B -> false if max(A) <= min(B)
5129       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5130     if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
5131       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5132 
5133     const APInt *CmpC;
5134     if (match(Op1, m_APInt(CmpC))) {
5135       // A >u C -> A == C+1 if max(A)-1 == C
5136       if (*CmpC == Op0Max - 1)
5137         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5138                             ConstantInt::get(Op1->getType(), *CmpC + 1));
5139       // X >u C --> X != 0, if the number of known trailing zero bits in X
5140       // is at least the number of active bits in C.
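           // Worked example (illustrative): if X = Y << 4, X is a multiple of
           // 16, so 'X >u 9' holds exactly when X != 0 (9 has 4 active bits).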
5141       if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
5142         return new ICmpInst(ICmpInst::ICMP_NE, Op0,
5143                             Constant::getNullValue(Op1->getType()));
5144     }
5145     break;
5146   }
5147   case ICmpInst::ICMP_SLT: {
5148     if (Op0Max.slt(Op1Min)) // A <s B -> true if max(A) < min(B)
5149       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5150     if (Op0Min.sge(Op1Max)) // A <s B -> false if min(A) >= max(B)
5151       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5152     if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
5153       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5154     const APInt *CmpC;
5155     if (match(Op1, m_APInt(CmpC))) {
5156       if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
5157         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5158                             ConstantInt::get(Op1->getType(), *CmpC - 1));
5159     }
5160     break;
5161   }
5162   case ICmpInst::ICMP_SGT: {
5163     if (Op0Min.sgt(Op1Max)) // A >s B -> true if min(A) > max(B)
5164       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5165     if (Op0Max.sle(Op1Min)) // A >s B -> false if max(A) <= min(B)
5166       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5167     if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
5168       return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5169     const APInt *CmpC;
5170     if (match(Op1, m_APInt(CmpC))) {
5171       if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
5172         return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
5173                             ConstantInt::get(Op1->getType(), *CmpC + 1));
5174     }
5175     break;
5176   }
5177   case ICmpInst::ICMP_SGE:
5178     assert(!isa<ConstantInt>(Op1) && "ICMP_SGE with ConstantInt not folded!");
5179     if (Op0Min.sge(Op1Max)) // A >=s B -> true if min(A) >= max(B)
5180       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5181     if (Op0Max.slt(Op1Min)) // A >=s B -> false if max(A) < min(B)
5182       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5183     if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
5184       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5185     break;
5186   case ICmpInst::ICMP_SLE:
5187     assert(!isa<ConstantInt>(Op1) && "ICMP_SLE with ConstantInt not folded!");
5188     if (Op0Max.sle(Op1Min)) // A <=s B -> true if max(A) <= min(B)
5189       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5190     if (Op0Min.sgt(Op1Max)) // A <=s B -> false if min(A) > max(B)
5191       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5192     if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
5193       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5194     break;
5195   case ICmpInst::ICMP_UGE:
5196     assert(!isa<ConstantInt>(Op1) && "ICMP_UGE with ConstantInt not folded!");
5197     if (Op0Min.uge(Op1Max)) // A >=u B -> true if min(A) >= max(B)
5198       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5199     if (Op0Max.ult(Op1Min)) // A >=u B -> false if max(A) < min(B)
5200       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5201     if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
5202       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5203     break;
5204   case ICmpInst::ICMP_ULE:
5205     assert(!isa<ConstantInt>(Op1) && "ICMP_ULE with ConstantInt not folded!");
5206     if (Op0Max.ule(Op1Min)) // A <=u B -> true if max(A) <= min(B)
5207       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5208     if (Op0Min.ugt(Op1Max)) // A <=u B -> false if min(A) > max(B)
5209       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5210     if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
5211       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5212     break;
5213   }
5214 
5215   // Turn a signed comparison into an unsigned one if both operands are known to
5216   // have the same sign.
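       // e.g. (illustrative): when the sign bit of both operands is known
       // zero, 'icmp slt %a, %b' becomes 'icmp ult %a, %b'.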
5217   if (I.isSigned() &&
5218       ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
5219        (Op0Known.One.isNegative() && Op1Known.One.isNegative())))
5220     return new ICmpInst(I.getUnsignedPredicate(), Op0, Op1);
5221 
5222   return nullptr;
5223 }
5224 
5225 llvm::Optional<std::pair<CmpInst::Predicate, Constant *>>
5226 InstCombiner::getFlippedStrictnessPredicateAndConstant(CmpInst::Predicate Pred,
5227                                                        Constant *C) {
5228   assert(ICmpInst::isRelational(Pred) && ICmpInst::isIntPredicate(Pred) &&
5229          "Only for relational integer predicates.");
5230 
5231   Type *Type = C->getType();
5232   bool IsSigned = ICmpInst::isSigned(Pred);
5233 
5234   CmpInst::Predicate UnsignedPred = ICmpInst::getUnsignedPredicate(Pred);
5235   bool WillIncrement =
5236       UnsignedPred == ICmpInst::ICMP_ULE || UnsignedPred == ICmpInst::ICMP_UGT;
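       // For example (illustrative): 'ule'/'ugt' flip to 'ult'/'uge' by adding
       // 1 to the constant, while 'ult'/'uge' flip to 'ule'/'ugt' by
       // subtracting 1; likewise for the signed predicates.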
5237 
5238   // Check if the constant operand can be safely incremented/decremented
5239   // without overflowing/underflowing.
5240   auto ConstantIsOk = [WillIncrement, IsSigned](ConstantInt *C) {
5241     return WillIncrement ? !C->isMaxValue(IsSigned) : !C->isMinValue(IsSigned);
5242   };
5243 
5244   Constant *SafeReplacementConstant = nullptr;
5245   if (auto *CI = dyn_cast<ConstantInt>(C)) {
5246     // Bail out if the constant can't be safely incremented/decremented.
5247     if (!ConstantIsOk(CI))
5248       return llvm::None;
5249   } else if (auto *FVTy = dyn_cast<FixedVectorType>(Type)) {
5250     unsigned NumElts = FVTy->getNumElements();
5251     for (unsigned i = 0; i != NumElts; ++i) {
5252       Constant *Elt = C->getAggregateElement(i);
5253       if (!Elt)
5254         return llvm::None;
5255 
5256       if (isa<UndefValue>(Elt))
5257         continue;
5258 
5259       // Bail out if we can't determine if this constant is min/max or if we
5260       // know that this constant is min/max.
5261       auto *CI = dyn_cast<ConstantInt>(Elt);
5262       if (!CI || !ConstantIsOk(CI))
5263         return llvm::None;
5264 
5265       if (!SafeReplacementConstant)
5266         SafeReplacementConstant = CI;
5267     }
5268   } else {
5269     // ConstantExpr?
5270     return llvm::None;
5271   }
5272 
5273   // It may not be safe to change a compare predicate in the presence of
5274   // undefined elements, so replace those elements with the first safe constant
5275   // that we found.
5276   // TODO: in case of poison, it is safe; let's replace undefs only.
5277   if (C->containsUndefOrPoisonElement()) {
5278     assert(SafeReplacementConstant && "Replacement constant not set");
5279     C = Constant::replaceUndefsWith(C, SafeReplacementConstant);
5280   }
5281 
5282   CmpInst::Predicate NewPred = CmpInst::getFlippedStrictnessPredicate(Pred);
5283 
5284   // Increment or decrement the constant.
5285   Constant *OneOrNegOne = ConstantInt::get(Type, WillIncrement ? 1 : -1, true);
5286   Constant *NewC = ConstantExpr::getAdd(C, OneOrNegOne);
5287 
5288   return std::make_pair(NewPred, NewC);
5289 }
5290 
5291 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
5292 /// it into the appropriate icmp lt or icmp gt instruction. This transform
5293 /// allows them to be folded in visitICmpInst.
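     /// For example (illustrative): 'icmp sle %x, 5' becomes 'icmp slt %x, 6'.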
5294 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
5295   ICmpInst::Predicate Pred = I.getPredicate();
5296   if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
5297       InstCombiner::isCanonicalPredicate(Pred))
5298     return nullptr;
5299 
5300   Value *Op0 = I.getOperand(0);
5301   Value *Op1 = I.getOperand(1);
5302   auto *Op1C = dyn_cast<Constant>(Op1);
5303   if (!Op1C)
5304     return nullptr;
5305 
5306   auto FlippedStrictness =
5307       InstCombiner::getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
5308   if (!FlippedStrictness)
5309     return nullptr;
5310 
5311   return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
5312 }
5313 
5314 /// If we have a comparison with a non-canonical predicate and we can update
5315 /// all of its users, invert the predicate and adjust all the users.
5316 CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) {
5317   // Is the predicate already canonical?
5318   CmpInst::Predicate Pred = I.getPredicate();
5319   if (InstCombiner::isCanonicalPredicate(Pred))
5320     return nullptr;
5321 
5322   // Can all users be adjusted to predicate inversion?
5323   if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
5324     return nullptr;
5325 
5326   // Ok, we can canonicalize the comparison!
5327   // Let's first invert the comparison's predicate.
5328   I.setPredicate(CmpInst::getInversePredicate(Pred));
5329   I.setName(I.getName() + ".not");
5330 
5331   // And, adapt users.
5332   freelyInvertAllUsersOf(&I);
5333 
5334   return &I;
5335 }
5336 
5337 /// Integer compare with boolean values can always be turned into bitwise ops.
5338 static Instruction *canonicalizeICmpBool(ICmpInst &I,
5339                                          InstCombiner::BuilderTy &Builder) {
5340   Value *A = I.getOperand(0), *B = I.getOperand(1);
5341   assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
5342 
5343   // A boolean compared to true/false can be simplified to Op0/true/false in
5344   // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
5345   // Cases not handled by InstSimplify are always 'not' of Op0.
5346   if (match(B, m_Zero())) {
5347     switch (I.getPredicate()) {
5348       case CmpInst::ICMP_EQ:  // A ==   0 -> !A
5349       case CmpInst::ICMP_ULE: // A <=u  0 -> !A
5350       case CmpInst::ICMP_SGE: // A >=s  0 -> !A
5351         return BinaryOperator::CreateNot(A);
5352       default:
5353         llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5354     }
5355   } else if (match(B, m_One())) {
5356     switch (I.getPredicate()) {
5357       case CmpInst::ICMP_NE:  // A !=  1 -> !A
5358       case CmpInst::ICMP_ULT: // A <u  1 -> !A
5359       case CmpInst::ICMP_SGT: // A >s -1 -> !A
5360         return BinaryOperator::CreateNot(A);
5361       default:
5362         llvm_unreachable("ICmp i1 X, C not simplified as expected.");
5363     }
5364   }
5365 
5366   switch (I.getPredicate()) {
5367   default:
5368     llvm_unreachable("Invalid icmp instruction!");
5369   case ICmpInst::ICMP_EQ:
5370     // icmp eq i1 A, B -> ~(A ^ B)
5371     return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
5372 
5373   case ICmpInst::ICMP_NE:
5374     // icmp ne i1 A, B -> A ^ B
5375     return BinaryOperator::CreateXor(A, B);
5376 
5377   case ICmpInst::ICMP_UGT:
5378     // icmp ugt -> icmp ult
5379     std::swap(A, B);
5380     LLVM_FALLTHROUGH;
5381   case ICmpInst::ICMP_ULT:
5382     // icmp ult i1 A, B -> ~A & B
5383     return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
5384 
5385   case ICmpInst::ICMP_SGT:
5386     // icmp sgt -> icmp slt
5387     std::swap(A, B);
5388     LLVM_FALLTHROUGH;
5389   case ICmpInst::ICMP_SLT:
5390     // icmp slt i1 A, B -> A & ~B
5391     return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
5392 
5393   case ICmpInst::ICMP_UGE:
5394     // icmp uge -> icmp ule
5395     std::swap(A, B);
5396     LLVM_FALLTHROUGH;
5397   case ICmpInst::ICMP_ULE:
5398     // icmp ule i1 A, B -> ~A | B
5399     return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
5400 
5401   case ICmpInst::ICMP_SGE:
5402     // icmp sge -> icmp sle
5403     std::swap(A, B);
5404     LLVM_FALLTHROUGH;
5405   case ICmpInst::ICMP_SLE:
5406     // icmp sle i1 A, B -> A | ~B
5407     return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
5408   }
5409 }
5410 
5411 // Transform pattern like:
5412 //   (1 << Y) u<= X  or  ~(-1 << Y) u<  X  or  ((1 << Y)+(-1)) u<  X
5413 //   (1 << Y) u>  X  or  ~(-1 << Y) u>= X  or  ((1 << Y)+(-1)) u>= X
5414 // Into:
5415 //   (X l>> Y) != 0
5416 //   (X l>> Y) == 0
5417 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
5418                                             InstCombiner::BuilderTy &Builder) {
5419   ICmpInst::Predicate Pred, NewPred;
5420   Value *X, *Y;
5421   if (match(&Cmp,
5422             m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
5423     switch (Pred) {
5424     case ICmpInst::ICMP_ULE:
5425       NewPred = ICmpInst::ICMP_NE;
5426       break;
5427     case ICmpInst::ICMP_UGT:
5428       NewPred = ICmpInst::ICMP_EQ;
5429       break;
5430     default:
5431       return nullptr;
5432     }
5433   } else if (match(&Cmp, m_c_ICmp(Pred,
5434                                   m_OneUse(m_CombineOr(
5435                                       m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
5436                                       m_Add(m_Shl(m_One(), m_Value(Y)),
5437                                             m_AllOnes()))),
5438                                   m_Value(X)))) {
5439     // The variant with 'add' is not canonical (the variant with 'not' is);
5440     // we only get it because it has extra uses and can't be canonicalized.
5441 
5442     switch (Pred) {
5443     case ICmpInst::ICMP_ULT:
5444       NewPred = ICmpInst::ICMP_NE;
5445       break;
5446     case ICmpInst::ICMP_UGE:
5447       NewPred = ICmpInst::ICMP_EQ;
5448       break;
5449     default:
5450       return nullptr;
5451     }
5452   } else
5453     return nullptr;
5454 
5455   Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
5456   Constant *Zero = Constant::getNullValue(NewX->getType());
5457   return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
5458 }
5459 
5460 static Instruction *foldVectorCmp(CmpInst &Cmp,
5461                                   InstCombiner::BuilderTy &Builder) {
5462   const CmpInst::Predicate Pred = Cmp.getPredicate();
5463   Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
5464   Value *V1, *V2;
5465   ArrayRef<int> M;
5466   if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
5467     return nullptr;
5468 
5469   // If both arguments of the cmp are shuffles that use the same mask and
5470   // shuffle within a single vector, move the shuffle after the cmp:
5471   // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
5472   Type *V1Ty = V1->getType();
5473   if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
5474       V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
5475     Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
5476     return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()), M);
5477   }
5478 
5479   // Try to canonicalize compare with splatted operand and splat constant.
5480   // TODO: We could generalize this for more than splats. See/use the code in
5481   //       InstCombiner::foldVectorBinop().
5482   Constant *C;
5483   if (!LHS->hasOneUse() || !match(RHS, m_Constant(C)))
5484     return nullptr;
5485 
5486   // Length-changing splats are ok, so adjust the constants as needed:
5487   // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
5488   Constant *ScalarC = C->getSplatValue(/* AllowUndefs */ true);
5489   int MaskSplatIndex;
5490   if (ScalarC && match(M, m_SplatOrUndefMask(MaskSplatIndex))) {
5491     // We allow undefs in matching, but this transform removes those for safety.
5492     // Demanded elements analysis should be able to recover some/all of that.
5493     C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
5494                                  ScalarC);
5495     SmallVector<int, 8> NewM(M.size(), MaskSplatIndex);
5496     Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
5497     return new ShuffleVectorInst(NewCmp, UndefValue::get(NewCmp->getType()),
5498                                  NewM);
5499   }
5500 
5501   return nullptr;
5502 }
5503 
5504 // extract(uadd.with.overflow(A, B), 0) ult A
5505 //  -> extract(uadd.with.overflow(A, B), 1)
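     // Illustrative IR for the first pattern:
     //   %agg = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
     //   %sum = extractvalue { i32, i1 } %agg, 0
     //   %cmp = icmp ult i32 %sum, %a  ; --> extractvalue { i32, i1 } %agg, 1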
5506 static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
5507   CmpInst::Predicate Pred = I.getPredicate();
5508   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5509 
5510   Value *UAddOv;
5511   Value *A, *B;
5512   auto UAddOvResultPat = m_ExtractValue<0>(
5513       m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
5514   if (match(Op0, UAddOvResultPat) &&
5515       ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
5516        (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
5517         (match(A, m_One()) || match(B, m_One()))) ||
5518        (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
5519         (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
5520     // extract(uadd.with.overflow(A, B), 0) < A
5521     // extract(uadd.with.overflow(A, 1), 0) == 0
5522     // extract(uadd.with.overflow(A, -1), 0) != -1
5523     UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
5524   else if (match(Op1, UAddOvResultPat) &&
5525            Pred == ICmpInst::ICMP_UGT && (Op0 == A || Op0 == B))
5526     // A > extract(uadd.with.overflow(A, B), 0)
5527     UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
5528   else
5529     return nullptr;
5530 
5531   return ExtractValueInst::Create(UAddOv, 1);
5532 }
5533 
5534 Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
5535   bool Changed = false;
5536   const SimplifyQuery Q = SQ.getWithInstruction(&I);
5537   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5538   unsigned Op0Cplxity = getComplexity(Op0);
5539   unsigned Op1Cplxity = getComplexity(Op1);
5540 
5541   /// Orders the operands of the compare so that they are listed from most
5542   /// complex to least complex.  This puts binary operators before unary
5543   /// operators, and unary operators before constants.
5544   if (Op0Cplxity < Op1Cplxity ||
5545       (Op0Cplxity == Op1Cplxity && swapMayExposeCSEOpportunities(Op0, Op1))) {
5546     I.swapOperands();
5547     std::swap(Op0, Op1);
5548     Changed = true;
5549   }
5550 
5551   if (Value *V = SimplifyICmpInst(I.getPredicate(), Op0, Op1, Q))
5552     return replaceInstUsesWith(I, V);
5553 
5554   // Comparing a select of val and -val against zero is the same as just
5555   // comparing val, i.e. abs(val) != 0 -> val != 0.
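       // Illustrative IR (with %neg = sub i32 0, %x):
       //   %v = select i1 %c, i32 %neg, i32 %x
       //   icmp ne i32 %v, 0   -->   icmp ne i32 %x, 0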
5556   if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
5557     Value *Cond, *SelectTrue, *SelectFalse;
5558     if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
5559                             m_Value(SelectFalse)))) {
5560       if (Value *V = dyn_castNegVal(SelectTrue)) {
5561         if (V == SelectFalse)
5562           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5563       }
5564       else if (Value *V = dyn_castNegVal(SelectFalse)) {
5565         if (V == SelectTrue)
5566           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
5567       }
5568     }
5569   }
5570 
5571   if (Op0->getType()->isIntOrIntVectorTy(1))
5572     if (Instruction *Res = canonicalizeICmpBool(I, Builder))
5573       return Res;
5574 
5575   if (Instruction *Res = canonicalizeCmpWithConstant(I))
5576     return Res;
5577 
5578   if (Instruction *Res = canonicalizeICmpPredicate(I))
5579     return Res;
5580 
5581   if (Instruction *Res = foldICmpWithConstant(I))
5582     return Res;
5583 
5584   if (Instruction *Res = foldICmpWithDominatingICmp(I))
5585     return Res;
5586 
5587   if (Instruction *Res = foldICmpBinOp(I, Q))
5588     return Res;
5589 
5590   if (Instruction *Res = foldICmpUsingKnownBits(I))
5591     return Res;
5592 
5593   // Test if the ICmpInst instruction is used exclusively by a select as
5594   // part of a minimum or maximum operation. If so, refrain from doing
5595   // any other folding. This helps out other analyses which understand
5596   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
5597   // and CodeGen. And in this case, at least one of the comparison
5598   // operands has at least one user besides the compare (the select),
5599   // which would often largely negate the benefit of folding anyway.
5600   //
5601   // Do the same for the other patterns recognized by matchSelectPattern.
5602   if (I.hasOneUse())
5603     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
5604       Value *A, *B;
5605       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
5606       if (SPR.Flavor != SPF_UNKNOWN)
5607         return nullptr;
5608     }
5609 
5610   // Do this after checking for min/max to prevent infinite looping.
5611   if (Instruction *Res = foldICmpWithZero(I))
5612     return Res;
5613 
5614   // FIXME: We only do this after checking for min/max to prevent infinite
5615   // looping caused by a reverse canonicalization of these patterns for min/max.
5616   // FIXME: The organization of folds is a mess. These would naturally go into
5617   // canonicalizeCmpWithConstant(), but we can't move all of the above folds
5618   // down here after the min/max restriction.
5619   ICmpInst::Predicate Pred = I.getPredicate();
5620   const APInt *C;
5621   if (match(Op1, m_APInt(C))) {
5622     // For i32: x >u 2147483647 -> x <s 0  -> true if sign bit set
5623     if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
5624       Constant *Zero = Constant::getNullValue(Op0->getType());
5625       return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
5626     }
5627 
5628     // For i32: x <u 2147483648 -> x >s -1  -> true if sign bit clear
5629     if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
5630       Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
5631       return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
5632     }
5633   }
5634 
5635   if (Instruction *Res = foldICmpInstWithConstant(I))
5636     return Res;
5637 
5638   // Try to match comparison as a sign bit test. Intentionally do this after
5639   // foldICmpInstWithConstant() to potentially let other folds happen first.
5640   if (Instruction *New = foldSignBitTest(I))
5641     return New;
5642 
5643   if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
5644     return Res;
5645 
5646   // If we can optimize an 'icmp GEP, P' or 'icmp P, GEP', do so now.
5647   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op0))
5648     if (Instruction *NI = foldGEPICmp(GEP, Op1, I.getPredicate(), I))
5649       return NI;
5650   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1))
5651     if (Instruction *NI = foldGEPICmp(GEP, Op0,
5652                            ICmpInst::getSwappedPredicate(I.getPredicate()), I))
5653       return NI;
5654 
5655   // Try to optimize equality comparisons against alloca-based pointers.
5656   if (Op0->getType()->isPointerTy() && I.isEquality()) {
5657     assert(Op1->getType()->isPointerTy() && "Comparing pointer with non-pointer?");
5658     if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
5659       if (Instruction *New = foldAllocaCmp(I, Alloca, Op1))
5660         return New;
5661     if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
5662       if (Instruction *New = foldAllocaCmp(I, Alloca, Op0))
5663         return New;
5664   }
5665 
5666   if (Instruction *Res = foldICmpBitCast(I, Builder))
5667     return Res;
5668 
5669   // TODO: Hoist this above the min/max bailout.
5670   if (Instruction *R = foldICmpWithCastOp(I))
5671     return R;
5672 
5673   if (Instruction *Res = foldICmpWithMinMax(I))
5674     return Res;
5675 
5676   {
5677     Value *A, *B;
5678     // Transform (A & ~B) == 0 --> (A & B) != 0
5679     // and       (A & ~B) != 0 --> (A & B) == 0
5680     // if A is a power of 2.
5681     if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
5682         match(Op1, m_Zero()) &&
5683         isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
5684       return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
5685                           Op1);
5686 
5687     // ~X < ~Y --> Y < X
5688     // ~X < C -->  X > ~C
5689     if (match(Op0, m_Not(m_Value(A)))) {
5690       if (match(Op1, m_Not(m_Value(B))))
5691         return new ICmpInst(I.getPredicate(), B, A);
5692 
5693       const APInt *C;
5694       if (match(Op1, m_APInt(C)))
5695         return new ICmpInst(I.getSwappedPredicate(), A,
5696                             ConstantInt::get(Op1->getType(), ~(*C)));
5697     }
5698 
5699     Instruction *AddI = nullptr;
5700     if (match(&I, m_UAddWithOverflow(m_Value(A), m_Value(B),
5701                                      m_Instruction(AddI))) &&
5702         isa<IntegerType>(A->getType())) {
5703       Value *Result;
5704       Constant *Overflow;
5705       // m_UAddWithOverflow can match patterns that do not include an explicit
5706       // "add" instruction, so check the opcode of the matched op.
5707       if (AddI->getOpcode() == Instruction::Add &&
5708           OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, A, B, *AddI,
5709                                 Result, Overflow)) {
5710         replaceInstUsesWith(*AddI, Result);
5711         eraseInstFromFunction(*AddI);
5712         return replaceInstUsesWith(I, Overflow);
5713       }
5714     }
5715 
5716     // (zext a) * (zext b)  --> llvm.umul.with.overflow.
5717     if (match(Op0, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5718       if (Instruction *R = processUMulZExtIdiom(I, Op0, Op1, *this))
5719         return R;
5720     }
5721     if (match(Op1, m_Mul(m_ZExt(m_Value(A)), m_ZExt(m_Value(B))))) {
5722       if (Instruction *R = processUMulZExtIdiom(I, Op1, Op0, *this))
5723         return R;
5724     }
5725   }
5726 
5727   if (Instruction *Res = foldICmpEquality(I))
5728     return Res;
5729 
5730   if (Instruction *Res = foldICmpOfUAddOv(I))
5731     return Res;
5732 
5733   // The 'cmpxchg' instruction returns an aggregate containing the old value and
5734   // an i1 which indicates whether or not we successfully did the swap.
5735   //
5736   // Replace comparisons between the old value and the expected value with the
5737   // indicator that 'cmpxchg' returns.
5738   //
5739   // N.B.  This transform is only valid when the 'cmpxchg' is not permitted to
5740   // spuriously fail.  In those cases, the old value may equal the expected
5741   // value but it is possible for the swap to not occur.
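       // Illustrative IR for a strong (non-weak) cmpxchg:
       //   %pair = cmpxchg i32* %p, i32 %expected, i32 %new seq_cst seq_cst
       //   %old  = extractvalue { i32, i1 } %pair, 0
       //   %eq   = icmp eq i32 %old, %expected  ; --> extractvalue %pair, 1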
5742   if (I.getPredicate() == ICmpInst::ICMP_EQ)
5743     if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
5744       if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
5745         if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
5746             !ACXI->isWeak())
5747           return ExtractValueInst::Create(ACXI, 1);
5748 
5749   {
5750     Value *X;
5751     const APInt *C;
5752     // icmp X+Cst, X
5753     if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
5754       return foldICmpAddOpConst(X, *C, I.getPredicate());
5755 
5756     // icmp X, X+Cst
5757     if (match(Op1, m_Add(m_Value(X), m_APInt(C))) && Op0 == X)
5758       return foldICmpAddOpConst(X, *C, I.getSwappedPredicate());
5759   }
5760 
5761   if (Instruction *Res = foldICmpWithHighBitMask(I, Builder))
5762     return Res;
5763 
5764   if (I.getType()->isVectorTy())
5765     if (Instruction *Res = foldVectorCmp(I, Builder))
5766       return Res;
5767 
5768   return Changed ? &I : nullptr;
5769 }
5770 
5771 /// Fold fcmp ([us]itofp x, cst) if possible.
5772 Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
5773                                                     Instruction *LHSI,
5774                                                     Constant *RHSC) {
5775   if (!isa<ConstantFP>(RHSC)) return nullptr;
5776   const APFloat &RHS = cast<ConstantFP>(RHSC)->getValueAPF();
5777 
5778   // Get the width of the mantissa.  We don't want to hack on conversions that
5779   // might lose information from the integer, e.g. "i64 -> float"
5780   int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
5781   if (MantissaWidth == -1) return nullptr;  // Unknown.
5782 
5783   IntegerType *IntTy = cast<IntegerType>(LHSI->getOperand(0)->getType());
5784 
5785   bool LHSUnsigned = isa<UIToFPInst>(LHSI);
5786 
5787   if (I.isEquality()) {
5788     FCmpInst::Predicate P = I.getPredicate();
5789     bool IsExact = false;
5790     APSInt RHSCvt(IntTy->getBitWidth(), LHSUnsigned);
5791     RHS.convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
5792 
5793     // If the floating point constant isn't an integer value, we know the
5794     // result of comparing equal / not equal to it.
5795     if (!IsExact) {
5796       // TODO: Can never be -0.0 and other non-representable values
5797       APFloat RHSRoundInt(RHS);
5798       RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
5799       if (RHS != RHSRoundInt) {
5800         if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
5801           return replaceInstUsesWith(I, Builder.getFalse());
5802 
5803         assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
5804         return replaceInstUsesWith(I, Builder.getTrue());
5805       }
5806     }
5807 
5808     // TODO: If the constant is exactly representable, is it always OK to do
5809     // equality compares as integer?
5810   }
5811 
5812   // Check that the input is converted from an integer type small enough to
5813   // preserve all bits.  TODO: check here for "known" sign bits, which would
5814   // let us handle e.g. (fptosi (x >>s 62) to float) when x is i64.
5815   unsigned InputSize = IntTy->getScalarSizeInBits();
5816 
5817   // The following test does NOT adjust InputSize downwards for signed inputs,
5818   // because the most negative value still requires all the mantissa bits
5819   // to distinguish it from one less than that value.
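       // e.g. (illustrative): for i32 -> float, InputSize is 32 and
       // MantissaWidth is 24, so a signed compare against 2^26 (Exp = 26, which
       // lies between 24 and 31) could be perturbed by rounding; we bail below.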
5820   if ((int)InputSize > MantissaWidth) {
5821     // Conversion would lose accuracy. Check if loss can impact comparison.
5822     int Exp = ilogb(RHS);
5823     if (Exp == APFloat::IEK_Inf) {
5824       int MaxExponent = ilogb(APFloat::getLargest(RHS.getSemantics()));
5825       if (MaxExponent < (int)InputSize - !LHSUnsigned)
5826         // Conversion could create infinity.
5827         return nullptr;
5828     } else {
5829       // Note that if RHS is zero or NaN, then Exp is negative
5830       // and the first condition is trivially false.
5831       if (MantissaWidth <= Exp && Exp <= (int)InputSize - !LHSUnsigned)
5832         // Conversion could affect comparison.
5833         return nullptr;
5834     }
5835   }
5836 
5837   // Otherwise, we can potentially simplify the comparison.  We know that it
5838   // will always come through as an integer value and we know the constant is
5839   // not a NAN (it would have been previously simplified).
5840   assert(!RHS.isNaN() && "NaN comparison not already folded!");
5841 
5842   ICmpInst::Predicate Pred;
5843   switch (I.getPredicate()) {
5844   default: llvm_unreachable("Unexpected predicate!");
5845   case FCmpInst::FCMP_UEQ:
5846   case FCmpInst::FCMP_OEQ:
5847     Pred = ICmpInst::ICMP_EQ;
5848     break;
5849   case FCmpInst::FCMP_UGT:
5850   case FCmpInst::FCMP_OGT:
5851     Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
5852     break;
5853   case FCmpInst::FCMP_UGE:
5854   case FCmpInst::FCMP_OGE:
5855     Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
5856     break;
5857   case FCmpInst::FCMP_ULT:
5858   case FCmpInst::FCMP_OLT:
5859     Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
5860     break;
5861   case FCmpInst::FCMP_ULE:
5862   case FCmpInst::FCMP_OLE:
5863     Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
5864     break;
5865   case FCmpInst::FCMP_UNE:
5866   case FCmpInst::FCMP_ONE:
5867     Pred = ICmpInst::ICMP_NE;
5868     break;
5869   case FCmpInst::FCMP_ORD:
5870     return replaceInstUsesWith(I, Builder.getTrue());
5871   case FCmpInst::FCMP_UNO:
5872     return replaceInstUsesWith(I, Builder.getFalse());
5873   }
5874 
5875   // Now we know that the APFloat is a normal number, zero or inf.
5876 
5877   // See if the FP constant is too large for the integer.  For example,
5878   // comparing an i8 to 300.0.
5879   unsigned IntWidth = IntTy->getScalarSizeInBits();
5880 
5881   if (!LHSUnsigned) {
5882     // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
5883     // and large values.
5884     APFloat SMax(RHS.getSemantics());
5885     SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
5886                           APFloat::rmNearestTiesToEven);
5887     if (SMax < RHS) { // smax < 13123.0
5888       if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_SLT ||
5889           Pred == ICmpInst::ICMP_SLE)
5890         return replaceInstUsesWith(I, Builder.getTrue());
5891       return replaceInstUsesWith(I, Builder.getFalse());
5892     }
5893   } else {
5894     // If the RHS value is > UnsignedMax, fold the comparison. This handles
5895     // +INF and large values.
5896     APFloat UMax(RHS.getSemantics());
5897     UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
5898                           APFloat::rmNearestTiesToEven);
5899     if (UMax < RHS) { // umax < 13123.0
5900       if (Pred == ICmpInst::ICMP_NE  || Pred == ICmpInst::ICMP_ULT ||
5901           Pred == ICmpInst::ICMP_ULE)
5902         return replaceInstUsesWith(I, Builder.getTrue());
5903       return replaceInstUsesWith(I, Builder.getFalse());
5904     }
5905   }
5906 
5907   if (!LHSUnsigned) {
5908     // See if the RHS value is < SignedMin.
5909     APFloat SMin(RHS.getSemantics());
5910     SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
5911                           APFloat::rmNearestTiesToEven);
5912     if (SMin > RHS) { // smin > 12312.0
5913       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
5914           Pred == ICmpInst::ICMP_SGE)
5915         return replaceInstUsesWith(I, Builder.getTrue());
5916       return replaceInstUsesWith(I, Builder.getFalse());
5917     }
5918   } else {
5919     // See if the RHS value is < UnsignedMin.
5920     APFloat UMin(RHS.getSemantics());
5921     UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
5922                           APFloat::rmNearestTiesToEven);
5923     if (UMin > RHS) { // umin > 12312.0
5924       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
5925           Pred == ICmpInst::ICMP_UGE)
5926         return replaceInstUsesWith(I, Builder.getTrue());
5927       return replaceInstUsesWith(I, Builder.getFalse());
5928     }
5929   }
5930 
5931   // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
5932   // [0, UMAX], but it may still be fractional.  See if it is fractional by
5933   // casting the FP value to the integer value and back, checking for equality.
5934   // Don't do this for zero, because -0.0 is not fractional.
5935   Constant *RHSInt = LHSUnsigned
5936     ? ConstantExpr::getFPToUI(RHSC, IntTy)
5937     : ConstantExpr::getFPToSI(RHSC, IntTy);
5938   if (!RHS.isZero()) {
5939     bool Equal = LHSUnsigned
5940       ? ConstantExpr::getUIToFP(RHSInt, RHSC->getType()) == RHSC
5941       : ConstantExpr::getSIToFP(RHSInt, RHSC->getType()) == RHSC;
5942     if (!Equal) {
5943       // If we had a comparison against a fractional value, we have to adjust
5944       // the compare predicate and sometimes the value.  RHSC is rounded towards
5945       // zero at this point.
5946       switch (Pred) {
5947       default: llvm_unreachable("Unexpected integer comparison!");
5948       case ICmpInst::ICMP_NE:  // (float)int != 4.4   --> true
5949         return replaceInstUsesWith(I, Builder.getTrue());
5950       case ICmpInst::ICMP_EQ:  // (float)int == 4.4   --> false
5951         return replaceInstUsesWith(I, Builder.getFalse());
5952       case ICmpInst::ICMP_ULE:
5953         // (float)int <= 4.4   --> int <= 4
5954         // (float)int <= -4.4  --> false
5955         if (RHS.isNegative())
5956           return replaceInstUsesWith(I, Builder.getFalse());
5957         break;
5958       case ICmpInst::ICMP_SLE:
5959         // (float)int <= 4.4   --> int <= 4
5960         // (float)int <= -4.4  --> int < -4
5961         if (RHS.isNegative())
5962           Pred = ICmpInst::ICMP_SLT;
5963         break;
5964       case ICmpInst::ICMP_ULT:
5965         // (float)int < -4.4   --> false
5966         // (float)int < 4.4    --> int <= 4
5967         if (RHS.isNegative())
5968           return replaceInstUsesWith(I, Builder.getFalse());
5969         Pred = ICmpInst::ICMP_ULE;
5970         break;
5971       case ICmpInst::ICMP_SLT:
5972         // (float)int < -4.4   --> int < -4
5973         // (float)int < 4.4    --> int <= 4
5974         if (!RHS.isNegative())
5975           Pred = ICmpInst::ICMP_SLE;
5976         break;
5977       case ICmpInst::ICMP_UGT:
5978         // (float)int > 4.4    --> int > 4
5979         // (float)int > -4.4   --> true
5980         if (RHS.isNegative())
5981           return replaceInstUsesWith(I, Builder.getTrue());
5982         break;
5983       case ICmpInst::ICMP_SGT:
5984         // (float)int > 4.4    --> int > 4
5985         // (float)int > -4.4   --> int >= -4
5986         if (RHS.isNegative())
5987           Pred = ICmpInst::ICMP_SGE;
5988         break;
5989       case ICmpInst::ICMP_UGE:
5990         // (float)int >= -4.4   --> true
5991         // (float)int >= 4.4    --> int > 4
5992         if (RHS.isNegative())
5993           return replaceInstUsesWith(I, Builder.getTrue());
5994         Pred = ICmpInst::ICMP_UGT;
5995         break;
5996       case ICmpInst::ICMP_SGE:
5997         // (float)int >= -4.4   --> int >= -4
5998         // (float)int >= 4.4    --> int > 4
5999         if (!RHS.isNegative())
6000           Pred = ICmpInst::ICMP_SGT;
6001         break;
6002       }
6003     }
6004   }
6005 
6006   // Lower this FP comparison into an appropriate integer version of the
6007   // comparison.
6008   return new ICmpInst(Pred, LHSI->getOperand(0), RHSInt);
6009 }
6010 
6011 /// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
6012 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
6013                                               Constant *RHSC) {
6014   // When C is not 0.0 and infinities are not allowed:
6015   // (C / X) < 0.0 is a sign-bit test of X
6016   // (C / X) < 0.0 --> X < 0.0 (if C is positive)
6017   // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
6018   //
6019   // Proof:
6020   // Multiply (C / X) < 0.0 by X * X / C.
6021   // - X is non zero, if it is the flag 'ninf' is violated.
6022   // - C defines the sign of X * X * C. Thus it also defines whether to swap
6023   //   the predicate. C is also non zero by definition.
6024   //
6025   // Thus X * X / C is non zero and the transformation is valid. [qed]
6026 
6027   FCmpInst::Predicate Pred = I.getPredicate();
6028 
6029   // Check that predicates are valid.
6030   if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
6031       (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
6032     return nullptr;
6033 
6034   // Check that RHS operand is zero.
6035   if (!match(RHSC, m_AnyZeroFP()))
6036     return nullptr;
6037 
6038   // Check fastmath flags ('ninf').
6039   if (!LHSI->hasNoInfs() || !I.hasNoInfs())
6040     return nullptr;
6041 
6042   // Check the properties of the dividend. It must not be zero to avoid a
6043   // division by zero (see Proof).
6044   const APFloat *C;
6045   if (!match(LHSI->getOperand(0), m_APFloat(C)))
6046     return nullptr;
6047 
6048   if (C->isZero())
6049     return nullptr;
6050 
6051   // Get swapped predicate if necessary.
6052   if (C->isNegative())
6053     Pred = I.getSwappedPredicate();
6054 
6055   return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
6056 }
6057 
6058 /// Optimize fabs(X) compared with zero.
6059 static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
6060   Value *X;
6061   if (!match(I.getOperand(0), m_FAbs(m_Value(X))) ||
6062       !match(I.getOperand(1), m_PosZeroFP()))
6063     return nullptr;
6064 
6065   auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
6066     I->setPredicate(P);
6067     return IC.replaceOperand(*I, 0, X);
6068   };
6069 
6070   switch (I.getPredicate()) {
6071   case FCmpInst::FCMP_UGE:
6072   case FCmpInst::FCMP_OLT:
6073     // fabs(X) >= 0.0 --> true
6074     // fabs(X) <  0.0 --> false
6075     llvm_unreachable("fcmp should have simplified");
6076 
6077   case FCmpInst::FCMP_OGT:
6078     // fabs(X) > 0.0 --> X != 0.0
6079     return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
6080 
6081   case FCmpInst::FCMP_UGT:
6082     // fabs(X) u> 0.0 --> X u!= 0.0
6083     return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
6084 
6085   case FCmpInst::FCMP_OLE:
6086     // fabs(X) <= 0.0 --> X == 0.0
6087     return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
6088 
6089   case FCmpInst::FCMP_ULE:
6090     // fabs(X) u<= 0.0 --> X u== 0.0
6091     return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
6092 
6093   case FCmpInst::FCMP_OGE:
6094     // fabs(X) >= 0.0 --> !isnan(X)
6095     assert(!I.hasNoNaNs() && "fcmp should have simplified");
6096     return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
6097 
6098   case FCmpInst::FCMP_ULT:
6099     // fabs(X) u< 0.0 --> isnan(X)
6100     assert(!I.hasNoNaNs() && "fcmp should have simplified");
6101     return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
6102 
6103   case FCmpInst::FCMP_OEQ:
6104   case FCmpInst::FCMP_UEQ:
6105   case FCmpInst::FCMP_ONE:
6106   case FCmpInst::FCMP_UNE:
6107   case FCmpInst::FCMP_ORD:
6108   case FCmpInst::FCMP_UNO:
6109     // Look through the fabs() because it doesn't change anything but the sign.
6110     // fabs(X) == 0.0 --> X == 0.0
6111     // fabs(X) != 0.0 --> X != 0.0
6112     // isnan(fabs(X)) --> isnan(X)
6113     // !isnan(fabs(X)) --> !isnan(X)
6114     return replacePredAndOp0(&I, I.getPredicate(), X);
6115 
6116   default:
6117     return nullptr;
6118   }
6119 }
6120 
6121 Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
6122   bool Changed = false;
6123 
6124   /// Orders the operands of the compare so that they are listed from most
6125   /// complex to least complex.  This puts binary operators before unary
6126   /// operators, and unary operators before constants.
6127   if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
6128     I.swapOperands();
6129     Changed = true;
6130   }
6131 
6132   const CmpInst::Predicate Pred = I.getPredicate();
6133   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6134   if (Value *V = SimplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
6135                                   SQ.getWithInstruction(&I)))
6136     return replaceInstUsesWith(I, V);
6137 
6138   // Simplify 'fcmp pred X, X'
6139   Type *OpType = Op0->getType();
6140   assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
6141   if (Op0 == Op1) {
6142     switch (Pred) {
6143       default: break;
6144     case FCmpInst::FCMP_UNO:    // True if unordered: isnan(X) | isnan(Y)
6145     case FCmpInst::FCMP_ULT:    // True if unordered or less than
6146     case FCmpInst::FCMP_UGT:    // True if unordered or greater than
6147     case FCmpInst::FCMP_UNE:    // True if unordered or not equal
6148       // Canonicalize these to be 'fcmp uno %X, 0.0'.
6149       I.setPredicate(FCmpInst::FCMP_UNO);
6150       I.setOperand(1, Constant::getNullValue(OpType));
6151       return &I;
6152 
6153     case FCmpInst::FCMP_ORD:    // True if ordered (no nans)
6154     case FCmpInst::FCMP_OEQ:    // True if ordered and equal
6155     case FCmpInst::FCMP_OGE:    // True if ordered and greater than or equal
6156     case FCmpInst::FCMP_OLE:    // True if ordered and less than or equal
6157       // Canonicalize these to be 'fcmp ord %X, 0.0'.
6158       I.setPredicate(FCmpInst::FCMP_ORD);
6159       I.setOperand(1, Constant::getNullValue(OpType));
6160       return &I;
6161     }
6162   }
6163 
6164   // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
6165   // then canonicalize the operand to 0.0.
6166   if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
6167     if (!match(Op0, m_PosZeroFP()) && isKnownNeverNaN(Op0, &TLI))
6168       return replaceOperand(I, 0, ConstantFP::getNullValue(OpType));
6169 
6170     if (!match(Op1, m_PosZeroFP()) && isKnownNeverNaN(Op1, &TLI))
6171       return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
6172   }
6173 
6174   // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
6175   Value *X, *Y;
6176   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
6177     return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
6178 
6179   // Test if the FCmpInst instruction is used exclusively by a select as
6180   // part of a minimum or maximum operation. If so, refrain from doing
6181   // any other folding. This helps out other analyses which understand
6182   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
6183   // and CodeGen. And in this case, at least one of the comparison
6184   // operands has at least one user besides the compare (the select),
6185   // which would often largely negate the benefit of folding anyway.
6186   if (I.hasOneUse())
6187     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
6188       Value *A, *B;
6189       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
6190       if (SPR.Flavor != SPF_UNKNOWN)
6191         return nullptr;
6192     }
6193 
6194   // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
6195   // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
6196   if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
6197     return replaceOperand(I, 1, ConstantFP::getNullValue(OpType));
6198 
6199   // Handle fcmp with instruction LHS and constant RHS.
6200   Instruction *LHSI;
6201   Constant *RHSC;
6202   if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
6203     switch (LHSI->getOpcode()) {
6204     case Instruction::PHI:
6205       // Only fold fcmp into the PHI if the phi and fcmp are in the same
6206       // block.  If in the same block, we're encouraging jump threading.  If
6207       // not, we are just pessimizing the code by making an i1 phi.
6208       if (LHSI->getParent() == I.getParent())
6209         if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
6210           return NV;
6211       break;
6212     case Instruction::SIToFP:
6213     case Instruction::UIToFP:
6214       if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
6215         return NV;
6216       break;
6217     case Instruction::FDiv:
6218       if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
6219         return NV;
6220       break;
6221     case Instruction::Load:
6222       if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
6223         if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
6224           if (GV->isConstant() && GV->hasDefinitiveInitializer() &&
6225               !cast<LoadInst>(LHSI)->isVolatile())
6226             if (Instruction *Res = foldCmpLoadFromIndexedGlobal(GEP, GV, I))
6227               return Res;
6228       break;
6229     }
6230   }
6231 
6232   if (Instruction *R = foldFabsWithFcmpZero(I, *this))
6233     return R;
6234 
6235   if (match(Op0, m_FNeg(m_Value(X)))) {
6236     // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
6237     Constant *C;
6238     if (match(Op1, m_Constant(C))) {
6239       Constant *NegC = ConstantExpr::getFNeg(C);
6240       return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
6241     }
6242   }
6243 
6244   if (match(Op0, m_FPExt(m_Value(X)))) {
6245     // fcmp (fpext X), (fpext Y) -> fcmp X, Y
6246     if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
6247       return new FCmpInst(Pred, X, Y, "", &I);
6248 
6249     // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
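         // e.g. (illustrative): fcmp olt (fpext float %x to double), 1.0
         //      --> fcmp olt float %x, 1.0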
6250     const APFloat *C;
6251     if (match(Op1, m_APFloat(C))) {
6252       const fltSemantics &FPSem =
6253           X->getType()->getScalarType()->getFltSemantics();
6254       bool Lossy;
6255       APFloat TruncC = *C;
6256       TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
6257 
6258       // Avoid lossy conversions and denormals.
6259       // Zero is a special case that's OK to convert.
6260       APFloat Fabs = TruncC;
6261       Fabs.clearSign();
6262       if (!Lossy &&
6263           (!(Fabs < APFloat::getSmallestNormalized(FPSem)) || Fabs.isZero())) {
6264         Constant *NewC = ConstantFP::get(X->getType(), TruncC);
6265         return new FCmpInst(Pred, X, NewC, "", &I);
6266       }
6267     }
6268   }
6269 
6270   if (I.getType()->isVectorTy())
6271     if (Instruction *Res = foldVectorCmp(I, Builder))
6272       return Res;
6273 
6274   return Changed ? &I : nullptr;
6275 }
6276