xref: /freebsd/contrib/llvm-project/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp (revision 700637cbb5e582861067a11aaca4d053546871d2)
1 //===- InstCombineCompares.cpp --------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitICmp and visitFCmp functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APSInt.h"
15 #include "llvm/ADT/SetVector.h"
16 #include "llvm/ADT/Statistic.h"
17 #include "llvm/Analysis/CaptureTracking.h"
18 #include "llvm/Analysis/CmpInstAnalysis.h"
19 #include "llvm/Analysis/ConstantFolding.h"
20 #include "llvm/Analysis/InstructionSimplify.h"
21 #include "llvm/Analysis/Utils/Local.h"
22 #include "llvm/Analysis/VectorUtils.h"
23 #include "llvm/IR/ConstantRange.h"
24 #include "llvm/IR/DataLayout.h"
25 #include "llvm/IR/InstrTypes.h"
26 #include "llvm/IR/IntrinsicInst.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/Support/KnownBits.h"
29 #include "llvm/Transforms/InstCombine/InstCombiner.h"
30 #include <bitset>
31 
32 using namespace llvm;
33 using namespace PatternMatch;
34 
35 #define DEBUG_TYPE "instcombine"
36 
37 // How many times is a select replaced by one of its operands?
38 STATISTIC(NumSel, "Number of select opts");
39 
40 /// Compute Result = In1+In2, returning true if the result overflowed for this
41 /// type.
42 static bool addWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
43                             bool IsSigned = false) {
44   bool Overflow;
45   if (IsSigned)
46     Result = In1.sadd_ov(In2, Overflow);
47   else
48     Result = In1.uadd_ov(In2, Overflow);
49 
50   return Overflow;
51 }
52 
53 /// Compute Result = In1-In2, returning true if the result overflowed for this
54 /// type.
55 static bool subWithOverflow(APInt &Result, const APInt &In1, const APInt &In2,
56                             bool IsSigned = false) {
57   bool Overflow;
58   if (IsSigned)
59     Result = In1.ssub_ov(In2, Overflow);
60   else
61     Result = In1.usub_ov(In2, Overflow);
62 
63   return Overflow;
64 }
65 
66 /// Given an icmp instruction, return true if any use of this comparison is a
67 /// branch on sign bit comparison.
68 static bool hasBranchUse(ICmpInst &I) {
69   for (auto *U : I.users())
70     if (isa<BranchInst>(U))
71       return true;
72   return false;
73 }
74 
75 /// Returns true if the exploded icmp can be expressed as a signed comparison
76 /// to zero and updates the predicate accordingly.
77 /// The signedness of the comparison is preserved.
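/// For example, "X <s 1" can be rewritten as "X <=s 0", and "X >s -1" as
/// "X >=s 0" (illustrative of the rewrites performed below).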
78 /// TODO: Refactor with decomposeBitTestICmp()?
79 static bool isSignTest(ICmpInst::Predicate &Pred, const APInt &C) {
80   if (!ICmpInst::isSigned(Pred))
81     return false;
82 
83   if (C.isZero())
84     return ICmpInst::isRelational(Pred);
85 
86   if (C.isOne()) {
87     if (Pred == ICmpInst::ICMP_SLT) {
88       Pred = ICmpInst::ICMP_SLE;
89       return true;
90     }
91   } else if (C.isAllOnes()) {
92     if (Pred == ICmpInst::ICMP_SGT) {
93       Pred = ICmpInst::ICMP_SGE;
94       return true;
95     }
96   }
97 
98   return false;
99 }
100 
101 /// This is called when we see this pattern:
102 ///   cmp pred (load (gep GV, ...)), cmpcst
103 /// where GV is a global variable with a constant initializer. Try to simplify
104 /// this into some simple computation that does not need the load. For example
105 /// we can optimize "icmp eq (load (gep "foo", 0, i)), 0" into "icmp eq i, 3".
106 ///
107 /// If AndCst is non-null, then the loaded value is masked with that constant
108 /// before doing the comparison. This handles cases like "A[i]&4 == 0".
109 Instruction *InstCombinerImpl::foldCmpLoadFromIndexedGlobal(
110     LoadInst *LI, GetElementPtrInst *GEP, GlobalVariable *GV, CmpInst &ICI,
111     ConstantInt *AndCst) {
112   if (LI->isVolatile() || LI->getType() != GEP->getResultElementType() ||
113       GV->getValueType() != GEP->getSourceElementType() || !GV->isConstant() ||
114       !GV->hasDefinitiveInitializer())
115     return nullptr;
116 
117   Constant *Init = GV->getInitializer();
118   if (!isa<ConstantArray>(Init) && !isa<ConstantDataArray>(Init))
119     return nullptr;
120 
121   uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
122   // Don't blow up on huge arrays.
123   if (ArrayElementCount > MaxArraySizeForCombine)
124     return nullptr;
125 
126   // There are many forms of this optimization we could handle; for now, just do
127   // the simple index into a single-dimensional array.
128   //
129   // Require: GEP GV, 0, i {{, constant indices}}
130   if (GEP->getNumOperands() < 3 || !isa<ConstantInt>(GEP->getOperand(1)) ||
131       !cast<ConstantInt>(GEP->getOperand(1))->isZero() ||
132       isa<Constant>(GEP->getOperand(2)))
133     return nullptr;
134 
135   // Check that indices after the variable are constants and in-range for the
136   // type they index.  Collect the indices.  This is typically for arrays of
137   // structs.
138   SmallVector<unsigned, 4> LaterIndices;
139 
140   Type *EltTy = Init->getType()->getArrayElementType();
141   for (unsigned i = 3, e = GEP->getNumOperands(); i != e; ++i) {
142     ConstantInt *Idx = dyn_cast<ConstantInt>(GEP->getOperand(i));
143     if (!Idx)
144       return nullptr; // Variable index.
145 
146     uint64_t IdxVal = Idx->getZExtValue();
147     if ((unsigned)IdxVal != IdxVal)
148       return nullptr; // Too large array index.
149 
150     if (StructType *STy = dyn_cast<StructType>(EltTy))
151       EltTy = STy->getElementType(IdxVal);
152     else if (ArrayType *ATy = dyn_cast<ArrayType>(EltTy)) {
153       if (IdxVal >= ATy->getNumElements())
154         return nullptr;
155       EltTy = ATy->getElementType();
156     } else {
157       return nullptr; // Unknown type.
158     }
159 
160     LaterIndices.push_back(IdxVal);
161   }
162 
163   enum { Overdefined = -3, Undefined = -2 };
164 
165   // Variables for our state machines.
166 
167   // FirstTrueElement/SecondTrueElement - Used to emit a comparison of the form
168   // "i == 47 | i == 87", where 47 is the first index the condition is true for,
169   // and 87 is the second (and last) index.  FirstTrueElement is -2 when
170   // undefined, otherwise set to the first true element.  SecondTrueElement is
171   // -2 when undefined, -3 when overdefined and >= 0 when that index is true.
172   int FirstTrueElement = Undefined, SecondTrueElement = Undefined;
173 
174   // FirstFalseElement/SecondFalseElement - Used to emit a comparison of the
175   // form "i != 47 & i != 87".  Same state transitions as for true elements.
176   int FirstFalseElement = Undefined, SecondFalseElement = Undefined;
177 
178   /// TrueRangeEnd/FalseRangeEnd - In conjunction with First*Element, these
179   /// define a state machine that triggers for ranges of values that the index
180   /// is true or false for.  This triggers on things like "abbbbc"[i] == 'b'.
181   /// This is -2 when undefined, -3 when overdefined, and otherwise the last
182   /// index in the range (inclusive).  We use -2 for undefined here because we
183   /// use relative comparisons and don't want 0-1 to match -1.
184   int TrueRangeEnd = Undefined, FalseRangeEnd = Undefined;
185 
186   // MagicBitvector - This is a magic bitvector where we set a bit if the
187   // comparison is true for element 'i'.  If there are 64 elements or less in
188   // the array, this will fully represent all the comparison results.
189   uint64_t MagicBitvector = 0;
190 
191   // Scan the array and see if one of our patterns matches.
192   Constant *CompareRHS = cast<Constant>(ICI.getOperand(1));
193   for (unsigned i = 0, e = ArrayElementCount; i != e; ++i) {
194     Constant *Elt = Init->getAggregateElement(i);
195     if (!Elt)
196       return nullptr;
197 
198     // If this is indexing an array of structures, get the structure element.
199     if (!LaterIndices.empty()) {
200       Elt = ConstantFoldExtractValueInstruction(Elt, LaterIndices);
201       if (!Elt)
202         return nullptr;
203     }
204 
205     // If the element is masked, handle it.
206     if (AndCst) {
207       Elt = ConstantFoldBinaryOpOperands(Instruction::And, Elt, AndCst, DL);
208       if (!Elt)
209         return nullptr;
210     }
211 
212     // Find out if the comparison would be true or false for the i'th element.
213     Constant *C = ConstantFoldCompareInstOperands(ICI.getPredicate(), Elt,
214                                                   CompareRHS, DL, &TLI);
215     if (!C)
216       return nullptr;
217 
218     // If the result is undef for this element, ignore it.
219     if (isa<UndefValue>(C)) {
220       // Extend range state machines to cover this element in case there is an
221       // undef in the middle of the range.
222       if (TrueRangeEnd == (int)i - 1)
223         TrueRangeEnd = i;
224       if (FalseRangeEnd == (int)i - 1)
225         FalseRangeEnd = i;
226       continue;
227     }
228 
229     // If we can't compute the result for any of the elements, we have to give
230     // up evaluating the entire conditional.
231     if (!isa<ConstantInt>(C))
232       return nullptr;
233 
234     // Otherwise, we know if the comparison is true or false for this element,
235     // update our state machines.
236     bool IsTrueForElt = !cast<ConstantInt>(C)->isZero();
237 
238     // State machine for single/double/range index comparison.
239     if (IsTrueForElt) {
240       // Update the TrueElement state machine.
241       if (FirstTrueElement == Undefined)
242         FirstTrueElement = TrueRangeEnd = i; // First true element.
243       else {
244         // Update double-compare state machine.
245         if (SecondTrueElement == Undefined)
246           SecondTrueElement = i;
247         else
248           SecondTrueElement = Overdefined;
249 
250         // Update range state machine.
251         if (TrueRangeEnd == (int)i - 1)
252           TrueRangeEnd = i;
253         else
254           TrueRangeEnd = Overdefined;
255       }
256     } else {
257       // Update the FalseElement state machine.
258       if (FirstFalseElement == Undefined)
259         FirstFalseElement = FalseRangeEnd = i; // First false element.
260       else {
261         // Update double-compare state machine.
262         if (SecondFalseElement == Undefined)
263           SecondFalseElement = i;
264         else
265           SecondFalseElement = Overdefined;
266 
267         // Update range state machine.
268         if (FalseRangeEnd == (int)i - 1)
269           FalseRangeEnd = i;
270         else
271           FalseRangeEnd = Overdefined;
272       }
273     }
274 
275     // If this element is in range, update our magic bitvector.
276     if (i < 64 && IsTrueForElt)
277       MagicBitvector |= 1ULL << i;
278 
279     // If all of our states become overdefined, bail out early.  Since the
280     // predicate is expensive, only check it every 8 elements.  This is only
281     // really useful for really huge arrays.
282     if ((i & 8) == 0 && i >= 64 && SecondTrueElement == Overdefined &&
283         SecondFalseElement == Overdefined && TrueRangeEnd == Overdefined &&
284         FalseRangeEnd == Overdefined)
285       return nullptr;
286   }
287 
288   // Now that we've scanned the entire array, emit our new comparison(s).  We
289   // order the state machines in complexity of the generated code.
290   Value *Idx = GEP->getOperand(2);
291 
292   // If the index is larger than the pointer offset size of the target, truncate
293   // the index down like the GEP would do implicitly.  We don't have to do this
294   // for an inbounds GEP because the index can't be out of range.
295   if (!GEP->isInBounds()) {
296     Type *PtrIdxTy = DL.getIndexType(GEP->getType());
297     unsigned OffsetSize = PtrIdxTy->getIntegerBitWidth();
298     if (Idx->getType()->getPrimitiveSizeInBits().getFixedValue() > OffsetSize)
299       Idx = Builder.CreateTrunc(Idx, PtrIdxTy);
300   }
301 
302   // If inbounds keyword is not present, Idx * ElementSize can overflow.
303   // Let's assume that ElementSize is 2 and the wanted value is at offset 0.
304   // Then, there are two possible values for Idx to match offset 0:
305   // 0x00..00, 0x80..00.
306   // Emitting 'icmp eq Idx, 0' isn't correct in this case because the
307   // comparison is false if Idx was 0x80..00.
308   // We need to erase the highest countTrailingZeros(ElementSize) bits of Idx.
309   unsigned ElementSize =
310       DL.getTypeAllocSize(Init->getType()->getArrayElementType());
311   auto MaskIdx = [&](Value *Idx) {
312     if (!GEP->isInBounds() && llvm::countr_zero(ElementSize) != 0) {
313       Value *Mask = Constant::getAllOnesValue(Idx->getType());
314       Mask = Builder.CreateLShr(Mask, llvm::countr_zero(ElementSize));
315       Idx = Builder.CreateAnd(Idx, Mask);
316     }
317     return Idx;
318   };
319 
320   // If the comparison is only true for one or two elements, emit direct
321   // comparisons.
322   if (SecondTrueElement != Overdefined) {
323     Idx = MaskIdx(Idx);
324     // None true -> false.
325     if (FirstTrueElement == Undefined)
326       return replaceInstUsesWith(ICI, Builder.getFalse());
327 
328     Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
329 
330     // True for one element -> 'i == 47'.
331     if (SecondTrueElement == Undefined)
332       return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
333 
334     // True for two elements -> 'i == 47 | i == 72'.
335     Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
336     Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
337     Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
338     return BinaryOperator::CreateOr(C1, C2);
339   }
340 
341   // If the comparison is only false for one or two elements, emit direct
342   // comparisons.
343   if (SecondFalseElement != Overdefined) {
344     Idx = MaskIdx(Idx);
345     // None false -> true.
346     if (FirstFalseElement == Undefined)
347       return replaceInstUsesWith(ICI, Builder.getTrue());
348 
349     Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
350 
351     // False for one element -> 'i != 47'.
352     if (SecondFalseElement == Undefined)
353       return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
354 
355     // False for two elements -> 'i != 47 & i != 72'.
356     Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
357     Value *SecondFalseIdx =
358         ConstantInt::get(Idx->getType(), SecondFalseElement);
359     Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
360     return BinaryOperator::CreateAnd(C1, C2);
361   }
362 
363   // If the comparison can be replaced with a range comparison for the elements
364   // where it is true, emit the range check.
365   if (TrueRangeEnd != Overdefined) {
366     assert(TrueRangeEnd != FirstTrueElement && "Should emit single compare");
367     Idx = MaskIdx(Idx);
368 
369     // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
370     if (FirstTrueElement) {
371       Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
372       Idx = Builder.CreateAdd(Idx, Offs);
373     }
374 
375     Value *End =
376         ConstantInt::get(Idx->getType(), TrueRangeEnd - FirstTrueElement + 1);
377     return new ICmpInst(ICmpInst::ICMP_ULT, Idx, End);
378   }
379 
380   // False range check.
381   if (FalseRangeEnd != Overdefined) {
382     assert(FalseRangeEnd != FirstFalseElement && "Should emit single compare");
383     Idx = MaskIdx(Idx);
384     // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
385     if (FirstFalseElement) {
386       Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
387       Idx = Builder.CreateAdd(Idx, Offs);
388     }
389 
390     Value *End =
391         ConstantInt::get(Idx->getType(), FalseRangeEnd - FirstFalseElement);
392     return new ICmpInst(ICmpInst::ICMP_UGT, Idx, End);
393   }
394 
395   // If a magic bitvector captures the entire comparison state
396   // of this load, replace it with computation that does:
397   //   ((magic_cst >> i) & 1) != 0
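  // For example (illustrative): if the comparison is true exactly for
  // i = 0, 2, 3 and 5, the bitvector is 0b101101 and the result is
  //   ((0b101101 >> i) & 1) != 0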
398   {
399     Type *Ty = nullptr;
400 
401     // Look for an appropriate type:
402     // - The type of Idx if the magic fits
403     // - The smallest fitting legal type
404     if (ArrayElementCount <= Idx->getType()->getIntegerBitWidth())
405       Ty = Idx->getType();
406     else
407       Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
408 
409     if (Ty) {
410       Idx = MaskIdx(Idx);
411       Value *V = Builder.CreateIntCast(Idx, Ty, false);
412       V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
413       V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
414       return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
415     }
416   }
417 
418   return nullptr;
419 }
420 
421 /// Returns true if we can rewrite Start as a GEP with pointer Base
422 /// and some integer offset. The nodes that need to be re-written
423 /// for this transformation will be added to Explored.
424 static bool canRewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags &NW,
425                                   const DataLayout &DL,
426                                   SetVector<Value *> &Explored) {
427   SmallVector<Value *, 16> WorkList(1, Start);
428   Explored.insert(Base);
429 
430   // The following traversal gives us an order which can be used
431   // when doing the final transformation. Since in the final
432   // transformation we create the PHI replacement instructions first,
433   // we don't have to get them in any particular order.
434   //
435   // However, for other instructions we will have to traverse the
436   // operands of an instruction first, which means that we have to
437   // do a post-order traversal.
438   while (!WorkList.empty()) {
439     SetVector<PHINode *> PHIs;
440 
441     while (!WorkList.empty()) {
442       if (Explored.size() >= 100)
443         return false;
444 
445       Value *V = WorkList.back();
446 
447       if (Explored.contains(V)) {
448         WorkList.pop_back();
449         continue;
450       }
451 
452       if (!isa<GetElementPtrInst>(V) && !isa<PHINode>(V))
453         // We've found some value that we can't explore which is different from
454         // the base. Therefore we can't do this transformation.
455         return false;
456 
457       if (auto *GEP = dyn_cast<GEPOperator>(V)) {
458         // Only allow inbounds GEPs with at most one variable offset.
459         auto IsNonConst = [](Value *V) { return !isa<ConstantInt>(V); };
460         if (!GEP->isInBounds() || count_if(GEP->indices(), IsNonConst) > 1)
461           return false;
462 
463         NW = NW.intersectForOffsetAdd(GEP->getNoWrapFlags());
464         if (!Explored.contains(GEP->getOperand(0)))
465           WorkList.push_back(GEP->getOperand(0));
466       }
467 
468       if (WorkList.back() == V) {
469         WorkList.pop_back();
470         // We've finished visiting this node, mark it as such.
471         Explored.insert(V);
472       }
473 
474       if (auto *PN = dyn_cast<PHINode>(V)) {
475         // We cannot transform PHIs on unsplittable basic blocks.
476         if (isa<CatchSwitchInst>(PN->getParent()->getTerminator()))
477           return false;
478         Explored.insert(PN);
479         PHIs.insert(PN);
480       }
481     }
482 
483     // Explore the PHI nodes further.
484     for (auto *PN : PHIs)
485       for (Value *Op : PN->incoming_values())
486         if (!Explored.contains(Op))
487           WorkList.push_back(Op);
488   }
489 
490   // Make sure that we can do this. Since we can't insert GEPs in a basic
491   // block before a PHI node, we can't easily do this transformation if
492   // we have PHI node users of transformed instructions.
493   for (Value *Val : Explored) {
494     for (Value *Use : Val->uses()) {
495 
496       auto *PHI = dyn_cast<PHINode>(Use);
497       auto *Inst = dyn_cast<Instruction>(Val);
498 
499       if (Inst == Base || Inst == PHI || !Inst || !PHI ||
500           !Explored.contains(PHI))
501         continue;
502 
503       if (PHI->getParent() == Inst->getParent())
504         return false;
505     }
506   }
507   return true;
508 }
509 
510 // Sets the appropriate insert point on Builder where we can add
511 // a replacement Instruction for V (if that is possible).
512 static void setInsertionPoint(IRBuilder<> &Builder, Value *V,
513                               bool Before = true) {
514   if (auto *PHI = dyn_cast<PHINode>(V)) {
515     BasicBlock *Parent = PHI->getParent();
516     Builder.SetInsertPoint(Parent, Parent->getFirstInsertionPt());
517     return;
518   }
519   if (auto *I = dyn_cast<Instruction>(V)) {
520     if (!Before)
521       I = &*std::next(I->getIterator());
522     Builder.SetInsertPoint(I);
523     return;
524   }
525   if (auto *A = dyn_cast<Argument>(V)) {
526     // Set the insertion point in the entry block.
527     BasicBlock &Entry = A->getParent()->getEntryBlock();
528     Builder.SetInsertPoint(&Entry, Entry.getFirstInsertionPt());
529     return;
530   }
531   // Otherwise, this is a constant and we don't need to set a new
532   // insertion point.
533   assert(isa<Constant>(V) && "Setting insertion point for unknown value!");
534 }
535 
536 /// Returns a re-written value of Start as an indexed GEP using Base as a
537 /// pointer.
538 static Value *rewriteGEPAsOffset(Value *Start, Value *Base, GEPNoWrapFlags NW,
539                                  const DataLayout &DL,
540                                  SetVector<Value *> &Explored,
541                                  InstCombiner &IC) {
542   // Perform all the substitutions. This is a bit tricky because we can
543   // have cycles in our use-def chains.
544   // 1. Create the PHI nodes without any incoming values.
545   // 2. Create all the other values.
546   // 3. Add the edges for the PHI nodes.
547   // 4. Emit GEPs to get the original pointers.
548   // 5. Remove the original instructions.
549   Type *IndexType = IntegerType::get(
550       Base->getContext(), DL.getIndexTypeSizeInBits(Start->getType()));
551 
552   DenseMap<Value *, Value *> NewInsts;
553   NewInsts[Base] = ConstantInt::getNullValue(IndexType);
554 
555   // Create the new PHI nodes, without adding any incoming values.
556   for (Value *Val : Explored) {
557     if (Val == Base)
558       continue;
559     // Create empty phi nodes. This avoids cyclic dependencies when creating
560     // the remaining instructions.
561     if (auto *PHI = dyn_cast<PHINode>(Val))
562       NewInsts[PHI] =
563           PHINode::Create(IndexType, PHI->getNumIncomingValues(),
564                           PHI->getName() + ".idx", PHI->getIterator());
565   }
566   IRBuilder<> Builder(Base->getContext());
567 
568   // Create all the other instructions.
569   for (Value *Val : Explored) {
570     if (NewInsts.contains(Val))
571       continue;
572 
573     if (auto *GEP = dyn_cast<GEPOperator>(Val)) {
574       setInsertionPoint(Builder, GEP);
575       Value *Op = NewInsts[GEP->getOperand(0)];
576       Value *OffsetV = emitGEPOffset(&Builder, DL, GEP);
577       if (isa<ConstantInt>(Op) && cast<ConstantInt>(Op)->isZero())
578         NewInsts[GEP] = OffsetV;
579       else
580         NewInsts[GEP] = Builder.CreateAdd(
581             Op, OffsetV, GEP->getOperand(0)->getName() + ".add",
582             /*NUW=*/NW.hasNoUnsignedWrap(),
583             /*NSW=*/NW.hasNoUnsignedSignedWrap());
584       continue;
585     }
586     if (isa<PHINode>(Val))
587       continue;
588 
589     llvm_unreachable("Unexpected instruction type");
590   }
591 
592   // Add the incoming values to the PHI nodes.
593   for (Value *Val : Explored) {
594     if (Val == Base)
595       continue;
596     // All the instructions have been created, we can now add edges to the
597     // phi nodes.
598     if (auto *PHI = dyn_cast<PHINode>(Val)) {
599       PHINode *NewPhi = static_cast<PHINode *>(NewInsts[PHI]);
600       for (unsigned I = 0, E = PHI->getNumIncomingValues(); I < E; ++I) {
601         Value *NewIncoming = PHI->getIncomingValue(I);
602 
603         auto It = NewInsts.find(NewIncoming);
604         if (It != NewInsts.end())
605           NewIncoming = It->second;
606 
607         NewPhi->addIncoming(NewIncoming, PHI->getIncomingBlock(I));
608       }
609     }
610   }
611 
612   for (Value *Val : Explored) {
613     if (Val == Base)
614       continue;
615 
616     setInsertionPoint(Builder, Val, false);
617     // Create GEP for external users.
618     Value *NewVal = Builder.CreateGEP(Builder.getInt8Ty(), Base, NewInsts[Val],
619                                       Val->getName() + ".ptr", NW);
620     IC.replaceInstUsesWith(*cast<Instruction>(Val), NewVal);
621     // Add old instruction to worklist for DCE. We don't directly remove it
622     // here because the original compare is one of the users.
623     IC.addToWorklist(cast<Instruction>(Val));
624   }
625 
626   return NewInsts[Start];
627 }
628 
629 /// Converts (CMP GEPLHS, RHS) if this change would make RHS a constant.
630 /// We can look through PHIs, GEPs and casts in order to determine a common base
631 /// between GEPLHS and RHS.
632 static Instruction *transformToIndexedCompare(GEPOperator *GEPLHS, Value *RHS,
633                                               CmpPredicate Cond,
634                                               const DataLayout &DL,
635                                               InstCombiner &IC) {
636   // FIXME: Support vector of pointers.
637   if (GEPLHS->getType()->isVectorTy())
638     return nullptr;
639 
640   if (!GEPLHS->hasAllConstantIndices())
641     return nullptr;
642 
643   APInt Offset(DL.getIndexTypeSizeInBits(GEPLHS->getType()), 0);
644   Value *PtrBase =
645       GEPLHS->stripAndAccumulateConstantOffsets(DL, Offset,
646                                                 /*AllowNonInbounds*/ false);
647 
648   // Bail if we looked through addrspacecast.
649   if (PtrBase->getType() != GEPLHS->getType())
650     return nullptr;
651 
652   // The set of nodes that will take part in this transformation.
653   SetVector<Value *> Nodes;
654   GEPNoWrapFlags NW = GEPLHS->getNoWrapFlags();
655   if (!canRewriteGEPAsOffset(RHS, PtrBase, NW, DL, Nodes))
656     return nullptr;
657 
658   // We know we can re-write this as
659   //  ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))
660   // Since we've only looked through inbounds GEPs we know that we
661   // can't have overflow on either side. We can therefore re-write
662   // this as:
663   //   OFFSET1 cmp OFFSET2
664   Value *NewRHS = rewriteGEPAsOffset(RHS, PtrBase, NW, DL, Nodes, IC);
665 
666   // RewriteGEPAsOffset has replaced RHS and all of its uses with a re-written
667   // GEP having PtrBase as the pointer base, and has returned in NewRHS the
668   // offset. Since Index is the offset of LHS to the base pointer, we will now
669   // compare the offsets instead of comparing the pointers.
670   return new ICmpInst(ICmpInst::getSignedPredicate(Cond),
671                       IC.Builder.getInt(Offset), NewRHS);
672 }
673 
674 /// Fold comparisons between a GEP instruction and something else. At this point
675 /// we know that the GEP is on the LHS of the comparison.
676 Instruction *InstCombinerImpl::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
677                                            CmpPredicate Cond, Instruction &I) {
678   // Don't transform signed compares of GEPs into index compares. Even if the
679   // GEP is inbounds, the final add of the base pointer can have signed overflow
680   // and would change the result of the icmp.
681   // e.g. "&foo[0] <s &foo[1]" can't be folded to "true" because "foo" could be
682   // the maximum signed value for the pointer type.
683   if (ICmpInst::isSigned(Cond))
684     return nullptr;
685 
686   // Look through bitcasts and addrspacecasts. We do not however want to remove
687   // 0 GEPs.
688   if (!isa<GetElementPtrInst>(RHS))
689     RHS = RHS->stripPointerCasts();
690 
691   auto CanFold = [Cond](GEPNoWrapFlags NW) {
692     if (ICmpInst::isEquality(Cond))
693       return true;
694 
695     // Unsigned predicates can be folded if the GEPs have *any* nowrap flags.
696     assert(ICmpInst::isUnsigned(Cond));
697     return NW != GEPNoWrapFlags::none();
698   };
699 
700   auto NewICmp = [Cond](GEPNoWrapFlags NW, Value *Op1, Value *Op2) {
701     if (!NW.hasNoUnsignedWrap()) {
702       // Without nuw the offsets may be negative, so use the signed form of
703       // the predicate.
703       return new ICmpInst(ICmpInst::getSignedPredicate(Cond), Op1, Op2);
704     }
705 
706     auto *I = new ICmpInst(Cond, Op1, Op2);
707     I->setSameSign(NW.hasNoUnsignedSignedWrap());
708     return I;
709   };
710 
711   CommonPointerBase Base = CommonPointerBase::compute(GEPLHS, RHS);
712   if (Base.Ptr == RHS && CanFold(Base.LHSNW)) {
713     // ((gep Ptr, OFFSET) cmp Ptr)   ---> (OFFSET cmp 0).
714     Type *IdxTy = DL.getIndexType(GEPLHS->getType());
715     Value *Offset =
716         EmitGEPOffsets(Base.LHSGEPs, Base.LHSNW, IdxTy, /*RewriteGEPs=*/true);
717     return NewICmp(Base.LHSNW, Offset,
718                    Constant::getNullValue(Offset->getType()));
719   }
720 
721   if (GEPLHS->isInBounds() && ICmpInst::isEquality(Cond) &&
722       isa<Constant>(RHS) && cast<Constant>(RHS)->isNullValue() &&
723       !NullPointerIsDefined(I.getFunction(),
724                             RHS->getType()->getPointerAddressSpace())) {
725     // For most address spaces, an allocation can't be placed at null, but null
726     // itself is treated as a 0 size allocation in the in bounds rules.  Thus,
727     // the only valid inbounds address derived from null, is null itself.
728     // Thus, we have four cases to consider:
729     // 1) Base == nullptr, Offset == 0 -> inbounds, null
730     // 2) Base == nullptr, Offset != 0 -> poison as the result is out of bounds
731     // 3) Base != nullptr, Offset == (-base) -> poison (crossing allocations)
732     // 4) Base != nullptr, Offset != (-base) -> nonnull (and possibly poison)
733     //
734     // (Note if we're indexing a type of size 0, that simply collapses into one
735     //  of the buckets above.)
736     //
737     // In general, we're allowed to make values less poison (i.e. remove
738     //   sources of full UB), so in this case, we just select between the two
739     //   non-poison cases (1 and 4 above).
740     //
741     // For vectors, we apply the same reasoning on a per-lane basis.
742     auto *Base = GEPLHS->getPointerOperand();
743     if (GEPLHS->getType()->isVectorTy() && Base->getType()->isPointerTy()) {
744       auto EC = cast<VectorType>(GEPLHS->getType())->getElementCount();
745       Base = Builder.CreateVectorSplat(EC, Base);
746     }
747     return new ICmpInst(Cond, Base,
748                         ConstantExpr::getPointerBitCastOrAddrSpaceCast(
749                             cast<Constant>(RHS), Base->getType()));
750   } else if (GEPOperator *GEPRHS = dyn_cast<GEPOperator>(RHS)) {
751     GEPNoWrapFlags NW = GEPLHS->getNoWrapFlags() & GEPRHS->getNoWrapFlags();
752 
753     // If the base pointers are different, but the indices are the same, just
754     // compare the base pointer.
755     Value *PtrBase = GEPLHS->getOperand(0);
756     if (PtrBase != GEPRHS->getOperand(0)) {
757       bool IndicesTheSame =
758           GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
759           GEPLHS->getPointerOperand()->getType() ==
760               GEPRHS->getPointerOperand()->getType() &&
761           GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType();
762       if (IndicesTheSame)
763         for (unsigned i = 1, e = GEPLHS->getNumOperands(); i != e; ++i)
764           if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
765             IndicesTheSame = false;
766             break;
767           }
768 
769       // If all indices are the same, just compare the base pointers.
770       Type *BaseType = GEPLHS->getOperand(0)->getType();
771       if (IndicesTheSame &&
772           CmpInst::makeCmpResultType(BaseType) == I.getType() && CanFold(NW))
773         return new ICmpInst(Cond, GEPLHS->getOperand(0), GEPRHS->getOperand(0));
774 
775       // If we're comparing GEPs with two base pointers that only differ in type
776       // and both GEPs have only constant indices or just one use, then fold
777       // the compare with the adjusted indices.
778       // FIXME: Support vector of pointers.
779       if (GEPLHS->isInBounds() && GEPRHS->isInBounds() &&
780           (GEPLHS->hasAllConstantIndices() || GEPLHS->hasOneUse()) &&
781           (GEPRHS->hasAllConstantIndices() || GEPRHS->hasOneUse()) &&
782           PtrBase->stripPointerCasts() ==
783               GEPRHS->getOperand(0)->stripPointerCasts() &&
784           !GEPLHS->getType()->isVectorTy()) {
785         Value *LOffset = EmitGEPOffset(GEPLHS);
786         Value *ROffset = EmitGEPOffset(GEPRHS);
787 
788         // If we looked through an addrspacecast between different sized address
789         // spaces, the LHS and RHS pointers are different sized
790         // integers. Truncate to the smaller one.
791         Type *LHSIndexTy = LOffset->getType();
792         Type *RHSIndexTy = ROffset->getType();
793         if (LHSIndexTy != RHSIndexTy) {
794           if (LHSIndexTy->getPrimitiveSizeInBits().getFixedValue() <
795               RHSIndexTy->getPrimitiveSizeInBits().getFixedValue()) {
796             ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
797           } else
798             LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
799         }
800 
801         Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
802                                         LOffset, ROffset);
803         return replaceInstUsesWith(I, Cmp);
804       }
805 
806       // Otherwise, the base pointers are different and the indices are
807       // different. Try convert this to an indexed compare by looking through
808       // PHIs/casts.
809       return transformToIndexedCompare(GEPLHS, RHS, Cond, DL, *this);
810     }
811 
812     if (GEPLHS->getNumOperands() == GEPRHS->getNumOperands() &&
813         GEPLHS->getSourceElementType() == GEPRHS->getSourceElementType()) {
814       // If the GEPs only differ by one index, compare it.
815       unsigned NumDifferences = 0; // Keep track of # differences.
816       unsigned DiffOperand = 0;    // The operand that differs.
817       for (unsigned i = 1, e = GEPRHS->getNumOperands(); i != e; ++i)
818         if (GEPLHS->getOperand(i) != GEPRHS->getOperand(i)) {
819           Type *LHSType = GEPLHS->getOperand(i)->getType();
820           Type *RHSType = GEPRHS->getOperand(i)->getType();
821           // FIXME: Better support for vector of pointers.
822           if (LHSType->getPrimitiveSizeInBits() !=
823                   RHSType->getPrimitiveSizeInBits() ||
824               (GEPLHS->getType()->isVectorTy() &&
825                (!LHSType->isVectorTy() || !RHSType->isVectorTy()))) {
826             // Irreconcilable differences.
827             NumDifferences = 2;
828             break;
829           }
830 
831           if (NumDifferences++)
832             break;
833           DiffOperand = i;
834         }
835 
836       if (NumDifferences == 0) // SAME GEP?
837         return replaceInstUsesWith(
838             I, // No comparison is needed here.
839             ConstantInt::get(I.getType(), ICmpInst::isTrueWhenEqual(Cond)));
840       // If two GEPs only differ by an index, compare them.
841       // Note that nowrap flags are always needed when comparing two indices.
842       else if (NumDifferences == 1 && NW != GEPNoWrapFlags::none()) {
843         Value *LHSV = GEPLHS->getOperand(DiffOperand);
844         Value *RHSV = GEPRHS->getOperand(DiffOperand);
845         return NewICmp(NW, LHSV, RHSV);
846       }
847     }
848 
849     if (CanFold(NW)) {
850       // ((gep Ptr, OFFSET1) cmp (gep Ptr, OFFSET2))  --->  (OFFSET1 cmp OFFSET2)
851       Value *L = EmitGEPOffset(GEPLHS, /*RewriteGEP=*/true);
852       Value *R = EmitGEPOffset(GEPRHS, /*RewriteGEP=*/true);
853       return NewICmp(NW, L, R);
854     }
855   }
856 
857   // Try convert this to an indexed compare by looking through PHIs/casts as a
858   // last resort.
859   return transformToIndexedCompare(GEPLHS, RHS, Cond, DL, *this);
860 }
861 
862 bool InstCombinerImpl::foldAllocaCmp(AllocaInst *Alloca) {
863   // It would be tempting to fold away comparisons between allocas and any
864   // pointer not based on that alloca (e.g. an argument). However, even
865   // though such pointers cannot alias, they can still compare equal.
866   //
867   // But LLVM doesn't specify where allocas get their memory, so if the alloca
868   // doesn't escape we can argue that it's impossible to guess its value, and we
869   // can therefore act as if any such guesses are wrong.
870   //
871   // However, we need to ensure that this folding is consistent: We can't fold
872   // one comparison to false, and then leave a different comparison against the
873   // same value alone (as it might evaluate to true at runtime, leading to a
874   // contradiction). As such, this code ensures that all comparisons are folded
875   // at the same time, and there are no other escapes.
876 
877   struct CmpCaptureTracker : public CaptureTracker {
878     AllocaInst *Alloca;
879     bool Captured = false;
880     /// The value of the map is a bit mask of which icmp operands the alloca is
881     /// used in.
882     SmallMapVector<ICmpInst *, unsigned, 4> ICmps;
883 
884     CmpCaptureTracker(AllocaInst *Alloca) : Alloca(Alloca) {}
885 
886     void tooManyUses() override { Captured = true; }
887 
888     Action captured(const Use *U, UseCaptureInfo CI) override {
889       // TODO(captures): Use UseCaptureInfo.
890       auto *ICmp = dyn_cast<ICmpInst>(U->getUser());
891       // We need to check that U is based *only* on the alloca, and doesn't
892       // have other contributions from a select/phi operand.
893       // TODO: We could check whether getUnderlyingObjects() reduces to one
894       // object, which would allow looking through phi nodes.
895       if (ICmp && ICmp->isEquality() && getUnderlyingObject(*U) == Alloca) {
896         // Collect equality icmps of the alloca, and don't treat them as
897         // captures.
898         ICmps[ICmp] |= 1u << U->getOperandNo();
899         return Continue;
900       }
901 
902       Captured = true;
903       return Stop;
904     }
905   };
906 
907   CmpCaptureTracker Tracker(Alloca);
908   PointerMayBeCaptured(Alloca, &Tracker);
909   if (Tracker.Captured)
910     return false;
911 
912   bool Changed = false;
913   for (auto [ICmp, Operands] : Tracker.ICmps) {
914     switch (Operands) {
915     case 1:
916     case 2: {
917       // The alloca is only used in one icmp operand. Assume that the
918       // equality is false.
919       auto *Res = ConstantInt::get(ICmp->getType(),
920                                    ICmp->getPredicate() == ICmpInst::ICMP_NE);
921       replaceInstUsesWith(*ICmp, Res);
922       eraseInstFromFunction(*ICmp);
923       Changed = true;
924       break;
925     }
926     case 3:
927       // Both icmp operands are based on the alloca, so this is comparing
928       // pointer offsets, without leaking any information about the address
929       // of the alloca. Ignore such comparisons.
930       break;
931     default:
932       llvm_unreachable("Cannot happen");
933     }
934   }
935 
936   return Changed;
937 }
938 
939 /// Fold "icmp pred (X+C), X".
940 Instruction *InstCombinerImpl::foldICmpAddOpConst(Value *X, const APInt &C,
941                                                   CmpPredicate Pred) {
942   // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0,
943   // so the values can never be equal.  Similarly for all other "or equals"
944   // operators.
945   assert(!!C && "C should not be zero!");
946 
947   // (X+1) <u X        --> X >u (MAXUINT-1)        --> X == 255
948   // (X+2) <u X        --> X >u (MAXUINT-2)        --> X > 253
949   // (X+MAXUINT) <u X  --> X >u (MAXUINT-MAXUINT)  --> X != 0
950   if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) {
951     Constant *R =
952         ConstantInt::get(X->getType(), APInt::getMaxValue(C.getBitWidth()) - C);
953     return new ICmpInst(ICmpInst::ICMP_UGT, X, R);
954   }
955 
956   // (X+1) >u X        --> X <u (0-1)        --> X != 255
957   // (X+2) >u X        --> X <u (0-2)        --> X <u 254
958   // (X+MAXUINT) >u X  --> X <u (0-MAXUINT)  --> X <u 1  --> X == 0
959   if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE)
960     return new ICmpInst(ICmpInst::ICMP_ULT, X,
961                         ConstantInt::get(X->getType(), -C));
962 
963   APInt SMax = APInt::getSignedMaxValue(C.getBitWidth());
964 
965   // (X+ 1) <s X       --> X >s (MAXSINT-1)          --> X == 127
966   // (X+ 2) <s X       --> X >s (MAXSINT-2)          --> X >s 125
967   // (X+MAXSINT) <s X  --> X >s (MAXSINT-MAXSINT)    --> X >s 0
968   // (X+MINSINT) <s X  --> X >s (MAXSINT-MINSINT)    --> X >s -1
969   // (X+ -2) <s X      --> X >s (MAXSINT- -2)        --> X >s 126
970   // (X+ -1) <s X      --> X >s (MAXSINT- -1)        --> X != 127
971   if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE)
972     return new ICmpInst(ICmpInst::ICMP_SGT, X,
973                         ConstantInt::get(X->getType(), SMax - C));
974 
975   // (X+ 1) >s X       --> X <s (MAXSINT-(1-1))       --> X != 127
976   // (X+ 2) >s X       --> X <s (MAXSINT-(2-1))       --> X <s 126
977   // (X+MAXSINT) >s X  --> X <s (MAXSINT-(MAXSINT-1)) --> X <s 1
978   // (X+MINSINT) >s X  --> X <s (MAXSINT-(MINSINT-1)) --> X <s -2
979   // (X+ -2) >s X      --> X <s (MAXSINT-(-2-1))      --> X <s -126
980   // (X+ -1) >s X      --> X <s (MAXSINT-(-1-1))      --> X == -128
981 
982   assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
983   return new ICmpInst(ICmpInst::ICMP_SLT, X,
984                       ConstantInt::get(X->getType(), SMax - (C - 1)));
985 }
986 
987 /// Handle "(icmp eq/ne (ashr/lshr AP2, A), AP1)" ->
988 /// (icmp eq/ne A, Log2(AP2/AP1)) ->
989 /// (icmp eq/ne A, Log2(AP2) - Log2(AP1)).
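/// For example (illustrative), "icmp eq (lshr i8 8, %A), 2" folds to
/// "icmp eq i8 %A, 2", because 8 u>> 2 == 2.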
990 Instruction *InstCombinerImpl::foldICmpShrConstConst(ICmpInst &I, Value *A,
991                                                      const APInt &AP1,
992                                                      const APInt &AP2) {
993   assert(I.isEquality() && "Cannot fold icmp gt/lt");
994 
995   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
996     if (I.getPredicate() == I.ICMP_NE)
997       Pred = CmpInst::getInversePredicate(Pred);
998     return new ICmpInst(Pred, LHS, RHS);
999   };
1000 
1001   // Don't bother doing any work for cases which InstSimplify handles.
1002   if (AP2.isZero())
1003     return nullptr;
1004 
1005   bool IsAShr = isa<AShrOperator>(I.getOperand(0));
1006   if (IsAShr) {
1007     if (AP2.isAllOnes())
1008       return nullptr;
1009     if (AP2.isNegative() != AP1.isNegative())
1010       return nullptr;
1011     if (AP2.sgt(AP1))
1012       return nullptr;
1013   }
1014 
1015   if (!AP1)
1016     // 'A' must be large enough to shift out the highest set bit.
1017     return getICmp(I.ICMP_UGT, A,
1018                    ConstantInt::get(A->getType(), AP2.logBase2()));
1019 
1020   if (AP1 == AP2)
1021     return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1022 
1023   int Shift;
1024   if (IsAShr && AP1.isNegative())
1025     Shift = AP1.countl_one() - AP2.countl_one();
1026   else
1027     Shift = AP1.countl_zero() - AP2.countl_zero();
1028 
1029   if (Shift > 0) {
1030     if (IsAShr && AP1 == AP2.ashr(Shift)) {
1031       // There are multiple solutions if we are comparing against -1 and the LHS
1032       // of the ashr is not a power of two.
1033       if (AP1.isAllOnes() && !AP2.isPowerOf2())
1034         return getICmp(I.ICMP_UGE, A, ConstantInt::get(A->getType(), Shift));
1035       return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1036     } else if (AP1 == AP2.lshr(Shift)) {
1037       return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1038     }
1039   }
1040 
1041   // Shifting const2 will never be equal to const1.
1042   // FIXME: This should always be handled by InstSimplify?
1043   auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1044   return replaceInstUsesWith(I, TorF);
1045 }
1046 
1047 /// Handle "(icmp eq/ne (shl AP2, A), AP1)" ->
1048 /// (icmp eq/ne A, TrailingZeros(AP1) - TrailingZeros(AP2)).
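/// For example (illustrative), "icmp eq (shl i8 1, %A), 8" folds to
/// "icmp eq i8 %A, 3", because 1 << 3 == 8.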
1049 Instruction *InstCombinerImpl::foldICmpShlConstConst(ICmpInst &I, Value *A,
1050                                                      const APInt &AP1,
1051                                                      const APInt &AP2) {
1052   assert(I.isEquality() && "Cannot fold icmp gt/lt");
1053 
1054   auto getICmp = [&I](CmpInst::Predicate Pred, Value *LHS, Value *RHS) {
1055     if (I.getPredicate() == I.ICMP_NE)
1056       Pred = CmpInst::getInversePredicate(Pred);
1057     return new ICmpInst(Pred, LHS, RHS);
1058   };
1059 
1060   // Don't bother doing any work for cases which InstSimplify handles.
1061   if (AP2.isZero())
1062     return nullptr;
1063 
1064   unsigned AP2TrailingZeros = AP2.countr_zero();
1065 
1066   if (!AP1 && AP2TrailingZeros != 0)
1067     return getICmp(
1068         I.ICMP_UGE, A,
1069         ConstantInt::get(A->getType(), AP2.getBitWidth() - AP2TrailingZeros));
1070 
1071   if (AP1 == AP2)
1072     return getICmp(I.ICMP_EQ, A, ConstantInt::getNullValue(A->getType()));
1073 
1074   // Get the distance between the lowest bits that are set.
1075   int Shift = AP1.countr_zero() - AP2TrailingZeros;
1076 
1077   if (Shift > 0 && AP2.shl(Shift) == AP1)
1078     return getICmp(I.ICMP_EQ, A, ConstantInt::get(A->getType(), Shift));
1079 
1080   // Shifting const2 will never be equal to const1.
1081   // FIXME: This should always be handled by InstSimplify?
1082   auto *TorF = ConstantInt::get(I.getType(), I.getPredicate() == I.ICMP_NE);
1083   return replaceInstUsesWith(I, TorF);
1084 }
1085 
1086 /// The caller has matched a pattern of the form:
1087 ///   I = icmp ugt (add (add A, B), CI2), CI1
1088 /// If this is of the form:
1089 ///   sum = a + b
1090 ///   if (sum+128 >u 255)
1091 /// Then replace it with llvm.sadd.with.overflow.i8.
1092 ///
1093 static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
1094                                           ConstantInt *CI2, ConstantInt *CI1,
1095                                           InstCombinerImpl &IC) {
1096   // The transformation we're trying to do here is to transform this into an
1097   // llvm.sadd.with.overflow.  To do this, we have to replace the original add
1098   // with a narrower add, and discard the add-with-constant that is part of the
1099   // range check (if we can't eliminate it, this isn't profitable).
1100 
1101   // In order to eliminate the add-with-constant, the compare must be its only
1102   // use.
1103   Instruction *AddWithCst = cast<Instruction>(I.getOperand(0));
1104   if (!AddWithCst->hasOneUse())
1105     return nullptr;
1106 
1107   // If CI2 is 2^7, 2^15, 2^31, then it might be an sadd.with.overflow.
1108   if (!CI2->getValue().isPowerOf2())
1109     return nullptr;
1110   unsigned NewWidth = CI2->getValue().countr_zero();
1111   if (NewWidth != 7 && NewWidth != 15 && NewWidth != 31)
1112     return nullptr;
1113 
1114   // The width of the new add formed is 1 more than the bias.
1115   ++NewWidth;
1116 
1117   // Check to see that CI1 is an all-ones value with NewWidth bits.
1118   if (CI1->getBitWidth() == NewWidth ||
1119       CI1->getValue() != APInt::getLowBitsSet(CI1->getBitWidth(), NewWidth))
1120     return nullptr;
1121 
1122   // This is only really a signed overflow check if the inputs have been
1123   // sign-extended; check for that condition. For example, if CI2 is 2^31 and
1124   // the operands of the add are 64 bits wide, we need at least 33 sign bits.
1125   if (IC.ComputeMaxSignificantBits(A, &I) > NewWidth ||
1126       IC.ComputeMaxSignificantBits(B, &I) > NewWidth)
1127     return nullptr;
1128 
1129   // In order to replace the original add with a narrower
1130   // llvm.sadd.with.overflow, the only uses allowed are the add-with-constant
1131   // and truncates that discard the high bits of the add.  Verify that this is
1132   // the case.
1133   Instruction *OrigAdd = cast<Instruction>(AddWithCst->getOperand(0));
1134   for (User *U : OrigAdd->users()) {
1135     if (U == AddWithCst)
1136       continue;
1137 
1138     // Only accept truncates for now.  We would really like a nice recursive
1139     // predicate like SimplifyDemandedBits, but which goes downwards the use-def
1140     // chain to see which bits of a value are actually demanded.  If the
1141     // original add had another add which was then immediately truncated, we
1142     // could still do the transformation.
1143     TruncInst *TI = dyn_cast<TruncInst>(U);
1144     if (!TI || TI->getType()->getPrimitiveSizeInBits() > NewWidth)
1145       return nullptr;
1146   }
1147 
1148   // If the pattern matches, truncate the inputs to the narrower type and
1149   // use the sadd_with_overflow intrinsic to efficiently compute both the
1150   // result and the overflow bit.
1151   Type *NewType = IntegerType::get(OrigAdd->getContext(), NewWidth);
1152   Function *F = Intrinsic::getOrInsertDeclaration(
1153       I.getModule(), Intrinsic::sadd_with_overflow, NewType);
1154 
1155   InstCombiner::BuilderTy &Builder = IC.Builder;
1156 
1157   // Put the new code above the original add, in case there are any uses of the
1158   // add between the add and the compare.
1159   Builder.SetInsertPoint(OrigAdd);
1160 
1161   Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
1162   Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
1163   CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
1164   Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
1165   Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
1166 
1167   // The inner add was the result of the narrow add, zero extended to the
1168   // wider type.  Replace it with the result computed by the intrinsic.
1169   IC.replaceInstUsesWith(*OrigAdd, ZExt);
1170   IC.eraseInstFromFunction(*OrigAdd);
1171 
1172   // The original icmp gets replaced with the overflow value.
1173   return ExtractValueInst::Create(Call, 1, "sadd.overflow");
1174 }
1175 
1176 /// If we have:
1177 ///   icmp eq/ne (urem/srem %x, %y), 0
1178 /// iff %y is a power-of-two, we can replace this with a bit test:
1179 ///   icmp eq/ne (and %x, (add %y, -1)), 0
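/// E.g. with %y == 8, "icmp eq (urem %x, 8), 0" becomes
/// "icmp eq (and %x, 7), 0".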
1180 Instruction *InstCombinerImpl::foldIRemByPowerOfTwoToBitTest(ICmpInst &I) {
1181   // This fold is only valid for equality predicates.
1182   if (!I.isEquality())
1183     return nullptr;
1184   CmpPredicate Pred;
1185   Value *X, *Y, *Zero;
1186   if (!match(&I, m_ICmp(Pred, m_OneUse(m_IRem(m_Value(X), m_Value(Y))),
1187                         m_CombineAnd(m_Zero(), m_Value(Zero)))))
1188     return nullptr;
1189   if (!isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, &I))
1190     return nullptr;
1191   // This may increase the instruction count, since we don't enforce that Y is a constant.
1192   Value *Mask = Builder.CreateAdd(Y, Constant::getAllOnesValue(Y->getType()));
1193   Value *Masked = Builder.CreateAnd(X, Mask);
1194   return ICmpInst::Create(Instruction::ICmp, Pred, Masked, Zero);
1195 }
1196 
1197 /// Fold equality-comparison between zero and any (maybe truncated) right-shift
1198 /// by one-less-than-bitwidth into a sign test on the original value.
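/// For example, "icmp eq (lshr i32 %x, 31), 0" becomes "icmp sge i32 %x, 0",
/// and the "ne" form becomes "icmp slt i32 %x, 0".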
1199 Instruction *InstCombinerImpl::foldSignBitTest(ICmpInst &I) {
1200   Instruction *Val;
1201   CmpPredicate Pred;
1202   if (!I.isEquality() || !match(&I, m_ICmp(Pred, m_Instruction(Val), m_Zero())))
1203     return nullptr;
1204 
1205   Value *X;
1206   Type *XTy;
1207 
1208   Constant *C;
1209   if (match(Val, m_TruncOrSelf(m_Shr(m_Value(X), m_Constant(C))))) {
1210     XTy = X->getType();
1211     unsigned XBitWidth = XTy->getScalarSizeInBits();
1212     if (!match(C, m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_EQ,
1213                                      APInt(XBitWidth, XBitWidth - 1))))
1214       return nullptr;
1215   } else if (isa<BinaryOperator>(Val) &&
1216              (X = reassociateShiftAmtsOfTwoSameDirectionShifts(
1217                   cast<BinaryOperator>(Val), SQ.getWithInstruction(Val),
1218                   /*AnalyzeForSignBitExtraction=*/true))) {
1219     XTy = X->getType();
1220   } else
1221     return nullptr;
1222 
1223   return ICmpInst::Create(Instruction::ICmp,
1224                           Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SGE
1225                                                     : ICmpInst::ICMP_SLT,
1226                           X, ConstantInt::getNullValue(XTy));
1227 }
1228 
1229 // Handle  icmp pred X, 0
1230 Instruction *InstCombinerImpl::foldICmpWithZero(ICmpInst &Cmp) {
1231   CmpInst::Predicate Pred = Cmp.getPredicate();
1232   if (!match(Cmp.getOperand(1), m_Zero()))
1233     return nullptr;
1234 
1235   // (icmp sgt smin(PosA, B) 0) -> (icmp sgt B 0)
1236   if (Pred == ICmpInst::ICMP_SGT) {
1237     Value *A, *B;
1238     if (match(Cmp.getOperand(0), m_SMin(m_Value(A), m_Value(B)))) {
1239       if (isKnownPositive(A, SQ.getWithInstruction(&Cmp)))
1240         return new ICmpInst(Pred, B, Cmp.getOperand(1));
1241       if (isKnownPositive(B, SQ.getWithInstruction(&Cmp)))
1242         return new ICmpInst(Pred, A, Cmp.getOperand(1));
1243     }
1244   }
1245 
1246   if (Instruction *New = foldIRemByPowerOfTwoToBitTest(Cmp))
1247     return New;
1248 
1249   // Given:
1250   //   icmp eq/ne (urem %x, %y), 0
1251   // Iff %x has 0 or 1 bits set, and %y has at least 2 bits set, omit 'urem':
1252   //   icmp eq/ne %x, 0
1253   Value *X, *Y;
1254   if (match(Cmp.getOperand(0), m_URem(m_Value(X), m_Value(Y))) &&
1255       ICmpInst::isEquality(Pred)) {
1256     KnownBits XKnown = computeKnownBits(X, &Cmp);
1257     KnownBits YKnown = computeKnownBits(Y, &Cmp);
1258     if (XKnown.countMaxPopulation() == 1 && YKnown.countMinPopulation() >= 2)
1259       return new ICmpInst(Pred, X, Cmp.getOperand(1));
1260   }
1261 
1262   // (icmp eq/ne (mul X, Y), 0) -> (icmp eq/ne X, 0) or (icmp eq/ne Y, 0) if the
1263   // other operand is known odd, or known non-zero when the multiply cannot overflow.
1264   if (match(Cmp.getOperand(0), m_Mul(m_Value(X), m_Value(Y))) &&
1265       ICmpInst::isEquality(Pred)) {
1266 
1267     KnownBits XKnown = computeKnownBits(X, &Cmp);
1268     // if X % 2 != 0
1269     //    (icmp eq/ne Y)
1270     if (XKnown.countMaxTrailingZeros() == 0)
1271       return new ICmpInst(Pred, Y, Cmp.getOperand(1));
1272 
1273     KnownBits YKnown = computeKnownBits(Y, &Cmp);
1274     // if Y % 2 != 0
1275     //    (icmp eq/ne X)
1276     if (YKnown.countMaxTrailingZeros() == 0)
1277       return new ICmpInst(Pred, X, Cmp.getOperand(1));
1278 
1279     auto *BO0 = cast<OverflowingBinaryOperator>(Cmp.getOperand(0));
1280     if (BO0->hasNoUnsignedWrap() || BO0->hasNoSignedWrap()) {
1281       const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
1282       // `isKnownNonZero` does more analysis than just `!KnownBits.One.isZero()`
1283       // but to avoid unnecessary work, first just check if this is an obvious case.
1284 
1285       // if X non-zero and NoOverflow(X * Y)
1286       //    (icmp eq/ne Y)
1287       if (!XKnown.One.isZero() || isKnownNonZero(X, Q))
1288         return new ICmpInst(Pred, Y, Cmp.getOperand(1));
1289 
1290       // if Y non-zero and NoOverflow(X * Y)
1291       //    (icmp eq/ne X)
1292       if (!YKnown.One.isZero() || isKnownNonZero(Y, Q))
1293         return new ICmpInst(Pred, X, Cmp.getOperand(1));
1294     }
1295     // Note, we are skipping cases:
1296     //      if Y % 2 != 0 AND X % 2 != 0
1297     //          (false/true)
1298     //      if X non-zero and Y non-zero and NoOverflow(X * Y)
1299     //          (false/true)
1300     // Those can be simplified later as we would have already replaced the (icmp
1301     // eq/ne (mul X, Y)) with (icmp eq/ne X/Y) and if X/Y is known non-zero that
1302     // will fold to a constant elsewhere.
1303   }
1304 
1305   // (icmp eq/ne f(X), 0) -> (icmp eq/ne X, 0)
1306   // where f(X) == 0 if and only if X == 0
1307   if (ICmpInst::isEquality(Pred))
1308     if (Value *Stripped = stripNullTest(Cmp.getOperand(0)))
1309       return new ICmpInst(Pred, Stripped,
1310                           Constant::getNullValue(Stripped->getType()));
1311 
1312   return nullptr;
1313 }
1314 
1315 /// Fold icmp Pred X, C.
1316 /// TODO: This code structure does not make sense. The saturating add fold
1317 /// should be moved to some other helper and extended as noted below (it is also
1318 /// possible that code has been made unnecessary - do we canonicalize IR to
1319 /// overflow/saturating intrinsics or not?).
1320 Instruction *InstCombinerImpl::foldICmpWithConstant(ICmpInst &Cmp) {
1321   // Match the following pattern, which is a common idiom when writing
1322   // overflow-safe integer arithmetic functions. The source performs an addition
1323   // in a wider type and explicitly checks for overflow using comparisons against
1324   // INT_MIN and INT_MAX. Simplify by using the sadd_with_overflow intrinsic.
1325   //
1326   // TODO: This could probably be generalized to handle other overflow-safe
1327   // operations if we worked out the formulas to compute the appropriate magic
1328   // constants.
1329   //
1330   // sum = a + b
1331   // if (sum+128 >u 255)  ...  -> llvm.sadd.with.overflow.i8
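       // Here 128 and 255 are the magic constants for i8: (sum + 128) u> 255 holds
       // exactly when sum lies outside [-128, 127], i.e. when the narrow i8 add
       // would overflow.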
1332   CmpInst::Predicate Pred = Cmp.getPredicate();
1333   Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1);
1334   Value *A, *B;
1335   ConstantInt *CI, *CI2; // I = icmp ugt (add (add A, B), CI2), CI
1336   if (Pred == ICmpInst::ICMP_UGT && match(Op1, m_ConstantInt(CI)) &&
1337       match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2))))
1338     if (Instruction *Res = processUGT_ADDCST_ADD(Cmp, A, B, CI2, CI, *this))
1339       return Res;
1340 
1341   // icmp(phi(C1, C2, ...), C) -> phi(icmp(C1, C), icmp(C2, C), ...).
1342   Constant *C = dyn_cast<Constant>(Op1);
1343   if (!C)
1344     return nullptr;
1345 
1346   if (auto *Phi = dyn_cast<PHINode>(Op0))
1347     if (all_of(Phi->operands(), [](Value *V) { return isa<Constant>(V); })) {
1348       SmallVector<Constant *> Ops;
1349       for (Value *V : Phi->incoming_values()) {
1350         Constant *Res =
1351             ConstantFoldCompareInstOperands(Pred, cast<Constant>(V), C, DL);
1352         if (!Res)
1353           return nullptr;
1354         Ops.push_back(Res);
1355       }
1356       Builder.SetInsertPoint(Phi);
1357       PHINode *NewPhi = Builder.CreatePHI(Cmp.getType(), Phi->getNumOperands());
1358       for (auto [V, Pred] : zip(Ops, Phi->blocks()))
1359         NewPhi->addIncoming(V, Pred);
1360       return replaceInstUsesWith(Cmp, NewPhi);
1361     }
1362 
1363   if (Instruction *R = tryFoldInstWithCtpopWithNot(&Cmp))
1364     return R;
1365 
1366   return nullptr;
1367 }
1368 
1369 /// Canonicalize icmp instructions based on dominating conditions.
1370 Instruction *InstCombinerImpl::foldICmpWithDominatingICmp(ICmpInst &Cmp) {
1371   // We already checked simple implication in InstSimplify, only handle complex
1372   // cases here.
1373   Value *X = Cmp.getOperand(0), *Y = Cmp.getOperand(1);
1374   const APInt *C;
1375   if (!match(Y, m_APInt(C)))
1376     return nullptr;
1377 
1378   CmpInst::Predicate Pred = Cmp.getPredicate();
1379   ConstantRange CR = ConstantRange::makeExactICmpRegion(Pred, *C);
1380 
1381   auto handleDomCond = [&](ICmpInst::Predicate DomPred,
1382                            const APInt *DomC) -> Instruction * {
1383     // We have 2 compares of a variable with constants. Calculate the constant
1384     // ranges of those compares to see if we can transform the 2nd compare:
1385     // DomBB:
1386     //   DomCond = icmp DomPred X, DomC
1387     //   br DomCond, CmpBB, FalseBB
1388     // CmpBB:
1389     //   Cmp = icmp Pred X, C
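         // E.g. (illustrative): if 'icmp ult X, 10' dominates CmpBB on its true
         // edge, then 'icmp ult X, 20' in CmpBB is always true and
         // 'icmp ugt X, 10' is always false.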
1390     ConstantRange DominatingCR =
1391         ConstantRange::makeExactICmpRegion(DomPred, *DomC);
1392     ConstantRange Intersection = DominatingCR.intersectWith(CR);
1393     ConstantRange Difference = DominatingCR.difference(CR);
1394     if (Intersection.isEmptySet())
1395       return replaceInstUsesWith(Cmp, Builder.getFalse());
1396     if (Difference.isEmptySet())
1397       return replaceInstUsesWith(Cmp, Builder.getTrue());
1398 
1399     // Canonicalizing a sign bit comparison that gets used in a branch
1400     // pessimizes codegen by generating a branch-on-zero instruction instead
1401     // of a test-and-branch. So we avoid canonicalizing in such situations
1402     // because a test-and-branch instruction has better branch displacement
1403     // than a compare-and-branch instruction.
1404     bool UnusedBit;
1405     bool IsSignBit = isSignBitCheck(Pred, *C, UnusedBit);
1406     if (Cmp.isEquality() || (IsSignBit && hasBranchUse(Cmp)))
1407       return nullptr;
1408 
1409     // Avoid an infinite loop with min/max canonicalization.
1410     // TODO: This will be unnecessary if we canonicalize to min/max intrinsics.
1411     if (Cmp.hasOneUse() &&
1412         match(Cmp.user_back(), m_MaxOrMin(m_Value(), m_Value())))
1413       return nullptr;
1414 
1415     if (const APInt *EqC = Intersection.getSingleElement())
1416       return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*EqC));
1417     if (const APInt *NeC = Difference.getSingleElement())
1418       return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*NeC));
1419     return nullptr;
1420   };
1421 
1422   for (BranchInst *BI : DC.conditionsFor(X)) {
1423     CmpPredicate DomPred;
1424     const APInt *DomC;
1425     if (!match(BI->getCondition(),
1426                m_ICmp(DomPred, m_Specific(X), m_APInt(DomC))))
1427       continue;
1428 
1429     BasicBlockEdge Edge0(BI->getParent(), BI->getSuccessor(0));
1430     if (DT.dominates(Edge0, Cmp.getParent())) {
1431       if (auto *V = handleDomCond(DomPred, DomC))
1432         return V;
1433     } else {
1434       BasicBlockEdge Edge1(BI->getParent(), BI->getSuccessor(1));
1435       if (DT.dominates(Edge1, Cmp.getParent()))
1436         if (auto *V =
1437                 handleDomCond(CmpInst::getInversePredicate(DomPred), DomC))
1438           return V;
1439     }
1440   }
1441 
1442   return nullptr;
1443 }
1444 
1445 /// Fold icmp (trunc X), C.
1446 Instruction *InstCombinerImpl::foldICmpTruncConstant(ICmpInst &Cmp,
1447                                                      TruncInst *Trunc,
1448                                                      const APInt &C) {
1449   ICmpInst::Predicate Pred = Cmp.getPredicate();
1450   Value *X = Trunc->getOperand(0);
1451   Type *SrcTy = X->getType();
1452   unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
1453            SrcBits = SrcTy->getScalarSizeInBits();
1454 
1455   // Match (icmp pred (trunc nuw/nsw X), C)
1456   // Which we can convert to (icmp pred X, (sext/zext C))
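       // E.g. (illustrative): 'icmp slt (trunc nsw i32 %x to i8), 5'
       //   -> 'icmp slt i32 %x, 5'.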
1457   if (shouldChangeType(Trunc->getType(), SrcTy)) {
1458     if (Trunc->hasNoSignedWrap())
1459       return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.sext(SrcBits)));
1460     if (!Cmp.isSigned() && Trunc->hasNoUnsignedWrap())
1461       return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, C.zext(SrcBits)));
1462   }
1463 
1464   if (C.isOne() && C.getBitWidth() > 1) {
1465     // icmp slt trunc(signum(V)) 1 --> icmp slt V, 1
1466     Value *V = nullptr;
1467     if (Pred == ICmpInst::ICMP_SLT && match(X, m_Signum(m_Value(V))))
1468       return new ICmpInst(ICmpInst::ICMP_SLT, V,
1469                           ConstantInt::get(V->getType(), 1));
1470   }
1471 
1472   // TODO: Handle any shifted constant by subtracting trailing zeros.
1473   // TODO: Handle non-equality predicates.
1474   Value *Y;
1475   if (Cmp.isEquality() && match(X, m_Shl(m_One(), m_Value(Y)))) {
1476     // (trunc (1 << Y) to iN) == 0 --> Y u>= N
1477     // (trunc (1 << Y) to iN) != 0 --> Y u<  N
1478     if (C.isZero()) {
1479       auto NewPred = (Pred == Cmp.ICMP_EQ) ? Cmp.ICMP_UGE : Cmp.ICMP_ULT;
1480       return new ICmpInst(NewPred, Y, ConstantInt::get(SrcTy, DstBits));
1481     }
1482     // (trunc (1 << Y) to iN) == 2**C --> Y == C
1483     // (trunc (1 << Y) to iN) != 2**C --> Y != C
1484     if (C.isPowerOf2())
1485       return new ICmpInst(Pred, Y, ConstantInt::get(SrcTy, C.logBase2()));
1486   }
1487 
1488   if (Cmp.isEquality() && Trunc->hasOneUse()) {
1489     // Canonicalize to a mask and wider compare if the wide type is suitable:
1490     // (trunc X to i8) == C --> (X & 0xff) == (zext C)
1491     if (!SrcTy->isVectorTy() && shouldChangeType(DstBits, SrcBits)) {
1492       Constant *Mask =
1493           ConstantInt::get(SrcTy, APInt::getLowBitsSet(SrcBits, DstBits));
1494       Value *And = Builder.CreateAnd(X, Mask);
1495       Constant *WideC = ConstantInt::get(SrcTy, C.zext(SrcBits));
1496       return new ICmpInst(Pred, And, WideC);
1497     }
1498 
1499     // Simplify icmp eq (trunc x to i8), 42 -> icmp eq x, 42|highbits if all
1500     // of the high bits truncated out of x are known.
1501     KnownBits Known = computeKnownBits(X, &Cmp);
1502 
1503     // If all the high bits are known, we can do this xform.
1504     if ((Known.Zero | Known.One).countl_one() >= SrcBits - DstBits) {
1505       // Pull in the high bits from known-ones set.
1506       APInt NewRHS = C.zext(SrcBits);
1507       NewRHS |= Known.One & APInt::getHighBitsSet(SrcBits, SrcBits - DstBits);
1508       return new ICmpInst(Pred, X, ConstantInt::get(SrcTy, NewRHS));
1509     }
1510   }
1511 
1512   // Look through truncated right-shift of the sign-bit for a sign-bit check:
1513   // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] < 0  --> ShOp <  0
1514   // trunc iN (ShOp >> ShAmtC) to i[N - ShAmtC] > -1 --> ShOp > -1
1515   Value *ShOp;
1516   const APInt *ShAmtC;
1517   bool TrueIfSigned;
1518   if (isSignBitCheck(Pred, C, TrueIfSigned) &&
1519       match(X, m_Shr(m_Value(ShOp), m_APInt(ShAmtC))) &&
1520       DstBits == SrcBits - ShAmtC->getZExtValue()) {
1521     return TrueIfSigned ? new ICmpInst(ICmpInst::ICMP_SLT, ShOp,
1522                                        ConstantInt::getNullValue(SrcTy))
1523                         : new ICmpInst(ICmpInst::ICMP_SGT, ShOp,
1524                                        ConstantInt::getAllOnesValue(SrcTy));
1525   }
1526 
1527   return nullptr;
1528 }
1529 
1530 /// Fold icmp (trunc nuw/nsw X), (trunc nuw/nsw Y).
1531 /// Fold icmp (trunc nuw/nsw X), (zext/sext Y).
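     /// E.g. (illustrative): 'icmp ult (trunc nuw i64 %x to i32), (trunc nuw i64 %y to i32)'
     /// can become 'icmp ult i64 %x, %y', since both truncs drop only zero bits.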
1532 Instruction *
1533 InstCombinerImpl::foldICmpTruncWithTruncOrExt(ICmpInst &Cmp,
1534                                               const SimplifyQuery &Q) {
1535   Value *X, *Y;
1536   CmpPredicate Pred;
1537   bool YIsSExt = false;
1538   // Try to match icmp (trunc X), (trunc Y)
1539   if (match(&Cmp, m_ICmp(Pred, m_Trunc(m_Value(X)), m_Trunc(m_Value(Y))))) {
1540     unsigned NoWrapFlags = cast<TruncInst>(Cmp.getOperand(0))->getNoWrapKind() &
1541                            cast<TruncInst>(Cmp.getOperand(1))->getNoWrapKind();
1542     if (Cmp.isSigned()) {
1543       // For signed comparisons, both truncs must be nsw.
1544       if (!(NoWrapFlags & TruncInst::NoSignedWrap))
1545         return nullptr;
1546     } else {
1547       // For unsigned and equality comparisons, either both must be nuw or
1548       // both must be nsw; we don't care which.
1549       if (!NoWrapFlags)
1550         return nullptr;
1551     }
1552 
1553     if (X->getType() != Y->getType() &&
1554         (!Cmp.getOperand(0)->hasOneUse() || !Cmp.getOperand(1)->hasOneUse()))
1555       return nullptr;
1556     if (!isDesirableIntType(X->getType()->getScalarSizeInBits()) &&
1557         isDesirableIntType(Y->getType()->getScalarSizeInBits())) {
1558       std::swap(X, Y);
1559       Pred = Cmp.getSwappedPredicate(Pred);
1560     }
1561     YIsSExt = !(NoWrapFlags & TruncInst::NoUnsignedWrap);
1562   }
1563   // Try to match icmp (trunc nuw X), (zext Y)
1564   else if (!Cmp.isSigned() &&
1565            match(&Cmp, m_c_ICmp(Pred, m_NUWTrunc(m_Value(X)),
1566                                 m_OneUse(m_ZExt(m_Value(Y)))))) {
1567     // Can fold trunc nuw + zext for unsigned and equality predicates.
1568   }
1569   // Try to match icmp (trunc nsw X), (sext Y)
1570   else if (match(&Cmp, m_c_ICmp(Pred, m_NSWTrunc(m_Value(X)),
1571                                 m_OneUse(m_ZExtOrSExt(m_Value(Y)))))) {
1572     // Can fold trunc nsw + zext/sext for all predicates.
1573     YIsSExt =
1574         isa<SExtInst>(Cmp.getOperand(0)) || isa<SExtInst>(Cmp.getOperand(1));
1575   } else
1576     return nullptr;
1577 
1578   Type *TruncTy = Cmp.getOperand(0)->getType();
1579   unsigned TruncBits = TruncTy->getScalarSizeInBits();
1580 
1581   // If this transform will end up changing from desirable types -> undesirable
1582   // types, skip it.
1583   if (isDesirableIntType(TruncBits) &&
1584       !isDesirableIntType(X->getType()->getScalarSizeInBits()))
1585     return nullptr;
1586 
1587   Value *NewY = Builder.CreateIntCast(Y, X->getType(), YIsSExt);
1588   return new ICmpInst(Pred, X, NewY);
1589 }
1590 
1591 /// Fold icmp (xor X, Y), C.
1592 Instruction *InstCombinerImpl::foldICmpXorConstant(ICmpInst &Cmp,
1593                                                    BinaryOperator *Xor,
1594                                                    const APInt &C) {
1595   if (Instruction *I = foldICmpXorShiftConst(Cmp, Xor, C))
1596     return I;
1597 
1598   Value *X = Xor->getOperand(0);
1599   Value *Y = Xor->getOperand(1);
1600   const APInt *XorC;
1601   if (!match(Y, m_APInt(XorC)))
1602     return nullptr;
1603 
1604   // If this is a comparison that tests the signbit (X < 0) or (X > -1),
1605   // fold the xor.
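       // E.g. (illustrative): 'icmp slt (xor i8 %x, -128), 0' -> 'icmp sgt i8 %x, -1',
       // because xor with the sign mask just flips the sign bit.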
1606   ICmpInst::Predicate Pred = Cmp.getPredicate();
1607   bool TrueIfSigned = false;
1608   if (isSignBitCheck(Cmp.getPredicate(), C, TrueIfSigned)) {
1609 
1610     // If the sign bit of the XorCst is not set, there is no change to
1611     // the operation, just stop using the Xor.
1612     if (!XorC->isNegative())
1613       return replaceOperand(Cmp, 0, X);
1614 
1615     // Emit the opposite comparison.
1616     if (TrueIfSigned)
1617       return new ICmpInst(ICmpInst::ICMP_SGT, X,
1618                           ConstantInt::getAllOnesValue(X->getType()));
1619     else
1620       return new ICmpInst(ICmpInst::ICMP_SLT, X,
1621                           ConstantInt::getNullValue(X->getType()));
1622   }
1623 
1624   if (Xor->hasOneUse()) {
1625     // (icmp u/s (xor X SignMask), C) -> (icmp s/u X, (xor C SignMask))
1626     if (!Cmp.isEquality() && XorC->isSignMask()) {
1627       Pred = Cmp.getFlippedSignednessPredicate();
1628       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1629     }
1630 
1631     // (icmp u/s (xor X ~SignMask), C) -> (icmp s/u X, (xor C ~SignMask))
1632     if (!Cmp.isEquality() && XorC->isMaxSignedValue()) {
1633       Pred = Cmp.getFlippedSignednessPredicate();
1634       Pred = Cmp.getSwappedPredicate(Pred);
1635       return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), C ^ *XorC));
1636     }
1637   }
1638 
1639   // Mask constant magic can eliminate an 'xor' with unsigned compares.
1640   if (Pred == ICmpInst::ICMP_UGT) {
1641     // (xor X, ~C) >u C --> X <u ~C (when C+1 is a power of 2)
1642     if (*XorC == ~C && (C + 1).isPowerOf2())
1643       return new ICmpInst(ICmpInst::ICMP_ULT, X, Y);
1644     // (xor X, C) >u C --> X >u C (when C+1 is a power of 2)
1645     if (*XorC == C && (C + 1).isPowerOf2())
1646       return new ICmpInst(ICmpInst::ICMP_UGT, X, Y);
1647   }
1648   if (Pred == ICmpInst::ICMP_ULT) {
1649     // (xor X, -C) <u C --> X >u ~C (when C is a power of 2)
1650     if (*XorC == -C && C.isPowerOf2())
1651       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1652                           ConstantInt::get(X->getType(), ~C));
1653     // (xor X, C) <u C --> X >u ~C (when -C is a power of 2)
1654     if (*XorC == C && (-C).isPowerOf2())
1655       return new ICmpInst(ICmpInst::ICMP_UGT, X,
1656                           ConstantInt::get(X->getType(), ~C));
1657   }
1658   return nullptr;
1659 }
1660 
1661 /// For power-of-2 C:
1662 /// ((X s>> ShiftC) ^ X) u< C --> (X + C) u< (C << 1)
1663 /// ((X s>> ShiftC) ^ X) u> (C - 1) --> (X + C) u> ((C << 1) - 1)
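     /// E.g. (illustrative) for i8 with ShiftC == 7 and C == 4: '(X s>> 7) ^ X' is
     /// u< 4 exactly for X in [-4, 3], which matches '(X + 4) u< 8'.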
1664 Instruction *InstCombinerImpl::foldICmpXorShiftConst(ICmpInst &Cmp,
1665                                                      BinaryOperator *Xor,
1666                                                      const APInt &C) {
1667   CmpInst::Predicate Pred = Cmp.getPredicate();
1668   APInt PowerOf2;
1669   if (Pred == ICmpInst::ICMP_ULT)
1670     PowerOf2 = C;
1671   else if (Pred == ICmpInst::ICMP_UGT && !C.isMaxValue())
1672     PowerOf2 = C + 1;
1673   else
1674     return nullptr;
1675   if (!PowerOf2.isPowerOf2())
1676     return nullptr;
1677   Value *X;
1678   const APInt *ShiftC;
1679   if (!match(Xor, m_OneUse(m_c_Xor(m_Value(X),
1680                                    m_AShr(m_Deferred(X), m_APInt(ShiftC))))))
1681     return nullptr;
1682   uint64_t Shift = ShiftC->getLimitedValue();
1683   Type *XType = X->getType();
1684   if (Shift == 0 || PowerOf2.isMinSignedValue())
1685     return nullptr;
1686   Value *Add = Builder.CreateAdd(X, ConstantInt::get(XType, PowerOf2));
1687   APInt Bound =
1688       Pred == ICmpInst::ICMP_ULT ? PowerOf2 << 1 : ((PowerOf2 << 1) - 1);
1689   return new ICmpInst(Pred, Add, ConstantInt::get(XType, Bound));
1690 }
1691 
1692 /// Fold icmp (and (sh X, Y), C2), C1.
1693 Instruction *InstCombinerImpl::foldICmpAndShift(ICmpInst &Cmp,
1694                                                 BinaryOperator *And,
1695                                                 const APInt &C1,
1696                                                 const APInt &C2) {
1697   BinaryOperator *Shift = dyn_cast<BinaryOperator>(And->getOperand(0));
1698   if (!Shift || !Shift->isShift())
1699     return nullptr;
1700 
1701   // If this is: (X >> C3) & C2 != C1 (where any shift and any compare could
1702   // exist), turn it into (X & (C2 << C3)) != (C1 << C3). This happens a LOT in
1703   // code produced by the clang front-end, for bitfield access.
1704   // This seemingly simple opportunity to fold away a shift turns out to be
1705   // rather complicated. See PR17827 for details.
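       // E.g. (illustrative): '((X >> 3) & 15) != 4' -> '(X & 120) != 32'.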
1706   unsigned ShiftOpcode = Shift->getOpcode();
1707   bool IsShl = ShiftOpcode == Instruction::Shl;
1708   const APInt *C3;
1709   if (match(Shift->getOperand(1), m_APInt(C3))) {
1710     APInt NewAndCst, NewCmpCst;
1711     bool AnyCmpCstBitsShiftedOut;
1712     if (ShiftOpcode == Instruction::Shl) {
1713       // For a left shift, we can fold if the comparison is not signed. We can
1714       // also fold a signed comparison if the mask value and comparison value
1715       // are not negative. These constraints may not be obvious, but we can
1716       // prove that they are correct using an SMT solver.
1717       if (Cmp.isSigned() && (C2.isNegative() || C1.isNegative()))
1718         return nullptr;
1719 
1720       NewCmpCst = C1.lshr(*C3);
1721       NewAndCst = C2.lshr(*C3);
1722       AnyCmpCstBitsShiftedOut = NewCmpCst.shl(*C3) != C1;
1723     } else if (ShiftOpcode == Instruction::LShr) {
1724       // For a logical right shift, we can fold if the comparison is not signed.
1725       // We can also fold a signed comparison if the shifted mask value and the
1726       // shifted comparison value are not negative. These constraints may not be
1727       // obvious, but we can prove that they are correct using an SMT solver.
1728       NewCmpCst = C1.shl(*C3);
1729       NewAndCst = C2.shl(*C3);
1730       AnyCmpCstBitsShiftedOut = NewCmpCst.lshr(*C3) != C1;
1731       if (Cmp.isSigned() && (NewAndCst.isNegative() || NewCmpCst.isNegative()))
1732         return nullptr;
1733     } else {
1734       // For an arithmetic shift, check that both constants don't use (in a
1735       // signed sense) the top bits being shifted out.
1736       assert(ShiftOpcode == Instruction::AShr && "Unknown shift opcode");
1737       NewCmpCst = C1.shl(*C3);
1738       NewAndCst = C2.shl(*C3);
1739       AnyCmpCstBitsShiftedOut = NewCmpCst.ashr(*C3) != C1;
1740       if (NewAndCst.ashr(*C3) != C2)
1741         return nullptr;
1742     }
1743 
1744     if (AnyCmpCstBitsShiftedOut) {
1745       // If we shifted bits out, the fold is not going to work out. As a
1746       // special case, check to see if this means that the result is always
1747       // true or false now.
1748       if (Cmp.getPredicate() == ICmpInst::ICMP_EQ)
1749         return replaceInstUsesWith(Cmp, ConstantInt::getFalse(Cmp.getType()));
1750       if (Cmp.getPredicate() == ICmpInst::ICMP_NE)
1751         return replaceInstUsesWith(Cmp, ConstantInt::getTrue(Cmp.getType()));
1752     } else {
1753       Value *NewAnd = Builder.CreateAnd(
1754           Shift->getOperand(0), ConstantInt::get(And->getType(), NewAndCst));
1755       return new ICmpInst(Cmp.getPredicate(), NewAnd,
1756                           ConstantInt::get(And->getType(), NewCmpCst));
1757     }
1758   }
1759 
1760   // Turn ((X >> Y) & C2) == 0  into  (X & (C2 << Y)) == 0.  The latter is
1761   // preferable because it allows the C2 << Y expression to be hoisted out of a
1762   // loop if Y is invariant and X is not.
1763   if (Shift->hasOneUse() && C1.isZero() && Cmp.isEquality() &&
1764       !Shift->isArithmeticShift() &&
1765       ((!IsShl && C2.isOne()) || !isa<Constant>(Shift->getOperand(0)))) {
1766     // Compute C2 << Y.
1767     Value *NewShift =
1768         IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
1769               : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
1770 
1771     // Compute X & (C2 << Y).
1772     Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
1773     return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1774   }
1775 
1776   return nullptr;
1777 }
1778 
1779 /// Fold icmp (and X, C2), C1.
1780 Instruction *InstCombinerImpl::foldICmpAndConstConst(ICmpInst &Cmp,
1781                                                      BinaryOperator *And,
1782                                                      const APInt &C1) {
1783   bool isICMP_NE = Cmp.getPredicate() == ICmpInst::ICMP_NE;
1784 
1785   // For vectors: icmp ne (and X, 1), 0 --> trunc X to N x i1
1786   // TODO: We canonicalize to the longer form for scalars because we have
1787   // better analysis/folds for icmp, and codegen may be better with icmp.
1788   if (isICMP_NE && Cmp.getType()->isVectorTy() && C1.isZero() &&
1789       match(And->getOperand(1), m_One()))
1790     return new TruncInst(And->getOperand(0), Cmp.getType());
1791 
1792   const APInt *C2;
1793   Value *X;
1794   if (!match(And, m_And(m_Value(X), m_APInt(C2))))
1795     return nullptr;
1796 
1797   // (and X, highmask) s> [0, ~highmask] --> X s> ~highmask
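       // E.g. (illustrative, i8): '(X & -16) s> 5' -> 'X s> 15'.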
1798   if (Cmp.getPredicate() == ICmpInst::ICMP_SGT && C1.ule(~*C2) &&
1799       C2->isNegatedPowerOf2())
1800     return new ICmpInst(ICmpInst::ICMP_SGT, X,
1801                         ConstantInt::get(X->getType(), ~*C2));
1802   // (and X, highmask) s< [1, -highmask] --> X s< -highmask
1803   if (Cmp.getPredicate() == ICmpInst::ICMP_SLT && !C1.isSignMask() &&
1804       (C1 - 1).ule(~*C2) && C2->isNegatedPowerOf2() && !C2->isSignMask())
1805     return new ICmpInst(ICmpInst::ICMP_SLT, X,
1806                         ConstantInt::get(X->getType(), -*C2));
1807 
1808   // Don't perform the following transforms if the AND has multiple uses
1809   if (!And->hasOneUse())
1810     return nullptr;
1811 
1812   if (Cmp.isEquality() && C1.isZero()) {
1813     // Restrict this fold to single-use 'and' (PR10267).
1814     // Replace (and X, (1 << size(X)-1) != 0) with X s< 0
1815     if (C2->isSignMask()) {
1816       Constant *Zero = Constant::getNullValue(X->getType());
1817       auto NewPred = isICMP_NE ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGE;
1818       return new ICmpInst(NewPred, X, Zero);
1819     }
1820 
1821     APInt NewC2 = *C2;
1822     KnownBits Know = computeKnownBits(And->getOperand(0), And);
1823     // Set high zeros of C2 to allow matching negated power-of-2.
1824     NewC2 = *C2 | APInt::getHighBitsSet(C2->getBitWidth(),
1825                                         Know.countMinLeadingZeros());
1826 
1827     // Restrict this fold only for single-use 'and' (PR10267).
1828     // ((%x & C) == 0) --> %x u< (-C)  iff (-C) is power of two.
1829     if (NewC2.isNegatedPowerOf2()) {
1830       Constant *NegBOC = ConstantInt::get(And->getType(), -NewC2);
1831       auto NewPred = isICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
1832       return new ICmpInst(NewPred, X, NegBOC);
1833     }
1834   }
1835 
1836   // If the LHS is an 'and' of a truncate and we can widen the and/compare to
1837   // the input width without changing the value produced, eliminate the cast:
1838   //
1839   // icmp (and (trunc W), C2), C1 -> icmp (and W, C2'), C1'
1840   //
1841   // We can do this transformation if the constants do not have their sign bits
1842   // set or if it is an equality comparison. Extending a relational comparison
1843   // when we're checking the sign bit would not work.
1844   Value *W;
1845   if (match(And->getOperand(0), m_OneUse(m_Trunc(m_Value(W)))) &&
1846       (Cmp.isEquality() || (!C1.isNegative() && !C2->isNegative()))) {
1847     // TODO: Is this a good transform for vectors? Wider types may reduce
1848     // throughput. Should this transform be limited (even for scalars) by using
1849     // shouldChangeType()?
1850     if (!Cmp.getType()->isVectorTy()) {
1851       Type *WideType = W->getType();
1852       unsigned WideScalarBits = WideType->getScalarSizeInBits();
1853       Constant *ZextC1 = ConstantInt::get(WideType, C1.zext(WideScalarBits));
1854       Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
1855       Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
1856       return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
1857     }
1858   }
1859 
1860   if (Instruction *I = foldICmpAndShift(Cmp, And, C1, *C2))
1861     return I;
1862 
1863   // (icmp pred (and (or (lshr A, B), A), 1), 0) -->
1864   // (icmp pred (and A, (or (shl 1, B), 1)), 0)
1865   //
1866   // iff pred isn't signed
1867   if (!Cmp.isSigned() && C1.isZero() && And->getOperand(0)->hasOneUse() &&
1868       match(And->getOperand(1), m_One())) {
1869     Constant *One = cast<Constant>(And->getOperand(1));
1870     Value *Or = And->getOperand(0);
1871     Value *A, *B, *LShr;
1872     if (match(Or, m_Or(m_Value(LShr), m_Value(A))) &&
1873         match(LShr, m_LShr(m_Specific(A), m_Value(B)))) {
1874       unsigned UsesRemoved = 0;
1875       if (And->hasOneUse())
1876         ++UsesRemoved;
1877       if (Or->hasOneUse())
1878         ++UsesRemoved;
1879       if (LShr->hasOneUse())
1880         ++UsesRemoved;
1881 
1882       // Compute A & ((1 << B) | 1)
1883       unsigned RequireUsesRemoved = match(B, m_ImmConstant()) ? 1 : 3;
1884       if (UsesRemoved >= RequireUsesRemoved) {
1885         Value *NewOr =
1886             Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
1887                                                /*HasNUW=*/true),
1888                              One, Or->getName());
1889         Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
1890         return new ICmpInst(Cmp.getPredicate(), NewAnd, Cmp.getOperand(1));
1891       }
1892     }
1893   }
1894 
1895   // (icmp eq (and (bitcast X to int), ExponentMask), ExponentMask) -->
1896   // llvm.is.fpclass(X, fcInf|fcNan)
1897   // (icmp ne (and (bitcast X to int), ExponentMask), ExponentMask) -->
1898   // llvm.is.fpclass(X, ~(fcInf|fcNan))
1899   // (icmp eq (and (bitcast X to int), ExponentMask), 0) -->
1900   // llvm.is.fpclass(X, fcSubnormal|fcZero)
1901   // (icmp ne (and (bitcast X to int), ExponentMask), 0) -->
1902   // llvm.is.fpclass(X, ~(fcSubnormal|fcZero))
1903   Value *V;
1904   if (!Cmp.getParent()->getParent()->hasFnAttribute(
1905           Attribute::NoImplicitFloat) &&
1906       Cmp.isEquality() &&
1907       match(X, m_OneUse(m_ElementWiseBitCast(m_Value(V))))) {
1908     Type *FPType = V->getType()->getScalarType();
1909     if (FPType->isIEEELikeFPTy() && (C1.isZero() || C1 == *C2)) {
1910       APInt ExponentMask =
1911           APFloat::getInf(FPType->getFltSemantics()).bitcastToAPInt();
1912       if (*C2 == ExponentMask) {
1913         unsigned Mask = C1.isZero()
1914                             ? FPClassTest::fcZero | FPClassTest::fcSubnormal
1915                             : FPClassTest::fcNan | FPClassTest::fcInf;
1916         if (isICMP_NE)
1917           Mask = ~Mask & fcAllFlags;
1918         return replaceInstUsesWith(Cmp, Builder.createIsFPClass(V, Mask));
1919       }
1920     }
1921   }
1922 
1923   return nullptr;
1924 }
1925 
1926 /// Fold icmp (and X, Y), C.
1927 Instruction *InstCombinerImpl::foldICmpAndConstant(ICmpInst &Cmp,
1928                                                    BinaryOperator *And,
1929                                                    const APInt &C) {
1930   if (Instruction *I = foldICmpAndConstConst(Cmp, And, C))
1931     return I;
1932 
1933   const ICmpInst::Predicate Pred = Cmp.getPredicate();
1934   bool TrueIfNeg;
1935   if (isSignBitCheck(Pred, C, TrueIfNeg)) {
1936     // ((X - 1) & ~X) <  0 --> X == 0
1937     // ((X - 1) & ~X) >= 0 --> X != 0
1938     Value *X;
1939     if (match(And->getOperand(0), m_Add(m_Value(X), m_AllOnes())) &&
1940         match(And->getOperand(1), m_Not(m_Specific(X)))) {
1941       auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1942       return new ICmpInst(NewPred, X, ConstantInt::getNullValue(X->getType()));
1943     }
1944     // (X & -X) <  0 --> X == MinSignedC
1945     // (X & -X) > -1 --> X != MinSignedC
1946     if (match(And, m_c_And(m_Neg(m_Value(X)), m_Deferred(X)))) {
1947       Constant *MinSignedC = ConstantInt::get(
1948           X->getType(),
1949           APInt::getSignedMinValue(X->getType()->getScalarSizeInBits()));
1950       auto NewPred = TrueIfNeg ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
1951       return new ICmpInst(NewPred, X, MinSignedC);
1952     }
1953   }
1954 
1955   // TODO: These all require that Y is constant too, so refactor with the above.
1956 
1957   // Try to optimize things like "A[i] & 42 == 0" to index computations.
1958   Value *X = And->getOperand(0);
1959   Value *Y = And->getOperand(1);
1960   if (auto *C2 = dyn_cast<ConstantInt>(Y))
1961     if (auto *LI = dyn_cast<LoadInst>(X))
1962       if (auto *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0)))
1963         if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
1964           if (Instruction *Res =
1965                   foldCmpLoadFromIndexedGlobal(LI, GEP, GV, Cmp, C2))
1966             return Res;
1967 
1968   if (!Cmp.isEquality())
1969     return nullptr;
1970 
1971   // X & -C == -C -> X >  u ~C
1972   // X & -C != -C -> X <= u ~C
1973   //   iff C is a power of 2
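       // E.g. (illustrative, i8): '(X & -4) == -4' -> 'X u> -5' (i.e. X u> 0xFB).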
1974   if (Cmp.getOperand(1) == Y && C.isNegatedPowerOf2()) {
1975     auto NewPred =
1976         Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGT : CmpInst::ICMP_ULE;
1977     return new ICmpInst(NewPred, X, SubOne(cast<Constant>(Cmp.getOperand(1))));
1978   }
1979 
1980   // ((zext i1 X) & Y) == 0 --> !((trunc Y) & X)
1981   // ((zext i1 X) & Y) != 0 -->  ((trunc Y) & X)
1982   // ((zext i1 X) & Y) == 1 -->  ((trunc Y) & X)
1983   // ((zext i1 X) & Y) != 1 --> !((trunc Y) & X)
1984   if (match(And, m_OneUse(m_c_And(m_OneUse(m_ZExt(m_Value(X))), m_Value(Y)))) &&
1985       X->getType()->isIntOrIntVectorTy(1) && (C.isZero() || C.isOne())) {
1986     Value *TruncY = Builder.CreateTrunc(Y, X->getType());
1987     if (C.isZero() ^ (Pred == CmpInst::ICMP_NE)) {
1988       Value *And = Builder.CreateAnd(TruncY, X);
1989       return BinaryOperator::CreateNot(And);
1990     }
1991     return BinaryOperator::CreateAnd(TruncY, X);
1992   }
1993 
1994   // (icmp eq/ne (and (shl -1, X), Y), 0)
1995   //    -> (icmp eq/ne (lshr Y, X), 0)
1996   // We could technically handle any C == 0 or (C < 0 && isOdd(C)) but it seems
1997   // highly unlikely the non-zero case will ever show up in code.
1998   if (C.isZero() &&
1999       match(And, m_OneUse(m_c_And(m_OneUse(m_Shl(m_AllOnes(), m_Value(X))),
2000                                   m_Value(Y))))) {
2001     Value *LShr = Builder.CreateLShr(Y, X);
2002     return new ICmpInst(Pred, LShr, Constant::getNullValue(LShr->getType()));
2003   }
2004 
2005   // (icmp eq/ne (and (add A, Addend), Msk), C)
2006   //    -> (icmp eq/ne (and A, Msk), (and (sub C, Addend), Msk))
2007   {
2008     Value *A;
2009     const APInt *Addend, *Msk;
2010     if (match(And, m_And(m_OneUse(m_Add(m_Value(A), m_APInt(Addend))),
2011                          m_LowBitMask(Msk))) &&
2012         C.ule(*Msk)) {
2013       APInt NewComperand = (C - *Addend) & *Msk;
2014       Value *MaskA = Builder.CreateAnd(A, ConstantInt::get(A->getType(), *Msk));
2015       return new ICmpInst(Pred, MaskA,
2016                           ConstantInt::get(MaskA->getType(), NewComperand));
2017     }
2018   }
2019 
2020   return nullptr;
2021 }
2022 
2023 /// Fold icmp eq/ne (or (xor/sub (X1, X2), xor/sub (X3, X4))), 0.
2024 static Value *foldICmpOrXorSubChain(ICmpInst &Cmp, BinaryOperator *Or,
2025                                     InstCombiner::BuilderTy &Builder) {
2026   // Are we using xors or subs to bitwise check for a pair or pairs of
2027   // (in)equalities? Convert to a shorter form that has more potential to be
2028   // folded even further.
2029   // ((X1 ^/- X2) || (X3 ^/- X4)) == 0 --> (X1 == X2) && (X3 == X4)
2030   // ((X1 ^/- X2) || (X3 ^/- X4)) != 0 --> (X1 != X2) || (X3 != X4)
2031   // ((X1 ^/- X2) || (X3 ^/- X4) || (X5 ^/- X6)) == 0 -->
2032   // (X1 == X2) && (X3 == X4) && (X5 == X6)
2033   // ((X1 ^/- X2) || (X3 ^/- X4) || (X5 ^/- X6)) != 0 -->
2034   // (X1 != X2) || (X3 != X4) || (X5 != X6)
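       // E.g. (illustrative): 'icmp eq (or (xor %a, %b), (sub %c, %d)), 0'
       //   -> 'and (icmp eq %a, %b), (icmp eq %c, %d)'.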
2035   SmallVector<std::pair<Value *, Value *>, 2> CmpValues;
2036   SmallVector<Value *, 16> WorkList(1, Or);
2037 
2038   while (!WorkList.empty()) {
2039     auto MatchOrOperatorArgument = [&](Value *OrOperatorArgument) {
2040       Value *Lhs, *Rhs;
2041 
2042       if (match(OrOperatorArgument,
2043                 m_OneUse(m_Xor(m_Value(Lhs), m_Value(Rhs))))) {
2044         CmpValues.emplace_back(Lhs, Rhs);
2045         return;
2046       }
2047 
2048       if (match(OrOperatorArgument,
2049                 m_OneUse(m_Sub(m_Value(Lhs), m_Value(Rhs))))) {
2050         CmpValues.emplace_back(Lhs, Rhs);
2051         return;
2052       }
2053 
2054       WorkList.push_back(OrOperatorArgument);
2055     };
2056 
2057     Value *CurrentValue = WorkList.pop_back_val();
2058     Value *OrOperatorLhs, *OrOperatorRhs;
2059 
2060     if (!match(CurrentValue,
2061                m_Or(m_Value(OrOperatorLhs), m_Value(OrOperatorRhs)))) {
2062       return nullptr;
2063     }
2064 
2065     MatchOrOperatorArgument(OrOperatorRhs);
2066     MatchOrOperatorArgument(OrOperatorLhs);
2067   }
2068 
2069   ICmpInst::Predicate Pred = Cmp.getPredicate();
2070   auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2071   Value *LhsCmp = Builder.CreateICmp(Pred, CmpValues.rbegin()->first,
2072                                      CmpValues.rbegin()->second);
2073 
2074   for (auto It = CmpValues.rbegin() + 1; It != CmpValues.rend(); ++It) {
2075     Value *RhsCmp = Builder.CreateICmp(Pred, It->first, It->second);
2076     LhsCmp = Builder.CreateBinOp(BOpc, LhsCmp, RhsCmp);
2077   }
2078 
2079   return LhsCmp;
2080 }
2081 
2082 /// Fold icmp (or X, Y), C.
2083 Instruction *InstCombinerImpl::foldICmpOrConstant(ICmpInst &Cmp,
2084                                                   BinaryOperator *Or,
2085                                                   const APInt &C) {
2086   ICmpInst::Predicate Pred = Cmp.getPredicate();
2087   if (C.isOne()) {
2088     // icmp slt signum(V) 1 --> icmp slt V, 1
2089     Value *V = nullptr;
2090     if (Pred == ICmpInst::ICMP_SLT && match(Or, m_Signum(m_Value(V))))
2091       return new ICmpInst(ICmpInst::ICMP_SLT, V,
2092                           ConstantInt::get(V->getType(), 1));
2093   }
2094 
2095   Value *OrOp0 = Or->getOperand(0), *OrOp1 = Or->getOperand(1);
2096 
2097   // (icmp eq/ne (or disjoint x, C0), C1)
2098   //    -> (icmp eq/ne x, C0^C1)
2099   if (Cmp.isEquality() && match(OrOp1, m_ImmConstant()) &&
2100       cast<PossiblyDisjointInst>(Or)->isDisjoint()) {
2101     Value *NewC =
2102         Builder.CreateXor(OrOp1, ConstantInt::get(OrOp1->getType(), C));
2103     return new ICmpInst(Pred, OrOp0, NewC);
2104   }
2105 
2106   const APInt *MaskC;
2107   if (match(OrOp1, m_APInt(MaskC)) && Cmp.isEquality()) {
2108     if (*MaskC == C && (C + 1).isPowerOf2()) {
2109       // X | C == C --> X <=u C
2110       // X | C != C --> X  >u C
2111       //   iff C+1 is a power of 2 (C is a bitmask of the low bits)
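           // E.g. (illustrative, i8): '(X | 7) == 7' -> 'X u<= 7'.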
2112       Pred = (Pred == CmpInst::ICMP_EQ) ? CmpInst::ICMP_ULE : CmpInst::ICMP_UGT;
2113       return new ICmpInst(Pred, OrOp0, OrOp1);
2114     }
2115 
2116     // More general: canonicalize 'equality with set bits mask' to
2117     // 'equality with clear bits mask'.
2118     // (X | MaskC) == C --> (X & ~MaskC) == C ^ MaskC
2119     // (X | MaskC) != C --> (X & ~MaskC) != C ^ MaskC
2120     if (Or->hasOneUse()) {
2121       Value *And = Builder.CreateAnd(OrOp0, ~(*MaskC));
2122       Constant *NewC = ConstantInt::get(Or->getType(), C ^ (*MaskC));
2123       return new ICmpInst(Pred, And, NewC);
2124     }
2125   }
2126 
2127   // (X | (X-1)) s<  0 --> X s< 1
2128   // (X | (X-1)) s> -1 --> X s> 0
2129   Value *X;
2130   bool TrueIfSigned;
2131   if (isSignBitCheck(Pred, C, TrueIfSigned) &&
2132       match(Or, m_c_Or(m_Add(m_Value(X), m_AllOnes()), m_Deferred(X)))) {
2133     auto NewPred = TrueIfSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_SGT;
2134     Constant *NewC = ConstantInt::get(X->getType(), TrueIfSigned ? 1 : 0);
2135     return new ICmpInst(NewPred, X, NewC);
2136   }
2137 
2138   const APInt *OrC;
2139   // icmp(X | OrC, C) --> icmp(X, 0)
2140   if (C.isNonNegative() && match(Or, m_Or(m_Value(X), m_APInt(OrC)))) {
2141     switch (Pred) {
2142     // X | OrC s< C --> X s< 0 iff OrC s>= C s>= 0
2143     case ICmpInst::ICMP_SLT:
2144     // X | OrC s>= C --> X s>= 0 iff OrC s>= C s>= 0
2145     case ICmpInst::ICMP_SGE:
2146       if (OrC->sge(C))
2147         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
2148       break;
2149     // X | OrC s<= C --> X s< 0 iff OrC s> C s>= 0
2150     case ICmpInst::ICMP_SLE:
2151     // X | OrC s> C --> X s>= 0 iff OrC s> C s>= 0
2152     case ICmpInst::ICMP_SGT:
2153       if (OrC->sgt(C))
2154         return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), X,
2155                             ConstantInt::getNullValue(X->getType()));
2156       break;
2157     default:
2158       break;
2159     }
2160   }
2161 
2162   if (!Cmp.isEquality() || !C.isZero() || !Or->hasOneUse())
2163     return nullptr;
2164 
2165   Value *P, *Q;
2166   if (match(Or, m_Or(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Value(Q))))) {
2167     // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
2168     // -> and (icmp eq P, null), (icmp eq Q, null).
2169     Value *CmpP =
2170         Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
2171     Value *CmpQ =
2172         Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
2173     auto BOpc = Pred == CmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2174     return BinaryOperator::Create(BOpc, CmpP, CmpQ);
2175   }
2176 
2177   if (Value *V = foldICmpOrXorSubChain(Cmp, Or, Builder))
2178     return replaceInstUsesWith(Cmp, V);
2179 
2180   return nullptr;
2181 }
2182 
2183 /// Fold icmp (mul X, Y), C.
2184 Instruction *InstCombinerImpl::foldICmpMulConstant(ICmpInst &Cmp,
2185                                                    BinaryOperator *Mul,
2186                                                    const APInt &C) {
2187   ICmpInst::Predicate Pred = Cmp.getPredicate();
2188   Type *MulTy = Mul->getType();
2189   Value *X = Mul->getOperand(0);
2190 
2191   // If there's no overflow:
2192   // X * X == 0 --> X == 0
2193   // X * X != 0 --> X != 0
2194   if (Cmp.isEquality() && C.isZero() && X == Mul->getOperand(1) &&
2195       (Mul->hasNoUnsignedWrap() || Mul->hasNoSignedWrap()))
2196     return new ICmpInst(Pred, X, ConstantInt::getNullValue(MulTy));
2197 
2198   const APInt *MulC;
2199   if (!match(Mul->getOperand(1), m_APInt(MulC)))
2200     return nullptr;
2201 
2202   // If this is a test of the sign bit and the multiply is sign-preserving with
2203   // a constant operand, use the multiply LHS operand instead:
2204   // (X * +MulC) < 0 --> X < 0
2205   // (X * -MulC) < 0 --> X > 0
2206   if (isSignTest(Pred, C) && Mul->hasNoSignedWrap()) {
2207     if (MulC->isNegative())
2208       Pred = ICmpInst::getSwappedPredicate(Pred);
2209     return new ICmpInst(Pred, X, ConstantInt::getNullValue(MulTy));
2210   }
2211 
2212   if (MulC->isZero())
2213     return nullptr;
2214 
2215   // If the multiply does not wrap or the constant is odd, try to divide the
2216   // compare constant by the multiplication factor.
2217   if (Cmp.isEquality()) {
2218     // (mul nsw X, MulC) eq/ne C --> X eq/ne C /s MulC
2219     if (Mul->hasNoSignedWrap() && C.srem(*MulC).isZero()) {
2220       Constant *NewC = ConstantInt::get(MulTy, C.sdiv(*MulC));
2221       return new ICmpInst(Pred, X, NewC);
2222     }
2223 
2224     // C % MulC == 0 is weaker than we could use if MulC is odd because it
2225     // is correct to transform if MulC * N == C including overflow. E.g. with i8
2226     // (icmp eq (mul X, 5), 101) -> (icmp eq X, 225) but since 101 % 5 != 0, we
2227     // miss that case.
2228     if (C.urem(*MulC).isZero()) {
2229       // (mul nuw X, MulC) eq/ne C --> X eq/ne C /u MulC
2230       // (mul X, OddC) eq/ne N * C --> X eq/ne N
2231       if ((*MulC & 1).isOne() || Mul->hasNoUnsignedWrap()) {
2232         Constant *NewC = ConstantInt::get(MulTy, C.udiv(*MulC));
2233         return new ICmpInst(Pred, X, NewC);
2234       }
2235     }
2236   }
2237 
2238   // With a matching no-overflow guarantee, fold the constants:
2239   // (X * MulC) < C --> X < (C / MulC)
2240   // (X * MulC) > C --> X > (C / MulC)
2241   // TODO: Assert that Pred is not equal to SGE, SLE, UGE, ULE?
2242   Constant *NewC = nullptr;
2243   if (Mul->hasNoSignedWrap() && ICmpInst::isSigned(Pred)) {
2244     // MININT / -1 --> overflow.
2245     if (C.isMinSignedValue() && MulC->isAllOnes())
2246       return nullptr;
2247     if (MulC->isNegative())
2248       Pred = ICmpInst::getSwappedPredicate(Pred);
2249 
2250     if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2251       NewC = ConstantInt::get(
2252           MulTy, APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::UP));
2253     } else {
2254       assert((Pred == ICmpInst::ICMP_SLE || Pred == ICmpInst::ICMP_SGT) &&
2255              "Unexpected predicate");
2256       NewC = ConstantInt::get(
2257           MulTy, APIntOps::RoundingSDiv(C, *MulC, APInt::Rounding::DOWN));
2258     }
2259   } else if (Mul->hasNoUnsignedWrap() && ICmpInst::isUnsigned(Pred)) {
2260     if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) {
2261       NewC = ConstantInt::get(
2262           MulTy, APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::UP));
2263     } else {
2264       assert((Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
2265              "Unexpected predicate");
2266       NewC = ConstantInt::get(
2267           MulTy, APIntOps::RoundingUDiv(C, *MulC, APInt::Rounding::DOWN));
2268     }
2269   }
2270 
2271   return NewC ? new ICmpInst(Pred, X, NewC) : nullptr;
2272 }
2273 
2274 /// Fold icmp (shl nuw C2, Y), C.
2275 static Instruction *foldICmpShlLHSC(ICmpInst &Cmp, Instruction *Shl,
2276                                     const APInt &C) {
2277   Value *Y;
2278   const APInt *C2;
2279   if (!match(Shl, m_NUWShl(m_APInt(C2), m_Value(Y))))
2280     return nullptr;
2281 
2282   Type *ShiftType = Shl->getType();
2283   unsigned TypeBits = C.getBitWidth();
2284   ICmpInst::Predicate Pred = Cmp.getPredicate();
2285   if (Cmp.isUnsigned()) {
2286     if (C2->isZero() || C2->ugt(C))
2287       return nullptr;
2288     APInt Div, Rem;
2289     APInt::udivrem(C, *C2, Div, Rem);
2290     bool CIsPowerOf2 = Rem.isZero() && Div.isPowerOf2();
2291 
2292     // (1 << Y) pred C -> Y pred Log2(C)
2293     if (!CIsPowerOf2) {
2294       // (1 << Y) <  30 -> Y <= 4
2295       // (1 << Y) <= 30 -> Y <= 4
2296       // (1 << Y) >= 30 -> Y >  4
2297       // (1 << Y) >  30 -> Y >  4
2298       if (Pred == ICmpInst::ICMP_ULT)
2299         Pred = ICmpInst::ICMP_ULE;
2300       else if (Pred == ICmpInst::ICMP_UGE)
2301         Pred = ICmpInst::ICMP_UGT;
2302     }
2303 
2304     unsigned CLog2 = Div.logBase2();
2305     return new ICmpInst(Pred, Y, ConstantInt::get(ShiftType, CLog2));
2306   } else if (Cmp.isSigned() && C2->isOne()) {
2307     Constant *BitWidthMinusOne = ConstantInt::get(ShiftType, TypeBits - 1);
2308     // (1 << Y) >  0 -> Y != 31
2309     // (1 << Y) >  C -> Y != 31 if C is negative.
2310     if (Pred == ICmpInst::ICMP_SGT && C.sle(0))
2311       return new ICmpInst(ICmpInst::ICMP_NE, Y, BitWidthMinusOne);
2312 
2313     // (1 << Y) <  0 -> Y == 31
2314     // (1 << Y) <  1 -> Y == 31
2315     // (1 << Y) <  C -> Y == 31 if C is negative and not signed min.
2316     // Exclude signed min by subtracting 1 and lower the upper bound to 0.
2317     if (Pred == ICmpInst::ICMP_SLT && (C - 1).sle(0))
2318       return new ICmpInst(ICmpInst::ICMP_EQ, Y, BitWidthMinusOne);
2319   }
2320 
2321   return nullptr;
2322 }
2323 
2324 /// Fold icmp (shl X, Y), C.
2325 Instruction *InstCombinerImpl::foldICmpShlConstant(ICmpInst &Cmp,
2326                                                    BinaryOperator *Shl,
2327                                                    const APInt &C) {
2328   const APInt *ShiftVal;
2329   if (Cmp.isEquality() && match(Shl->getOperand(0), m_APInt(ShiftVal)))
2330     return foldICmpShlConstConst(Cmp, Shl->getOperand(1), C, *ShiftVal);
2331 
2332   ICmpInst::Predicate Pred = Cmp.getPredicate();
2333   // (icmp pred (shl nuw&nsw X, Y), Csle0)
2334   //      -> (icmp pred X, Csle0)
2335   //
2336   // The idea is the nuw/nsw essentially freeze the sign bit for the shift op
2337   // so X itself can be compared instead of the shifted value.
2338   if (C.sle(0) && Shl->hasNoUnsignedWrap() && Shl->hasNoSignedWrap())
2339     return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2340 
2341   // (icmp eq/ne (shl nuw|nsw X, Y), 0)
2342   //      -> (icmp eq/ne X, 0)
2343   if (ICmpInst::isEquality(Pred) && C.isZero() &&
2344       (Shl->hasNoUnsignedWrap() || Shl->hasNoSignedWrap()))
2345     return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2346 
2347   // (icmp slt (shl nsw X, Y), 0/1)
2348   //      -> (icmp slt X, 0/1)
2349   // (icmp sgt (shl nsw X, Y), 0/-1)
2350   //      -> (icmp sgt X, 0/-1)
2351   //
2352   // NB: sge/sle with a constant will canonicalize to sgt/slt.
2353   if (Shl->hasNoSignedWrap() &&
2354       (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT))
2355     if (C.isZero() || (Pred == ICmpInst::ICMP_SGT ? C.isAllOnes() : C.isOne()))
2356       return new ICmpInst(Pred, Shl->getOperand(0), Cmp.getOperand(1));
2357 
2358   const APInt *ShiftAmt;
2359   if (!match(Shl->getOperand(1), m_APInt(ShiftAmt)))
2360     return foldICmpShlLHSC(Cmp, Shl, C);
2361 
2362   // Check that the shift amount is in range. If not, don't perform undefined
2363   // shifts. When the shift is visited, it will be simplified.
2364   unsigned TypeBits = C.getBitWidth();
2365   if (ShiftAmt->uge(TypeBits))
2366     return nullptr;
2367 
2368   Value *X = Shl->getOperand(0);
2369   Type *ShType = Shl->getType();
2370 
2371   // NSW guarantees that we are only shifting out sign bits from the high bits,
2372   // so we can ASHR the compare constant without needing a mask and eliminate
2373   // the shift.
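       // E.g. (illustrative, i8): 'icmp sgt (shl nsw i8 %x, 3), 16' -> 'icmp sgt i8 %x, 2'.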
2374   if (Shl->hasNoSignedWrap()) {
2375     if (Pred == ICmpInst::ICMP_SGT) {
2376       // icmp Pred (shl nsw X, ShiftAmt), C --> icmp Pred X, (C >>s ShiftAmt)
2377       APInt ShiftedC = C.ashr(*ShiftAmt);
2378       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2379     }
2380     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2381         C.ashr(*ShiftAmt).shl(*ShiftAmt) == C) {
2382       APInt ShiftedC = C.ashr(*ShiftAmt);
2383       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2384     }
2385     if (Pred == ICmpInst::ICMP_SLT) {
2386       // SLE is the same as above, but SLE is canonicalized to SLT, so convert:
2387       // (X << S) <=s C is equiv to X <=s (C >> S) for all C
2388       // (X << S) <s (C + 1) is equiv to X <s (C >> S) + 1 if C <s SMAX
2389       // (X << S) <s C is equiv to X <s ((C - 1) >> S) + 1 if C >s SMIN
2390       assert(!C.isMinSignedValue() && "Unexpected icmp slt");
2391       APInt ShiftedC = (C - 1).ashr(*ShiftAmt) + 1;
2392       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2393     }
2394   }
2395 
2396   // NUW guarantees that we are only shifting out zero bits from the high bits,
2397   // so we can LSHR the compare constant without needing a mask and eliminate
2398   // the shift.
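       // E.g. (illustrative, i8): 'icmp ugt (shl nuw i8 %x, 2), 20' -> 'icmp ugt i8 %x, 5'.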
2399   if (Shl->hasNoUnsignedWrap()) {
2400     if (Pred == ICmpInst::ICMP_UGT) {
2401       // icmp Pred (shl nuw X, ShiftAmt), C --> icmp Pred X, (C >>u ShiftAmt)
2402       APInt ShiftedC = C.lshr(*ShiftAmt);
2403       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2404     }
2405     if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2406         C.lshr(*ShiftAmt).shl(*ShiftAmt) == C) {
2407       APInt ShiftedC = C.lshr(*ShiftAmt);
2408       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2409     }
2410     if (Pred == ICmpInst::ICMP_ULT) {
2411       // ULE is the same as above, but ULE is canonicalized to ULT, so convert:
2412       // (X << S) <=u C is equiv to X <=u (C >> S) for all C
2413       // (X << S) <u (C + 1) is equiv to X <u (C >> S) + 1 if C <u ~0u
2414       // (X << S) <u C is equiv to X <u ((C - 1) >> S) + 1 if C >u 0
2415       assert(C.ugt(0) && "ult 0 should have been eliminated");
2416       APInt ShiftedC = (C - 1).lshr(*ShiftAmt) + 1;
2417       return new ICmpInst(Pred, X, ConstantInt::get(ShType, ShiftedC));
2418     }
2419   }
2420 
2421   if (Cmp.isEquality() && Shl->hasOneUse()) {
2422     // Strength-reduce the shift into an 'and'.
2423     Constant *Mask = ConstantInt::get(
2424         ShType,
2425         APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
2426     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2427     Constant *LShrC = ConstantInt::get(ShType, C.lshr(*ShiftAmt));
2428     return new ICmpInst(Pred, And, LShrC);
2429   }
2430 
2431   // Otherwise, if this is a comparison of the sign bit, simplify to and/test.
2432   bool TrueIfSigned = false;
2433   if (Shl->hasOneUse() && isSignBitCheck(Pred, C, TrueIfSigned)) {
2434     // (X << 31) <s 0  --> (X & 1) != 0
2435     Constant *Mask = ConstantInt::get(
2436         ShType,
2437         APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
2438     Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
2439     return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
2440                         And, Constant::getNullValue(ShType));
2441   }
2442 
2443   // Simplify 'shl' inequality test into 'and' equality test.
2444   if (Cmp.isUnsigned() && Shl->hasOneUse()) {
2445     // (X l<< C2) u<=/u> C1 iff C1+1 is power of two -> X & (~C1 l>> C2) ==/!= 0
2446     if ((C + 1).isPowerOf2() &&
2447         (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT)) {
2448       Value *And = Builder.CreateAnd(X, (~C).lshr(ShiftAmt->getZExtValue()));
2449       return new ICmpInst(Pred == ICmpInst::ICMP_ULE ? ICmpInst::ICMP_EQ
2450                                                      : ICmpInst::ICMP_NE,
2451                           And, Constant::getNullValue(ShType));
2452     }
2453     // (X l<< C2) u</u>= C1 iff C1 is power of two -> X & (-C1 l>> C2) ==/!= 0
2454     if (C.isPowerOf2() &&
2455         (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
2456       Value *And =
2457           Builder.CreateAnd(X, (~(C - 1)).lshr(ShiftAmt->getZExtValue()));
2458       return new ICmpInst(Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_EQ
2459                                                      : ICmpInst::ICMP_NE,
2460                           And, Constant::getNullValue(ShType));
2461     }
2462   }
2463 
2464   // Transform (icmp pred iM (shl iM %v, N), C)
2465   // -> (icmp pred i(M-N) (trunc %v iM to i(M-N)), (trunc (C>>N)))
2466   // Transform the shl to a trunc if (trunc (C>>N)) loses no bits and i(M-N) is a desirable type.
2467   // This enables us to get rid of the shift in favor of a trunc that may be
2468   // free on the target. It has the additional benefit of comparing to a
2469   // smaller constant that may be more target-friendly.
2470   unsigned Amt = ShiftAmt->getLimitedValue(TypeBits - 1);
2471   if (Shl->hasOneUse() && Amt != 0 &&
2472       shouldChangeType(ShType->getScalarSizeInBits(), TypeBits - Amt)) {
2473     ICmpInst::Predicate CmpPred = Pred;
2474     APInt RHSC = C;
2475 
2476     if (RHSC.countr_zero() < Amt && ICmpInst::isStrictPredicate(CmpPred)) {
2477       // Try the flipped strictness predicate.
2478       // e.g.:
2479       // icmp ult i64 (shl X, 32), 8589934593 ->
2480       // icmp ule i64 (shl X, 32), 8589934592 ->
2481       // icmp ule i32 (trunc X, i32), 2 ->
2482       // icmp ult i32 (trunc X, i32), 3
2483       if (auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(
2484               Pred, ConstantInt::get(ShType->getContext(), C))) {
2485         CmpPred = FlippedStrictness->first;
2486         RHSC = cast<ConstantInt>(FlippedStrictness->second)->getValue();
2487       }
2488     }
2489 
2490     if (RHSC.countr_zero() >= Amt) {
2491       Type *TruncTy = ShType->getWithNewBitWidth(TypeBits - Amt);
2492       Constant *NewC =
2493           ConstantInt::get(TruncTy, RHSC.ashr(*ShiftAmt).trunc(TypeBits - Amt));
2494       return new ICmpInst(CmpPred,
2495                           Builder.CreateTrunc(X, TruncTy, "", /*IsNUW=*/false,
2496                                               Shl->hasNoSignedWrap()),
2497                           NewC);
2498     }
2499   }
2500 
2501   return nullptr;
2502 }
2503 
2504 /// Fold icmp ({al}shr X, Y), C.
2505 Instruction *InstCombinerImpl::foldICmpShrConstant(ICmpInst &Cmp,
2506                                                    BinaryOperator *Shr,
2507                                                    const APInt &C) {
2508   // An exact shr only shifts out zero bits, so:
2509   // icmp eq/ne (shr X, Y), 0 --> icmp eq/ne X, 0
2510   Value *X = Shr->getOperand(0);
2511   CmpInst::Predicate Pred = Cmp.getPredicate();
2512   if (Cmp.isEquality() && Shr->isExact() && C.isZero())
2513     return new ICmpInst(Pred, X, Cmp.getOperand(1));
2514 
2515   bool IsAShr = Shr->getOpcode() == Instruction::AShr;
2516   const APInt *ShiftValC;
2517   if (match(X, m_APInt(ShiftValC))) {
2518     if (Cmp.isEquality())
2519       return foldICmpShrConstConst(Cmp, Shr->getOperand(1), C, *ShiftValC);
2520 
2521     // (ShiftValC >> Y) >s -1 --> Y != 0 with ShiftValC < 0
2522     // (ShiftValC >> Y) <s  0 --> Y == 0 with ShiftValC < 0
2523     bool TrueIfSigned;
2524     if (!IsAShr && ShiftValC->isNegative() &&
2525         isSignBitCheck(Pred, C, TrueIfSigned))
2526       return new ICmpInst(TrueIfSigned ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE,
2527                           Shr->getOperand(1),
2528                           ConstantInt::getNullValue(X->getType()));
2529 
2530     // If the shifted constant is a power-of-2, test the shift amount directly:
2531     // (ShiftValC >> Y) >u C --> Y <u (LZ(C) - LZ(ShiftValC))
2532     // (ShiftValC >> Y) <u C --> Y >=u (LZ(C-1) - LZ(ShiftValC))
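         // e.g. for i8: (128 l>> Y) u> 8 --> Y u< 4, since LZ(8) - LZ(128) == 4 - 0.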
2533     if (!IsAShr && ShiftValC->isPowerOf2() &&
2534         (Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_ULT)) {
2535       bool IsUGT = Pred == CmpInst::ICMP_UGT;
2536       assert(ShiftValC->uge(C) && "Expected simplify of compare");
2537       assert((IsUGT || !C.isZero()) && "Expected X u< 0 to simplify");
2538 
2539       unsigned CmpLZ = IsUGT ? C.countl_zero() : (C - 1).countl_zero();
2540       unsigned ShiftLZ = ShiftValC->countl_zero();
2541       Constant *NewC = ConstantInt::get(Shr->getType(), CmpLZ - ShiftLZ);
2542       auto NewPred = IsUGT ? CmpInst::ICMP_ULT : CmpInst::ICMP_UGE;
2543       return new ICmpInst(NewPred, Shr->getOperand(1), NewC);
2544     }
2545   }
2546 
2547   const APInt *ShiftAmtC;
2548   if (!match(Shr->getOperand(1), m_APInt(ShiftAmtC)))
2549     return nullptr;
2550 
2551   // Check that the shift amount is in range. If not, don't perform undefined
2552   // shifts. When the shift is visited it will be simplified.
2553   unsigned TypeBits = C.getBitWidth();
2554   unsigned ShAmtVal = ShiftAmtC->getLimitedValue(TypeBits);
2555   if (ShAmtVal >= TypeBits || ShAmtVal == 0)
2556     return nullptr;
2557 
2558   bool IsExact = Shr->isExact();
2559   Type *ShrTy = Shr->getType();
2560   // TODO: If we could guarantee that InstSimplify would handle all of the
2561   // constant-value-based preconditions in the folds below, then we could assert
2562   // those conditions rather than checking them. This is difficult because of
2563   // undef/poison (PR34838).
2564   if (IsAShr && Shr->hasOneUse()) {
2565     if (IsExact && (Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) &&
2566         (C - 1).isPowerOf2() && C.countLeadingZeros() > ShAmtVal) {
2567       // When C - 1 is a power of two and the transform can be legally
2568       // performed, prefer this form so the produced constant is close to a
2569       // power of two.
2570       // icmp slt/ult (ashr exact X, ShAmtC), C
2571       // --> icmp slt/ult X, ((C - 1) << ShAmtC) + 1
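           // e.g. for i8: (ashr exact X, 2) s< 5 --> X s< 17, since ((5 - 1) << 2) + 1 == 17.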
2572       APInt ShiftedC = (C - 1).shl(ShAmtVal) + 1;
2573       return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2574     }
2575     if (IsExact || Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_ULT) {
2576       // When C can be shifted left by ShAmtC without losing bits:
2577       // icmp PRED (ashr exact X, ShAmtC), C --> icmp PRED X, (C << ShAmtC)
2578       // icmp slt/ult (ashr X, ShAmtC), C --> icmp slt/ult X, (C << ShAmtC)
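           // e.g. for i8: (ashr X, 3) s< 2 --> X s< 16, since (2 << 3) ashr 3 == 2.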
2579       APInt ShiftedC = C.shl(ShAmtVal);
2580       if (ShiftedC.ashr(ShAmtVal) == C)
2581         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2582     }
2583     if (Pred == CmpInst::ICMP_SGT) {
2584       // icmp sgt (ashr X, ShAmtC), C --> icmp sgt X, ((C + 1) << ShAmtC) - 1
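           // e.g. for i8: (ashr X, 2) s> 3 --> X s> 15, since ((3 + 1) << 2) - 1 == 15.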
2585       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2586       if (!C.isMaxSignedValue() && !(C + 1).shl(ShAmtVal).isMinSignedValue() &&
2587           (ShiftedC + 1).ashr(ShAmtVal) == (C + 1))
2588         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2589     }
2590     if (Pred == CmpInst::ICMP_UGT) {
2591       // icmp ugt (ashr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2592       // 'C + 1 << ShAmtC' can overflow as a signed number, so the 2nd
2593       // clause accounts for that pattern.
2594       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2595       if ((ShiftedC + 1).ashr(ShAmtVal) == (C + 1) ||
2596           (C + 1).shl(ShAmtVal).isMinSignedValue())
2597         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2598     }
2599 
2600     // If the compare constant has significant bits above the lowest sign-bit,
2601     // then convert an unsigned cmp to a test of the sign-bit:
2602     // (ashr X, ShiftC) u> C --> X s< 0
2603     // (ashr X, ShiftC) u< C --> X s> -1
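         // e.g. for i8 with ShAmtVal == 5: (ashr X, 5) u> 9 --> X s< 0, because only a
         // negative X yields a shifted value that compares u> 9.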
2604     if (C.getBitWidth() > 2 && C.getNumSignBits() <= ShAmtVal) {
2605       if (Pred == CmpInst::ICMP_UGT) {
2606         return new ICmpInst(CmpInst::ICMP_SLT, X,
2607                             ConstantInt::getNullValue(ShrTy));
2608       }
2609       if (Pred == CmpInst::ICMP_ULT) {
2610         return new ICmpInst(CmpInst::ICMP_SGT, X,
2611                             ConstantInt::getAllOnesValue(ShrTy));
2612       }
2613     }
2614   } else if (!IsAShr) {
2615     if (Pred == CmpInst::ICMP_ULT || (Pred == CmpInst::ICMP_UGT && IsExact)) {
2616       // icmp ult (lshr X, ShAmtC), C --> icmp ult X, (C << ShAmtC)
2617       // icmp ugt (lshr exact X, ShAmtC), C --> icmp ugt X, (C << ShAmtC)
2618       APInt ShiftedC = C.shl(ShAmtVal);
2619       if (ShiftedC.lshr(ShAmtVal) == C)
2620         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2621     }
2622     if (Pred == CmpInst::ICMP_UGT) {
2623       // icmp ugt (lshr X, ShAmtC), C --> icmp ugt X, ((C + 1) << ShAmtC) - 1
2624       APInt ShiftedC = (C + 1).shl(ShAmtVal) - 1;
2625       if ((ShiftedC + 1).lshr(ShAmtVal) == (C + 1))
2626         return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, ShiftedC));
2627     }
2628   }
2629 
2630   if (!Cmp.isEquality())
2631     return nullptr;
2632 
2633   // Handle equality comparisons of shift-by-constant.
2634 
2635   // If the comparison constant changes with the shift, the comparison cannot
2636   // succeed (bits of the comparison constant cannot match the shifted value).
2637   // This should be known by InstSimplify and already be folded to true/false.
2638   assert(((IsAShr && C.shl(ShAmtVal).ashr(ShAmtVal) == C) ||
2639           (!IsAShr && C.shl(ShAmtVal).lshr(ShAmtVal) == C)) &&
2640          "Expected icmp+shr simplify did not occur.");
2641 
2642   // If the bits shifted out are known zero, compare the unshifted value:
2643   //  (X & 4) >> 1 == 2  --> (X & 4) == 4.
2644   if (Shr->isExact())
2645     return new ICmpInst(Pred, X, ConstantInt::get(ShrTy, C << ShAmtVal));
2646 
2647   if (C.isZero()) {
2648     // == 0 is u< 1.
2649     if (Pred == CmpInst::ICMP_EQ)
2650       return new ICmpInst(CmpInst::ICMP_ULT, X,
2651                           ConstantInt::get(ShrTy, (C + 1).shl(ShAmtVal)));
2652     else
2653       return new ICmpInst(CmpInst::ICMP_UGT, X,
2654                           ConstantInt::get(ShrTy, (C + 1).shl(ShAmtVal) - 1));
2655   }
2656 
2657   if (Shr->hasOneUse()) {
2658     // Canonicalize the shift into an 'and':
2659     // icmp eq/ne (shr X, ShAmt), C --> icmp eq/ne (and X, HiMask), (C << ShAmt)
2660     APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
2661     Constant *Mask = ConstantInt::get(ShrTy, Val);
2662     Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
2663     return new ICmpInst(Pred, And, ConstantInt::get(ShrTy, C << ShAmtVal));
2664   }
2665 
2666   return nullptr;
2667 }
2668 
2669 Instruction *InstCombinerImpl::foldICmpSRemConstant(ICmpInst &Cmp,
2670                                                     BinaryOperator *SRem,
2671                                                     const APInt &C) {
2672   const ICmpInst::Predicate Pred = Cmp.getPredicate();
2673   if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT) {
2674     // Canonicalize unsigned predicates to signed:
2675     // (X s% DivisorC) u> C -> (X s% DivisorC) s< 0
2676     //   iff (C s< 0 ? ~C : C) u>= abs(DivisorC)-1
2677     // (X s% DivisorC) u< C+1 -> (X s% DivisorC) s> -1
2678     //   iff (C+1 s< 0 ? ~C : C) u>= abs(DivisorC)-1
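         // e.g. for i8: (X s% 5) u> 4 --> (X s% 5) s< 0, since |X s% 5| <= 4 and only
         // negative remainders compare u> 4.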
2679 
2680     const APInt *DivisorC;
2681     if (!match(SRem->getOperand(1), m_APInt(DivisorC)))
2682       return nullptr;
2683 
2684     APInt NormalizedC = C;
2685     if (Pred == ICmpInst::ICMP_ULT) {
2686       assert(!NormalizedC.isZero() &&
2687              "ult X, 0 should have been simplified already.");
2688       --NormalizedC;
2689     }
2690     if (C.isNegative())
2691       NormalizedC.flipAllBits();
2692     assert(!DivisorC->isZero() &&
2693            "srem X, 0 should have been simplified already.");
2694     if (!NormalizedC.uge(DivisorC->abs() - 1))
2695       return nullptr;
2696 
2697     Type *Ty = SRem->getType();
2698     if (Pred == ICmpInst::ICMP_UGT)
2699       return new ICmpInst(ICmpInst::ICMP_SLT, SRem,
2700                           ConstantInt::getNullValue(Ty));
2701     return new ICmpInst(ICmpInst::ICMP_SGT, SRem,
2702                         ConstantInt::getAllOnesValue(Ty));
2703   }
2704   // Match an 'is positive' or 'is negative' comparison of remainder by a
2705   // constant power-of-2 value:
2706   // (X % pow2C) sgt/slt 0
2707   if (Pred != ICmpInst::ICMP_SGT && Pred != ICmpInst::ICMP_SLT &&
2708       Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE)
2709     return nullptr;
2710 
2711   // TODO: The one-use check is standard because we do not typically want to
2712   //       create longer instruction sequences, but this might be a special-case
2713   //       because srem is not good for analysis or codegen.
2714   if (!SRem->hasOneUse())
2715     return nullptr;
2716 
2717   const APInt *DivisorC;
2718   if (!match(SRem->getOperand(1), m_Power2(DivisorC)))
2719     return nullptr;
2720 
2721   // For cmp_sgt/cmp_slt, only a zero-valued C is handled.
2722   // For cmp_eq/cmp_ne, only a strictly positive C is handled.
2723   if (((Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT) &&
2724        !C.isZero()) ||
2725       ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE) &&
2726        !C.isStrictlyPositive()))
2727     return nullptr;
2728 
2729   // Mask off the sign bit and the modulo bits (low-bits).
2730   Type *Ty = SRem->getType();
2731   APInt SignMask = APInt::getSignMask(Ty->getScalarSizeInBits());
2732   Constant *MaskC = ConstantInt::get(Ty, SignMask | (*DivisorC - 1));
2733   Value *And = Builder.CreateAnd(SRem->getOperand(0), MaskC);
2734 
2735   if (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)
2736     return new ICmpInst(Pred, And, ConstantInt::get(Ty, C));
2737 
2738   // For 'is positive?' check that the sign-bit is clear and at least 1 masked
2739   // bit is set. Example:
2740   // (i8 X % 32) s> 0 --> (X & 159) s> 0
2741   if (Pred == ICmpInst::ICMP_SGT)
2742     return new ICmpInst(ICmpInst::ICMP_SGT, And, ConstantInt::getNullValue(Ty));
2743 
2744   // For 'is negative?' check that the sign-bit is set and at least 1 masked
2745   // bit is set. Example:
2746   // (i16 X % 4) s< 0 --> (X & 32771) u> 32768
2747   return new ICmpInst(ICmpInst::ICMP_UGT, And, ConstantInt::get(Ty, SignMask));
2748 }
2749 
2750 /// Fold icmp (udiv X, Y), C.
2751 Instruction *InstCombinerImpl::foldICmpUDivConstant(ICmpInst &Cmp,
2752                                                     BinaryOperator *UDiv,
2753                                                     const APInt &C) {
2754   ICmpInst::Predicate Pred = Cmp.getPredicate();
2755   Value *X = UDiv->getOperand(0);
2756   Value *Y = UDiv->getOperand(1);
2757   Type *Ty = UDiv->getType();
2758 
2759   const APInt *C2;
2760   if (!match(X, m_APInt(C2)))
2761     return nullptr;
2762 
2763   assert(*C2 != 0 && "udiv 0, X should have been simplified already.");
2764 
2765   // (icmp ugt (udiv C2, Y), C) -> (icmp ule Y, C2/(C+1))
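       // e.g. for i8: (udiv 64, Y) u> 3 --> Y u<= 16, since 64 u/ (3 + 1) == 16.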
2766   if (Pred == ICmpInst::ICMP_UGT) {
2767     assert(!C.isMaxValue() &&
2768            "icmp ugt X, UINT_MAX should have been simplified already.");
2769     return new ICmpInst(ICmpInst::ICMP_ULE, Y,
2770                         ConstantInt::get(Ty, C2->udiv(C + 1)));
2771   }
2772 
2773   // (icmp ult (udiv C2, Y), C) -> (icmp ugt Y, C2/C)
2774   if (Pred == ICmpInst::ICMP_ULT) {
2775     assert(C != 0 && "icmp ult X, 0 should have been simplified already.");
2776     return new ICmpInst(ICmpInst::ICMP_UGT, Y,
2777                         ConstantInt::get(Ty, C2->udiv(C)));
2778   }
2779 
2780   return nullptr;
2781 }
2782 
2783 /// Fold icmp ({su}div X, Y), C.
2784 Instruction *InstCombinerImpl::foldICmpDivConstant(ICmpInst &Cmp,
2785                                                    BinaryOperator *Div,
2786                                                    const APInt &C) {
2787   ICmpInst::Predicate Pred = Cmp.getPredicate();
2788   Value *X = Div->getOperand(0);
2789   Value *Y = Div->getOperand(1);
2790   Type *Ty = Div->getType();
2791   bool DivIsSigned = Div->getOpcode() == Instruction::SDiv;
2792 
2793   // If unsigned division and the compare constant is bigger than
2794   // UMAX/2 (negative), there's only one pair of values that satisfies an
2795   // equality check, so eliminate the division:
2796   // (X u/ Y) == C --> (X == C) && (Y == 1)
2797   // (X u/ Y) != C --> (X != C) || (Y != 1)
2798   // Similarly, if signed division and the compare constant is exactly SMIN:
2799   // (X s/ Y) == SMIN --> (X == SMIN) && (Y == 1)
2800   // (X s/ Y) != SMIN --> (X != SMIN) || (Y != 1)
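       // e.g. for i8: (X u/ Y) == 200 --> (X == 200) && (Y == 1), since any Y >= 2
       // yields a quotient u<= 127.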
2801   if (Cmp.isEquality() && Div->hasOneUse() && C.isSignBitSet() &&
2802       (!DivIsSigned || C.isMinSignedValue())) {
2803     Value *XBig = Builder.CreateICmp(Pred, X, ConstantInt::get(Ty, C));
2804     Value *YOne = Builder.CreateICmp(Pred, Y, ConstantInt::get(Ty, 1));
2805     auto Logic = Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
2806     return BinaryOperator::Create(Logic, XBig, YOne);
2807   }
2808 
2809   // Fold: icmp pred ([us]div X, C2), C -> range test
2810   // Fold this div into the comparison, producing a range check.
2811   // Determine, based on the divide type, what the range is being
2812   // checked.  If there is an overflow on the low or high side, remember
2813   // it, otherwise compute the range [low, hi) bounding the new value.
2814   // See: InsertRangeTest above for the kinds of replacements possible.
2815   const APInt *C2;
2816   if (!match(Y, m_APInt(C2)))
2817     return nullptr;
2818 
2819   // FIXME: If the operand types don't match the type of the divide
2820   // then don't attempt this transform. The code below doesn't have the
2821   // logic to deal with a signed divide and an unsigned compare (and
2822   // vice versa). This is because (x /s C2) <s C  produces different
2823   // results than (x /s C2) <u C or (x /u C2) <s C or even
2824   // (x /u C2) <u C.  Simply casting the operands and result won't
2825   // work. :(  The if statement below tests that condition and bails
2826   // if it finds it.
2827   if (!Cmp.isEquality() && DivIsSigned != Cmp.isSigned())
2828     return nullptr;
2829 
2830   // The ProdOV computation fails on divide by 0 and divide by -1. Cases with
2831   // INT_MIN will also fail if the divisor is 1. Although folds of all these
2832   // division-by-constant cases should be present, we can not assert that they
2833   // have happened before we reach this icmp instruction.
2834   if (C2->isZero() || C2->isOne() || (DivIsSigned && C2->isAllOnes()))
2835     return nullptr;
2836 
2837   // Compute Prod = C * C2. We are essentially solving an equation of
2838   // form X / C2 = C. We solve for X by multiplying C2 and C.
2839   // By solving for X, we can turn this into a range check instead of computing
2840   // a divide.
2841   APInt Prod = C * *C2;
2842 
2843   // Determine if the product overflows by seeing if dividing it back by C2
2844   // does not give C. Make sure we do the same kind of divide as in the LHS
2845   // instruction that we're folding.
2846   bool ProdOV = (DivIsSigned ? Prod.sdiv(*C2) : Prod.udiv(*C2)) != C;
2847 
2848   // If the division is known to be exact, then there is no remainder from the
2849   // divide, so the covered range size is 1; otherwise it is the divisor.
2850   APInt RangeSize = Div->isExact() ? APInt(C2->getBitWidth(), 1) : *C2;
2851 
2852   // Figure out the interval that is being checked.  For example, a comparison
2853   // like "X /u 5 == 0" is really checking that X is in the interval [0, 5).
2854   // Compute this interval based on the constants involved and the signedness of
2855   // the compare/divide.  This computes a half-open interval, keeping track of
2856   // whether either value in the interval overflows.  After analysis each
2857   // overflow variable is set to 0 if its corresponding bound variable is valid,
2858   // -1 if it overflowed off the bottom end, or +1 if it overflowed off the top end.
2859   int LoOverflow = 0, HiOverflow = 0;
2860   APInt LoBound, HiBound;
2861 
2862   if (!DivIsSigned) { // udiv
2863     // e.g. X/5 op 3  --> [15, 20)
2864     LoBound = Prod;
2865     HiOverflow = LoOverflow = ProdOV;
2866     if (!HiOverflow) {
2867       // If this is not an exact divide, then many values in the range collapse
2868       // to the same result value.
2869       HiOverflow = addWithOverflow(HiBound, LoBound, RangeSize, false);
2870     }
2871   } else if (C2->isStrictlyPositive()) { // Divisor is > 0.
2872     if (C.isZero()) {                    // (X / pos) op 0
2873       // Can't overflow.  e.g.  X/2 op 0 --> [-1, 2)
2874       LoBound = -(RangeSize - 1);
2875       HiBound = RangeSize;
2876     } else if (C.isStrictlyPositive()) { // (X / pos) op pos
2877       LoBound = Prod;                    // e.g.   X/5 op 3 --> [15, 20)
2878       HiOverflow = LoOverflow = ProdOV;
2879       if (!HiOverflow)
2880         HiOverflow = addWithOverflow(HiBound, Prod, RangeSize, true);
2881     } else { // (X / pos) op neg
2882       // e.g. X/5 op -3  --> [-15-4, -15+1) --> [-19, -14)
2883       HiBound = Prod + 1;
2884       LoOverflow = HiOverflow = ProdOV ? -1 : 0;
2885       if (!LoOverflow) {
2886         APInt DivNeg = -RangeSize;
2887         LoOverflow = addWithOverflow(LoBound, HiBound, DivNeg, true) ? -1 : 0;
2888       }
2889     }
2890   } else if (C2->isNegative()) { // Divisor is < 0.
2891     if (Div->isExact())
2892       RangeSize.negate();
2893     if (C.isZero()) { // (X / neg) op 0
2894       // e.g. X/-5 op 0  --> [-4, 5)
2895       LoBound = RangeSize + 1;
2896       HiBound = -RangeSize;
2897       if (HiBound == *C2) { // -INTMIN = INTMIN
2898         HiOverflow = 1;     // [INTMIN+1, overflow)
2899         HiBound = APInt();  // e.g. X/INTMIN = 0 --> X > INTMIN
2900       }
2901     } else if (C.isStrictlyPositive()) { // (X / neg) op pos
2902       // e.g. X/-5 op 3  --> [-19, -14)
2903       HiBound = Prod + 1;
2904       HiOverflow = LoOverflow = ProdOV ? -1 : 0;
2905       if (!LoOverflow)
2906         LoOverflow =
2907             addWithOverflow(LoBound, HiBound, RangeSize, true) ? -1 : 0;
2908     } else {          // (X / neg) op neg
2909       LoBound = Prod; // e.g. X/-5 op -3  --> [15, 20)
2910       LoOverflow = HiOverflow = ProdOV;
2911       if (!HiOverflow)
2912         HiOverflow = subWithOverflow(HiBound, Prod, RangeSize, true);
2913     }
2914 
2915     // Dividing by a negative swaps the condition.  LT <-> GT
2916     Pred = ICmpInst::getSwappedPredicate(Pred);
2917   }
2918 
2919   switch (Pred) {
2920   default:
2921     llvm_unreachable("Unhandled icmp predicate!");
2922   case ICmpInst::ICMP_EQ:
2923     if (LoOverflow && HiOverflow)
2924       return replaceInstUsesWith(Cmp, Builder.getFalse());
2925     if (HiOverflow)
2926       return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2927                           X, ConstantInt::get(Ty, LoBound));
2928     if (LoOverflow)
2929       return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2930                           X, ConstantInt::get(Ty, HiBound));
2931     return replaceInstUsesWith(
2932         Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, true));
2933   case ICmpInst::ICMP_NE:
2934     if (LoOverflow && HiOverflow)
2935       return replaceInstUsesWith(Cmp, Builder.getTrue());
2936     if (HiOverflow)
2937       return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
2938                           X, ConstantInt::get(Ty, LoBound));
2939     if (LoOverflow)
2940       return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE,
2941                           X, ConstantInt::get(Ty, HiBound));
2942     return replaceInstUsesWith(
2943         Cmp, insertRangeTest(X, LoBound, HiBound, DivIsSigned, false));
2944   case ICmpInst::ICMP_ULT:
2945   case ICmpInst::ICMP_SLT:
2946     if (LoOverflow == +1) // Low bound is greater than input range.
2947       return replaceInstUsesWith(Cmp, Builder.getTrue());
2948     if (LoOverflow == -1) // Low bound is less than input range.
2949       return replaceInstUsesWith(Cmp, Builder.getFalse());
2950     return new ICmpInst(Pred, X, ConstantInt::get(Ty, LoBound));
2951   case ICmpInst::ICMP_UGT:
2952   case ICmpInst::ICMP_SGT:
2953     if (HiOverflow == +1) // High bound greater than input range.
2954       return replaceInstUsesWith(Cmp, Builder.getFalse());
2955     if (HiOverflow == -1) // High bound less than input range.
2956       return replaceInstUsesWith(Cmp, Builder.getTrue());
2957     if (Pred == ICmpInst::ICMP_UGT)
2958       return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, HiBound));
2959     return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, HiBound));
2960   }
2961 
2962   return nullptr;
2963 }
2964 
2965 /// Fold icmp (sub X, Y), C.
2966 Instruction *InstCombinerImpl::foldICmpSubConstant(ICmpInst &Cmp,
2967                                                    BinaryOperator *Sub,
2968                                                    const APInt &C) {
2969   Value *X = Sub->getOperand(0), *Y = Sub->getOperand(1);
2970   ICmpInst::Predicate Pred = Cmp.getPredicate();
2971   Type *Ty = Sub->getType();
2972 
2973   // (SubC - Y) == C --> Y == (SubC - C)
2974   // (SubC - Y) != C --> Y != (SubC - C)
2975   Constant *SubC;
2976   if (Cmp.isEquality() && match(X, m_ImmConstant(SubC))) {
2977     return new ICmpInst(Pred, Y,
2978                         ConstantExpr::getSub(SubC, ConstantInt::get(Ty, C)));
2979   }
2980 
2981   // (icmp P (sub nuw|nsw C2, Y), C) -> (icmp swap(P) Y, C2-C)
2982   const APInt *C2;
2983   APInt SubResult;
2984   ICmpInst::Predicate SwappedPred = Cmp.getSwappedPredicate();
2985   bool HasNSW = Sub->hasNoSignedWrap();
2986   bool HasNUW = Sub->hasNoUnsignedWrap();
2987   if (match(X, m_APInt(C2)) &&
2988       ((Cmp.isUnsigned() && HasNUW) || (Cmp.isSigned() && HasNSW)) &&
2989       !subWithOverflow(SubResult, *C2, C, Cmp.isSigned()))
2990     return new ICmpInst(SwappedPred, Y, ConstantInt::get(Ty, SubResult));
2991 
2992   // X - Y == 0 --> X == Y.
2993   // X - Y != 0 --> X != Y.
2994   // TODO: We allow this with multiple uses as long as the other uses are not
2995   //       in phis. The phi use check is guarding against a codegen regression
2996   //       for a loop test. If the backend could undo this (and possibly
2997   //       subsequent transforms), we would not need this hack.
2998   if (Cmp.isEquality() && C.isZero() &&
2999       none_of((Sub->users()), [](const User *U) { return isa<PHINode>(U); }))
3000     return new ICmpInst(Pred, X, Y);
3001 
3002   // The following transforms are only worth it if the only user of the subtract
3003   // is the icmp.
3004   // TODO: This is an artificial restriction for all of the transforms below
3005   //       that only need a single replacement icmp. Can these use the phi test
3006   //       like the transform above here?
3007   if (!Sub->hasOneUse())
3008     return nullptr;
3009 
3010   if (Sub->hasNoSignedWrap()) {
3011     // (icmp sgt (sub nsw X, Y), -1) -> (icmp sge X, Y)
3012     if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
3013       return new ICmpInst(ICmpInst::ICMP_SGE, X, Y);
3014 
3015     // (icmp sgt (sub nsw X, Y), 0) -> (icmp sgt X, Y)
3016     if (Pred == ICmpInst::ICMP_SGT && C.isZero())
3017       return new ICmpInst(ICmpInst::ICMP_SGT, X, Y);
3018 
3019     // (icmp slt (sub nsw X, Y), 0) -> (icmp slt X, Y)
3020     if (Pred == ICmpInst::ICMP_SLT && C.isZero())
3021       return new ICmpInst(ICmpInst::ICMP_SLT, X, Y);
3022 
3023     // (icmp slt (sub nsw X, Y), 1) -> (icmp sle X, Y)
3024     if (Pred == ICmpInst::ICMP_SLT && C.isOne())
3025       return new ICmpInst(ICmpInst::ICMP_SLE, X, Y);
3026   }
3027 
3028   if (!match(X, m_APInt(C2)))
3029     return nullptr;
3030 
3031   // C2 - Y <u C -> (Y | (C - 1)) == C2
3032   //   iff (C2 & (C - 1)) == C - 1 and C is a power of 2
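       // e.g. for i8: (7 - Y) u< 4 --> (Y | 3) == 7, i.e. Y is in [4, 7].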
3033   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() &&
3034       (*C2 & (C - 1)) == (C - 1))
3035     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, C - 1), X);
3036 
3037   // C2 - Y >u C -> (Y | C) != C2
3038   //   iff C2 & C == C and C + 1 is a power of 2
3039   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == C)
3040     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, C), X);
3041 
3042   // We have handled the special cases above that fold to simpler forms.
3043   // Canonicalize any remaining sub to add as:
3044   // (C2 - Y) > C --> (Y + ~C2) < ~C
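       // e.g. for i8: (10 - Y) u> 3 --> (Y + ~10) u< ~3, i.e. (Y + 245) u< 252.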
3045   Value *Add = Builder.CreateAdd(Y, ConstantInt::get(Ty, ~(*C2)), "notsub",
3046                                  HasNUW, HasNSW);
3047   return new ICmpInst(SwappedPred, Add, ConstantInt::get(Ty, ~C));
3048 }
3049 
3050 static Value *createLogicFromTable(const std::bitset<4> &Table, Value *Op0,
3051                                    Value *Op1, IRBuilderBase &Builder,
3052                                    bool HasOneUse) {
3053   auto FoldConstant = [&](bool Val) {
3054     Constant *Res = Val ? Builder.getTrue() : Builder.getFalse();
3055     if (Op0->getType()->isVectorTy())
3056       Res = ConstantVector::getSplat(
3057           cast<VectorType>(Op0->getType())->getElementCount(), Res);
3058     return Res;
3059   };
3060 
3061   switch (Table.to_ulong()) {
3062   case 0: // 0 0 0 0
3063     return FoldConstant(false);
3064   case 1: // 0 0 0 1
3065     return HasOneUse ? Builder.CreateNot(Builder.CreateOr(Op0, Op1)) : nullptr;
3066   case 2: // 0 0 1 0
3067     return HasOneUse ? Builder.CreateAnd(Builder.CreateNot(Op0), Op1) : nullptr;
3068   case 3: // 0 0 1 1
3069     return Builder.CreateNot(Op0);
3070   case 4: // 0 1 0 0
3071     return HasOneUse ? Builder.CreateAnd(Op0, Builder.CreateNot(Op1)) : nullptr;
3072   case 5: // 0 1 0 1
3073     return Builder.CreateNot(Op1);
3074   case 6: // 0 1 1 0
3075     return Builder.CreateXor(Op0, Op1);
3076   case 7: // 0 1 1 1
3077     return HasOneUse ? Builder.CreateNot(Builder.CreateAnd(Op0, Op1)) : nullptr;
3078   case 8: // 1 0 0 0
3079     return Builder.CreateAnd(Op0, Op1);
3080   case 9: // 1 0 0 1
3081     return HasOneUse ? Builder.CreateNot(Builder.CreateXor(Op0, Op1)) : nullptr;
3082   case 10: // 1 0 1 0
3083     return Op1;
3084   case 11: // 1 0 1 1
3085     return HasOneUse ? Builder.CreateOr(Builder.CreateNot(Op0), Op1) : nullptr;
3086   case 12: // 1 1 0 0
3087     return Op0;
3088   case 13: // 1 1 0 1
3089     return HasOneUse ? Builder.CreateOr(Op0, Builder.CreateNot(Op1)) : nullptr;
3090   case 14: // 1 1 1 0
3091     return Builder.CreateOr(Op0, Op1);
3092   case 15: // 1 1 1 1
3093     return FoldConstant(true);
3094   default:
3095     llvm_unreachable("Invalid Operation");
3096   }
3097   return nullptr;
3098 }
3099 
3100 Instruction *InstCombinerImpl::foldICmpBinOpWithConstantViaTruthTable(
3101     ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3102   Value *A, *B;
3103   Constant *C1, *C2, *C3, *C4;
3104   if (!(match(BO->getOperand(0),
3105               m_Select(m_Value(A), m_Constant(C1), m_Constant(C2)))) ||
3106       !match(BO->getOperand(1),
3107              m_Select(m_Value(B), m_Constant(C3), m_Constant(C4))) ||
3108       Cmp.getType() != A->getType())
3109     return nullptr;
3110 
3111   std::bitset<4> Table;
3112   auto ComputeTable = [&](bool First, bool Second) -> std::optional<bool> {
3113     Constant *L = First ? C1 : C2;
3114     Constant *R = Second ? C3 : C4;
3115     if (auto *Res = ConstantFoldBinaryOpOperands(BO->getOpcode(), L, R, DL)) {
3116       auto *Val = Res->getType()->isVectorTy() ? Res->getSplatValue() : Res;
3117       if (auto *CI = dyn_cast_or_null<ConstantInt>(Val))
3118         return ICmpInst::compare(CI->getValue(), C, Cmp.getPredicate());
3119     }
3120     return std::nullopt;
3121   };
3122 
3123   for (unsigned I = 0; I < 4; ++I) {
3124     bool First = (I >> 1) & 1;
3125     bool Second = I & 1;
3126     if (auto Res = ComputeTable(First, Second))
3127       Table[I] = *Res;
3128     else
3129       return nullptr;
3130   }
3131 
3132   // Synthesize optimal logic.
3133   if (auto *Cond = createLogicFromTable(Table, A, B, Builder, BO->hasOneUse()))
3134     return replaceInstUsesWith(Cmp, Cond);
3135   return nullptr;
3136 }
3137 
3138 /// Fold icmp (add X, Y), C.
3139 Instruction *InstCombinerImpl::foldICmpAddConstant(ICmpInst &Cmp,
3140                                                    BinaryOperator *Add,
3141                                                    const APInt &C) {
3142   Value *Y = Add->getOperand(1);
3143   Value *X = Add->getOperand(0);
3144 
3145   Value *Op0, *Op1;
3146   Instruction *Ext0, *Ext1;
3147   const CmpInst::Predicate Pred = Cmp.getPredicate();
3148   if (match(Add,
3149             m_Add(m_CombineAnd(m_Instruction(Ext0), m_ZExtOrSExt(m_Value(Op0))),
3150                   m_CombineAnd(m_Instruction(Ext1),
3151                                m_ZExtOrSExt(m_Value(Op1))))) &&
3152       Op0->getType()->isIntOrIntVectorTy(1) &&
3153       Op1->getType()->isIntOrIntVectorTy(1)) {
3154     unsigned BW = C.getBitWidth();
3155     std::bitset<4> Table;
3156     auto ComputeTable = [&](bool Op0Val, bool Op1Val) {
3157       APInt Res(BW, 0);
3158       if (Op0Val)
3159         Res += APInt(BW, isa<ZExtInst>(Ext0) ? 1 : -1, /*isSigned=*/true);
3160       if (Op1Val)
3161         Res += APInt(BW, isa<ZExtInst>(Ext1) ? 1 : -1, /*isSigned=*/true);
3162       return ICmpInst::compare(Res, C, Pred);
3163     };
3164 
3165     Table[0] = ComputeTable(false, false);
3166     Table[1] = ComputeTable(false, true);
3167     Table[2] = ComputeTable(true, false);
3168     Table[3] = ComputeTable(true, true);
3169     if (auto *Cond =
3170             createLogicFromTable(Table, Op0, Op1, Builder, Add->hasOneUse()))
3171       return replaceInstUsesWith(Cmp, Cond);
3172   }
3173   const APInt *C2;
3174   if (Cmp.isEquality() || !match(Y, m_APInt(C2)))
3175     return nullptr;
3176 
3177   // Fold icmp pred (add X, C2), C.
3178   Type *Ty = Add->getType();
3179 
3180   // If the add does not wrap, we can always adjust the compare by subtracting
3181   // the constants. Equality comparisons are handled elsewhere. SGE/SLE/UGE/ULE
3182   // are canonicalized to SGT/SLT/UGT/ULT.
3183   if ((Add->hasNoSignedWrap() &&
3184        (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLT)) ||
3185       (Add->hasNoUnsignedWrap() &&
3186        (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULT))) {
3187     bool Overflow;
3188     APInt NewC =
3189         Cmp.isSigned() ? C.ssub_ov(*C2, Overflow) : C.usub_ov(*C2, Overflow);
3190     // If there is overflow, the result must be true or false.
3191     // TODO: Can we assert there is no overflow because InstSimplify always
3192     // handles those cases?
3193     if (!Overflow)
3194       // icmp Pred (add nsw X, C2), C --> icmp Pred X, (C - C2)
3195       return new ICmpInst(Pred, X, ConstantInt::get(Ty, NewC));
3196   }
3197 
3198   if (ICmpInst::isUnsigned(Pred) && Add->hasNoSignedWrap() &&
3199       C.isNonNegative() && (C - *C2).isNonNegative() &&
3200       computeConstantRange(X, /*ForSigned=*/true).add(*C2).isAllNonNegative())
3201     return new ICmpInst(ICmpInst::getSignedPredicate(Pred), X,
3202                         ConstantInt::get(Ty, C - *C2));
3203 
3204   auto CR = ConstantRange::makeExactICmpRegion(Pred, C).subtract(*C2);
3205   const APInt &Upper = CR.getUpper();
3206   const APInt &Lower = CR.getLower();
3207   if (Cmp.isSigned()) {
3208     if (Lower.isSignMask())
3209       return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, Upper));
3210     if (Upper.isSignMask())
3211       return new ICmpInst(ICmpInst::ICMP_SGE, X, ConstantInt::get(Ty, Lower));
3212   } else {
3213     if (Lower.isMinValue())
3214       return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, Upper));
3215     if (Upper.isMinValue())
3216       return new ICmpInst(ICmpInst::ICMP_UGE, X, ConstantInt::get(Ty, Lower));
3217   }
3218 
3219   // This set of folds is intentionally placed after folds that use no-wrapping
3220   // flags because those folds are likely better for later analysis/codegen.
3221   const APInt SMax = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
3222   const APInt SMin = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
3223 
3224   // Fold compare with offset to opposite sign compare if it eliminates offset:
3225   // (X + C2) >u C --> X <s -C2 (if C == C2 + SMAX)
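       // e.g. for i8: (X + 16) u> 143 --> X s< -16, since 143 == 16 + 127.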
3226   if (Pred == CmpInst::ICMP_UGT && C == *C2 + SMax)
3227     return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantInt::get(Ty, -(*C2)));
3228 
3229   // (X + C2) <u C --> X >s ~C2 (if C == C2 + SMIN)
3230   if (Pred == CmpInst::ICMP_ULT && C == *C2 + SMin)
3231     return new ICmpInst(ICmpInst::ICMP_SGT, X, ConstantInt::get(Ty, ~(*C2)));
3232 
3233   // (X + C2) >s C --> X <u (SMAX - C) (if C == C2 - 1)
3234   if (Pred == CmpInst::ICMP_SGT && C == *C2 - 1)
3235     return new ICmpInst(ICmpInst::ICMP_ULT, X, ConstantInt::get(Ty, SMax - C));
3236 
3237   // (X + C2) <s C --> X >u (C ^ SMAX) (if C == C2)
3238   if (Pred == CmpInst::ICMP_SLT && C == *C2)
3239     return new ICmpInst(ICmpInst::ICMP_UGT, X, ConstantInt::get(Ty, C ^ SMax));
3240 
3241   // (X + -1) <u C --> X <=u C (if X is known non-zero)
3242   if (Pred == CmpInst::ICMP_ULT && C2->isAllOnes()) {
3243     const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
3244     if (llvm::isKnownNonZero(X, Q))
3245       return new ICmpInst(ICmpInst::ICMP_ULE, X, ConstantInt::get(Ty, C));
3246   }
3247 
3248   if (!Add->hasOneUse())
3249     return nullptr;
3250 
3251   // X+C2 <u C -> (X & -C) == -C2
3252   //   iff C2 & (C-1) == 0
3253   //       C is a power of 2
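       // e.g. for i8: (X + 32) u< 16 --> (X & -16) == -32, i.e. X is in [224, 239].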
3254   if (Pred == ICmpInst::ICMP_ULT && C.isPowerOf2() && (*C2 & (C - 1)) == 0)
3255     return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -C),
3256                         ConstantExpr::getNeg(cast<Constant>(Y)));
3257 
3258   // X+C2 <u C -> (X & C) != 2C
3259   //   iff C == -(C2)
3260   //       C2 is a power of 2
3261   if (Pred == ICmpInst::ICMP_ULT && C2->isPowerOf2() && C == -*C2)
3262     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, C),
3263                         ConstantInt::get(Ty, C * 2));
3264 
3265   // X+C2 >u C -> (X & ~C) != -C2
3266   //   iff C2 & C == 0
3267   //       C+1 is a power of 2
3268   if (Pred == ICmpInst::ICMP_UGT && (C + 1).isPowerOf2() && (*C2 & C) == 0)
3269     return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~C),
3270                         ConstantExpr::getNeg(cast<Constant>(Y)));
3271 
3272   // The range test idiom can use either ult or ugt. Arbitrarily canonicalize
3273   // to the ult form.
3274   // X+C2 >u C -> X+(C2-C-1) <u ~C
3275   if (Pred == ICmpInst::ICMP_UGT)
3276     return new ICmpInst(ICmpInst::ICMP_ULT,
3277                         Builder.CreateAdd(X, ConstantInt::get(Ty, *C2 - C - 1)),
3278                         ConstantInt::get(Ty, ~C));
3279 
3280   // zext(V) + C2 pred C -> V + C3 pred' C4
3281   Value *V;
3282   if (match(X, m_ZExt(m_Value(V)))) {
3283     Type *NewCmpTy = V->getType();
3284     unsigned NewCmpBW = NewCmpTy->getScalarSizeInBits();
3285     if (shouldChangeType(Ty, NewCmpTy)) {
3286       if (CR.getActiveBits() <= NewCmpBW) {
3287         ConstantRange SrcCR = CR.truncate(NewCmpBW);
3288         CmpInst::Predicate EquivPred;
3289         APInt EquivInt;
3290         APInt EquivOffset;
3291 
3292         SrcCR.getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
3293         return new ICmpInst(
3294             EquivPred,
3295             EquivOffset.isZero()
3296                 ? V
3297                 : Builder.CreateAdd(V, ConstantInt::get(NewCmpTy, EquivOffset)),
3298             ConstantInt::get(NewCmpTy, EquivInt));
3299       }
3300     }
3301   }
3302 
3303   return nullptr;
3304 }
3305 
3306 bool InstCombinerImpl::matchThreeWayIntCompare(SelectInst *SI, Value *&LHS,
3307                                                Value *&RHS, ConstantInt *&Less,
3308                                                ConstantInt *&Equal,
3309                                                ConstantInt *&Greater) {
3310   // TODO: Generalize this to work with other comparison idioms or ensure
3311   // they get canonicalized into this form.
3312 
3313   // select i1 (a == b),
3314   //        i32 Equal,
3315   //        i32 (select i1 (a < b), i32 Less, i32 Greater)
3316   // where Equal, Less and Greater are placeholders for any three constants.
3317   CmpPredicate PredA;
3318   if (!match(SI->getCondition(), m_ICmp(PredA, m_Value(LHS), m_Value(RHS))) ||
3319       !ICmpInst::isEquality(PredA))
3320     return false;
3321   Value *EqualVal = SI->getTrueValue();
3322   Value *UnequalVal = SI->getFalseValue();
3323   // We can still get a non-canonical predicate here, so canonicalize.
3324   if (PredA == ICmpInst::ICMP_NE)
3325     std::swap(EqualVal, UnequalVal);
3326   if (!match(EqualVal, m_ConstantInt(Equal)))
3327     return false;
3328   CmpPredicate PredB;
3329   Value *LHS2, *RHS2;
3330   if (!match(UnequalVal, m_Select(m_ICmp(PredB, m_Value(LHS2), m_Value(RHS2)),
3331                                   m_ConstantInt(Less), m_ConstantInt(Greater))))
3332     return false;
3333   // We can get a predicate mismatch here, so canonicalize if possible:
3334   // First, ensure that 'LHS' matches.
3335   if (LHS2 != LHS) {
3336     // x sgt y <--> y slt x
3337     std::swap(LHS2, RHS2);
3338     PredB = ICmpInst::getSwappedPredicate(PredB);
3339   }
3340   if (LHS2 != LHS)
3341     return false;
3342   // We also need to canonicalize 'RHS'.
3343   if (PredB == ICmpInst::ICMP_SGT && isa<Constant>(RHS2)) {
3344     // x sgt C-1  <-->  x sge C  <-->  not(x slt C)
3345     auto FlippedStrictness =
3346         getFlippedStrictnessPredicateAndConstant(PredB, cast<Constant>(RHS2));
3347     if (!FlippedStrictness)
3348       return false;
3349     assert(FlippedStrictness->first == ICmpInst::ICMP_SGE &&
3350            "basic correctness failure");
3351     RHS2 = FlippedStrictness->second;
3352     // And kind-of perform the result swap.
3353     std::swap(Less, Greater);
3354     PredB = ICmpInst::ICMP_SLT;
3355   }
3356   return PredB == ICmpInst::ICMP_SLT && RHS == RHS2;
3357 }
3358 
3359 Instruction *InstCombinerImpl::foldICmpSelectConstant(ICmpInst &Cmp,
3360                                                       SelectInst *Select,
3361                                                       ConstantInt *C) {
3362 
3363   assert(C && "Cmp RHS should be a constant int!");
3364   // If we're testing a constant value against the result of a three way
3365   // comparison, the result can be expressed directly in terms of the
3366   // original values being compared.  Note: We could possibly be more
3367   // aggressive here and remove the hasOneUse test. The original select is
3368   // really likely to simplify or sink when we remove a test of the result.
3369   Value *OrigLHS, *OrigRHS;
3370   ConstantInt *C1LessThan, *C2Equal, *C3GreaterThan;
3371   if (Cmp.hasOneUse() &&
3372       matchThreeWayIntCompare(Select, OrigLHS, OrigRHS, C1LessThan, C2Equal,
3373                               C3GreaterThan)) {
3374     assert(C1LessThan && C2Equal && C3GreaterThan);
3375 
3376     bool TrueWhenLessThan = ICmpInst::compare(
3377         C1LessThan->getValue(), C->getValue(), Cmp.getPredicate());
3378     bool TrueWhenEqual = ICmpInst::compare(C2Equal->getValue(), C->getValue(),
3379                                            Cmp.getPredicate());
3380     bool TrueWhenGreaterThan = ICmpInst::compare(
3381         C3GreaterThan->getValue(), C->getValue(), Cmp.getPredicate());
3382 
3383     // This generates the new instruction that will replace the original Cmp
3384     // Instruction. Instead of enumerating the various combinations when
3385     // TrueWhenLessThan, TrueWhenEqual and TrueWhenGreaterThan are true versus
3386     // false, we rely on chaining of ORs and future passes of InstCombine to
3387     // simplify the OR further (i.e. a s< b || a == b becomes a s<= b).
3388 
3389     // When none of the three constants satisfy the predicate for the RHS (C),
3390     // the entire original Cmp can be simplified to false.
3391     Value *Cond = Builder.getFalse();
3392     if (TrueWhenLessThan)
3393       Cond = Builder.CreateOr(
3394           Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
3395     if (TrueWhenEqual)
3396       Cond = Builder.CreateOr(
3397           Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
3398     if (TrueWhenGreaterThan)
3399       Cond = Builder.CreateOr(
3400           Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
3401 
3402     return replaceInstUsesWith(Cmp, Cond);
3403   }
3404   return nullptr;
3405 }
3406 
3407 Instruction *InstCombinerImpl::foldICmpBitCast(ICmpInst &Cmp) {
3408   auto *Bitcast = dyn_cast<BitCastInst>(Cmp.getOperand(0));
3409   if (!Bitcast)
3410     return nullptr;
3411 
3412   ICmpInst::Predicate Pred = Cmp.getPredicate();
3413   Value *Op1 = Cmp.getOperand(1);
3414   Value *BCSrcOp = Bitcast->getOperand(0);
3415   Type *SrcType = Bitcast->getSrcTy();
3416   Type *DstType = Bitcast->getType();
3417 
3418   // Make sure the bitcast doesn't change between scalar and vector and
3419   // doesn't change the number of vector elements.
3420   if (SrcType->isVectorTy() == DstType->isVectorTy() &&
3421       SrcType->getScalarSizeInBits() == DstType->getScalarSizeInBits()) {
3422     // Zero-equality and sign-bit checks are preserved through sitofp + bitcast.
3423     Value *X;
3424     if (match(BCSrcOp, m_SIToFP(m_Value(X)))) {
3425       // icmp  eq (bitcast (sitofp X)), 0 --> icmp  eq X, 0
3426       // icmp  ne (bitcast (sitofp X)), 0 --> icmp  ne X, 0
3427       // icmp slt (bitcast (sitofp X)), 0 --> icmp slt X, 0
3428       // icmp sgt (bitcast (sitofp X)), 0 --> icmp sgt X, 0
3429       if ((Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_SLT ||
3430            Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT) &&
3431           match(Op1, m_Zero()))
3432         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3433 
3434       // icmp slt (bitcast (sitofp X)), 1 --> icmp slt X, 1
3435       if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_One()))
3436         return new ICmpInst(Pred, X, ConstantInt::get(X->getType(), 1));
3437 
3438       // icmp sgt (bitcast (sitofp X)), -1 --> icmp sgt X, -1
3439       if (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))
3440         return new ICmpInst(Pred, X,
3441                             ConstantInt::getAllOnesValue(X->getType()));
3442     }
3443 
3444     // Zero-equality checks are preserved through unsigned floating-point casts:
3445     // icmp eq (bitcast (uitofp X)), 0 --> icmp eq X, 0
3446     // icmp ne (bitcast (uitofp X)), 0 --> icmp ne X, 0
3447     if (match(BCSrcOp, m_UIToFP(m_Value(X))))
3448       if (Cmp.isEquality() && match(Op1, m_Zero()))
3449         return new ICmpInst(Pred, X, ConstantInt::getNullValue(X->getType()));
3450 
3451     const APInt *C;
3452     bool TrueIfSigned;
3453     if (match(Op1, m_APInt(C)) && Bitcast->hasOneUse()) {
3454       // If this is a sign-bit test of a bitcast of a casted FP value, eliminate
3455       // the FP extend/truncate because that cast does not change the sign-bit.
3456       // This is true for all standard IEEE-754 types and the X86 80-bit type.
3457       // The sign-bit is always the most significant bit in those types.
3458       if (isSignBitCheck(Pred, *C, TrueIfSigned) &&
3459           (match(BCSrcOp, m_FPExt(m_Value(X))) ||
3460            match(BCSrcOp, m_FPTrunc(m_Value(X))))) {
3461         // (bitcast (fpext/fptrunc X) to iX) < 0  --> (bitcast X to iY) < 0
3462         // (bitcast (fpext/fptrunc X) to iX) > -1 --> (bitcast X to iY) > -1
3463         Type *XType = X->getType();
3464 
3465         // We can't currently handle PowerPC-style (ppc_fp128) floating point operations here.
3466         if (!(XType->isPPC_FP128Ty() || SrcType->isPPC_FP128Ty())) {
3467           Type *NewType = Builder.getIntNTy(XType->getScalarSizeInBits());
3468           if (auto *XVTy = dyn_cast<VectorType>(XType))
3469             NewType = VectorType::get(NewType, XVTy->getElementCount());
3470           Value *NewBitcast = Builder.CreateBitCast(X, NewType);
3471           if (TrueIfSigned)
3472             return new ICmpInst(ICmpInst::ICMP_SLT, NewBitcast,
3473                                 ConstantInt::getNullValue(NewType));
3474           else
3475             return new ICmpInst(ICmpInst::ICMP_SGT, NewBitcast,
3476                                 ConstantInt::getAllOnesValue(NewType));
3477         }
3478       }
3479 
3480       // icmp eq/ne (bitcast X to int), special fp -> llvm.is.fpclass(X, class)
3481       Type *FPType = SrcType->getScalarType();
3482       if (!Cmp.getParent()->getParent()->hasFnAttribute(
3483               Attribute::NoImplicitFloat) &&
3484           Cmp.isEquality() && FPType->isIEEELikeFPTy()) {
3485         FPClassTest Mask = APFloat(FPType->getFltSemantics(), *C).classify();
3486         if (Mask & (fcInf | fcZero)) {
3487           if (Pred == ICmpInst::ICMP_NE)
3488             Mask = ~Mask;
3489           return replaceInstUsesWith(Cmp,
3490                                      Builder.createIsFPClass(BCSrcOp, Mask));
3491         }
3492       }
3493     }
3494   }
3495 
3496   const APInt *C;
3497   if (!match(Cmp.getOperand(1), m_APInt(C)) || !DstType->isIntegerTy() ||
3498       !SrcType->isIntOrIntVectorTy())
3499     return nullptr;
3500 
3501   // If this is checking whether all elements of a vector compare are set,
3502   // invert the casted vector equality compare and test whether all compare
3503   // elements are clear instead. Comparing against zero is generally easier
3504   // for analysis and codegen.
3505   // icmp eq/ne (bitcast (not X) to iN), -1 --> icmp eq/ne (bitcast X to iN), 0
3506   // Example: are all elements equal? --> are zero elements not equal?
3507   // TODO: Try harder to reduce compare of 2 freely invertible operands?
3508   if (Cmp.isEquality() && C->isAllOnes() && Bitcast->hasOneUse()) {
3509     if (Value *NotBCSrcOp =
3510             getFreelyInverted(BCSrcOp, BCSrcOp->hasOneUse(), &Builder)) {
3511       Value *Cast = Builder.CreateBitCast(NotBCSrcOp, DstType);
3512       return new ICmpInst(Pred, Cast, ConstantInt::getNullValue(DstType));
3513     }
3514   }
3515 
3516   // If this is checking whether all elements of an extended vector are clear,
3517   // compare in a narrow type to eliminate the extend:
3518   // icmp eq/ne (bitcast (ext X) to iN), 0 --> icmp eq/ne (bitcast X to iM), 0
3519   Value *X;
3520   if (Cmp.isEquality() && C->isZero() && Bitcast->hasOneUse() &&
3521       match(BCSrcOp, m_ZExtOrSExt(m_Value(X)))) {
3522     if (auto *VecTy = dyn_cast<FixedVectorType>(X->getType())) {
3523       Type *NewType = Builder.getIntNTy(VecTy->getPrimitiveSizeInBits());
3524       Value *NewCast = Builder.CreateBitCast(X, NewType);
3525       return new ICmpInst(Pred, NewCast, ConstantInt::getNullValue(NewType));
3526     }
3527   }
3528 
3529   // Folding: icmp <pred> iN X, C
3530   //  where X = bitcast <M x iK> (shufflevector <M x iK> %vec, undef, SC) to iN
3531   //    and C is a splat of a K-bit pattern
3532   //    and SC is a constant vector = <C', C', C', ..., C'>
3533   // Into:
3534   //   %E = extractelement <M x iK> %vec, i32 C'
3535   //   icmp <pred> iK %E, trunc(C)
3536   Value *Vec;
3537   ArrayRef<int> Mask;
3538   if (match(BCSrcOp, m_Shuffle(m_Value(Vec), m_Undef(), m_Mask(Mask)))) {
3539     // Check whether every element of Mask is the same constant
3540     if (all_equal(Mask)) {
3541       auto *VecTy = cast<VectorType>(SrcType);
3542       auto *EltTy = cast<IntegerType>(VecTy->getElementType());
3543       if (C->isSplat(EltTy->getBitWidth())) {
3544         // Fold the icmp based on the value of C
3545         // If C is M copies of an iK sized bit pattern,
3546         // then:
3547         //   =>  %E = extractelement <M x iK> %vec, i32 Elem
3548         //       icmp <pred> iK %E, <pattern>
3549         Value *Elem = Builder.getInt32(Mask[0]);
3550         Value *Extract = Builder.CreateExtractElement(Vec, Elem);
3551         Value *NewC = ConstantInt::get(EltTy, C->trunc(EltTy->getBitWidth()));
3552         return new ICmpInst(Pred, Extract, NewC);
3553       }
3554     }
3555   }
3556   return nullptr;
3557 }
3558 
3559 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3560 /// where X is some kind of instruction.
3561 Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
3562   const APInt *C;
3563 
3564   if (match(Cmp.getOperand(1), m_APInt(C))) {
3565     if (auto *BO = dyn_cast<BinaryOperator>(Cmp.getOperand(0)))
3566       if (Instruction *I = foldICmpBinOpWithConstant(Cmp, BO, *C))
3567         return I;
3568 
3569     if (auto *SI = dyn_cast<SelectInst>(Cmp.getOperand(0)))
3570       // For now, we only support constant integers while folding the
3571       // ICMP(SELECT) pattern. We can extend this to support vectors of integers
3572       // similar to the cases handled by binary ops above.
3573       if (auto *ConstRHS = dyn_cast<ConstantInt>(Cmp.getOperand(1)))
3574         if (Instruction *I = foldICmpSelectConstant(Cmp, SI, ConstRHS))
3575           return I;
3576 
3577     if (auto *TI = dyn_cast<TruncInst>(Cmp.getOperand(0)))
3578       if (Instruction *I = foldICmpTruncConstant(Cmp, TI, *C))
3579         return I;
3580 
3581     if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0)))
3582       if (Instruction *I = foldICmpIntrinsicWithConstant(Cmp, II, *C))
3583         return I;
3584 
3585     // (extractval ([s/u]subo X, Y), 0) == 0 --> X == Y
3586     // (extractval ([s/u]subo X, Y), 0) != 0 --> X != Y
3587     // TODO: This checks one-use, but that is not strictly necessary.
3588     Value *Cmp0 = Cmp.getOperand(0);
3589     Value *X, *Y;
3590     if (C->isZero() && Cmp.isEquality() && Cmp0->hasOneUse() &&
3591         (match(Cmp0,
3592                m_ExtractValue<0>(m_Intrinsic<Intrinsic::ssub_with_overflow>(
3593                    m_Value(X), m_Value(Y)))) ||
3594          match(Cmp0,
3595                m_ExtractValue<0>(m_Intrinsic<Intrinsic::usub_with_overflow>(
3596                    m_Value(X), m_Value(Y))))))
3597       return new ICmpInst(Cmp.getPredicate(), X, Y);
3598   }
3599 
3600   if (match(Cmp.getOperand(1), m_APIntAllowPoison(C)))
3601     return foldICmpInstWithConstantAllowPoison(Cmp, *C);
3602 
3603   return nullptr;
3604 }
3605 
3606 /// Fold an icmp equality instruction with binary operator LHS and constant RHS:
3607 /// icmp eq/ne BO, C.
3608 Instruction *InstCombinerImpl::foldICmpBinOpEqualityWithConstant(
3609     ICmpInst &Cmp, BinaryOperator *BO, const APInt &C) {
3610   // TODO: Some of these folds could work with arbitrary constants, but this
3611   // function is limited to scalar and vector splat constants.
3612   if (!Cmp.isEquality())
3613     return nullptr;
3614 
3615   ICmpInst::Predicate Pred = Cmp.getPredicate();
3616   bool isICMP_NE = Pred == ICmpInst::ICMP_NE;
3617   Constant *RHS = cast<Constant>(Cmp.getOperand(1));
3618   Value *BOp0 = BO->getOperand(0), *BOp1 = BO->getOperand(1);
3619 
3620   switch (BO->getOpcode()) {
3621   case Instruction::SRem:
3622     // If we have a signed (X % (2^c)) == 0, turn it into an unsigned one.
3623     if (C.isZero() && BO->hasOneUse()) {
3624       const APInt *BOC;
3625       if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
3626         Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
3627         return new ICmpInst(Pred, NewRem,
3628                             Constant::getNullValue(BO->getType()));
3629       }
3630     }
3631     break;
3632   case Instruction::Add: {
3633     // (A + C2) == C --> A == (C - C2)
3634     // (A + C2) != C --> A != (C - C2)
3635     // TODO: Remove the one-use limitation? See discussion in D58633.
3636     if (Constant *C2 = dyn_cast<Constant>(BOp1)) {
3637       if (BO->hasOneUse())
3638         return new ICmpInst(Pred, BOp0, ConstantExpr::getSub(RHS, C2));
3639     } else if (C.isZero()) {
3640       // Replace ((add A, B) != 0) with (A != -B) if A or B is
3641       // efficiently negatable, or if the add has just this one use.
3642       if (Value *NegVal = dyn_castNegVal(BOp1))
3643         return new ICmpInst(Pred, BOp0, NegVal);
3644       if (Value *NegVal = dyn_castNegVal(BOp0))
3645         return new ICmpInst(Pred, NegVal, BOp1);
3646       if (BO->hasOneUse()) {
3647         // (add nuw A, B) != 0 -> (or A, B) != 0
3648         if (match(BO, m_NUWAdd(m_Value(), m_Value()))) {
3649           Value *Or = Builder.CreateOr(BOp0, BOp1);
3650           return new ICmpInst(Pred, Or, Constant::getNullValue(BO->getType()));
3651         }
3652         Value *Neg = Builder.CreateNeg(BOp1);
3653         Neg->takeName(BO);
3654         return new ICmpInst(Pred, BOp0, Neg);
3655       }
3656     }
3657     break;
3658   }
3659   case Instruction::Xor:
3660     if (Constant *BOC = dyn_cast<Constant>(BOp1)) {
3661       // For the xor case, we can xor two constants together, eliminating
3662       // the explicit xor.
3663       return new ICmpInst(Pred, BOp0, ConstantExpr::getXor(RHS, BOC));
3664     } else if (C.isZero()) {
3665       // Replace ((xor A, B) != 0) with (A != B)
3666       return new ICmpInst(Pred, BOp0, BOp1);
3667     }
3668     break;
3669   case Instruction::Or: {
3670     const APInt *BOC;
3671     if (match(BOp1, m_APInt(BOC)) && BO->hasOneUse() && RHS->isAllOnesValue()) {
3672       // Comparing if all bits outside of a constant mask are set?
3673       // Replace (X | C) == -1 with (X & ~C) == ~C.
3674       // This removes the -1 constant.
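           // e.g. for i8: (X | 0x0F) == -1 --> (X & 0xF0) == 0xF0.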
3675       Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
3676       Value *And = Builder.CreateAnd(BOp0, NotBOC);
3677       return new ICmpInst(Pred, And, NotBOC);
3678     }
3679     // (icmp eq (or (select cond, 0, NonZero), Other), 0)
3680     //  -> (and cond, (icmp eq Other, 0))
3681     // (icmp ne (or (select cond, NonZero, 0), Other), 0)
3682     //  -> (or cond, (icmp ne Other, 0))
3683     Value *Cond, *TV, *FV, *Other, *Sel;
3684     if (C.isZero() &&
3685         match(BO,
3686               m_OneUse(m_c_Or(m_CombineAnd(m_Value(Sel),
3687                                            m_Select(m_Value(Cond), m_Value(TV),
3688                                                     m_Value(FV))),
3689                               m_Value(Other)))) &&
3690         Cond->getType() == Cmp.getType()) {
3691       const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
3692       // Easy case is if eq/ne matches whether 0 is trueval/falseval.
3693       if (Pred == ICmpInst::ICMP_EQ
3694               ? (match(TV, m_Zero()) && isKnownNonZero(FV, Q))
3695               : (match(FV, m_Zero()) && isKnownNonZero(TV, Q))) {
3696         Value *Cmp = Builder.CreateICmp(
3697             Pred, Other, Constant::getNullValue(Other->getType()));
3698         return BinaryOperator::Create(
3699             Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or, Cmp,
3700             Cond);
3701       }
3702       // Harder case is if eq/ne matches whether 0 is falseval/trueval. In this
3703       // case we need to invert the select condition so we need to be careful to
3704       // avoid creating extra instructions.
3705       // (icmp ne (or (select cond, 0, NonZero), Other), 0)
3706       //  -> (or (not cond), (icmp ne Other, 0))
3707       // (icmp eq (or (select cond, NonZero, 0), Other), 0)
3708       //  -> (and (not cond), (icmp eq Other, 0))
3709       //
3710       // Only do this if the inner select has one use, in which case we are
3711       // replacing `select` with `(not cond)`. Otherwise, we will create more
3712       // uses. NB: Trying to freely invert cond doesn't make sense here, as if
3713       // cond was freely invertible, the select arms would have been inverted.
3714       if (Sel->hasOneUse() &&
3715           (Pred == ICmpInst::ICMP_EQ
3716                ? (match(FV, m_Zero()) && isKnownNonZero(TV, Q))
3717                : (match(TV, m_Zero()) && isKnownNonZero(FV, Q)))) {
3718         Value *NotCond = Builder.CreateNot(Cond);
3719         Value *Cmp = Builder.CreateICmp(
3720             Pred, Other, Constant::getNullValue(Other->getType()));
3721         return BinaryOperator::Create(
3722             Pred == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or, Cmp,
3723             NotCond);
3724       }
3725     }
3726     break;
3727   }
3728   case Instruction::UDiv:
3729   case Instruction::SDiv:
3730     if (BO->isExact()) {
3731       // div exact X, Y eq/ne 0 -> X eq/ne 0
3732       // div exact X, Y eq/ne 1 -> X eq/ne Y
3733       // div exact X, Y eq/ne C ->
3734       //    if Y * C never-overflow && OneUse:
3735       //      -> Y * C eq/ne X
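           // e.g. (sdiv exact X, Y) == 5 --> (Y * 5) == X, provided Y * 5
           // cannot overflow (signed).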
3736       if (C.isZero())
3737         return new ICmpInst(Pred, BOp0, Constant::getNullValue(BO->getType()));
3738       else if (C.isOne())
3739         return new ICmpInst(Pred, BOp0, BOp1);
3740       else if (BO->hasOneUse()) {
3741         OverflowResult OR = computeOverflow(
3742             Instruction::Mul, BO->getOpcode() == Instruction::SDiv, BOp1,
3743             Cmp.getOperand(1), BO);
3744         if (OR == OverflowResult::NeverOverflows) {
3745           Value *YC =
3746               Builder.CreateMul(BOp1, ConstantInt::get(BO->getType(), C));
3747           return new ICmpInst(Pred, YC, BOp0);
3748         }
3749       }
3750     }
3751     if (BO->getOpcode() == Instruction::UDiv && C.isZero()) {
3752       // (icmp eq/ne (udiv A, B), 0) -> (icmp ugt/ule i32 B, A)
3753       auto NewPred = isICMP_NE ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3754       return new ICmpInst(NewPred, BOp1, BOp0);
3755     }
3756     break;
3757   default:
3758     break;
3759   }
3760   return nullptr;
3761 }
3762 
3763 static Instruction *foldCtpopPow2Test(ICmpInst &I, IntrinsicInst *CtpopLhs,
3764                                       const APInt &CRhs,
3765                                       InstCombiner::BuilderTy &Builder,
3766                                       const SimplifyQuery &Q) {
3767   assert(CtpopLhs->getIntrinsicID() == Intrinsic::ctpop &&
3768          "Non-ctpop intrin in ctpop fold");
3769   if (!CtpopLhs->hasOneUse())
3770     return nullptr;
3771 
3772   // Power of 2 test:
3773   //    isPow2OrZero : ctpop(X) u< 2
3774   //    isPow2       : ctpop(X) == 1
3775   //    NotPow2OrZero: ctpop(X) u> 1
3776   //    NotPow2      : ctpop(X) != 1
3777   // If some bit of X (call it Bit) is known to be set, these fold to:
3778   //    IsPow2       : X & (~Bit) == 0
3779   //    NotPow2      : X & (~Bit) != 0
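       // e.g. if bit 3 is the only bit of X known to be set, ctpop(X) == 1 holds
       // iff no other bit is set, i.e. (X & ~0x8) == 0.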
3780   const ICmpInst::Predicate Pred = I.getPredicate();
3781   if (((I.isEquality() || Pred == ICmpInst::ICMP_UGT) && CRhs == 1) ||
3782       (Pred == ICmpInst::ICMP_ULT && CRhs == 2)) {
3783     Value *Op = CtpopLhs->getArgOperand(0);
3784     KnownBits OpKnown = computeKnownBits(Op, Q.DL, Q.AC, Q.CxtI, Q.DT);
3785     // No need to check for count > 1; that should already be constant-folded.
3786     if (OpKnown.countMinPopulation() == 1) {
3787       Value *And = Builder.CreateAnd(
3788           Op, Constant::getIntegerValue(Op->getType(), ~(OpKnown.One)));
3789       return new ICmpInst(
3790           (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_ULT)
3791               ? ICmpInst::ICMP_EQ
3792               : ICmpInst::ICMP_NE,
3793           And, Constant::getNullValue(Op->getType()));
3794     }
3795   }
3796 
3797   return nullptr;
3798 }
3799 
3800 /// Fold an equality icmp with LLVM intrinsic and constant operand.
3801 Instruction *InstCombinerImpl::foldICmpEqIntrinsicWithConstant(
3802     ICmpInst &Cmp, IntrinsicInst *II, const APInt &C) {
3803   Type *Ty = II->getType();
3804   unsigned BitWidth = C.getBitWidth();
3805   const ICmpInst::Predicate Pred = Cmp.getPredicate();
3806 
3807   switch (II->getIntrinsicID()) {
3808   case Intrinsic::abs:
3809     // abs(A) == 0  ->  A == 0
3810     // abs(A) == INT_MIN  ->  A == INT_MIN
3811     if (C.isZero() || C.isMinSignedValue())
3812       return new ICmpInst(Pred, II->getArgOperand(0), ConstantInt::get(Ty, C));
3813     break;
3814 
3815   case Intrinsic::bswap:
3816     // bswap(A) == C  ->  A == bswap(C)
3817     return new ICmpInst(Pred, II->getArgOperand(0),
3818                         ConstantInt::get(Ty, C.byteSwap()));
3819 
3820   case Intrinsic::bitreverse:
3821     // bitreverse(A) == C  ->  A == bitreverse(C)
3822     return new ICmpInst(Pred, II->getArgOperand(0),
3823                         ConstantInt::get(Ty, C.reverseBits()));
3824 
3825   case Intrinsic::ctlz:
3826   case Intrinsic::cttz: {
3827     // ctz(A) == bitwidth(A)  ->  A == 0 and likewise for !=
3828     if (C == BitWidth)
3829       return new ICmpInst(Pred, II->getArgOperand(0),
3830                           ConstantInt::getNullValue(Ty));
3831 
3832     // ctz(A) == C -> A & Mask1 == Mask2, where Mask2 only has bit C set
3833     // and Mask1 has bits 0..C set. Similar for ctlz, but for high bits.
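         // e.g. for i8: cttz(A) == 3 --> (A & 0x0F) == 0x08 (bit 3 set, bits 0..2 clear).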
3834     // Limit to one use to ensure we don't increase instruction count.
3835     unsigned Num = C.getLimitedValue(BitWidth);
3836     if (Num != BitWidth && II->hasOneUse()) {
3837       bool IsTrailing = II->getIntrinsicID() == Intrinsic::cttz;
3838       APInt Mask1 = IsTrailing ? APInt::getLowBitsSet(BitWidth, Num + 1)
3839                                : APInt::getHighBitsSet(BitWidth, Num + 1);
3840       APInt Mask2 = IsTrailing
3841                         ? APInt::getOneBitSet(BitWidth, Num)
3842                         : APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
3843       return new ICmpInst(Pred, Builder.CreateAnd(II->getArgOperand(0), Mask1),
3844                           ConstantInt::get(Ty, Mask2));
3845     }
3846     break;
3847   }
3848 
3849   case Intrinsic::ctpop: {
3850     // popcount(A) == 0  ->  A == 0 and likewise for !=
3851     // popcount(A) == bitwidth(A)  ->  A == -1 and likewise for !=
3852     bool IsZero = C.isZero();
3853     if (IsZero || C == BitWidth)
3854       return new ICmpInst(Pred, II->getArgOperand(0),
3855                           IsZero ? Constant::getNullValue(Ty)
3856                                  : Constant::getAllOnesValue(Ty));
3857 
3858     break;
3859   }
3860 
3861   case Intrinsic::fshl:
3862   case Intrinsic::fshr:
3863     if (II->getArgOperand(0) == II->getArgOperand(1)) {
3864       const APInt *RotAmtC;
3865       // ror(X, RotAmtC) == C --> X == rol(C, RotAmtC)
3866       // rol(X, RotAmtC) == C --> X == ror(C, RotAmtC)
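           // e.g. for i8: fshr(X, X, 1) == 0x01 --> X == 0x02.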
3867       if (match(II->getArgOperand(2), m_APInt(RotAmtC)))
3868         return new ICmpInst(Pred, II->getArgOperand(0),
3869                             II->getIntrinsicID() == Intrinsic::fshl
3870                                 ? ConstantInt::get(Ty, C.rotr(*RotAmtC))
3871                                 : ConstantInt::get(Ty, C.rotl(*RotAmtC)));
3872     }
3873     break;
3874 
3875   case Intrinsic::umax:
3876   case Intrinsic::uadd_sat: {
3877     // uadd.sat(a, b) == 0  ->  (a | b) == 0
3878     // umax(a, b) == 0  ->  (a | b) == 0
3879     if (C.isZero() && II->hasOneUse()) {
3880       Value *Or = Builder.CreateOr(II->getArgOperand(0), II->getArgOperand(1));
3881       return new ICmpInst(Pred, Or, Constant::getNullValue(Ty));
3882     }
3883     break;
3884   }
3885 
3886   case Intrinsic::ssub_sat:
3887     // ssub.sat(a, b) == 0 -> a == b
3888     if (C.isZero())
3889       return new ICmpInst(Pred, II->getArgOperand(0), II->getArgOperand(1));
3890     break;
3891   case Intrinsic::usub_sat: {
3892     // usub.sat(a, b) == 0  ->  a <= b
3893     if (C.isZero()) {
3894       ICmpInst::Predicate NewPred =
3895           Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_UGT;
3896       return new ICmpInst(NewPred, II->getArgOperand(0), II->getArgOperand(1));
3897     }
3898     break;
3899   }
3900   default:
3901     break;
3902   }
3903 
3904   return nullptr;
3905 }
3906 
3907 /// Fold an icmp with LLVM intrinsics
3908 static Instruction *
3909 foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp,
3910                                InstCombiner::BuilderTy &Builder) {
3911   assert(Cmp.isEquality());
3912 
3913   ICmpInst::Predicate Pred = Cmp.getPredicate();
3914   Value *Op0 = Cmp.getOperand(0);
3915   Value *Op1 = Cmp.getOperand(1);
3916   const auto *IIOp0 = dyn_cast<IntrinsicInst>(Op0);
3917   const auto *IIOp1 = dyn_cast<IntrinsicInst>(Op1);
3918   if (!IIOp0 || !IIOp1 || IIOp0->getIntrinsicID() != IIOp1->getIntrinsicID())
3919     return nullptr;
3920 
3921   switch (IIOp0->getIntrinsicID()) {
3922   case Intrinsic::bswap:
3923   case Intrinsic::bitreverse:
3924     // If both operands are byte-swapped or bit-reversed, just compare the
3925     // original values.
3926     return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3927   case Intrinsic::fshl:
3928   case Intrinsic::fshr: {
3929     // If both operands are rotated by the same amount, just compare the
3930     // original values.
3931     if (IIOp0->getOperand(0) != IIOp0->getOperand(1))
3932       break;
3933     if (IIOp1->getOperand(0) != IIOp1->getOperand(1))
3934       break;
3935     if (IIOp0->getOperand(2) == IIOp1->getOperand(2))
3936       return new ICmpInst(Pred, IIOp0->getOperand(0), IIOp1->getOperand(0));
3937 
3938     // rotate(X, AmtX) == rotate(Y, AmtY)
3939     //  -> rotate(X, AmtX - AmtY) == Y
3940     // Do this if either both rotates have one use or if only one has one use
3941     // and AmtX/AmtY are constants.
3942     unsigned OneUses = IIOp0->hasOneUse() + IIOp1->hasOneUse();
3943     if (OneUses == 2 ||
3944         (OneUses == 1 && match(IIOp0->getOperand(2), m_ImmConstant()) &&
3945          match(IIOp1->getOperand(2), m_ImmConstant()))) {
3946       Value *SubAmt =
3947           Builder.CreateSub(IIOp0->getOperand(2), IIOp1->getOperand(2));
3948       Value *CombinedRotate = Builder.CreateIntrinsic(
3949           Op0->getType(), IIOp0->getIntrinsicID(),
3950           {IIOp0->getOperand(0), IIOp0->getOperand(0), SubAmt});
3951       return new ICmpInst(Pred, IIOp1->getOperand(0), CombinedRotate);
3952     }
3953   } break;
3954   default:
3955     break;
3956   }
3957 
3958   return nullptr;
3959 }
3960 
3961 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
3962 /// where X is some kind of instruction and C is allowed to contain poison elements.
3963 /// TODO: Move more folds which allow poison to this function.
3964 Instruction *
3965 InstCombinerImpl::foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp,
3966                                                       const APInt &C) {
3967   const ICmpInst::Predicate Pred = Cmp.getPredicate();
3968   if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
3969     switch (II->getIntrinsicID()) {
3970     default:
3971       break;
3972     case Intrinsic::fshl:
3973     case Intrinsic::fshr:
3974       if (Cmp.isEquality() && II->getArgOperand(0) == II->getArgOperand(1)) {
3975         // (rot X, ?) == 0/-1 --> X == 0/-1
3976         if (C.isZero() || C.isAllOnes())
3977           return new ICmpInst(Pred, II->getArgOperand(0), Cmp.getOperand(1));
3978       }
3979       break;
3980     }
3981   }
3982 
3983   return nullptr;
3984 }
3985 
3986 /// Fold an icmp with BinaryOp and constant operand: icmp Pred BO, C.
3987 Instruction *InstCombinerImpl::foldICmpBinOpWithConstant(ICmpInst &Cmp,
3988                                                          BinaryOperator *BO,
3989                                                          const APInt &C) {
3990   switch (BO->getOpcode()) {
3991   case Instruction::Xor:
3992     if (Instruction *I = foldICmpXorConstant(Cmp, BO, C))
3993       return I;
3994     break;
3995   case Instruction::And:
3996     if (Instruction *I = foldICmpAndConstant(Cmp, BO, C))
3997       return I;
3998     break;
3999   case Instruction::Or:
4000     if (Instruction *I = foldICmpOrConstant(Cmp, BO, C))
4001       return I;
4002     break;
4003   case Instruction::Mul:
4004     if (Instruction *I = foldICmpMulConstant(Cmp, BO, C))
4005       return I;
4006     break;
4007   case Instruction::Shl:
4008     if (Instruction *I = foldICmpShlConstant(Cmp, BO, C))
4009       return I;
4010     break;
4011   case Instruction::LShr:
4012   case Instruction::AShr:
4013     if (Instruction *I = foldICmpShrConstant(Cmp, BO, C))
4014       return I;
4015     break;
4016   case Instruction::SRem:
4017     if (Instruction *I = foldICmpSRemConstant(Cmp, BO, C))
4018       return I;
4019     break;
4020   case Instruction::UDiv:
4021     if (Instruction *I = foldICmpUDivConstant(Cmp, BO, C))
4022       return I;
4023     [[fallthrough]];
4024   case Instruction::SDiv:
4025     if (Instruction *I = foldICmpDivConstant(Cmp, BO, C))
4026       return I;
4027     break;
4028   case Instruction::Sub:
4029     if (Instruction *I = foldICmpSubConstant(Cmp, BO, C))
4030       return I;
4031     break;
4032   case Instruction::Add:
4033     if (Instruction *I = foldICmpAddConstant(Cmp, BO, C))
4034       return I;
4035     break;
4036   default:
4037     break;
4038   }
4039 
4040   // TODO: These folds could be refactored to be part of the above calls.
4041   if (Instruction *I = foldICmpBinOpEqualityWithConstant(Cmp, BO, C))
4042     return I;
4043 
4044   // Fall back to handling `icmp pred (select A ? C1 : C2) binop (select B ? C3
4045   // : C4), C5` pattern, by computing a truth table of the four constant
4046   // variants.
4047   return foldICmpBinOpWithConstantViaTruthTable(Cmp, BO, C);
4048 }
4049 
4050 static Instruction *
4051 foldICmpUSubSatOrUAddSatWithConstant(CmpPredicate Pred, SaturatingInst *II,
4052                                      const APInt &C,
4053                                      InstCombiner::BuilderTy &Builder) {
4054   // This transform may end up producing more than one instruction for the
4055   // intrinsic, so limit it to one user of the intrinsic.
4056   if (!II->hasOneUse())
4057     return nullptr;
4058 
4059   // Let Y        = [add/sub]_sat(X, C) pred C2
4060   //     SatVal   = The saturating value for the operation
4061   //     WillWrap = Whether or not the operation will underflow / overflow
4062   // => Y = (WillWrap ? SatVal : (X binop C)) pred C2
4063   // => Y = WillWrap ? (SatVal pred C2) : ((X binop C) pred C2)
4064   //
4065   // When (SatVal pred C2) is true, then
4066   //    Y = WillWrap ? true : ((X binop C) pred C2)
4067   // => Y = WillWrap || ((X binop C) pred C2)
4068   // else
4069   //    Y =  WillWrap ? false : ((X binop C) pred C2)
4070   // => Y = !WillWrap ?  ((X binop C) pred C2) : false
4071   // => Y = !WillWrap && ((X binop C) pred C2)
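       //
       // e.g. for i8: uadd.sat(X, 5) u< 10 has SatVal = 255 and (255 u< 10) is
       // false, so it becomes !WillWrap && ((X + 5) u< 10), which reduces to X u< 5.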
4072   Value *Op0 = II->getOperand(0);
4073   Value *Op1 = II->getOperand(1);
4074 
4075   const APInt *COp1;
4076   // This transform only works when the intrinsic has an integral constant or
4077   // splat vector as the second operand.
4078   if (!match(Op1, m_APInt(COp1)))
4079     return nullptr;
4080 
4081   APInt SatVal;
4082   switch (II->getIntrinsicID()) {
4083   default:
4084     llvm_unreachable(
4085         "This function only works with usub_sat and uadd_sat for now!");
4086   case Intrinsic::uadd_sat:
4087     SatVal = APInt::getAllOnes(C.getBitWidth());
4088     break;
4089   case Intrinsic::usub_sat:
4090     SatVal = APInt::getZero(C.getBitWidth());
4091     break;
4092   }
4093 
4094   // Check (SatVal pred C2)
4095   bool SatValCheck = ICmpInst::compare(SatVal, C, Pred);
4096 
4097   // !WillWrap.
4098   ConstantRange C1 = ConstantRange::makeExactNoWrapRegion(
4099       II->getBinaryOp(), *COp1, II->getNoWrapKind());
4100 
4101   // WillWrap.
4102   if (SatValCheck)
4103     C1 = C1.inverse();
4104 
4105   ConstantRange C2 = ConstantRange::makeExactICmpRegion(Pred, C);
4106   if (II->getBinaryOp() == Instruction::Add)
4107     C2 = C2.sub(*COp1);
4108   else
4109     C2 = C2.add(*COp1);
4110 
4111   Instruction::BinaryOps CombiningOp =
4112       SatValCheck ? Instruction::BinaryOps::Or : Instruction::BinaryOps::And;
4113 
4114   std::optional<ConstantRange> Combination;
4115   if (CombiningOp == Instruction::BinaryOps::Or)
4116     Combination = C1.exactUnionWith(C2);
4117   else /* CombiningOp == Instruction::BinaryOps::And */
4118     Combination = C1.exactIntersectWith(C2);
4119 
4120   if (!Combination)
4121     return nullptr;
4122 
4123   CmpInst::Predicate EquivPred;
4124   APInt EquivInt;
4125   APInt EquivOffset;
4126 
4127   Combination->getEquivalentICmp(EquivPred, EquivInt, EquivOffset);
4128 
4129   return new ICmpInst(
4130       EquivPred,
4131       Builder.CreateAdd(Op0, ConstantInt::get(Op1->getType(), EquivOffset)),
4132       ConstantInt::get(Op1->getType(), EquivInt));
4133 }
4134 
4135 static Instruction *
4136 foldICmpOfCmpIntrinsicWithConstant(CmpPredicate Pred, IntrinsicInst *I,
4137                                    const APInt &C,
4138                                    InstCombiner::BuilderTy &Builder) {
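       // ucmp/scmp return -1, 0 or 1, so comparing that result against a small
       // constant can be mapped onto a direct comparison of the original operands,
       // e.g. (ucmp(A, B) == 1) --> A u> B and (scmp(A, B) s< 0) --> A s< B.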
4139   std::optional<ICmpInst::Predicate> NewPredicate = std::nullopt;
4140   switch (Pred) {
4141   case ICmpInst::ICMP_EQ:
4142   case ICmpInst::ICMP_NE:
4143     if (C.isZero())
4144       NewPredicate = Pred;
4145     else if (C.isOne())
4146       NewPredicate =
4147           Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
4148     else if (C.isAllOnes())
4149       NewPredicate =
4150           Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
4151     break;
4152 
4153   case ICmpInst::ICMP_SGT:
4154     if (C.isAllOnes())
4155       NewPredicate = ICmpInst::ICMP_UGE;
4156     else if (C.isZero())
4157       NewPredicate = ICmpInst::ICMP_UGT;
4158     break;
4159 
4160   case ICmpInst::ICMP_SLT:
4161     if (C.isZero())
4162       NewPredicate = ICmpInst::ICMP_ULT;
4163     else if (C.isOne())
4164       NewPredicate = ICmpInst::ICMP_ULE;
4165     break;
4166 
4167   case ICmpInst::ICMP_ULT:
4168     if (C.ugt(1))
4169       NewPredicate = ICmpInst::ICMP_UGE;
4170     break;
4171 
4172   case ICmpInst::ICMP_UGT:
4173     if (!C.isZero() && !C.isAllOnes())
4174       NewPredicate = ICmpInst::ICMP_ULT;
4175     break;
4176 
4177   default:
4178     break;
4179   }
4180 
4181   if (!NewPredicate)
4182     return nullptr;
4183 
4184   if (I->getIntrinsicID() == Intrinsic::scmp)
4185     NewPredicate = ICmpInst::getSignedPredicate(*NewPredicate);
4186   Value *LHS = I->getOperand(0);
4187   Value *RHS = I->getOperand(1);
4188   return new ICmpInst(*NewPredicate, LHS, RHS);
4189 }
4190 
4191 /// Fold an icmp with LLVM intrinsic and constant operand: icmp Pred II, C.
4192 Instruction *InstCombinerImpl::foldICmpIntrinsicWithConstant(ICmpInst &Cmp,
4193                                                              IntrinsicInst *II,
4194                                                              const APInt &C) {
4195   ICmpInst::Predicate Pred = Cmp.getPredicate();
4196 
4197   // Handle folds that apply for any kind of icmp.
4198   switch (II->getIntrinsicID()) {
4199   default:
4200     break;
4201   case Intrinsic::uadd_sat:
4202   case Intrinsic::usub_sat:
4203     if (auto *Folded = foldICmpUSubSatOrUAddSatWithConstant(
4204             Pred, cast<SaturatingInst>(II), C, Builder))
4205       return Folded;
4206     break;
4207   case Intrinsic::ctpop: {
4208     const SimplifyQuery Q = SQ.getWithInstruction(&Cmp);
4209     if (Instruction *R = foldCtpopPow2Test(Cmp, II, C, Builder, Q))
4210       return R;
4211   } break;
4212   case Intrinsic::scmp:
4213   case Intrinsic::ucmp:
4214     if (auto *Folded = foldICmpOfCmpIntrinsicWithConstant(Pred, II, C, Builder))
4215       return Folded;
4216     break;
4217   }
4218 
4219   if (Cmp.isEquality())
4220     return foldICmpEqIntrinsicWithConstant(Cmp, II, C);
4221 
4222   Type *Ty = II->getType();
4223   unsigned BitWidth = C.getBitWidth();
4224   switch (II->getIntrinsicID()) {
4225   case Intrinsic::ctpop: {
4226     // (ctpop X > BitWidth - 1) --> X == -1
4227     Value *X = II->getArgOperand(0);
4228     if (C == BitWidth - 1 && Pred == ICmpInst::ICMP_UGT)
4229       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ, X,
4230                              ConstantInt::getAllOnesValue(Ty));
4231     // (ctpop X < BitWidth) --> X != -1
4232     if (C == BitWidth && Pred == ICmpInst::ICMP_ULT)
4233       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE, X,
4234                              ConstantInt::getAllOnesValue(Ty));
4235     break;
4236   }
4237   case Intrinsic::ctlz: {
4238     // ctlz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX < 0b00010000
4239     if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
4240       unsigned Num = C.getLimitedValue();
4241       APInt Limit = APInt::getOneBitSet(BitWidth, BitWidth - Num - 1);
4242       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_ULT,
4243                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4244     }
4245 
4246     // ctlz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX > 0b00011111
4247     if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
4248       unsigned Num = C.getLimitedValue();
4249       APInt Limit = APInt::getLowBitsSet(BitWidth, BitWidth - Num);
4250       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_UGT,
4251                              II->getArgOperand(0), ConstantInt::get(Ty, Limit));
4252     }
4253     break;
4254   }
4255   case Intrinsic::cttz: {
4256     // Limit to one use to ensure we don't increase instruction count.
4257     if (!II->hasOneUse())
4258       return nullptr;
4259 
4260     // cttz(0bXXXXXXXX) > 3 -> 0bXXXXXXXX & 0b00001111 == 0
4261     if (Pred == ICmpInst::ICMP_UGT && C.ult(BitWidth)) {
4262       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue() + 1);
4263       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_EQ,
4264                              Builder.CreateAnd(II->getArgOperand(0), Mask),
4265                              ConstantInt::getNullValue(Ty));
4266     }
4267 
4268     // cttz(0bXXXXXXXX) < 3 -> 0bXXXXXXXX & 0b00000111 != 0
4269     if (Pred == ICmpInst::ICMP_ULT && C.uge(1) && C.ule(BitWidth)) {
4270       APInt Mask = APInt::getLowBitsSet(BitWidth, C.getLimitedValue());
4271       return CmpInst::Create(Instruction::ICmp, ICmpInst::ICMP_NE,
4272                              Builder.CreateAnd(II->getArgOperand(0), Mask),
4273                              ConstantInt::getNullValue(Ty));
4274     }
4275     break;
4276   }
4277   case Intrinsic::ssub_sat:
4278     // ssub.sat(a, b) spred 0 -> a spred b
4279     if (ICmpInst::isSigned(Pred)) {
4280       if (C.isZero())
4281         return new ICmpInst(Pred, II->getArgOperand(0), II->getArgOperand(1));
4282       // X s<= 0 is canonicalized to X s< 1
4283       if (Pred == ICmpInst::ICMP_SLT && C.isOne())
4284         return new ICmpInst(ICmpInst::ICMP_SLE, II->getArgOperand(0),
4285                             II->getArgOperand(1));
4286       // X s>= 0 is canonicalized to X s> -1
4287       if (Pred == ICmpInst::ICMP_SGT && C.isAllOnes())
4288         return new ICmpInst(ICmpInst::ICMP_SGE, II->getArgOperand(0),
4289                             II->getArgOperand(1));
4290     }
4291     break;
4292   default:
4293     break;
4294   }
4295 
4296   return nullptr;
4297 }
4298 
4299 /// Handle icmp with constant (but not simple integer constant) RHS.
4300 Instruction *InstCombinerImpl::foldICmpInstWithConstantNotInt(ICmpInst &I) {
4301   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4302   Constant *RHSC = dyn_cast<Constant>(Op1);
4303   Instruction *LHSI = dyn_cast<Instruction>(Op0);
4304   if (!RHSC || !LHSI)
4305     return nullptr;
4306 
4307   switch (LHSI->getOpcode()) {
4308   case Instruction::PHI:
4309     if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
4310       return NV;
4311     break;
4312   case Instruction::IntToPtr:
4313     // icmp pred inttoptr(X), null -> icmp pred X, 0
4314     if (RHSC->isNullValue() &&
4315         DL.getIntPtrType(RHSC->getType()) == LHSI->getOperand(0)->getType())
4316       return new ICmpInst(
4317           I.getPredicate(), LHSI->getOperand(0),
4318           Constant::getNullValue(LHSI->getOperand(0)->getType()));
4319     break;
4320 
4321   case Instruction::Load:
4322     // Try to optimize things like "A[i] > 4" to index computations.
4323     if (GetElementPtrInst *GEP =
4324             dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
4325       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
4326         if (Instruction *Res =
4327                 foldCmpLoadFromIndexedGlobal(cast<LoadInst>(LHSI), GEP, GV, I))
4328           return Res;
4329     break;
4330   }
4331 
4332   return nullptr;
4333 }
4334 
4335 Instruction *InstCombinerImpl::foldSelectICmp(CmpPredicate Pred, SelectInst *SI,
4336                                               Value *RHS, const ICmpInst &I) {
4337   // Try to fold the comparison into the select arms, which will cause the
4338   // select to be converted into a logical and/or.
4339   auto SimplifyOp = [&](Value *Op, bool SelectCondIsTrue) -> Value * {
4340     if (Value *Res = simplifyICmpInst(Pred, Op, RHS, SQ))
4341       return Res;
4342     if (std::optional<bool> Impl = isImpliedCondition(
4343             SI->getCondition(), Pred, Op, RHS, DL, SelectCondIsTrue))
4344       return ConstantInt::get(I.getType(), *Impl);
4345     return nullptr;
4346   };
4347 
4348   ConstantInt *CI = nullptr;
4349   Value *Op1 = SimplifyOp(SI->getOperand(1), true);
4350   if (Op1)
4351     CI = dyn_cast<ConstantInt>(Op1);
4352 
4353   Value *Op2 = SimplifyOp(SI->getOperand(2), false);
4354   if (Op2)
4355     CI = dyn_cast<ConstantInt>(Op2);
4356 
4357   auto Simplifies = [&](Value *Op, unsigned Idx) {
4358     // A comparison of ucmp/scmp with a constant will fold into an icmp.
4359     const APInt *Dummy;
4360     return Op ||
4361            (isa<CmpIntrinsic>(SI->getOperand(Idx)) &&
4362             SI->getOperand(Idx)->hasOneUse() && match(RHS, m_APInt(Dummy)));
4363   };
4364 
4365   // We only want to perform this transformation if it will not lead to
4366   // additional code. This is true if either both sides of the select
4367   // fold to a constant (in which case the icmp is replaced with a select
4368   // which will usually simplify) or this is the only user of the
4369   // select (in which case we are trading a select+icmp for a simpler
4370   // select+icmp) or all uses of the select can be replaced based on
4371   // dominance information ("Global cases").
4372   bool Transform = false;
4373   if (Op1 && Op2)
4374     Transform = true;
4375   else if (Simplifies(Op1, 1) || Simplifies(Op2, 2)) {
4376     // Local case
4377     if (SI->hasOneUse())
4378       Transform = true;
4379     // Global cases
4380     else if (CI && !CI->isZero())
4381       // When Op1 is constant try replacing select with second operand.
4382       // Otherwise Op2 is constant and try replacing select with first
4383       // operand.
4384       Transform = replacedSelectWithOperand(SI, &I, Op1 ? 2 : 1);
4385   }
4386   if (Transform) {
4387     if (!Op1)
4388       Op1 = Builder.CreateICmp(Pred, SI->getOperand(1), RHS, I.getName());
4389     if (!Op2)
4390       Op2 = Builder.CreateICmp(Pred, SI->getOperand(2), RHS, I.getName());
4391     return SelectInst::Create(SI->getOperand(0), Op1, Op2);
4392   }
4393 
4394   return nullptr;
4395 }
4396 
4397 // Returns whether V is a Mask ((X + 1) & X == 0) or ~Mask (-Pow2OrZero)
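     // e.g. 0x00ff is a Mask and 0xff00 is a ~Mask; zero qualifies as both.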
4398 static bool isMaskOrZero(const Value *V, bool Not, const SimplifyQuery &Q,
4399                          unsigned Depth = 0) {
4400   if (Not ? match(V, m_NegatedPower2OrZero()) : match(V, m_LowBitMaskOrZero()))
4401     return true;
4402   if (V->getType()->getScalarSizeInBits() == 1)
4403     return true;
4404   if (Depth++ >= MaxAnalysisRecursionDepth)
4405     return false;
4406   Value *X;
4407   const Instruction *I = dyn_cast<Instruction>(V);
4408   if (!I)
4409     return false;
4410   switch (I->getOpcode()) {
4411   case Instruction::ZExt:
4412     // ZExt(Mask) is a Mask.
4413     return !Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4414   case Instruction::SExt:
4415     // SExt(Mask) is a Mask.
4416     // SExt(~Mask) is a ~Mask.
4417     return isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4418   case Instruction::And:
4419   case Instruction::Or:
4420     // Mask0 | Mask1 is a Mask.
4421     // Mask0 & Mask1 is a Mask.
4422     // ~Mask0 | ~Mask1 is a ~Mask.
4423     // ~Mask0 & ~Mask1 is a ~Mask.
4424     return isMaskOrZero(I->getOperand(1), Not, Q, Depth) &&
4425            isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4426   case Instruction::Xor:
4427     if (match(V, m_Not(m_Value(X))))
4428       return isMaskOrZero(X, !Not, Q, Depth);
4429 
4430     // (X ^ -X) is a ~Mask
4431     if (Not)
4432       return match(V, m_c_Xor(m_Value(X), m_Neg(m_Deferred(X))));
4433     // (X ^ (X - 1)) is a Mask
4434     else
4435       return match(V, m_c_Xor(m_Value(X), m_Add(m_Deferred(X), m_AllOnes())));
4436   case Instruction::Select:
4437     // c ? Mask0 : Mask1 is a Mask.
4438     return isMaskOrZero(I->getOperand(1), Not, Q, Depth) &&
4439            isMaskOrZero(I->getOperand(2), Not, Q, Depth);
4440   case Instruction::Shl:
4441     // (~Mask) << X is a ~Mask.
4442     return Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4443   case Instruction::LShr:
4444     // Mask >> X is a Mask.
4445     return !Not && isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4446   case Instruction::AShr:
4447     // Mask s>> X is a Mask.
4448     // ~Mask s>> X is a ~Mask.
4449     return isMaskOrZero(I->getOperand(0), Not, Q, Depth);
4450   case Instruction::Add:
4451     // Pow2 - 1 is a Mask.
4452     if (!Not && match(I->getOperand(1), m_AllOnes()))
4453       return isKnownToBeAPowerOfTwo(I->getOperand(0), Q.DL, /*OrZero*/ true,
4454                                     Q.AC, Q.CxtI, Q.DT, Depth);
4455     break;
4456   case Instruction::Sub:
4457     // -Pow2 is a ~Mask.
4458     if (Not && match(I->getOperand(0), m_Zero()))
4459       return isKnownToBeAPowerOfTwo(I->getOperand(1), Q.DL, /*OrZero*/ true,
4460                                     Q.AC, Q.CxtI, Q.DT, Depth);
4461     break;
4462   case Instruction::Call: {
4463     if (auto *II = dyn_cast<IntrinsicInst>(I)) {
4464       switch (II->getIntrinsicID()) {
4465         // min/max(Mask0, Mask1) is a Mask.
4466         // min/max(~Mask0, ~Mask1) is a ~Mask.
4467       case Intrinsic::umax:
4468       case Intrinsic::smax:
4469       case Intrinsic::umin:
4470       case Intrinsic::smin:
4471         return isMaskOrZero(II->getArgOperand(1), Not, Q, Depth) &&
4472                isMaskOrZero(II->getArgOperand(0), Not, Q, Depth);
4473 
4474         // In the context of masks, bitreverse(Mask) == ~Mask
4475       case Intrinsic::bitreverse:
4476         return isMaskOrZero(II->getArgOperand(0), !Not, Q, Depth);
4477       default:
4478         break;
4479       }
4480     }
4481     break;
4482   }
4483   default:
4484     break;
4485   }
4486   return false;
4487 }
4488 
4489 /// Some comparisons can be simplified.
4490 /// In this case, we are looking for comparisons that look like
4491 /// a check for a lossy truncation.
4492 /// Folds:
4493 ///   icmp SrcPred (x & Mask), x    to    icmp DstPred x, Mask
4494 ///   icmp SrcPred (x & ~Mask), ~Mask    to    icmp DstPred x, ~Mask
4495 ///   icmp eq/ne (x & ~Mask), 0     to    icmp DstPred x, Mask
4496 ///   icmp eq/ne (~x | Mask), -1     to    icmp DstPred x, Mask
4497 /// Where Mask is some pattern that produces all-ones in low bits:
4498 ///    (-1 >> y)
4499 ///    ((-1 << y) >> y)     <- non-canonical, has extra uses
4500 ///   ~(-1 << y)
4501 ///    ((1 << y) + (-1))    <- non-canonical, has extra uses
4502 /// The Mask can be a constant, too.
4503 /// For some predicates, the operands are commutative.
4504 /// For others, x can only be on a specific side.
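     /// For example: (x & (-1 >> y)) u< x  becomes  x u> (-1 >> y).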
4505 static Value *foldICmpWithLowBitMaskedVal(CmpPredicate Pred, Value *Op0,
4506                                           Value *Op1, const SimplifyQuery &Q,
4507                                           InstCombiner &IC) {
4508 
4509   ICmpInst::Predicate DstPred;
4510   switch (Pred) {
4511   case ICmpInst::Predicate::ICMP_EQ:
4512     //  x & Mask == x
4513     //  x & ~Mask == 0
4514     //  ~x | Mask == -1
4515     //    ->    x u<= Mask
4516     //  x & ~Mask == ~Mask
4517     //    ->    ~Mask u<= x
4518     DstPred = ICmpInst::Predicate::ICMP_ULE;
4519     break;
4520   case ICmpInst::Predicate::ICMP_NE:
4521     //  x & Mask != x
4522     //  x & ~Mask != 0
4523     //  ~x | Mask != -1
4524     //    ->    x u> Mask
4525     //  x & ~Mask != ~Mask
4526     //    ->    ~Mask u> x
4527     DstPred = ICmpInst::Predicate::ICMP_UGT;
4528     break;
4529   case ICmpInst::Predicate::ICMP_ULT:
4530     //  x & Mask u< x
4531     //    -> x u> Mask
4532     //  x & ~Mask u< ~Mask
4533     //    -> ~Mask u> x
4534     DstPred = ICmpInst::Predicate::ICMP_UGT;
4535     break;
4536   case ICmpInst::Predicate::ICMP_UGE:
4537     //  x & Mask u>= x
4538     //    -> x u<= Mask
4539     //  x & ~Mask u>= ~Mask
4540     //    -> ~Mask u<= x
4541     DstPred = ICmpInst::Predicate::ICMP_ULE;
4542     break;
4543   case ICmpInst::Predicate::ICMP_SLT:
4544     //  x & Mask s< x [iff Mask s>= 0]
4545     //    -> x s> Mask
4546     //  x & ~Mask s< ~Mask [iff ~Mask != 0]
4547     //    -> ~Mask s> x
4548     DstPred = ICmpInst::Predicate::ICMP_SGT;
4549     break;
4550   case ICmpInst::Predicate::ICMP_SGE:
4551     //  x & Mask s>= x [iff Mask s>= 0]
4552     //    -> x s<= Mask
4553     //  x & ~Mask s>= ~Mask [iff ~Mask != 0]
4554     //    -> ~Mask s<= x
4555     DstPred = ICmpInst::Predicate::ICMP_SLE;
4556     break;
4557   default:
4558     // We don't support sgt/sle.
4559     // ule/ugt are simplified to true/false respectively.
4560     return nullptr;
4561   }
4562 
4563   Value *X, *M;
4564   // Put search code in lambda for early positive returns.
4565   auto IsLowBitMask = [&]() {
4566     if (match(Op0, m_c_And(m_Specific(Op1), m_Value(M)))) {
4567       X = Op1;
4568       // Look for: x & Mask pred x
4569       if (isMaskOrZero(M, /*Not=*/false, Q)) {
4570         return !ICmpInst::isSigned(Pred) ||
4571                (match(M, m_NonNegative()) || isKnownNonNegative(M, Q));
4572       }
4573 
4574       // Look for: x & ~Mask pred ~Mask
4575       if (isMaskOrZero(X, /*Not=*/true, Q)) {
4576         return !ICmpInst::isSigned(Pred) || isKnownNonZero(X, Q);
4577       }
4578       return false;
4579     }
4580     if (ICmpInst::isEquality(Pred) && match(Op1, m_AllOnes()) &&
4581         match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(M))))) {
4582 
4583       auto Check = [&]() {
4584         // Look for: ~x | Mask == -1
4585         if (isMaskOrZero(M, /*Not=*/false, Q)) {
4586           if (Value *NotX =
4587                   IC.getFreelyInverted(X, X->hasOneUse(), &IC.Builder)) {
4588             X = NotX;
4589             return true;
4590           }
4591         }
4592         return false;
4593       };
4594       if (Check())
4595         return true;
4596       std::swap(X, M);
4597       return Check();
4598     }
4599     if (ICmpInst::isEquality(Pred) && match(Op1, m_Zero()) &&
4600         match(Op0, m_OneUse(m_And(m_Value(X), m_Value(M))))) {
4601       auto Check = [&]() {
4602         // Look for: x & ~Mask == 0
4603         if (isMaskOrZero(M, /*Not=*/true, Q)) {
4604           if (Value *NotM =
4605                   IC.getFreelyInverted(M, M->hasOneUse(), &IC.Builder)) {
4606             M = NotM;
4607             return true;
4608           }
4609         }
4610         return false;
4611       };
4612       if (Check())
4613         return true;
4614       std::swap(X, M);
4615       return Check();
4616     }
4617     return false;
4618   };
4619 
4620   if (!IsLowBitMask())
4621     return nullptr;
4622 
4623   return IC.Builder.CreateICmp(DstPred, X, M);
4624 }
4625 
4626 /// Some comparisons can be simplified.
4627 /// In this case, we are looking for comparisons that look like
4628 /// a check for a lossy signed truncation.
4629 /// Folds:   (MaskedBits is a constant.)
4630 ///   ((%x << MaskedBits) a>> MaskedBits) SrcPred %x
4631 /// Into:
4632 ///   (add %x, (1 << (KeptBits-1))) DstPred (1 << KeptBits)
4633 /// Where  KeptBits = bitwidth(%x) - MaskedBits
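     /// e.g. for i8 %x and MaskedBits = 4 (KeptBits = 4):
     ///   ((%x << 4) a>> 4) == %x   becomes   (add %x, 8) u< 16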
4634 static Value *
4635 foldICmpWithTruncSignExtendedVal(ICmpInst &I,
4636                                  InstCombiner::BuilderTy &Builder) {
4637   CmpPredicate SrcPred;
4638   Value *X;
4639   const APInt *C0, *C1; // FIXME: non-splats, potentially with undef.
4640   // We are ok with 'shl' having multiple uses, but 'ashr' must be one-use.
4641   if (!match(&I, m_c_ICmp(SrcPred,
4642                           m_OneUse(m_AShr(m_Shl(m_Value(X), m_APInt(C0)),
4643                                           m_APInt(C1))),
4644                           m_Deferred(X))))
4645     return nullptr;
4646 
4647   // Potential handling of non-splats: for each element:
4648   //  * if both are undef, replace with constant 0.
4649   //    Because (1<<0) is OK and is 1, and ((1<<0)>>1) is also OK and is 0.
4650   //  * if both are not undef, and are different, bailout.
4651   //  * else, only one is undef, then pick the non-undef one.
4652 
4653   // The shift amount must be equal.
4654   if (*C0 != *C1)
4655     return nullptr;
4656   const APInt &MaskedBits = *C0;
4657   assert(MaskedBits != 0 && "shift by zero should be folded away already.");
4658 
4659   ICmpInst::Predicate DstPred;
4660   switch (SrcPred) {
4661   case ICmpInst::Predicate::ICMP_EQ:
4662     // ((%x << MaskedBits) a>> MaskedBits) == %x
4663     //   =>
4664     // (add %x, (1 << (KeptBits-1))) u< (1 << KeptBits)
4665     DstPred = ICmpInst::Predicate::ICMP_ULT;
4666     break;
4667   case ICmpInst::Predicate::ICMP_NE:
4668     // ((%x << MaskedBits) a>> MaskedBits) != %x
4669     //   =>
4670     // (add %x, (1 << (KeptBits-1))) u>= (1 << KeptBits)
4671     DstPred = ICmpInst::Predicate::ICMP_UGE;
4672     break;
4673   // FIXME: are more folds possible?
4674   default:
4675     return nullptr;
4676   }
4677 
4678   auto *XType = X->getType();
4679   const unsigned XBitWidth = XType->getScalarSizeInBits();
4680   const APInt BitWidth = APInt(XBitWidth, XBitWidth);
4681   assert(BitWidth.ugt(MaskedBits) && "shifts should leave some bits untouched");
4682 
4683   // KeptBits = bitwidth(%x) - MaskedBits
4684   const APInt KeptBits = BitWidth - MaskedBits;
4685   assert(KeptBits.ugt(0) && KeptBits.ult(BitWidth) && "unreachable");
4686   // ICmpCst = (1 << KeptBits)
4687   const APInt ICmpCst = APInt(XBitWidth, 1).shl(KeptBits);
4688   assert(ICmpCst.isPowerOf2());
4689   // AddCst = (1 << (KeptBits-1))
4690   const APInt AddCst = ICmpCst.lshr(1);
4691   assert(AddCst.ult(ICmpCst) && AddCst.isPowerOf2());
4692 
4693   // T0 = add %x, AddCst
4694   Value *T0 = Builder.CreateAdd(X, ConstantInt::get(XType, AddCst));
4695   // T1 = T0 DstPred ICmpCst
4696   Value *T1 = Builder.CreateICmp(DstPred, T0, ConstantInt::get(XType, ICmpCst));
4697 
4698   return T1;
4699 }
4700 
4701 // Given pattern:
4702 //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
4703 // we should move shifts to the same hand of 'and', i.e. rewrite as
4704 //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
4705 // We are only interested in opposite logical shifts here.
4706 // One of the shifts can be truncated.
4707 // If we can, we want to end up creating 'lshr' shift.
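     // e.g. ((x l>> 1) & (y << 2)) ==/!= 0 --> ((x l>> 3) & y) ==/!= 0 (here Q+K = 3).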
4708 static Value *
4709 foldShiftIntoShiftInAnotherHandOfAndInICmp(ICmpInst &I, const SimplifyQuery SQ,
4710                                            InstCombiner::BuilderTy &Builder) {
4711   if (!I.isEquality() || !match(I.getOperand(1), m_Zero()) ||
4712       !I.getOperand(0)->hasOneUse())
4713     return nullptr;
4714 
4715   auto m_AnyLogicalShift = m_LogicalShift(m_Value(), m_Value());
4716 
4717   // Look for an 'and' of two logical shifts, one of which may be truncated.
4718   // We use m_TruncOrSelf() on the RHS to correctly handle commutative case.
4719   Instruction *XShift, *MaybeTruncation, *YShift;
4720   if (!match(
4721           I.getOperand(0),
4722           m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
4723                   m_CombineAnd(m_TruncOrSelf(m_CombineAnd(
4724                                    m_AnyLogicalShift, m_Instruction(YShift))),
4725                                m_Instruction(MaybeTruncation)))))
4726     return nullptr;
4727 
4728   // We potentially looked past 'trunc', but only when matching YShift,
4729   // therefore YShift must have the widest type.
4730   Instruction *WidestShift = YShift;
4731   // Therefore XShift must have the shallowest type.
4732   // Or they both have identical types if there was no truncation.
4733   Instruction *NarrowestShift = XShift;
4734 
4735   Type *WidestTy = WidestShift->getType();
4736   Type *NarrowestTy = NarrowestShift->getType();
4737   assert(NarrowestTy == I.getOperand(0)->getType() &&
4738          "We did not look past any shifts while matching XShift though.");
4739   bool HadTrunc = WidestTy != I.getOperand(0)->getType();
4740 
4741   // If YShift is a 'lshr', swap the shifts around.
4742   if (match(YShift, m_LShr(m_Value(), m_Value())))
4743     std::swap(XShift, YShift);
4744 
4745   // The shifts must be in opposite directions.
4746   auto XShiftOpcode = XShift->getOpcode();
4747   if (XShiftOpcode == YShift->getOpcode())
4748     return nullptr; // Do not care about same-direction shifts here.
4749 
4750   Value *X, *XShAmt, *Y, *YShAmt;
4751   match(XShift, m_BinOp(m_Value(X), m_ZExtOrSelf(m_Value(XShAmt))));
4752   match(YShift, m_BinOp(m_Value(Y), m_ZExtOrSelf(m_Value(YShAmt))));
4753 
4754   // If one of the values being shifted is a constant, then we will end with
4755   // and+icmp, and [zext+]shift instrs will be constant-folded. If they are not,
4756   // however, we will need to ensure that we won't increase instruction count.
4757   if (!isa<Constant>(X) && !isa<Constant>(Y)) {
4758     // At least one of the hands of the 'and' should be one-use shift.
4759     if (!match(I.getOperand(0),
4760                m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
4761       return nullptr;
4762     if (HadTrunc) {
4763       // Due to the 'trunc', we will need to widen X. For that either the old
4764       // 'trunc' or the shift amt in the non-truncated shift should be one-use.
4765       if (!MaybeTruncation->hasOneUse() &&
4766           !NarrowestShift->getOperand(1)->hasOneUse())
4767         return nullptr;
4768     }
4769   }
4770 
4771   // We have two shift amounts from two different shifts. The types of those
4772   // shift amounts may not match. If that's the case let's bailout now.
4773   if (XShAmt->getType() != YShAmt->getType())
4774     return nullptr;
4775 
4776   // As input, we have the following pattern:
4777   //   icmp eq/ne (and ((x shift Q), (y oppositeshift K))), 0
4778   // We want to rewrite that as:
4779   //   icmp eq/ne (and (x shift (Q+K)), y), 0  iff (Q+K) u< bitwidth(x)
4780   // While we know that originally (Q+K) would not overflow
4781   // (because  2 * (N-1) u<= iN -1), we have looked past extensions of
4782   // shift amounts, so it may now overflow in the smaller bit width.
4783   // To ensure that does not happen, we need to ensure that the total maximal
4784   // shift amount is still representable in that smaller bit width.
4785   unsigned MaximalPossibleTotalShiftAmount =
4786       (WidestTy->getScalarSizeInBits() - 1) +
4787       (NarrowestTy->getScalarSizeInBits() - 1);
4788   APInt MaximalRepresentableShiftAmount =
4789       APInt::getAllOnes(XShAmt->getType()->getScalarSizeInBits());
4790   if (MaximalRepresentableShiftAmount.ult(MaximalPossibleTotalShiftAmount))
4791     return nullptr;
4792 
4793   // Can we fold (XShAmt+YShAmt) ?
4794   auto *NewShAmt = dyn_cast_or_null<Constant>(
4795       simplifyAddInst(XShAmt, YShAmt, /*isNSW=*/false,
4796                       /*isNUW=*/false, SQ.getWithInstruction(&I)));
4797   if (!NewShAmt)
4798     return nullptr;
4799   if (NewShAmt->getType() != WidestTy) {
4800     NewShAmt =
4801         ConstantFoldCastOperand(Instruction::ZExt, NewShAmt, WidestTy, SQ.DL);
4802     if (!NewShAmt)
4803       return nullptr;
4804   }
4805   unsigned WidestBitWidth = WidestTy->getScalarSizeInBits();
4806 
4807   // Is the new shift amount smaller than the bit width?
4808   // FIXME: could also rely on ConstantRange.
4809   if (!match(NewShAmt,
4810              m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_ULT,
4811                                 APInt(WidestBitWidth, WidestBitWidth))))
4812     return nullptr;
4813 
4814   // An extra legality check is needed if we had trunc-of-lshr.
4815   if (HadTrunc && match(WidestShift, m_LShr(m_Value(), m_Value()))) {
4816     auto CanFold = [NewShAmt, WidestBitWidth, NarrowestShift, SQ,
4817                     WidestShift]() {
4818       // It isn't obvious whether it's worth it to analyze non-constants here.
4819       // Also, let's basically give up on non-splat cases, pessimizing vectors.
4820       // If *any* of these preconditions matches we can perform the fold.
4821       Constant *NewShAmtSplat = NewShAmt->getType()->isVectorTy()
4822                                     ? NewShAmt->getSplatValue()
4823                                     : NewShAmt;
4824       // If it's edge-case shift (by 0 or by WidestBitWidth-1) we can fold.
4825       if (NewShAmtSplat &&
4826           (NewShAmtSplat->isNullValue() ||
4827            NewShAmtSplat->getUniqueInteger() == WidestBitWidth - 1))
4828         return true;
4829       // We consider *min* leading zeros so a single outlier
4830       // blocks the transform as opposed to allowing it.
4831       if (auto *C = dyn_cast<Constant>(NarrowestShift->getOperand(0))) {
4832         KnownBits Known = computeKnownBits(C, SQ.DL);
4833         unsigned MinLeadZero = Known.countMinLeadingZeros();
4834         // If the value being shifted has at most the lowest bit set, we can fold.
4835         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
4836         if (MaxActiveBits <= 1)
4837           return true;
4838         // Precondition:  NewShAmt u<= countLeadingZeros(C)
4839         if (NewShAmtSplat && NewShAmtSplat->getUniqueInteger().ule(MinLeadZero))
4840           return true;
4841       }
4842       if (auto *C = dyn_cast<Constant>(WidestShift->getOperand(0))) {
4843         KnownBits Known = computeKnownBits(C, SQ.DL);
4844         unsigned MinLeadZero = Known.countMinLeadingZeros();
4845         // If the value being shifted has at most the lowest bit set, we can fold.
4846         unsigned MaxActiveBits = Known.getBitWidth() - MinLeadZero;
4847         if (MaxActiveBits <= 1)
4848           return true;
4849         // Precondition:  ((WidestBitWidth-1)-NewShAmt) u<= countLeadingZeros(C)
4850         if (NewShAmtSplat) {
4851           APInt AdjNewShAmt =
4852               (WidestBitWidth - 1) - NewShAmtSplat->getUniqueInteger();
4853           if (AdjNewShAmt.ule(MinLeadZero))
4854             return true;
4855         }
4856       }
4857       return false; // Can't tell if it's ok.
4858     };
4859     if (!CanFold())
4860       return nullptr;
4861   }
4862 
4863   // All good, we can do this fold.
4864   X = Builder.CreateZExt(X, WidestTy);
4865   Y = Builder.CreateZExt(Y, WidestTy);
4866   // The shift is the same that was for X.
4867   Value *T0 = XShiftOpcode == Instruction::BinaryOps::LShr
4868                   ? Builder.CreateLShr(X, NewShAmt)
4869                   : Builder.CreateShl(X, NewShAmt);
4870   Value *T1 = Builder.CreateAnd(T0, Y);
4871   return Builder.CreateICmp(I.getPredicate(), T1,
4872                             Constant::getNullValue(WidestTy));
4873 }
4874 
4875 /// Fold
4876 ///   (-1 u/ x) u< y
4877 ///   ((x * y) ?/ x) != y
4878 /// to
4879 ///   @llvm.?mul.with.overflow(x, y) plus extraction of overflow bit
4880 /// Note that the comparison is commutative, while inverted (u>=, ==) predicate
4881 /// will mean that we are looking for the opposite answer.
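     /// E.g. (-1 u/ x) u< y is the usual unsigned overflow check y u> (UINT_MAX u/ x),
     /// so it becomes the overflow bit of @llvm.umul.with.overflow(x, y).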
4882 Value *InstCombinerImpl::foldMultiplicationOverflowCheck(ICmpInst &I) {
4883   CmpPredicate Pred;
4884   Value *X, *Y;
4885   Instruction *Mul;
4886   Instruction *Div;
4887   bool NeedNegation;
4888   // Look for: (-1 u/ x) u</u>= y
4889   if (!I.isEquality() &&
4890       match(&I, m_c_ICmp(Pred,
4891                          m_CombineAnd(m_OneUse(m_UDiv(m_AllOnes(), m_Value(X))),
4892                                       m_Instruction(Div)),
4893                          m_Value(Y)))) {
4894     Mul = nullptr;
4895 
4896     // Are we checking that overflow does not happen, or does happen?
4897     switch (Pred) {
4898     case ICmpInst::Predicate::ICMP_ULT:
4899       NeedNegation = false;
4900       break; // OK
4901     case ICmpInst::Predicate::ICMP_UGE:
4902       NeedNegation = true;
4903       break; // OK
4904     default:
4905       return nullptr; // Wrong predicate.
4906     }
4907   } else // Look for: ((x * y) / x) !=/== y
4908     if (I.isEquality() &&
4909         match(&I, m_c_ICmp(Pred, m_Value(Y),
4910                            m_CombineAnd(m_OneUse(m_IDiv(
4911                                             m_CombineAnd(m_c_Mul(m_Deferred(Y),
4912                                                                  m_Value(X)),
4913                                                          m_Instruction(Mul)),
4914                                             m_Deferred(X))),
4915                                         m_Instruction(Div))))) {
4916       NeedNegation = Pred == ICmpInst::Predicate::ICMP_EQ;
4917     } else
4918       return nullptr;
4919 
4920   BuilderTy::InsertPointGuard Guard(Builder);
4921   // If the pattern included (x * y), we'll want to insert new instructions
4922   // right before that original multiplication so that we can replace it.
4923   bool MulHadOtherUses = Mul && !Mul->hasOneUse();
4924   if (MulHadOtherUses)
4925     Builder.SetInsertPoint(Mul);
4926 
4927   CallInst *Call = Builder.CreateIntrinsic(
4928       Div->getOpcode() == Instruction::UDiv ? Intrinsic::umul_with_overflow
4929                                             : Intrinsic::smul_with_overflow,
4930       X->getType(), {X, Y}, /*FMFSource=*/nullptr, "mul");
4931 
4932   // If the multiplication was used elsewhere, to ensure that we don't leave
4933   // "duplicate" instructions, replace uses of that original multiplication
4934   // with the multiplication result from the with.overflow intrinsic.
4935   if (MulHadOtherUses)
4936     replaceInstUsesWith(*Mul, Builder.CreateExtractValue(Call, 0, "mul.val"));
4937 
4938   Value *Res = Builder.CreateExtractValue(Call, 1, "mul.ov");
4939   if (NeedNegation) // This technically increases instruction count.
4940     Res = Builder.CreateNot(Res, "mul.not.ov");
4941 
4942   // If we replaced the mul, erase it. Do this after all uses of Builder,
4943   // as the mul is used as insertion point.
4944   if (MulHadOtherUses)
4945     eraseInstFromFunction(*Mul);
4946 
4947   return Res;
4948 }
4949 
4950 static Instruction *foldICmpXNegX(ICmpInst &I,
4951                                   InstCombiner::BuilderTy &Builder) {
4952   CmpPredicate Pred;
4953   Value *X;
4954   if (match(&I, m_c_ICmp(Pred, m_NSWNeg(m_Value(X)), m_Deferred(X)))) {
4955 
4956     if (ICmpInst::isSigned(Pred))
4957       Pred = ICmpInst::getSwappedPredicate(Pred);
4958     else if (ICmpInst::isUnsigned(Pred))
4959       Pred = ICmpInst::getSignedPredicate(Pred);
4960     // else for equality-comparisons just keep the predicate.
4961 
4962     return ICmpInst::Create(Instruction::ICmp, Pred, X,
4963                             Constant::getNullValue(X->getType()), I.getName());
4964   }
4965 
4966   // A value is not equal to its negation unless that value is 0 or
4967   // MinSignedValue, i.e. a != -a --> (a & MaxSignedVal) != 0
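       // e.g. for i8, only 0 and -128 equal their own negation, so
       // a != -a --> (a & 0x7f) != 0.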
4968   if (match(&I, m_c_ICmp(Pred, m_OneUse(m_Neg(m_Value(X))), m_Deferred(X))) &&
4969       ICmpInst::isEquality(Pred)) {
4970     Type *Ty = X->getType();
4971     uint32_t BitWidth = Ty->getScalarSizeInBits();
4972     Constant *MaxSignedVal =
4973         ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
4974     Value *And = Builder.CreateAnd(X, MaxSignedVal);
4975     Constant *Zero = Constant::getNullValue(Ty);
4976     return CmpInst::Create(Instruction::ICmp, Pred, And, Zero);
4977   }
4978 
4979   return nullptr;
4980 }
4981 
4982 static Instruction *foldICmpAndXX(ICmpInst &I, const SimplifyQuery &Q,
4983                                   InstCombinerImpl &IC) {
4984   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
4985   // Normalize the 'and' operand to be operand 0.
4986   CmpInst::Predicate Pred = I.getPredicate();
4987   if (match(Op1, m_c_And(m_Specific(Op0), m_Value()))) {
4988     std::swap(Op0, Op1);
4989     Pred = ICmpInst::getSwappedPredicate(Pred);
4990   }
4991 
4992   if (!match(Op0, m_c_And(m_Specific(Op1), m_Value(A))))
4993     return nullptr;
4994 
4995   // icmp (X & Y) u< X --> (X & Y) != X
4996   if (Pred == ICmpInst::ICMP_ULT)
4997     return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
4998 
4999   // icmp (X & Y) u>= X --> (X & Y) == X
5000   if (Pred == ICmpInst::ICMP_UGE)
5001     return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5002 
5003   if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
5004     // icmp (X & Y) eq/ne Y --> (X | ~Y) eq/ne -1 if Y is freely invertible and
5005     // Y is non-constant. If Y is constant the `X & C == C` form is preferable
5006     // so don't do this fold.
5007     if (!match(Op1, m_ImmConstant()))
5008       if (auto *NotOp1 =
5009               IC.getFreelyInverted(Op1, !Op1->hasNUsesOrMore(3), &IC.Builder))
5010         return new ICmpInst(Pred, IC.Builder.CreateOr(A, NotOp1),
5011                             Constant::getAllOnesValue(Op1->getType()));
5012     // icmp (X & Y) eq/ne Y --> (~X & Y) eq/ne 0 if X  is freely invertible.
5013     if (auto *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
5014       return new ICmpInst(Pred, IC.Builder.CreateAnd(Op1, NotA),
5015                           Constant::getNullValue(Op1->getType()));
5016   }
5017 
5018   if (!ICmpInst::isSigned(Pred))
5019     return nullptr;
5020 
5021   KnownBits KnownY = IC.computeKnownBits(A, &I);
5022   // (X & NegY) spred X --> (X & NegY) upred X
5023   if (KnownY.isNegative())
5024     return new ICmpInst(ICmpInst::getUnsignedPredicate(Pred), Op0, Op1);
5025 
5026   if (Pred != ICmpInst::ICMP_SLE && Pred != ICmpInst::ICMP_SGT)
5027     return nullptr;
5028 
5029   if (KnownY.isNonNegative())
5030     // (X & PosY) s<= X --> X s>= 0
5031     // (X & PosY) s> X --> X s< 0
5032     return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
5033                         Constant::getNullValue(Op1->getType()));
5034 
5035   if (isKnownNegative(Op1, IC.getSimplifyQuery().getWithInstruction(&I)))
5036     // (NegX & Y) s<= NegX --> Y s< 0
5037     // (NegX & Y) s> NegX --> Y s>= 0
5038     return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), A,
5039                         Constant::getNullValue(A->getType()));
5040 
5041   return nullptr;
5042 }
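// Intuition for the unsigned folds above (a sketch): X & Y can never exceed X
// as an unsigned value, so "u< X" can only happen when some bit of X was
// cleared (i.e. inequality) and "u>= X" can only happen on equality. For
// example, X = 0b1010, Y = 0b0110 gives X & Y = 0b0010, which is u< X and
// != X.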
5043 
5044 static Instruction *foldICmpOrXX(ICmpInst &I, const SimplifyQuery &Q,
5045                                  InstCombinerImpl &IC) {
5046   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
5047 
5048   // Normalize the `or` operand to be operand 0.
5049   CmpInst::Predicate Pred = I.getPredicate();
5050   if (match(Op1, m_c_Or(m_Specific(Op0), m_Value(A)))) {
5051     std::swap(Op0, Op1);
5052     Pred = ICmpInst::getSwappedPredicate(Pred);
5053   } else if (!match(Op0, m_c_Or(m_Specific(Op1), m_Value(A)))) {
5054     return nullptr;
5055   }
5056 
5057   // icmp (X | Y) u<= X --> (X | Y) == X
5058   if (Pred == ICmpInst::ICMP_ULE)
5059     return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
5060 
5061   // icmp (X | Y) u> X --> (X | Y) != X
5062   if (Pred == ICmpInst::ICMP_UGT)
5063     return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
5064 
5065   if (ICmpInst::isEquality(Pred) && Op0->hasOneUse()) {
5066     // icmp (X | Y) eq/ne Y --> (X & ~Y) eq/ne 0 if Y is freely invertible
5067     if (Value *NotOp1 = IC.getFreelyInverted(
5068             Op1, !isa<Constant>(Op1) && !Op1->hasNUsesOrMore(3), &IC.Builder))
5069       return new ICmpInst(Pred, IC.Builder.CreateAnd(A, NotOp1),
5070                           Constant::getNullValue(Op1->getType()));
5071     // icmp (X | Y) eq/ne Y --> (~X | Y) eq/ne -1 if X  is freely invertible.
5072     if (Value *NotA = IC.getFreelyInverted(A, A->hasOneUse(), &IC.Builder))
5073       return new ICmpInst(Pred, IC.Builder.CreateOr(Op1, NotA),
5074                           Constant::getAllOnesValue(Op1->getType()));
5075   }
5076   return nullptr;
5077 }
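// Dual of the `and` case (sketch): X | Y is always u>= X, so "u<= X" forces
// equality (Y contributed no new bits) and "u> X" forces inequality. E.g.
// X = 0b1010, Y = 0b0110 gives X | Y = 0b1110, which is u> X and != X.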
5078 
5079 static Instruction *foldICmpXorXX(ICmpInst &I, const SimplifyQuery &Q,
5080                                   InstCombinerImpl &IC) {
5081   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1), *A;
5082   // Normalize the `xor` operand to be operand 0.
5083   CmpInst::Predicate Pred = I.getPredicate();
5084   if (match(Op1, m_c_Xor(m_Specific(Op0), m_Value()))) {
5085     std::swap(Op0, Op1);
5086     Pred = ICmpInst::getSwappedPredicate(Pred);
5087   }
5088   if (!match(Op0, m_c_Xor(m_Specific(Op1), m_Value(A))))
5089     return nullptr;
5090 
5091   // icmp (X ^ Y_NonZero) u>= X --> icmp (X ^ Y_NonZero) u> X
5092   // icmp (X ^ Y_NonZero) u<= X --> icmp (X ^ Y_NonZero) u< X
5093   // icmp (X ^ Y_NonZero) s>= X --> icmp (X ^ Y_NonZero) s> X
5094   // icmp (X ^ Y_NonZero) s<= X --> icmp (X ^ Y_NonZero) s< X
5095   CmpInst::Predicate PredOut = CmpInst::getStrictPredicate(Pred);
5096   if (PredOut != Pred && isKnownNonZero(A, Q))
5097     return new ICmpInst(PredOut, Op0, Op1);
5098 
5099   // These transforms work when A is negative.
5100   // X s< X^A, X s<= X^A, X u> X^A, X u>= X^A  --> X s< 0
5101   // X s> X^A, X s>= X^A, X u< X^A, X u<= X^A  --> X s>= 0
5102   if (match(A, m_Negative())) {
5103     CmpInst::Predicate NewPred;
5104     switch (ICmpInst::getStrictPredicate(Pred)) {
5105     default:
5106       return nullptr;
5107     case ICmpInst::ICMP_SLT:
5108     case ICmpInst::ICMP_UGT:
5109       NewPred = ICmpInst::ICMP_SLT;
5110       break;
5111     case ICmpInst::ICMP_SGT:
5112     case ICmpInst::ICMP_ULT:
5113       NewPred = ICmpInst::ICMP_SGE;
5114       break;
5115     }
5116     Constant *Const = Constant::getNullValue(Op0->getType());
5117     return new ICmpInst(NewPred, Op0, Const);
5118   }
5119 
5120   return nullptr;
5121 }
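// Worked case for the m_Negative() fold above (illustrative): with A == -1,
// X ^ A is ~X == -1 - X, and "X s< -1 - X" is equivalent to "2*X s< -1",
// i.e. "X s< 0", matching the "X s< X^A --> X s< 0" row.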
5122 
5123 /// Return true if X is a multiple of C.
5124 /// TODO: Handle non-power-of-2 factors.
5125 static bool isMultipleOf(Value *X, const APInt &C, const SimplifyQuery &Q) {
5126   if (C.isOne())
5127     return true;
5128 
5129   if (!C.isPowerOf2())
5130     return false;
5131 
5132   return MaskedValueIsZero(X, C - 1, Q);
5133 }
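// Example (sketch): for C == 8 this reduces to checking that the low three
// bits of X are known to be zero, i.e. MaskedValueIsZero(X, 0b111), which is
// exactly "X is a multiple of 8" for the power-of-2 case handled here.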
5134 
5135 /// Try to fold icmp (binop), X or icmp X, (binop).
5136 /// TODO: A large part of this logic is duplicated in InstSimplify's
5137 /// simplifyICmpWithBinOp(). We should be able to share that and avoid the code
5138 /// duplication.
5139 Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
5140                                              const SimplifyQuery &SQ) {
5141   const SimplifyQuery Q = SQ.getWithInstruction(&I);
5142   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5143 
5144   // Special logic for binary operators.
5145   BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0);
5146   BinaryOperator *BO1 = dyn_cast<BinaryOperator>(Op1);
5147   if (!BO0 && !BO1)
5148     return nullptr;
5149 
5150   if (Instruction *NewICmp = foldICmpXNegX(I, Builder))
5151     return NewICmp;
5152 
5153   const CmpInst::Predicate Pred = I.getPredicate();
5154   Value *X;
5155 
5156   // Convert add-with-unsigned-overflow comparisons into a 'not' with compare.
5157   // (Op1 + X) u</u>= Op1 --> ~Op1 u</u>= X
5158   if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) &&
5159       (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
5160     return new ICmpInst(Pred, Builder.CreateNot(Op1), X);
5161   // Op0 u>/u<= (Op0 + X) --> X u>/u<= ~Op0
5162   if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) &&
5163       (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
5164     return new ICmpInst(Pred, X, Builder.CreateNot(Op0));
5165 
5166   {
5167     // (Op1 + X) + C u</u>= Op1 --> ~C - X u</u>= Op1
5168     Constant *C;
5169     if (match(Op0, m_OneUse(m_Add(m_c_Add(m_Specific(Op1), m_Value(X)),
5170                                   m_ImmConstant(C)))) &&
5171         (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE)) {
5172       Constant *C2 = ConstantExpr::getNot(C);
5173       return new ICmpInst(Pred, Builder.CreateSub(C2, X), Op1);
5174     }
5175     // Op0 u>/u<= (Op0 + X) + C --> Op0 u>/u<= ~C - X
5176     if (match(Op1, m_OneUse(m_Add(m_c_Add(m_Specific(Op0), m_Value(X)),
5177                                   m_ImmConstant(C)))) &&
5178         (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE)) {
5179       Constant *C2 = ConstantExpr::getNot(C);
5180       return new ICmpInst(Pred, Op0, Builder.CreateSub(C2, X));
5181     }
5182   }
5183 
5184   // (icmp eq/ne (and X, -P2), INT_MIN)
5185   //    -> (icmp slt/sge X, INT_MIN + P2)
5186   if (ICmpInst::isEquality(Pred) && BO0 &&
5187       match(I.getOperand(1), m_SignMask()) &&
5188       match(BO0, m_And(m_Value(), m_NegatedPower2OrZero()))) {
5189     // Will Constant fold.
5190     Value *NewC = Builder.CreateSub(I.getOperand(1), BO0->getOperand(1));
5191     return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_SLT
5192                                                   : ICmpInst::ICMP_SGE,
5193                         BO0->getOperand(0), NewC);
5194   }
5195 
5196   {
5197     // Similar to above: an unsigned overflow comparison may use offset + mask:
5198     // ((Op1 + C) & C) u<  Op1 --> Op1 != 0
5199     // ((Op1 + C) & C) u>= Op1 --> Op1 == 0
5200     // Op0 u>  ((Op0 + C) & C) --> Op0 != 0
5201     // Op0 u<= ((Op0 + C) & C) --> Op0 == 0
5202     BinaryOperator *BO;
5203     const APInt *C;
5204     if ((Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) &&
5205         match(Op0, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
5206         match(BO, m_Add(m_Specific(Op1), m_SpecificIntAllowPoison(*C)))) {
5207       CmpInst::Predicate NewPred =
5208           Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
5209       Constant *Zero = ConstantInt::getNullValue(Op1->getType());
5210       return new ICmpInst(NewPred, Op1, Zero);
5211     }
5212 
5213     if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
5214         match(Op1, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
5215         match(BO, m_Add(m_Specific(Op0), m_SpecificIntAllowPoison(*C)))) {
5216       CmpInst::Predicate NewPred =
5217           Pred == ICmpInst::ICMP_UGT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
5218       Constant *Zero = ConstantInt::getNullValue(Op1->getType());
5219       return new ICmpInst(NewPred, Op0, Zero);
5220     }
5221   }
5222 
5223   bool NoOp0WrapProblem = false, NoOp1WrapProblem = false;
5224   bool Op0HasNUW = false, Op1HasNUW = false;
5225   bool Op0HasNSW = false, Op1HasNSW = false;
5226   // Analyze the case when either Op0 or Op1 is an add instruction.
5227   // Op0 = A + B (or A and B are null); Op1 = C + D (or C and D are null).
5228   auto hasNoWrapProblem = [](const BinaryOperator &BO, CmpInst::Predicate Pred,
5229                              bool &HasNSW, bool &HasNUW) -> bool {
5230     if (isa<OverflowingBinaryOperator>(BO)) {
5231       HasNUW = BO.hasNoUnsignedWrap();
5232       HasNSW = BO.hasNoSignedWrap();
5233       return ICmpInst::isEquality(Pred) ||
5234              (CmpInst::isUnsigned(Pred) && HasNUW) ||
5235              (CmpInst::isSigned(Pred) && HasNSW);
5236     } else if (BO.getOpcode() == Instruction::Or) {
5237       HasNUW = true;
5238       HasNSW = true;
5239       return true;
5240     } else {
5241       return false;
5242     }
5243   };
5244   Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
5245 
5246   if (BO0) {
5247     match(BO0, m_AddLike(m_Value(A), m_Value(B)));
5248     NoOp0WrapProblem = hasNoWrapProblem(*BO0, Pred, Op0HasNSW, Op0HasNUW);
5249   }
5250   if (BO1) {
5251     match(BO1, m_AddLike(m_Value(C), m_Value(D)));
5252     NoOp1WrapProblem = hasNoWrapProblem(*BO1, Pred, Op1HasNSW, Op1HasNUW);
5253   }
5254 
5255   // icmp (A+B), A -> icmp B, 0 for equalities or if there is no overflow.
5256   // icmp (A+B), B -> icmp A, 0 for equalities or if there is no overflow.
5257   if ((A == Op1 || B == Op1) && NoOp0WrapProblem)
5258     return new ICmpInst(Pred, A == Op1 ? B : A,
5259                         Constant::getNullValue(Op1->getType()));
5260 
5261   // icmp C, (C+D) -> icmp 0, D for equalities or if there is no overflow.
5262   // icmp D, (C+D) -> icmp 0, C for equalities or if there is no overflow.
5263   if ((C == Op0 || D == Op0) && NoOp1WrapProblem)
5264     return new ICmpInst(Pred, Constant::getNullValue(Op0->getType()),
5265                         C == Op0 ? D : C);
5266 
5267   // icmp (A+B), (A+D) -> icmp B, D for equalities or if there is no overflow.
5268   if (A && C && (A == C || A == D || B == C || B == D) && NoOp0WrapProblem &&
5269       NoOp1WrapProblem) {
5270     // Determine Y and Z in the form icmp (X+Y), (X+Z).
5271     Value *Y, *Z;
5272     if (A == C) {
5273       // C + B == C + D  ->  B == D
5274       Y = B;
5275       Z = D;
5276     } else if (A == D) {
5277       // D + B == C + D  ->  B == C
5278       Y = B;
5279       Z = C;
5280     } else if (B == C) {
5281       // A + C == C + D  ->  A == D
5282       Y = A;
5283       Z = D;
5284     } else {
5285       assert(B == D);
5286       // A + D == C + D  ->  A == C
5287       Y = A;
5288       Z = C;
5289     }
5290     return new ICmpInst(Pred, Y, Z);
5291   }
5292 
5293   if (ICmpInst::isRelational(Pred)) {
5294     // Return true if both X and Y are divisible by Z/-Z.
5295     // TODO: Generalize to check if (X - Y) is divisible by Z/-Z.
5296     auto ShareCommonDivisor = [&Q](Value *X, Value *Y, Value *Z,
5297                                    bool IsNegative) -> bool {
5298       const APInt *OffsetC;
5299       if (!match(Z, m_APInt(OffsetC)))
5300         return false;
5301 
5302       // Fast path for Z == 1/-1.
5303       if (IsNegative ? OffsetC->isAllOnes() : OffsetC->isOne())
5304         return true;
5305 
5306       APInt C = *OffsetC;
5307       if (IsNegative)
5308         C.negate();
5309       // Note: -INT_MIN is also negative.
5310       if (!C.isStrictlyPositive())
5311         return false;
5312 
5313       return isMultipleOf(X, C, Q) && isMultipleOf(Y, C, Q);
5314     };
5315 
5316     // TODO: The subtraction-related identities shown below also hold, but
5317     // canonicalization from (X -nuw 1) to (X + -1) means that the combinations
5318     // wouldn't happen even if they were implemented.
5319     //
5320     // icmp ult (A - 1), Op1 -> icmp ule A, Op1
5321     // icmp uge (A - 1), Op1 -> icmp ugt A, Op1
5322     // icmp ugt Op0, (C - 1) -> icmp uge Op0, C
5323     // icmp ule Op0, (C - 1) -> icmp ult Op0, C
5324 
5325     // icmp slt (A + -1), Op1 -> icmp sle A, Op1
5326     // icmp sge (A + -1), Op1 -> icmp sgt A, Op1
5327     // icmp sle (A + 1), Op1 -> icmp slt A, Op1
5328     // icmp sgt (A + 1), Op1 -> icmp sge A, Op1
5329     // icmp ule (A + 1), Op1 -> icmp ult A, Op1
5330     // icmp ugt (A + 1), Op1 -> icmp uge A, Op1
5331     if (A && NoOp0WrapProblem &&
5332         ShareCommonDivisor(A, Op1, B,
5333                            ICmpInst::isLT(Pred) || ICmpInst::isGE(Pred)))
5334       return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), A,
5335                           Op1);
5336 
5337     // icmp sgt Op0, (C + -1) -> icmp sge Op0, C
5338     // icmp sle Op0, (C + -1) -> icmp slt Op0, C
5339     // icmp sge Op0, (C + 1) -> icmp sgt Op0, C
5340     // icmp slt Op0, (C + 1) -> icmp sle Op0, C
5341     // icmp uge Op0, (C + 1) -> icmp ugt Op0, C
5342     // icmp ult Op0, (C + 1) -> icmp ule Op0, C
5343     if (C && NoOp1WrapProblem &&
5344         ShareCommonDivisor(Op0, C, D,
5345                            ICmpInst::isGT(Pred) || ICmpInst::isLE(Pred)))
5346       return new ICmpInst(ICmpInst::getFlippedStrictnessPredicate(Pred), Op0,
5347                           C);
5348   }
5349 
5350   // if C1 has greater magnitude than C2:
5351   //  icmp (A + C1), (C + C2) -> icmp (A + C3), C
5352   //  s.t. C3 = C1 - C2
5353   //
5354   // if C2 has greater magnitude than C1:
5355   //  icmp (A + C1), (C + C2) -> icmp A, (C + C3)
5356   //  s.t. C3 = C2 - C1
5357   if (A && C && NoOp0WrapProblem && NoOp1WrapProblem &&
5358       (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) {
5359     const APInt *AP1, *AP2;
5360     // TODO: Support non-uniform vectors.
5361     // TODO: Allow poison passthrough if B or D's element is poison.
5362     if (match(B, m_APIntAllowPoison(AP1)) &&
5363         match(D, m_APIntAllowPoison(AP2)) &&
5364         AP1->isNegative() == AP2->isNegative()) {
5365       APInt AP1Abs = AP1->abs();
5366       APInt AP2Abs = AP2->abs();
5367       if (AP1Abs.uge(AP2Abs)) {
5368         APInt Diff = *AP1 - *AP2;
5369         Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
5370         Value *NewAdd = Builder.CreateAdd(
5371             A, C3, "", Op0HasNUW && Diff.ule(*AP1), Op0HasNSW);
5372         return new ICmpInst(Pred, NewAdd, C);
5373       } else {
5374         APInt Diff = *AP2 - *AP1;
5375         Constant *C3 = Constant::getIntegerValue(BO0->getType(), Diff);
5376         Value *NewAdd = Builder.CreateAdd(
5377             C, C3, "", Op1HasNUW && Diff.ule(*AP2), Op1HasNSW);
5378         return new ICmpInst(Pred, A, NewAdd);
5379       }
5380     }
5381     Constant *Cst1, *Cst2;
5382     if (match(B, m_ImmConstant(Cst1)) && match(D, m_ImmConstant(Cst2)) &&
5383         ICmpInst::isEquality(Pred)) {
5384       Constant *Diff = ConstantExpr::getSub(Cst2, Cst1);
5385       Value *NewAdd = Builder.CreateAdd(C, Diff);
5386       return new ICmpInst(Pred, A, NewAdd);
5387     }
5388   }
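  // Illustrative instance of the rebalancing above (assuming the no-wrap
  // preconditions hold): "icmp slt (A + 10), (C + 3)" has constants of equal
  // sign with |10| >= |3|, so it becomes "icmp slt (A + 7), C".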
5389 
5390   // Analyze the case when either Op0 or Op1 is a sub instruction.
5391   // Op0 = A - B (or A and B are null); Op1 = C - D (or C and D are null).
5392   A = nullptr;
5393   B = nullptr;
5394   C = nullptr;
5395   D = nullptr;
5396   if (BO0 && BO0->getOpcode() == Instruction::Sub) {
5397     A = BO0->getOperand(0);
5398     B = BO0->getOperand(1);
5399   }
5400   if (BO1 && BO1->getOpcode() == Instruction::Sub) {
5401     C = BO1->getOperand(0);
5402     D = BO1->getOperand(1);
5403   }
5404 
5405   // icmp (A-B), A -> icmp 0, B for equalities or if there is no overflow.
5406   if (A == Op1 && NoOp0WrapProblem)
5407     return new ICmpInst(Pred, Constant::getNullValue(Op1->getType()), B);
5408   // icmp C, (C-D) -> icmp D, 0 for equalities or if there is no overflow.
5409   if (C == Op0 && NoOp1WrapProblem)
5410     return new ICmpInst(Pred, D, Constant::getNullValue(Op0->getType()));
5411 
5412   // Convert sub-with-unsigned-overflow comparisons into a comparison of args.
5413   // (A - B) u>/u<= A --> B u>/u<= A
5414   if (A == Op1 && (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE))
5415     return new ICmpInst(Pred, B, A);
5416   // C u</u>= (C - D) --> C u</u>= D
5417   if (C == Op0 && (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE))
5418     return new ICmpInst(Pred, C, D);
5419   // (A - B) u>=/u< A --> B u>/u<= A  iff B != 0
5420   if (A == Op1 && (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
5421       isKnownNonZero(B, Q))
5422     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), B, A);
5423   // C u<=/u> (C - D) --> C u</u>= D  iff D != 0
5424   if (C == Op0 && (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_UGT) &&
5425       isKnownNonZero(D, Q))
5426     return new ICmpInst(CmpInst::getFlippedStrictnessPredicate(Pred), C, D);
5427 
5428   // icmp (A-B), (C-B) -> icmp A, C for equalities or if there is no overflow.
5429   if (B && D && B == D && NoOp0WrapProblem && NoOp1WrapProblem)
5430     return new ICmpInst(Pred, A, C);
5431 
5432   // icmp (A-B), (A-D) -> icmp D, B for equalities or if there is no overflow.
5433   if (A && C && A == C && NoOp0WrapProblem && NoOp1WrapProblem)
5434     return new ICmpInst(Pred, D, B);
5435 
5436   // icmp (0-X) < cst --> X > -cst
5437   if (NoOp0WrapProblem && ICmpInst::isSigned(Pred)) {
5438     Value *X;
5439     if (match(BO0, m_Neg(m_Value(X))))
5440       if (Constant *RHSC = dyn_cast<Constant>(Op1))
5441         if (RHSC->isNotMinSignedValue())
5442           return new ICmpInst(I.getSwappedPredicate(), X,
5443                               ConstantExpr::getNeg(RHSC));
5444   }
5445 
5446   if (Instruction *R = foldICmpXorXX(I, Q, *this))
5447     return R;
5448   if (Instruction *R = foldICmpOrXX(I, Q, *this))
5449     return R;
5450 
5451   {
5452     // Try to remove shared multiplier from comparison:
5453     // X * Z pred Y * Z
5454     Value *X, *Y, *Z;
5455     if ((match(Op0, m_Mul(m_Value(X), m_Value(Z))) &&
5456          match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y)))) ||
5457         (match(Op0, m_Mul(m_Value(Z), m_Value(X))) &&
5458          match(Op1, m_c_Mul(m_Specific(Z), m_Value(Y))))) {
5459       if (ICmpInst::isSigned(Pred)) {
5460         if (Op0HasNSW && Op1HasNSW) {
5461           KnownBits ZKnown = computeKnownBits(Z, &I);
5462           if (ZKnown.isStrictlyPositive())
5463             return new ICmpInst(Pred, X, Y);
5464           if (ZKnown.isNegative())
5465             return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), X, Y);
5466           Value *LessThan = simplifyICmpInst(ICmpInst::ICMP_SLT, X, Y,
5467                                              SQ.getWithInstruction(&I));
5468           if (LessThan && match(LessThan, m_One()))
5469             return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Z,
5470                                 Constant::getNullValue(Z->getType()));
5471           Value *GreaterThan = simplifyICmpInst(ICmpInst::ICMP_SGT, X, Y,
5472                                                 SQ.getWithInstruction(&I));
5473           if (GreaterThan && match(GreaterThan, m_One()))
5474             return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
5475         }
5476       } else {
5477         bool NonZero;
5478         if (ICmpInst::isEquality(Pred)) {
5479           // If X != Y, fold (X *nw Z) eq/ne (Y *nw Z) -> Z eq/ne 0
5480           if (((Op0HasNSW && Op1HasNSW) || (Op0HasNUW && Op1HasNUW)) &&
5481               isKnownNonEqual(X, Y, SQ))
5482             return new ICmpInst(Pred, Z, Constant::getNullValue(Z->getType()));
5483 
5484           KnownBits ZKnown = computeKnownBits(Z, &I);
5485           // if Z % 2 != 0
5486           //    X * Z eq/ne Y * Z -> X eq/ne Y
5487           if (ZKnown.countMaxTrailingZeros() == 0)
5488             return new ICmpInst(Pred, X, Y);
5489           NonZero = !ZKnown.One.isZero() || isKnownNonZero(Z, Q);
5490           // if Z != 0 and nsw(X * Z) and nsw(Y * Z)
5491           //    X * Z eq/ne Y * Z -> X eq/ne Y
5492           if (NonZero && BO0 && BO1 && Op0HasNSW && Op1HasNSW)
5493             return new ICmpInst(Pred, X, Y);
5494         } else
5495           NonZero = isKnownNonZero(Z, Q);
5496 
5497         // If Z != 0 and nuw(X * Z) and nuw(Y * Z)
5498         //    X * Z u{lt/le/gt/ge}/eq/ne Y * Z -> X u{lt/le/gt/ge}/eq/ne Y
5499         if (NonZero && BO0 && BO1 && Op0HasNUW && Op1HasNUW)
5500           return new ICmpInst(Pred, X, Y);
5501       }
5502     }
5503   }
5504 
5505   BinaryOperator *SRem = nullptr;
5506   // icmp (srem X, Y), Y
5507   if (BO0 && BO0->getOpcode() == Instruction::SRem && Op1 == BO0->getOperand(1))
5508     SRem = BO0;
5509   // icmp Y, (srem X, Y)
5510   else if (BO1 && BO1->getOpcode() == Instruction::SRem &&
5511            Op0 == BO1->getOperand(1))
5512     SRem = BO1;
5513   if (SRem) {
5514     // We don't check hasOneUse to avoid increasing register pressure because
5515     // the value we use is the same value this instruction was already using.
5516     switch (SRem == BO0 ? ICmpInst::getSwappedPredicate(Pred) : Pred) {
5517     default:
5518       break;
5519     case ICmpInst::ICMP_EQ:
5520       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5521     case ICmpInst::ICMP_NE:
5522       return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5523     case ICmpInst::ICMP_SGT:
5524     case ICmpInst::ICMP_SGE:
5525       return new ICmpInst(ICmpInst::ICMP_SGT, SRem->getOperand(1),
5526                           Constant::getAllOnesValue(SRem->getType()));
5527     case ICmpInst::ICMP_SLT:
5528     case ICmpInst::ICMP_SLE:
5529       return new ICmpInst(ICmpInst::ICMP_SLT, SRem->getOperand(1),
5530                           Constant::getNullValue(SRem->getType()));
5531     }
5532   }
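  // The srem folds rely on |X srem Y| being strictly smaller than |Y| whenever
  // the remainder is defined. For example, with Y == 7 the remainder lies in
  // [-6, 6] and is always s< 7; with Y == -7 it is always s> -7. So the
  // comparison against Y depends only on the sign of Y.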
5533 
5534   if (BO0 && BO1 && BO0->getOpcode() == BO1->getOpcode() &&
5535       (BO0->hasOneUse() || BO1->hasOneUse()) &&
5536       BO0->getOperand(1) == BO1->getOperand(1)) {
5537     switch (BO0->getOpcode()) {
5538     default:
5539       break;
5540     case Instruction::Add:
5541     case Instruction::Sub:
5542     case Instruction::Xor: {
5543       if (I.isEquality()) // a+x icmp eq/ne b+x --> a icmp b
5544         return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5545 
5546       const APInt *C;
5547       if (match(BO0->getOperand(1), m_APInt(C))) {
5548         // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
5549         if (C->isSignMask()) {
5550           ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
5551           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
5552         }
5553 
5554         // icmp u/s (a ^ maxsignval), (b ^ maxsignval) --> icmp s/u' a, b
5555         if (BO0->getOpcode() == Instruction::Xor && C->isMaxSignedValue()) {
5556           ICmpInst::Predicate NewPred = I.getFlippedSignednessPredicate();
5557           NewPred = I.getSwappedPredicate(NewPred);
5558           return new ICmpInst(NewPred, BO0->getOperand(0), BO1->getOperand(0));
5559         }
5560       }
5561       break;
5562     }
5563     case Instruction::Mul: {
5564       if (!I.isEquality())
5565         break;
5566 
5567       const APInt *C;
5568       if (match(BO0->getOperand(1), m_APInt(C)) && !C->isZero() &&
5569           !C->isOne()) {
5570         // icmp eq/ne (X * C), (Y * C) --> icmp (X & Mask), (Y & Mask)
5571         // Mask = -1 >> count-trailing-zeros(C).
5572         if (unsigned TZs = C->countr_zero()) {
5573           Constant *Mask = ConstantInt::get(
5574               BO0->getType(),
5575               APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
5576           Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
5577           Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
5578           return new ICmpInst(Pred, And1, And2);
5579         }
5580       }
5581       break;
5582     }
5583     case Instruction::UDiv:
5584     case Instruction::LShr:
5585       if (I.isSigned() || !BO0->isExact() || !BO1->isExact())
5586         break;
5587       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5588 
5589     case Instruction::SDiv:
5590       if (!(I.isEquality() || match(BO0->getOperand(1), m_NonNegative())) ||
5591           !BO0->isExact() || !BO1->isExact())
5592         break;
5593       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5594 
5595     case Instruction::AShr:
5596       if (!BO0->isExact() || !BO1->isExact())
5597         break;
5598       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5599 
5600     case Instruction::Shl: {
5601       bool NUW = Op0HasNUW && Op1HasNUW;
5602       bool NSW = Op0HasNSW && Op1HasNSW;
5603       if (!NUW && !NSW)
5604         break;
5605       if (!NSW && I.isSigned())
5606         break;
5607       return new ICmpInst(Pred, BO0->getOperand(0), BO1->getOperand(0));
5608     }
5609     }
5610   }
5611 
5612   if (BO0) {
5613     // Transform  A & (L - 1) `ult` L --> L != 0
5614     auto LSubOne = m_Add(m_Specific(Op1), m_AllOnes());
5615     auto BitwiseAnd = m_c_And(m_Value(), LSubOne);
5616 
5617     if (match(BO0, BitwiseAnd) && Pred == ICmpInst::ICMP_ULT) {
5618       auto *Zero = Constant::getNullValue(BO0->getType());
5619       return new ICmpInst(ICmpInst::ICMP_NE, Op1, Zero);
5620     }
5621   }
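  // Example for the transform above (sketch): with L == 8, "A & 7 u< 8" holds
  // for every A, matching "8 != 0"; the only failing case is L == 0, where
  // A & (L - 1) is just A and nothing is u< 0.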
5622 
5623   // For unsigned predicates / eq / ne:
5624   // icmp pred (x << 1), x --> icmp getSignedPredicate(pred) x, 0
5625   // icmp pred x, (x << 1) --> icmp getSignedPredicate(pred) 0, x
5626   if (!ICmpInst::isSigned(Pred)) {
5627     if (match(Op0, m_Shl(m_Specific(Op1), m_One())))
5628       return new ICmpInst(ICmpInst::getSignedPredicate(Pred), Op1,
5629                           Constant::getNullValue(Op1->getType()));
5630     else if (match(Op1, m_Shl(m_Specific(Op0), m_One())))
5631       return new ICmpInst(ICmpInst::getSignedPredicate(Pred),
5632                           Constant::getNullValue(Op0->getType()), Op0);
5633   }
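  // Rationale for the shift fold above (sketch): "(x << 1) u< x" holds exactly
  // when the shift wrapped, i.e. when the top bit of x is set, which is
  // "x s< 0". For i8, x = 0x90 gives x << 1 == 0x20, which is u< 0x90.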
5634 
5635   if (Value *V = foldMultiplicationOverflowCheck(I))
5636     return replaceInstUsesWith(I, V);
5637 
5638   if (Instruction *R = foldICmpAndXX(I, Q, *this))
5639     return R;
5640 
5641   if (Value *V = foldICmpWithTruncSignExtendedVal(I, Builder))
5642     return replaceInstUsesWith(I, V);
5643 
5644   if (Value *V = foldShiftIntoShiftInAnotherHandOfAndInICmp(I, SQ, Builder))
5645     return replaceInstUsesWith(I, V);
5646 
5647   return nullptr;
5648 }
5649 
5650 /// Fold icmp Pred min|max(X, Y), Z.
5651 Instruction *InstCombinerImpl::foldICmpWithMinMax(Instruction &I,
5652                                                   MinMaxIntrinsic *MinMax,
5653                                                   Value *Z, CmpPredicate Pred) {
5654   Value *X = MinMax->getLHS();
5655   Value *Y = MinMax->getRHS();
5656   if (ICmpInst::isSigned(Pred) && !MinMax->isSigned())
5657     return nullptr;
5658   if (ICmpInst::isUnsigned(Pred) && MinMax->isSigned()) {
5659     // Revert the transform signed pred -> unsigned pred
5660     // TODO: We can flip the signedness of predicate if both operands of icmp
5661     // are negative.
5662     if (isKnownNonNegative(Z, SQ.getWithInstruction(&I)) &&
5663         isKnownNonNegative(MinMax, SQ.getWithInstruction(&I))) {
5664       Pred = ICmpInst::getFlippedSignednessPredicate(Pred);
5665     } else
5666       return nullptr;
5667   }
5668   SimplifyQuery Q = SQ.getWithInstruction(&I);
5669   auto IsCondKnownTrue = [](Value *Val) -> std::optional<bool> {
5670     if (!Val)
5671       return std::nullopt;
5672     if (match(Val, m_One()))
5673       return true;
5674     if (match(Val, m_Zero()))
5675       return false;
5676     return std::nullopt;
5677   };
5678   // Remove samesign here since it is illegal to keep it when we speculatively
5679   // execute comparisons. For example, `icmp samesign ult umax(X, -46), -32`
5680   // cannot be decomposed into `(icmp samesign ult X, -32) or (icmp samesign ult
5681   // -46, -32)`. `X` is allowed to be non-negative here.
5682   Pred = Pred.dropSameSign();
5683   auto CmpXZ = IsCondKnownTrue(simplifyICmpInst(Pred, X, Z, Q));
5684   auto CmpYZ = IsCondKnownTrue(simplifyICmpInst(Pred, Y, Z, Q));
5685   if (!CmpXZ.has_value() && !CmpYZ.has_value())
5686     return nullptr;
5687   if (!CmpXZ.has_value()) {
5688     std::swap(X, Y);
5689     std::swap(CmpXZ, CmpYZ);
5690   }
5691 
5692   auto FoldIntoCmpYZ = [&]() -> Instruction * {
5693     if (CmpYZ.has_value())
5694       return replaceInstUsesWith(I, ConstantInt::getBool(I.getType(), *CmpYZ));
5695     return ICmpInst::Create(Instruction::ICmp, Pred, Y, Z);
5696   };
5697 
5698   switch (Pred) {
5699   case ICmpInst::ICMP_EQ:
5700   case ICmpInst::ICMP_NE: {
5701     // If X == Z:
5702     //     Expr       Result
5703     // min(X, Y) == Z X <= Y
5704     // max(X, Y) == Z X >= Y
5705     // min(X, Y) != Z X > Y
5706     // max(X, Y) != Z X < Y
5707     if ((Pred == ICmpInst::ICMP_EQ) == *CmpXZ) {
5708       ICmpInst::Predicate NewPred =
5709           ICmpInst::getNonStrictPredicate(MinMax->getPredicate());
5710       if (Pred == ICmpInst::ICMP_NE)
5711         NewPred = ICmpInst::getInversePredicate(NewPred);
5712       return ICmpInst::Create(Instruction::ICmp, NewPred, X, Y);
5713     }
5714     // Otherwise (X != Z):
5715     ICmpInst::Predicate NewPred = MinMax->getPredicate();
5716     auto MinMaxCmpXZ = IsCondKnownTrue(simplifyICmpInst(NewPred, X, Z, Q));
5717     if (!MinMaxCmpXZ.has_value()) {
5718       std::swap(X, Y);
5719       std::swap(CmpXZ, CmpYZ);
5720       // Re-check pre-condition X != Z
5721       if (!CmpXZ.has_value() || (Pred == ICmpInst::ICMP_EQ) == *CmpXZ)
5722         break;
5723       MinMaxCmpXZ = IsCondKnownTrue(simplifyICmpInst(NewPred, X, Z, Q));
5724     }
5725     if (!MinMaxCmpXZ.has_value())
5726       break;
5727     if (*MinMaxCmpXZ) {
5728       //    Expr         Fact    Result
5729       // min(X, Y) == Z  X < Z   false
5730       // max(X, Y) == Z  X > Z   false
5731       // min(X, Y) != Z  X < Z    true
5732       // max(X, Y) != Z  X > Z    true
5733       return replaceInstUsesWith(
5734           I, ConstantInt::getBool(I.getType(), Pred == ICmpInst::ICMP_NE));
5735     } else {
5736       //    Expr         Fact    Result
5737       // min(X, Y) == Z  X > Z   Y == Z
5738       // max(X, Y) == Z  X < Z   Y == Z
5739       // min(X, Y) != Z  X > Z   Y != Z
5740       // max(X, Y) != Z  X < Z   Y != Z
5741       return FoldIntoCmpYZ();
5742     }
5743     break;
5744   }
5745   case ICmpInst::ICMP_SLT:
5746   case ICmpInst::ICMP_ULT:
5747   case ICmpInst::ICMP_SLE:
5748   case ICmpInst::ICMP_ULE:
5749   case ICmpInst::ICMP_SGT:
5750   case ICmpInst::ICMP_UGT:
5751   case ICmpInst::ICMP_SGE:
5752   case ICmpInst::ICMP_UGE: {
5753     bool IsSame = MinMax->getPredicate() == ICmpInst::getStrictPredicate(Pred);
5754     if (*CmpXZ) {
5755       if (IsSame) {
5756         //      Expr        Fact    Result
5757         // min(X, Y) < Z    X < Z   true
5758         // min(X, Y) <= Z   X <= Z  true
5759         // max(X, Y) > Z    X > Z   true
5760         // max(X, Y) >= Z   X >= Z  true
5761         return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
5762       } else {
5763         //      Expr        Fact    Result
5764         // max(X, Y) < Z    X < Z   Y < Z
5765         // max(X, Y) <= Z   X <= Z  Y <= Z
5766         // min(X, Y) > Z    X > Z   Y > Z
5767         // min(X, Y) >= Z   X >= Z  Y >= Z
5768         return FoldIntoCmpYZ();
5769       }
5770     } else {
5771       if (IsSame) {
5772         //      Expr        Fact    Result
5773         // min(X, Y) < Z    X >= Z  Y < Z
5774         // min(X, Y) <= Z   X > Z   Y <= Z
5775         // max(X, Y) > Z    X <= Z  Y > Z
5776         // max(X, Y) >= Z   X < Z   Y >= Z
5777         return FoldIntoCmpYZ();
5778       } else {
5779         //      Expr        Fact    Result
5780         // max(X, Y) < Z    X >= Z  false
5781         // max(X, Y) <= Z   X > Z   false
5782         // min(X, Y) > Z    X <= Z  false
5783         // min(X, Y) >= Z   X < Z   false
5784         return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
5785       }
5786     }
5787     break;
5788   }
5789   default:
5790     break;
5791   }
5792 
5793   return nullptr;
5794 }
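// A small end-to-end example of the table-driven logic above (illustrative):
// for "icmp slt (smin X, 3), 5" the known fact "3 s< 5" combined with
// smin(X, 3) s<= 3 folds the whole compare to true, per the
// "min(X, Y) < Z / X < Z / true" row.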
5795 
5796 // Canonicalize checking for a power-of-2-or-zero value:
5797 static Instruction *foldICmpPow2Test(ICmpInst &I,
5798                                      InstCombiner::BuilderTy &Builder) {
5799   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5800   const CmpInst::Predicate Pred = I.getPredicate();
5801   Value *A = nullptr;
5802   bool CheckIs;
5803   if (I.isEquality()) {
5804     // (A & (A-1)) == 0 --> ctpop(A) < 2 (two commuted variants)
5805     // ((A-1) & A) != 0 --> ctpop(A) > 1 (two commuted variants)
5806     if (!match(Op0, m_OneUse(m_c_And(m_Add(m_Value(A), m_AllOnes()),
5807                                      m_Deferred(A)))) ||
5808         !match(Op1, m_ZeroInt()))
5809       A = nullptr;
5810 
5811     // (A & -A) == A --> ctpop(A) < 2 (four commuted variants)
5812     // (-A & A) != A --> ctpop(A) > 1 (four commuted variants)
5813     if (match(Op0, m_OneUse(m_c_And(m_Neg(m_Specific(Op1)), m_Specific(Op1)))))
5814       A = Op1;
5815     else if (match(Op1,
5816                    m_OneUse(m_c_And(m_Neg(m_Specific(Op0)), m_Specific(Op0)))))
5817       A = Op0;
5818 
5819     CheckIs = Pred == ICmpInst::ICMP_EQ;
5820   } else if (ICmpInst::isUnsigned(Pred)) {
5821     // (A ^ (A-1)) u>= A --> ctpop(A) < 2 (two commuted variants)
5822     // ((A-1) ^ A) u< A --> ctpop(A) > 1 (two commuted variants)
5823 
5824     if ((Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_ULT) &&
5825         match(Op0, m_OneUse(m_c_Xor(m_Add(m_Specific(Op1), m_AllOnes()),
5826                                     m_Specific(Op1))))) {
5827       A = Op1;
5828       CheckIs = Pred == ICmpInst::ICMP_UGE;
5829     } else if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
5830                match(Op1, m_OneUse(m_c_Xor(m_Add(m_Specific(Op0), m_AllOnes()),
5831                                            m_Specific(Op0))))) {
5832       A = Op0;
5833       CheckIs = Pred == ICmpInst::ICMP_ULE;
5834     }
5835   }
5836 
5837   if (A) {
5838     Type *Ty = A->getType();
5839     CallInst *CtPop = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, A);
5840     return CheckIs ? new ICmpInst(ICmpInst::ICMP_ULT, CtPop,
5841                                   ConstantInt::get(Ty, 2))
5842                    : new ICmpInst(ICmpInst::ICMP_UGT, CtPop,
5843                                   ConstantInt::get(Ty, 1));
5844   }
5845 
5846   return nullptr;
5847 }
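// Sanity check for the ctpop canonicalization above (illustrative): A == 8
// gives A & (A - 1) == 8 & 7 == 0 and ctpop(8) == 1 u< 2, while A == 6 gives
// 6 & 5 == 4 != 0 and ctpop(6) == 2 u> 1, so both forms agree on
// "power of two or zero".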
5848 
5849 /// Find all possible pairs (BinOp, RHS) such that `BinOp V, RHS` can be simplified.
5850 using OffsetOp = std::pair<Instruction::BinaryOps, Value *>;
5851 static void collectOffsetOp(Value *V, SmallVectorImpl<OffsetOp> &Offsets,
5852                             bool AllowRecursion) {
5853   Instruction *Inst = dyn_cast<Instruction>(V);
5854   if (!Inst || !Inst->hasOneUse())
5855     return;
5856 
5857   switch (Inst->getOpcode()) {
5858   case Instruction::Add:
5859     Offsets.emplace_back(Instruction::Sub, Inst->getOperand(1));
5860     Offsets.emplace_back(Instruction::Sub, Inst->getOperand(0));
5861     break;
5862   case Instruction::Sub:
5863     Offsets.emplace_back(Instruction::Add, Inst->getOperand(1));
5864     break;
5865   case Instruction::Xor:
5866     Offsets.emplace_back(Instruction::Xor, Inst->getOperand(1));
5867     Offsets.emplace_back(Instruction::Xor, Inst->getOperand(0));
5868     break;
5869   case Instruction::Select:
5870     if (AllowRecursion) {
5871       collectOffsetOp(Inst->getOperand(1), Offsets, /*AllowRecursion=*/false);
5872       collectOffsetOp(Inst->getOperand(2), Offsets, /*AllowRecursion=*/false);
5873     }
5874     break;
5875   default:
5876     break;
5877   }
5878 }
5879 
5880 enum class OffsetKind { Invalid, Value, Select };
5881 
5882 struct OffsetResult {
5883   OffsetKind Kind;
5884   Value *V0, *V1, *V2;
5885 
5886   static OffsetResult invalid() {
5887     return {OffsetKind::Invalid, nullptr, nullptr, nullptr};
5888   }
5889   static OffsetResult value(Value *V) {
5890     return {OffsetKind::Value, V, nullptr, nullptr};
5891   }
5892   static OffsetResult select(Value *Cond, Value *TrueV, Value *FalseV) {
5893     return {OffsetKind::Select, Cond, TrueV, FalseV};
5894   }
5895   bool isValid() const { return Kind != OffsetKind::Invalid; }
5896   Value *materialize(InstCombiner::BuilderTy &Builder) const {
5897     switch (Kind) {
5898     case OffsetKind::Invalid:
5899       llvm_unreachable("Invalid offset result");
5900     case OffsetKind::Value:
5901       return V0;
5902     case OffsetKind::Select:
5903       return Builder.CreateSelect(V0, V1, V2);
5904     }
5905     llvm_unreachable("Unknown OffsetKind enum");
5906   }
5907 };
5908 
5909 /// Offset both sides of an equality icmp to see if we can save some
5910 /// instructions: icmp eq/ne X, Y -> icmp eq/ne X op Z, Y op Z.
5911 /// Note: This operation should not introduce poison.
5912 static Instruction *foldICmpEqualityWithOffset(ICmpInst &I,
5913                                                InstCombiner::BuilderTy &Builder,
5914                                                const SimplifyQuery &SQ) {
5915   assert(I.isEquality() && "Expected an equality icmp");
5916   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5917   if (!Op0->getType()->isIntOrIntVectorTy())
5918     return nullptr;
5919 
5920   SmallVector<OffsetOp, 4> OffsetOps;
5921   collectOffsetOp(Op0, OffsetOps, /*AllowRecursion=*/true);
5922   collectOffsetOp(Op1, OffsetOps, /*AllowRecursion=*/true);
5923 
5924   auto ApplyOffsetImpl = [&](Value *V, unsigned BinOpc, Value *RHS) -> Value * {
5925     Value *Simplified = simplifyBinOp(BinOpc, V, RHS, SQ);
5926     // Avoid infinite loops by checking if RHS is an identity for the BinOp.
5927     if (!Simplified || Simplified == V)
5928       return nullptr;
5929     // Reject constant expressions as they don't simplify things.
5930     if (isa<Constant>(Simplified) && !match(Simplified, m_ImmConstant()))
5931       return nullptr;
5932     // Check if the transformation introduces poison.
5933     return impliesPoison(RHS, V) ? Simplified : nullptr;
5934   };
5935 
5936   auto ApplyOffset = [&](Value *V, unsigned BinOpc,
5937                          Value *RHS) -> OffsetResult {
5938     if (auto *Sel = dyn_cast<SelectInst>(V)) {
5939       if (!Sel->hasOneUse())
5940         return OffsetResult::invalid();
5941       Value *TrueVal = ApplyOffsetImpl(Sel->getTrueValue(), BinOpc, RHS);
5942       if (!TrueVal)
5943         return OffsetResult::invalid();
5944       Value *FalseVal = ApplyOffsetImpl(Sel->getFalseValue(), BinOpc, RHS);
5945       if (!FalseVal)
5946         return OffsetResult::invalid();
5947       return OffsetResult::select(Sel->getCondition(), TrueVal, FalseVal);
5948     }
5949     if (Value *Simplified = ApplyOffsetImpl(V, BinOpc, RHS))
5950       return OffsetResult::value(Simplified);
5951     return OffsetResult::invalid();
5952   };
5953 
5954   for (auto [BinOp, RHS] : OffsetOps) {
5955     auto BinOpc = static_cast<unsigned>(BinOp);
5956 
5957     auto Op0Result = ApplyOffset(Op0, BinOpc, RHS);
5958     if (!Op0Result.isValid())
5959       continue;
5960     auto Op1Result = ApplyOffset(Op1, BinOpc, RHS);
5961     if (!Op1Result.isValid())
5962       continue;
5963 
5964     Value *NewLHS = Op0Result.materialize(Builder);
5965     Value *NewRHS = Op1Result.materialize(Builder);
5966     return new ICmpInst(I.getPredicate(), NewLHS, NewRHS);
5967   }
5968 
5969   return nullptr;
5970 }
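// Example of the offsetting idea (sketch): for "icmp eq (xor X, Z), (xor Y, Z)"
// the collected offset op is "xor Z"; applying it to both sides simplifies
// them to X and Y, so the compare becomes "icmp eq X, Y" with no new
// instructions created.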
5971 
5972 Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
5973   if (!I.isEquality())
5974     return nullptr;
5975 
5976   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
5977   const CmpInst::Predicate Pred = I.getPredicate();
5978   Value *A, *B, *C, *D;
5979   if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) {
5980     if (A == Op1 || B == Op1) { // (A^B) == A  ->  B == 0
5981       Value *OtherVal = A == Op1 ? B : A;
5982       return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
5983     }
5984 
5985     if (match(Op1, m_Xor(m_Value(C), m_Value(D)))) {
5986       // A^c1 == C^c2 --> A == C^(c1^c2)
5987       ConstantInt *C1, *C2;
5988       if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
5989           Op1->hasOneUse()) {
5990         Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
5991         Value *Xor = Builder.CreateXor(C, NC);
5992         return new ICmpInst(Pred, A, Xor);
5993       }
5994 
5995       // A^B == A^D -> B == D
5996       if (A == C)
5997         return new ICmpInst(Pred, B, D);
5998       if (A == D)
5999         return new ICmpInst(Pred, B, C);
6000       if (B == C)
6001         return new ICmpInst(Pred, A, D);
6002       if (B == D)
6003         return new ICmpInst(Pred, A, C);
6004     }
6005   }
6006 
6007   if (match(Op1, m_Xor(m_Value(A), m_Value(B))) && (A == Op0 || B == Op0)) {
6008     // A == (A^B)  ->  B == 0
6009     Value *OtherVal = A == Op0 ? B : A;
6010     return new ICmpInst(Pred, OtherVal, Constant::getNullValue(A->getType()));
6011   }
6012 
6013   // (X&Z) == (Y&Z) -> (X^Y) & Z == 0
6014   if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
6015       match(Op1, m_And(m_Value(C), m_Value(D)))) {
6016     Value *X = nullptr, *Y = nullptr, *Z = nullptr;
6017 
6018     if (A == C) {
6019       X = B;
6020       Y = D;
6021       Z = A;
6022     } else if (A == D) {
6023       X = B;
6024       Y = C;
6025       Z = A;
6026     } else if (B == C) {
6027       X = A;
6028       Y = D;
6029       Z = B;
6030     } else if (B == D) {
6031       X = A;
6032       Y = C;
6033       Z = B;
6034     }
6035 
6036     if (X) {
6037       // If X^Y is a negative power of two, then `icmp eq/ne (Z & NegP2), 0`
6038       // will fold to `icmp ult/uge Z, -NegP2` incurringb no additional
6039       // instructions.
6040       const APInt *C0, *C1;
6041       bool XorIsNegP2 = match(X, m_APInt(C0)) && match(Y, m_APInt(C1)) &&
6042                         (*C0 ^ *C1).isNegatedPowerOf2();
6043 
6044       // If both Op0 and Op1 are one-use, or if X^Y will constant fold and one of
6045       // Op0/Op1 is one-use, proceed. In those cases we are instruction neutral
6046       // but `icmp eq/ne A, 0` is easier to analyze than `icmp eq/ne A, B`.
6047       int UseCnt =
6048           int(Op0->hasOneUse()) + int(Op1->hasOneUse()) +
6049           (int(match(X, m_ImmConstant()) && match(Y, m_ImmConstant())));
6050       if (XorIsNegP2 || UseCnt >= 2) {
6051         // Build (X^Y) & Z
6052         Op1 = Builder.CreateXor(X, Y);
6053         Op1 = Builder.CreateAnd(Op1, Z);
6054         return new ICmpInst(Pred, Op1, Constant::getNullValue(Op1->getType()));
6055       }
6056     }
6057   }
6058 
6059   {
6060     // Similar to above, but specialized for constant because invert is needed:
6061     // (X | C) == (Y | C) --> (X ^ Y) & ~C == 0
6062     Value *X, *Y;
6063     Constant *C;
6064     if (match(Op0, m_OneUse(m_Or(m_Value(X), m_Constant(C)))) &&
6065         match(Op1, m_OneUse(m_Or(m_Value(Y), m_Specific(C))))) {
6066       Value *Xor = Builder.CreateXor(X, Y);
6067       Value *And = Builder.CreateAnd(Xor, ConstantExpr::getNot(C));
6068       return new ICmpInst(Pred, And, Constant::getNullValue(And->getType()));
6069     }
6070   }
6071 
6072   if (match(Op1, m_ZExt(m_Value(A))) &&
6073       (Op0->hasOneUse() || Op1->hasOneUse())) {
6074     // (B & (Pow2C-1)) == zext A --> A == trunc B
6075     // (B & (Pow2C-1)) != zext A --> A != trunc B
6076     const APInt *MaskC;
6077     if (match(Op0, m_And(m_Value(B), m_LowBitMask(MaskC))) &&
6078         MaskC->countr_one() == A->getType()->getScalarSizeInBits())
6079       return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
6080   }
6081 
6082   // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
6083   // For lshr and ashr pairs.
6084   const APInt *AP1, *AP2;
6085   if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
6086        match(Op1, m_OneUse(m_LShr(m_Value(B), m_APIntAllowPoison(AP2))))) ||
6087       (match(Op0, m_OneUse(m_AShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
6088        match(Op1, m_OneUse(m_AShr(m_Value(B), m_APIntAllowPoison(AP2)))))) {
6089     if (AP1 != AP2)
6090       return nullptr;
6091     unsigned TypeBits = AP1->getBitWidth();
6092     unsigned ShAmt = AP1->getLimitedValue(TypeBits);
6093     if (ShAmt < TypeBits && ShAmt != 0) {
6094       ICmpInst::Predicate NewPred =
6095           Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
6096       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
6097       APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
6098       return new ICmpInst(NewPred, Xor, ConstantInt::get(A->getType(), CmpVal));
6099     }
6100   }
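  // Example for the shift-pair fold above (sketch): "(A >> 3) == (B >> 3)"
  // only asks that A and B agree above bit 2, i.e. that A ^ B has no bits at
  // or above position 3, which is "(A ^ B) u< 8".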
6101 
6102   // (A << C) == (B << C) --> ((A^B) & (~0U >> C)) == 0
6103   ConstantInt *Cst1;
6104   if (match(Op0, m_OneUse(m_Shl(m_Value(A), m_ConstantInt(Cst1)))) &&
6105       match(Op1, m_OneUse(m_Shl(m_Value(B), m_Specific(Cst1))))) {
6106     unsigned TypeBits = Cst1->getBitWidth();
6107     unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
6108     if (ShAmt < TypeBits && ShAmt != 0) {
6109       Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
6110       APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
6111       Value *And =
6112           Builder.CreateAnd(Xor, Builder.getInt(AndVal), I.getName() + ".mask");
6113       return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
6114     }
6115   }
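  // And for the shl form (sketch): in i8, "(A << 3) == (B << 3)" discards the
  // top three bits of each operand, so it reduces to "((A ^ B) & 0x1f) == 0".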
6116 
6117   // Transform "icmp eq (trunc (lshr X, cst1)), cst" to
6118   // "icmp eq (and X, mask), cst"
6119   uint64_t ShAmt = 0;
6120   if (Op0->hasOneUse() &&
6121       match(Op0, m_Trunc(m_OneUse(m_LShr(m_Value(A), m_ConstantInt(ShAmt))))) &&
6122       match(Op1, m_ConstantInt(Cst1)) &&
6123       // Only do this when A has multiple uses.  This is most important to do
6124       // when it exposes other optimizations.
6125       !A->hasOneUse()) {
6126     unsigned ASize = cast<IntegerType>(A->getType())->getPrimitiveSizeInBits();
6127 
6128     if (ShAmt < ASize) {
6129       APInt MaskV =
6130           APInt::getLowBitsSet(ASize, Op0->getType()->getPrimitiveSizeInBits());
6131       MaskV <<= ShAmt;
6132 
6133       APInt CmpV = Cst1->getValue().zext(ASize);
6134       CmpV <<= ShAmt;
6135 
6136       Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
6137       return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
6138     }
6139   }
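  // Example (sketch): with A of type i32, "(trunc to i8 (lshr A, 8)) == 5"
  // becomes "(A & 0xff00) == 0x500", i.e. both the mask and the constant are
  // the truncated window shifted back into its original position.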
6140 
6141   if (Instruction *ICmp = foldICmpIntrinsicWithIntrinsic(I, Builder))
6142     return ICmp;
6143 
6144   // Match icmp eq (trunc (lshr A, BW)), (ashr (trunc A), BW-1), which checks
6145   // the top BW/2 + 1 bits are all the same. Create "A >=s INT_MIN && A <=s
6146   // INT_MAX", which we generate as "icmp ult (add A, 2^(BW-1)), 2^BW" to skip a
6147   // few steps of instcombine.
6148   unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6149   if (match(Op0, m_AShr(m_Trunc(m_Value(A)), m_SpecificInt(BitWidth - 1))) &&
6150       match(Op1, m_Trunc(m_LShr(m_Specific(A), m_SpecificInt(BitWidth)))) &&
6151       A->getType()->getScalarSizeInBits() == BitWidth * 2 &&
6152       (I.getOperand(0)->hasOneUse() || I.getOperand(1)->hasOneUse())) {
6153     APInt C = APInt::getOneBitSet(BitWidth * 2, BitWidth - 1);
6154     Value *Add = Builder.CreateAdd(A, ConstantInt::get(A->getType(), C));
6155     return new ICmpInst(Pred == ICmpInst::ICMP_EQ ? ICmpInst::ICMP_ULT
6156                                                   : ICmpInst::ICMP_UGE,
6157                         Add, ConstantInt::get(A->getType(), C.shl(1)));
6158   }
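  // Concrete case (illustrative): for an i16 A truncated to i8 (BitWidth == 8)
  // the pattern asks whether the high byte of A equals the sign extension of
  // its low byte, i.e. whether A fits in i8, and the emitted check is
  // "(A + 128) u< 256".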
6159 
6160   // Canonicalize:
6161   // Assume B_Pow2 != 0
6162   // 1. A & B_Pow2 != B_Pow2 -> A & B_Pow2 == 0
6163   // 2. A & B_Pow2 == B_Pow2 -> A & B_Pow2 != 0
6164   if (match(Op0, m_c_And(m_Specific(Op1), m_Value())) &&
6165       isKnownToBeAPowerOfTwo(Op1, /* OrZero */ false, &I))
6166     return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
6167                         ConstantInt::getNullValue(Op0->getType()));
6168 
6169   if (match(Op1, m_c_And(m_Specific(Op0), m_Value())) &&
6170       isKnownToBeAPowerOfTwo(Op0, /* OrZero */ false, &I))
6171     return new ICmpInst(CmpInst::getInversePredicate(Pred), Op1,
6172                         ConstantInt::getNullValue(Op1->getType()));
6173 
6174   // Canonicalize:
6175   // icmp eq/ne X, OneUse(rotate-right(X))
6176   //    -> icmp eq/ne X, rotate-left(X)
6177   // We generally try to convert rotate-right -> rotate-left, this just
6178   // canonicalizes another case.
6179   if (match(&I, m_c_ICmp(m_Value(A),
6180                          m_OneUse(m_Intrinsic<Intrinsic::fshr>(
6181                              m_Deferred(A), m_Deferred(A), m_Value(B))))))
6182     return new ICmpInst(
6183         Pred, A,
6184         Builder.CreateIntrinsic(Op0->getType(), Intrinsic::fshl, {A, A, B}));
6185 
6186   // Canonicalize:
6187   // icmp eq/ne OneUse(A ^ Cst), B --> icmp eq/ne (A ^ B), Cst
6188   Constant *Cst;
6189   if (match(&I, m_c_ICmp(m_OneUse(m_Xor(m_Value(A), m_ImmConstant(Cst))),
6190                          m_CombineAnd(m_Value(B), m_Unless(m_ImmConstant())))))
6191     return new ICmpInst(Pred, Builder.CreateXor(A, B), Cst);
6192 
6193   {
6194     // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
6195     auto m_Matcher =
6196         m_CombineOr(m_CombineOr(m_c_Add(m_Value(B), m_Deferred(A)),
6197                                 m_c_Xor(m_Value(B), m_Deferred(A))),
6198                     m_Sub(m_Value(B), m_Deferred(A)));
6199     std::optional<bool> IsZero = std::nullopt;
6200     if (match(&I, m_c_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)),
6201                            m_Deferred(A))))
6202       IsZero = false;
6203     // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
6204     else if (match(&I,
6205                    m_ICmp(m_OneUse(m_c_And(m_Value(A), m_Matcher)), m_Zero())))
6206       IsZero = true;
6207 
6208     if (IsZero && isKnownToBeAPowerOfTwo(A, /* OrZero */ true, &I))
6209       // (icmp eq/ne (and (add/sub/xor X, P2), P2), P2)
6210       //    -> (icmp eq/ne (and X, P2), 0)
6211       // (icmp eq/ne (and (add/sub/xor X, P2), P2), 0)
6212       //    -> (icmp eq/ne (and X, P2), P2)
6213       return new ICmpInst(Pred, Builder.CreateAnd(B, A),
6214                           *IsZero ? A
6215                                   : ConstantInt::getNullValue(A->getType()));
6216   }
6217 
6218   if (auto *Res = foldICmpEqualityWithOffset(
6219           I, Builder, getSimplifyQuery().getWithInstruction(&I)))
6220     return Res;
6221 
6222   return nullptr;
6223 }
6224 
6225 Instruction *InstCombinerImpl::foldICmpWithTrunc(ICmpInst &ICmp) {
6226   ICmpInst::Predicate Pred = ICmp.getPredicate();
6227   Value *Op0 = ICmp.getOperand(0), *Op1 = ICmp.getOperand(1);
6228 
6229   // Try to canonicalize trunc + compare-to-constant into a mask + cmp.
6230   // The trunc masks high bits while the compare may effectively mask low bits.
6231   Value *X;
6232   const APInt *C;
6233   if (!match(Op0, m_OneUse(m_Trunc(m_Value(X)))) || !match(Op1, m_APInt(C)))
6234     return nullptr;
6235 
6236   // This matches patterns corresponding to tests of the signbit as well as:
6237   // (trunc X) pred C2 --> (X & Mask) == C
6238   if (auto Res = decomposeBitTestICmp(Op0, Op1, Pred, /*WithTrunc=*/true,
6239                                       /*AllowNonZeroC=*/true)) {
6240     Value *And = Builder.CreateAnd(Res->X, Res->Mask);
6241     Constant *C = ConstantInt::get(Res->X->getType(), Res->C);
6242     return new ICmpInst(Res->Pred, And, C);
6243   }
6244 
6245   unsigned SrcBits = X->getType()->getScalarSizeInBits();
6246   if (auto *II = dyn_cast<IntrinsicInst>(X)) {
6247     if (II->getIntrinsicID() == Intrinsic::cttz ||
6248         II->getIntrinsicID() == Intrinsic::ctlz) {
6249       unsigned MaxRet = SrcBits;
6250       // If the "is_zero_poison" argument is set, then we know at least
6251       // one bit is set in the input, so the result is always at least one
6252       // less than the full bitwidth of that input.
6253       if (match(II->getArgOperand(1), m_One()))
6254         MaxRet--;
6255 
6256       // Make sure the destination is wide enough to hold the largest output of
6257       // the intrinsic.
6258       if (llvm::Log2_32(MaxRet) + 1 <= Op0->getType()->getScalarSizeInBits())
6259         if (Instruction *I =
6260                 foldICmpIntrinsicWithConstant(ICmp, II, C->zext(SrcBits)))
6261           return I;
6262     }
6263   }
6264 
6265   return nullptr;
6266 }
6267 
6268 Instruction *InstCombinerImpl::foldICmpWithZextOrSext(ICmpInst &ICmp) {
6269   assert(isa<CastInst>(ICmp.getOperand(0)) && "Expected cast for operand 0");
6270   auto *CastOp0 = cast<CastInst>(ICmp.getOperand(0));
6271   Value *X;
6272   if (!match(CastOp0, m_ZExtOrSExt(m_Value(X))))
6273     return nullptr;
6274 
6275   bool IsSignedExt = CastOp0->getOpcode() == Instruction::SExt;
6276   bool IsSignedCmp = ICmp.isSigned();
6277 
6278   // icmp Pred (ext X), (ext Y)
6279   Value *Y;
6280   if (match(ICmp.getOperand(1), m_ZExtOrSExt(m_Value(Y)))) {
6281     bool IsZext0 = isa<ZExtInst>(ICmp.getOperand(0));
6282     bool IsZext1 = isa<ZExtInst>(ICmp.getOperand(1));
6283 
6284     if (IsZext0 != IsZext1) {
6285       // If X and Y are both i1:
6286       // (icmp eq/ne (zext X) (sext Y))
6287       //      eq -> (icmp eq (or X, Y), 0)
6288       //      ne -> (icmp ne (or X, Y), 0)
6289       if (ICmp.isEquality() && X->getType()->isIntOrIntVectorTy(1) &&
6290           Y->getType()->isIntOrIntVectorTy(1))
6291         return new ICmpInst(ICmp.getPredicate(), Builder.CreateOr(X, Y),
6292                             Constant::getNullValue(X->getType()));
6293 
6294       // If we have mismatched casts and zext has the nneg flag, we can
6295       //  treat the "zext nneg" as "sext". Otherwise, we cannot fold and quit.
6296 
6297       auto *NonNegInst0 = dyn_cast<PossiblyNonNegInst>(ICmp.getOperand(0));
6298       auto *NonNegInst1 = dyn_cast<PossiblyNonNegInst>(ICmp.getOperand(1));
6299 
6300       bool IsNonNeg0 = NonNegInst0 && NonNegInst0->hasNonNeg();
6301       bool IsNonNeg1 = NonNegInst1 && NonNegInst1->hasNonNeg();
6302 
6303       if ((IsZext0 && IsNonNeg0) || (IsZext1 && IsNonNeg1))
6304         IsSignedExt = true;
6305       else
6306         return nullptr;
6307     }
6308 
6309     // Not an extension from the same type?
6310     Type *XTy = X->getType(), *YTy = Y->getType();
6311     if (XTy != YTy) {
6312       // One of the casts must have one use because we are creating a new cast.
6313       if (!ICmp.getOperand(0)->hasOneUse() && !ICmp.getOperand(1)->hasOneUse())
6314         return nullptr;
6315       // Extend the narrower operand to the type of the wider operand.
6316       CastInst::CastOps CastOpcode =
6317           IsSignedExt ? Instruction::SExt : Instruction::ZExt;
6318       if (XTy->getScalarSizeInBits() < YTy->getScalarSizeInBits())
6319         X = Builder.CreateCast(CastOpcode, X, YTy);
6320       else if (YTy->getScalarSizeInBits() < XTy->getScalarSizeInBits())
6321         Y = Builder.CreateCast(CastOpcode, Y, XTy);
6322       else
6323         return nullptr;
6324     }
6325 
6326     // (zext X) == (zext Y) --> X == Y
6327     // (sext X) == (sext Y) --> X == Y
6328     if (ICmp.isEquality())
6329       return new ICmpInst(ICmp.getPredicate(), X, Y);
6330 
6331     // A signed comparison of sign extended values simplifies into a
6332     // signed comparison.
6333     if (IsSignedCmp && IsSignedExt)
6334       return new ICmpInst(ICmp.getPredicate(), X, Y);
6335 
6336     // The other three cases all fold into an unsigned comparison.
6337     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Y);
6338   }
6339 
6340   // Below here, we are only folding a compare with constant.
6341   auto *C = dyn_cast<Constant>(ICmp.getOperand(1));
6342   if (!C)
6343     return nullptr;
6344 
6345   // If a lossless truncate is possible...
6346   Type *SrcTy = CastOp0->getSrcTy();
6347   Constant *Res = getLosslessTrunc(C, SrcTy, CastOp0->getOpcode());
6348   if (Res) {
6349     if (ICmp.isEquality())
6350       return new ICmpInst(ICmp.getPredicate(), X, Res);
6351 
6352     // A signed comparison of sign extended values simplifies into a
6353     // signed comparison.
6354     if (IsSignedExt && IsSignedCmp)
6355       return new ICmpInst(ICmp.getPredicate(), X, Res);
6356 
6357     // The other three cases all fold into an unsigned comparison.
6358     return new ICmpInst(ICmp.getUnsignedPredicate(), X, Res);
6359   }
6360 
6361   // The re-extended constant changed, partly changed (in the case of a vector),
6362   // or could not be determined to be equal (in the case of a constant
6363   // expression), so the constant cannot be represented in the shorter type.
6364   // All the cases that fold to true or false will have already been handled
6365   // by simplifyICmpInst, so only deal with the tricky case.
6366   if (IsSignedCmp || !IsSignedExt || !isa<ConstantInt>(C))
6367     return nullptr;
6368 
6369   // Is source op positive?
6370   // icmp ult (sext X), C --> icmp sgt X, -1
6371   if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
6372     return new ICmpInst(CmpInst::ICMP_SGT, X, Constant::getAllOnesValue(SrcTy));
6373 
6374   // Is source op negative?
6375   // icmp ugt (sext X), C --> icmp slt X, 0
6376   assert(ICmp.getPredicate() == ICmpInst::ICMP_UGT && "ICmp should be folded!");
6377   return new ICmpInst(CmpInst::ICMP_SLT, X, Constant::getNullValue(SrcTy));
6378 }
6379 
6380 /// Handle icmp (cast x), (cast or constant).
foldICmpWithCastOp(ICmpInst & ICmp)6381 Instruction *InstCombinerImpl::foldICmpWithCastOp(ICmpInst &ICmp) {
6382   // If any operand of ICmp is an inttoptr roundtrip cast, then remove it, as
6383   // icmp compares only the pointer's value.
6384   // icmp (inttoptr (ptrtoint p1)), p2 --> icmp p1, p2.
6385   Value *SimplifiedOp0 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(0));
6386   Value *SimplifiedOp1 = simplifyIntToPtrRoundTripCast(ICmp.getOperand(1));
6387   if (SimplifiedOp0 || SimplifiedOp1)
6388     return new ICmpInst(ICmp.getPredicate(),
6389                         SimplifiedOp0 ? SimplifiedOp0 : ICmp.getOperand(0),
6390                         SimplifiedOp1 ? SimplifiedOp1 : ICmp.getOperand(1));
6391 
6392   auto *CastOp0 = dyn_cast<CastInst>(ICmp.getOperand(0));
6393   if (!CastOp0)
6394     return nullptr;
6395   if (!isa<Constant>(ICmp.getOperand(1)) && !isa<CastInst>(ICmp.getOperand(1)))
6396     return nullptr;
6397 
6398   Value *Op0Src = CastOp0->getOperand(0);
6399   Type *SrcTy = CastOp0->getSrcTy();
6400   Type *DestTy = CastOp0->getDestTy();
6401 
6402   // Turn icmp (ptrtoint x), (ptrtoint/c) into a compare of the input if the
6403   // integer type is the same size as the pointer type.
6404   auto CompatibleSizes = [&](Type *PtrTy, Type *IntTy) {
6405     if (isa<VectorType>(PtrTy)) {
6406       PtrTy = cast<VectorType>(PtrTy)->getElementType();
6407       IntTy = cast<VectorType>(IntTy)->getElementType();
6408     }
6409     return DL.getPointerTypeSizeInBits(PtrTy) == IntTy->getIntegerBitWidth();
6410   };
6411   if (CastOp0->getOpcode() == Instruction::PtrToInt &&
6412       CompatibleSizes(SrcTy, DestTy)) {
6413     Value *NewOp1 = nullptr;
6414     if (auto *PtrToIntOp1 = dyn_cast<PtrToIntOperator>(ICmp.getOperand(1))) {
6415       Value *PtrSrc = PtrToIntOp1->getOperand(0);
6416       if (PtrSrc->getType() == Op0Src->getType())
6417         NewOp1 = PtrToIntOp1->getOperand(0);
6418     } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
6419       NewOp1 = ConstantExpr::getIntToPtr(RHSC, SrcTy);
6420     }
6421 
6422     if (NewOp1)
6423       return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
6424   }
6425 
6426   // Do the same in the other direction for icmp (inttoptr x), (inttoptr/c).
6427   if (CastOp0->getOpcode() == Instruction::IntToPtr &&
6428       CompatibleSizes(DestTy, SrcTy)) {
6429     Value *NewOp1 = nullptr;
6430     if (auto *IntToPtrOp1 = dyn_cast<IntToPtrInst>(ICmp.getOperand(1))) {
6431       Value *IntSrc = IntToPtrOp1->getOperand(0);
6432       if (IntSrc->getType() == Op0Src->getType())
6433         NewOp1 = IntToPtrOp1->getOperand(0);
6434     } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
6435       NewOp1 = ConstantFoldConstant(ConstantExpr::getPtrToInt(RHSC, SrcTy), DL);
6436     }
6437 
6438     if (NewOp1)
6439       return new ICmpInst(ICmp.getPredicate(), Op0Src, NewOp1);
6440   }
6441 
6442   if (Instruction *R = foldICmpWithTrunc(ICmp))
6443     return R;
6444 
6445   return foldICmpWithZextOrSext(ICmp);
6446 }
6447 
isNeutralValue(Instruction::BinaryOps BinaryOp,Value * RHS,bool IsSigned)6448 static bool isNeutralValue(Instruction::BinaryOps BinaryOp, Value *RHS,
6449                            bool IsSigned) {
6450   switch (BinaryOp) {
6451   default:
6452     llvm_unreachable("Unsupported binary op");
6453   case Instruction::Add:
6454   case Instruction::Sub:
6455     return match(RHS, m_Zero());
6456   case Instruction::Mul:
6457     return !(RHS->getType()->isIntOrIntVectorTy(1) && IsSigned) &&
6458            match(RHS, m_One());
6459   }
6460 }
6461 
6462 OverflowResult
computeOverflow(Instruction::BinaryOps BinaryOp,bool IsSigned,Value * LHS,Value * RHS,Instruction * CxtI) const6463 InstCombinerImpl::computeOverflow(Instruction::BinaryOps BinaryOp,
6464                                   bool IsSigned, Value *LHS, Value *RHS,
6465                                   Instruction *CxtI) const {
6466   switch (BinaryOp) {
6467   default:
6468     llvm_unreachable("Unsupported binary op");
6469   case Instruction::Add:
6470     if (IsSigned)
6471       return computeOverflowForSignedAdd(LHS, RHS, CxtI);
6472     else
6473       return computeOverflowForUnsignedAdd(LHS, RHS, CxtI);
6474   case Instruction::Sub:
6475     if (IsSigned)
6476       return computeOverflowForSignedSub(LHS, RHS, CxtI);
6477     else
6478       return computeOverflowForUnsignedSub(LHS, RHS, CxtI);
6479   case Instruction::Mul:
6480     if (IsSigned)
6481       return computeOverflowForSignedMul(LHS, RHS, CxtI);
6482     else
6483       return computeOverflowForUnsignedMul(LHS, RHS, CxtI);
6484   }
6485 }
6486 
OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,bool IsSigned,Value * LHS,Value * RHS,Instruction & OrigI,Value * & Result,Constant * & Overflow)6487 bool InstCombinerImpl::OptimizeOverflowCheck(Instruction::BinaryOps BinaryOp,
6488                                              bool IsSigned, Value *LHS,
6489                                              Value *RHS, Instruction &OrigI,
6490                                              Value *&Result,
6491                                              Constant *&Overflow) {
6492   if (OrigI.isCommutative() && isa<Constant>(LHS) && !isa<Constant>(RHS))
6493     std::swap(LHS, RHS);
6494 
6495   // If the overflow check was an add followed by a compare, the insertion point
6496   // may be pointing to the compare.  We want to insert the new instructions
6497   // before the add in case there are uses of the add between the add and the
6498   // compare.
6499   Builder.SetInsertPoint(&OrigI);
6500 
6501   Type *OverflowTy = Type::getInt1Ty(LHS->getContext());
6502   if (auto *LHSTy = dyn_cast<VectorType>(LHS->getType()))
6503     OverflowTy = VectorType::get(OverflowTy, LHSTy->getElementCount());
6504 
6505   if (isNeutralValue(BinaryOp, RHS, IsSigned)) {
6506     Result = LHS;
6507     Overflow = ConstantInt::getFalse(OverflowTy);
6508     return true;
6509   }
6510 
6511   switch (computeOverflow(BinaryOp, IsSigned, LHS, RHS, &OrigI)) {
6512   case OverflowResult::MayOverflow:
6513     return false;
6514   case OverflowResult::AlwaysOverflowsLow:
6515   case OverflowResult::AlwaysOverflowsHigh:
6516     Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
6517     Result->takeName(&OrigI);
6518     Overflow = ConstantInt::getTrue(OverflowTy);
6519     return true;
6520   case OverflowResult::NeverOverflows:
6521     Result = Builder.CreateBinOp(BinaryOp, LHS, RHS);
6522     Result->takeName(&OrigI);
6523     Overflow = ConstantInt::getFalse(OverflowTy);
6524     if (auto *Inst = dyn_cast<Instruction>(Result)) {
6525       if (IsSigned)
6526         Inst->setHasNoSignedWrap();
6527       else
6528         Inst->setHasNoUnsignedWrap();
6529     }
6530     return true;
6531   }
6532 
6533   llvm_unreachable("Unexpected overflow result");
6534 }
6535 
6536 /// Recognize and process idiom involving test for multiplication
6537 /// overflow.
6538 ///
6539 /// The caller has matched a pattern of the form:
6540 ///   I = cmp u (mul(zext A, zext B)), V
6541 /// The function checks if this is a test for overflow and if so replaces
6542 /// multiplication with call to 'mul.with.overflow' intrinsic.
6543 ///
6544 /// \param I Compare instruction.
6545 /// \param MulVal Result of 'mul' instruction.  It is one of the arguments of
6546 ///               the compare instruction.  Must be of integer type.
6547 /// \param OtherVal The other argument of compare instruction.
6548 /// \returns Instruction which must replace the compare instruction, NULL if no
6549 ///          replacement required.
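///
/// A minimal sketch of the rewrite on illustrative IR (the i32/i64 types and
/// value names below are assumptions, not taken from a real test case):
///   %za  = zext i32 %a to i64
///   %zb  = zext i32 %b to i64
///   %mul = mul i64 %za, %zb
///   %cmp = icmp ugt i64 %mul, 4294967295
/// becomes
///   %res = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
///   %cmp = extractvalue { i32, i1 } %res, 1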
processUMulZExtIdiom(ICmpInst & I,Value * MulVal,const APInt * OtherVal,InstCombinerImpl & IC)6550 static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
6551                                          const APInt *OtherVal,
6552                                          InstCombinerImpl &IC) {
6553   // Don't bother doing this transformation for pointers, and don't do it
6554   // for vectors.
6555   if (!isa<IntegerType>(MulVal->getType()))
6556     return nullptr;
6557 
6558   auto *MulInstr = dyn_cast<Instruction>(MulVal);
6559   if (!MulInstr)
6560     return nullptr;
6561   assert(MulInstr->getOpcode() == Instruction::Mul);
6562 
6563   auto *LHS = cast<ZExtInst>(MulInstr->getOperand(0)),
6564        *RHS = cast<ZExtInst>(MulInstr->getOperand(1));
6565   assert(LHS->getOpcode() == Instruction::ZExt);
6566   assert(RHS->getOpcode() == Instruction::ZExt);
6567   Value *A = LHS->getOperand(0), *B = RHS->getOperand(0);
6568 
6569   // Calculate type and width of the result produced by mul.with.overflow.
6570   Type *TyA = A->getType(), *TyB = B->getType();
6571   unsigned WidthA = TyA->getPrimitiveSizeInBits(),
6572            WidthB = TyB->getPrimitiveSizeInBits();
6573   unsigned MulWidth;
6574   Type *MulType;
6575   if (WidthB > WidthA) {
6576     MulWidth = WidthB;
6577     MulType = TyB;
6578   } else {
6579     MulWidth = WidthA;
6580     MulType = TyA;
6581   }
6582 
6583   // In order to replace the original mul with a narrower mul.with.overflow,
6584   // all uses must ignore upper bits of the product.  The number of used low
6585   // bits must not be greater than the width of mul.with.overflow.
6586   if (MulVal->hasNUsesOrMore(2))
6587     for (User *U : MulVal->users()) {
6588       if (U == &I)
6589         continue;
6590       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
6591         // Check if truncation ignores bits above MulWidth.
6592         unsigned TruncWidth = TI->getType()->getPrimitiveSizeInBits();
6593         if (TruncWidth > MulWidth)
6594           return nullptr;
6595       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
6596         // Check if AND ignores bits above MulWidth.
6597         if (BO->getOpcode() != Instruction::And)
6598           return nullptr;
6599         if (ConstantInt *CI = dyn_cast<ConstantInt>(BO->getOperand(1))) {
6600           const APInt &CVal = CI->getValue();
6601           if (CVal.getBitWidth() - CVal.countl_zero() > MulWidth)
6602             return nullptr;
6603         } else {
6604           // In this case the operand of the binary operation could be
6605           // defined in another block, and performing the replacement could
6606           // break the dominance relation.
6607           return nullptr;
6608         }
6609       } else {
6610         // Other uses prohibit this transformation.
6611         return nullptr;
6612       }
6613     }
6614 
6615   // Recognize patterns
6616   switch (I.getPredicate()) {
6617   case ICmpInst::ICMP_UGT: {
6618     // Recognize pattern:
6619     //   mulval = mul(zext A, zext B)
6620     //   cmp ugt mulval, max
6621     APInt MaxVal = APInt::getMaxValue(MulWidth);
6622     MaxVal = MaxVal.zext(OtherVal->getBitWidth());
6623     if (MaxVal.eq(*OtherVal))
6624       break; // Recognized
6625     return nullptr;
6626   }
6627 
6628   case ICmpInst::ICMP_ULT: {
6629     // Recognize pattern:
6630     //   mulval = mul(zext A, zext B)
6631     //   cmp ult mulval, max + 1
6632     APInt MaxVal = APInt::getOneBitSet(OtherVal->getBitWidth(), MulWidth);
6633     if (MaxVal.eq(*OtherVal))
6634       break; // Recognized
6635     return nullptr;
6636   }
6637 
6638   default:
6639     return nullptr;
6640   }
6641 
6642   InstCombiner::BuilderTy &Builder = IC.Builder;
6643   Builder.SetInsertPoint(MulInstr);
6644 
6645   // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
6646   Value *MulA = A, *MulB = B;
6647   if (WidthA < MulWidth)
6648     MulA = Builder.CreateZExt(A, MulType);
6649   if (WidthB < MulWidth)
6650     MulB = Builder.CreateZExt(B, MulType);
6651   CallInst *Call =
6652       Builder.CreateIntrinsic(Intrinsic::umul_with_overflow, MulType,
6653                               {MulA, MulB}, /*FMFSource=*/nullptr, "umul");
6654   IC.addToWorklist(MulInstr);
6655 
6656   // If there are uses of mul result other than the comparison, we know that
6657   // they are truncation or binary AND. Change them to use the result of
6658   // mul.with.overflow and adjust the mask/size properly.
6659   if (MulVal->hasNUsesOrMore(2)) {
6660     Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
6661     for (User *U : make_early_inc_range(MulVal->users())) {
6662       if (U == &I)
6663         continue;
6664       if (TruncInst *TI = dyn_cast<TruncInst>(U)) {
6665         if (TI->getType()->getPrimitiveSizeInBits() == MulWidth)
6666           IC.replaceInstUsesWith(*TI, Mul);
6667         else
6668           TI->setOperand(0, Mul);
6669       } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U)) {
6670         assert(BO->getOpcode() == Instruction::And);
6671         // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
6672         ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
6673         APInt ShortMask = CI->getValue().trunc(MulWidth);
6674         Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
6675         Value *Zext = Builder.CreateZExt(ShortAnd, BO->getType());
6676         IC.replaceInstUsesWith(*BO, Zext);
6677       } else {
6678         llvm_unreachable("Unexpected Binary operation");
6679       }
6680       IC.addToWorklist(cast<Instruction>(U));
6681     }
6682   }
6683 
6684   // The original icmp gets replaced with the overflow value, maybe inverted
6685   // depending on predicate.
6686   if (I.getPredicate() == ICmpInst::ICMP_ULT) {
6687     Value *Res = Builder.CreateExtractValue(Call, 1);
6688     return BinaryOperator::CreateNot(Res);
6689   }
6690 
6691   return ExtractValueInst::Create(Call, 1);
6692 }
6693 
6694 /// When performing a comparison against a constant, it is possible that not all
6695 /// the bits in the LHS are demanded. This helper method computes the mask that
6696 /// IS demanded.
getDemandedBitsLHSMask(ICmpInst & I,unsigned BitWidth)6697 static APInt getDemandedBitsLHSMask(ICmpInst &I, unsigned BitWidth) {
6698   const APInt *RHS;
6699   if (!match(I.getOperand(1), m_APInt(RHS)))
6700     return APInt::getAllOnes(BitWidth);
6701 
6702   // If this is a normal comparison, it demands all bits. If it is a sign bit
6703   // comparison, it only demands the sign bit.
6704   bool UnusedBit;
6705   if (isSignBitCheck(I.getPredicate(), *RHS, UnusedBit))
6706     return APInt::getSignMask(BitWidth);
6707 
6708   switch (I.getPredicate()) {
6709   // For a UGT comparison, we don't care about any bits that
6710   // correspond to the trailing ones of the comparand.  The value of these
6711   // bits doesn't impact the outcome of the comparison, because any value
6712   // greater than the RHS must differ in a bit higher than these due to carry.
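  // E.g. for a hypothetical 'x u> 7' (0b0111 has three trailing ones), the
  // three low bits of x are not demanded: any value that exceeds 7 already
  // differs from it at bit 3 or above.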
6713   case ICmpInst::ICMP_UGT:
6714     return APInt::getBitsSetFrom(BitWidth, RHS->countr_one());
6715 
6716   // Similarly, for a ULT comparison, we don't care about the trailing zeros.
6717   // Any value less than the RHS must differ in a higher bit because of carries.
6718   case ICmpInst::ICMP_ULT:
6719     return APInt::getBitsSetFrom(BitWidth, RHS->countr_zero());
6720 
6721   default:
6722     return APInt::getAllOnes(BitWidth);
6723   }
6724 }
6725 
6726 /// Check that one use is in the same block as the definition and all
6727 /// other uses are in blocks dominated by a given block.
6728 ///
6729 /// \param DI Definition
6730 /// \param UI Use
6731 /// \param DB Block that must dominate all uses of \p DI outside
6732 ///           the parent block
6733 /// \return true when \p UI is the only use of \p DI in the parent block
6734 /// and all other uses of \p DI are in blocks dominated by \p DB.
6735 ///
dominatesAllUses(const Instruction * DI,const Instruction * UI,const BasicBlock * DB) const6736 bool InstCombinerImpl::dominatesAllUses(const Instruction *DI,
6737                                         const Instruction *UI,
6738                                         const BasicBlock *DB) const {
6739   assert(DI && UI && "Instruction not defined\n");
6740   // Ignore incomplete definitions.
6741   if (!DI->getParent())
6742     return false;
6743   // DI and UI must be in the same block.
6744   if (DI->getParent() != UI->getParent())
6745     return false;
6746   // Protect from self-referencing blocks.
6747   if (DI->getParent() == DB)
6748     return false;
6749   for (const User *U : DI->users()) {
6750     auto *Usr = cast<Instruction>(U);
6751     if (Usr != UI && !DT.dominates(DB, Usr->getParent()))
6752       return false;
6753   }
6754   return true;
6755 }
6756 
6757 /// Return true when the instruction sequence within a block is select-cmp-br.
isChainSelectCmpBranch(const SelectInst * SI)6758 static bool isChainSelectCmpBranch(const SelectInst *SI) {
6759   const BasicBlock *BB = SI->getParent();
6760   if (!BB)
6761     return false;
6762   auto *BI = dyn_cast_or_null<BranchInst>(BB->getTerminator());
6763   if (!BI || BI->getNumSuccessors() != 2)
6764     return false;
6765   auto *IC = dyn_cast<ICmpInst>(BI->getCondition());
6766   if (!IC || (IC->getOperand(0) != SI && IC->getOperand(1) != SI))
6767     return false;
6768   return true;
6769 }
6770 
6771 /// True when a select result is replaced by one of its operands
6772 /// in select-icmp sequence. This will eventually result in the elimination
6773 /// of the select.
6774 ///
6775 /// \param SI    Select instruction
6776 /// \param Icmp  Compare instruction
6777 /// \param SIOpd Operand that replaces the select
6778 ///
6779 /// Notes:
6780 /// - The replacement is global and requires dominator information
6781 /// - The caller is responsible for the actual replacement
6782 ///
6783 /// Example:
6784 ///
6785 /// entry:
6786 ///  %4 = select i1 %3, %C* %0, %C* null
6787 ///  %5 = icmp eq %C* %4, null
6788 ///  br i1 %5, label %9, label %7
6789 ///  ...
6790 ///  ; <label>:7                                       ; preds = %entry
6791 ///  %8 = getelementptr inbounds %C* %4, i64 0, i32 0
6792 ///  ...
6793 ///
6794 /// can be transformed to
6795 ///
6796 ///  %5 = icmp eq %C* %0, null
6797 ///  %6 = select i1 %3, i1 %5, i1 true
6798 ///  br i1 %6, label %9, label %7
6799 ///  ...
6800 ///  ; <label>:7                                       ; preds = %entry
6801 ///  %8 = getelementptr inbounds %C* %0, i64 0, i32 0  // replace by %0!
6802 ///
6803 /// Similar when the first operand of the select is a constant or/and
6804 /// the compare is for not equal rather than equal.
6805 ///
6806 /// NOTE: The function is only called when the select and compare constants
6807 /// are equal, the optimization can work only for EQ predicates. This is not a
6808 /// major restriction since a NE compare should be 'normalized' to an equal
6809 /// compare, which usually happens in the combiner and test case
6810 /// select-cmp-br.ll checks for it.
replacedSelectWithOperand(SelectInst * SI,const ICmpInst * Icmp,const unsigned SIOpd)6811 bool InstCombinerImpl::replacedSelectWithOperand(SelectInst *SI,
6812                                                  const ICmpInst *Icmp,
6813                                                  const unsigned SIOpd) {
6814   assert((SIOpd == 1 || SIOpd == 2) && "Invalid select operand!");
6815   if (isChainSelectCmpBranch(SI) && Icmp->getPredicate() == ICmpInst::ICMP_EQ) {
6816     BasicBlock *Succ = SI->getParent()->getTerminator()->getSuccessor(1);
6817     // The check for the single predecessor is not the best that can be
6818     // done. But it protects efficiently against cases like when SI's
6819     // home block has two successors, Succ and Succ1, and Succ1 is a
6820     // predecessor of Succ. Then SI can't be replaced by SIOpd because the
6821     // use that gets replaced can be reached on either path. So the
6822     // uniqueness check guarantees that the path on which all uses of SI
6823     // (outside SI's parent) lie is disjoint from all other paths out of
6824     // SI. But that information is more expensive to compute, and the
6825     // trade-off here is in favor of compile-time. Note also that we check
6826     // for a single predecessor and not just uniqueness. This is to handle
6827     // the situation when Succ and Succ1 point to the same basic block.
6828     if (Succ->getSinglePredecessor() && dominatesAllUses(SI, Icmp, Succ)) {
6829       NumSel++;
6830       SI->replaceUsesOutsideBlock(SI->getOperand(SIOpd), SI->getParent());
6831       return true;
6832     }
6833   }
6834   return false;
6835 }
6836 
6837 /// Try to fold the comparison based on range information we can get by checking
6838 /// whether bits are known to be zero or one in the inputs.
foldICmpUsingKnownBits(ICmpInst & I)6839 Instruction *InstCombinerImpl::foldICmpUsingKnownBits(ICmpInst &I) {
6840   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
6841   Type *Ty = Op0->getType();
6842   ICmpInst::Predicate Pred = I.getPredicate();
6843 
6844   // Get scalar or pointer size.
6845   unsigned BitWidth = Ty->isIntOrIntVectorTy()
6846                           ? Ty->getScalarSizeInBits()
6847                           : DL.getPointerTypeSizeInBits(Ty->getScalarType());
6848 
6849   if (!BitWidth)
6850     return nullptr;
6851 
6852   KnownBits Op0Known(BitWidth);
6853   KnownBits Op1Known(BitWidth);
6854 
6855   {
6856     // Don't use dominating conditions when folding icmp using known bits. This
6857     // may convert signed into unsigned predicates in ways that other passes
6858     // (especially IndVarSimplify) may not be able to reliably undo.
6859     SimplifyQuery Q = SQ.getWithoutDomCondCache().getWithInstruction(&I);
6860     if (SimplifyDemandedBits(&I, 0, getDemandedBitsLHSMask(I, BitWidth),
6861                              Op0Known, Q))
6862       return &I;
6863 
6864     if (SimplifyDemandedBits(&I, 1, APInt::getAllOnes(BitWidth), Op1Known, Q))
6865       return &I;
6866   }
6867 
6868   if (!isa<Constant>(Op0) && Op0Known.isConstant())
6869     return new ICmpInst(
6870         Pred, ConstantExpr::getIntegerValue(Ty, Op0Known.getConstant()), Op1);
6871   if (!isa<Constant>(Op1) && Op1Known.isConstant())
6872     return new ICmpInst(
6873         Pred, Op0, ConstantExpr::getIntegerValue(Ty, Op1Known.getConstant()));
6874 
6875   if (std::optional<bool> Res = ICmpInst::compare(Op0Known, Op1Known, Pred))
6876     return replaceInstUsesWith(I, ConstantInt::getBool(I.getType(), *Res));
6877 
6878   // Given the known and unknown bits, compute a range that the LHS could be
6879   // in.  Compute the Min, Max and RHS values based on the known bits. For the
6880   // EQ and NE we use unsigned values.
6881   APInt Op0Min(BitWidth, 0), Op0Max(BitWidth, 0);
6882   APInt Op1Min(BitWidth, 0), Op1Max(BitWidth, 0);
6883   if (I.isSigned()) {
6884     Op0Min = Op0Known.getSignedMinValue();
6885     Op0Max = Op0Known.getSignedMaxValue();
6886     Op1Min = Op1Known.getSignedMinValue();
6887     Op1Max = Op1Known.getSignedMaxValue();
6888   } else {
6889     Op0Min = Op0Known.getMinValue();
6890     Op0Max = Op0Known.getMaxValue();
6891     Op1Min = Op1Known.getMinValue();
6892     Op1Max = Op1Known.getMaxValue();
6893   }
6894 
6895   // Don't break up a clamp pattern -- (min(max X, Y), Z) -- by replacing a
6896   // min/max canonical compare with some other compare. That could lead to
6897   // conflict with select canonicalization and infinite looping.
6898   // FIXME: This constraint may go away if min/max intrinsics are canonical.
6899   auto isMinMaxCmp = [&](Instruction &Cmp) {
6900     if (!Cmp.hasOneUse())
6901       return false;
6902     Value *A, *B;
6903     SelectPatternFlavor SPF = matchSelectPattern(Cmp.user_back(), A, B).Flavor;
6904     if (!SelectPatternResult::isMinOrMax(SPF))
6905       return false;
6906     return match(Op0, m_MaxOrMin(m_Value(), m_Value())) ||
6907            match(Op1, m_MaxOrMin(m_Value(), m_Value()));
6908   };
6909   if (!isMinMaxCmp(I)) {
6910     switch (Pred) {
6911     default:
6912       break;
6913     case ICmpInst::ICMP_ULT: {
6914       if (Op1Min == Op0Max) // A <u B -> A != B if max(A) == min(B)
6915         return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6916       const APInt *CmpC;
6917       if (match(Op1, m_APInt(CmpC))) {
6918         // A <u C -> A == C-1 if min(A)+1 == C
6919         if (*CmpC == Op0Min + 1)
6920           return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6921                               ConstantInt::get(Op1->getType(), *CmpC - 1));
6922         // X <u C --> X == 0, if the number of zero bits in the bottom of X
6923         // exceeds the log2 of C.
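        // E.g. if the three low bits of X are known to be zero (X is a
        // multiple of 8), then X <u 8 can only hold when X == 0.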
6924         if (Op0Known.countMinTrailingZeros() >= CmpC->ceilLogBase2())
6925           return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6926                               Constant::getNullValue(Op1->getType()));
6927       }
6928       break;
6929     }
6930     case ICmpInst::ICMP_UGT: {
6931       if (Op1Max == Op0Min) // A >u B -> A != B if min(A) == max(B)
6932         return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6933       const APInt *CmpC;
6934       if (match(Op1, m_APInt(CmpC))) {
6935         // A >u C -> A == C+1 if max(a)-1 == C
6936         // A >u C -> A == C+1 if max(A)-1 == C
6937           return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6938                               ConstantInt::get(Op1->getType(), *CmpC + 1));
6939         // X >u C --> X != 0, if the number of zero bits in the bottom of X
6940         // exceeds the log2 of C.
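        // E.g. if the three low bits of X are known to be zero, then X >u 7
        // holds for every value of X except 0.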
6941         if (Op0Known.countMinTrailingZeros() >= CmpC->getActiveBits())
6942           return new ICmpInst(ICmpInst::ICMP_NE, Op0,
6943                               Constant::getNullValue(Op1->getType()));
6944       }
6945       break;
6946     }
6947     case ICmpInst::ICMP_SLT: {
6948       if (Op1Min == Op0Max) // A <s B -> A != B if max(A) == min(B)
6949         return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6950       const APInt *CmpC;
6951       if (match(Op1, m_APInt(CmpC))) {
6952         if (*CmpC == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
6953           return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6954                               ConstantInt::get(Op1->getType(), *CmpC - 1));
6955       }
6956       break;
6957     }
6958     case ICmpInst::ICMP_SGT: {
6959       if (Op1Max == Op0Min) // A >s B -> A != B if min(A) == max(B)
6960         return new ICmpInst(ICmpInst::ICMP_NE, Op0, Op1);
6961       const APInt *CmpC;
6962       if (match(Op1, m_APInt(CmpC))) {
6963         if (*CmpC == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
6964           return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
6965                               ConstantInt::get(Op1->getType(), *CmpC + 1));
6966       }
6967       break;
6968     }
6969     }
6970   }
6971 
6972   // Based on the range information we know about the LHS, see if we can
6973   // simplify this comparison.  For example, (x&4) < 8 is always true.
6974   switch (Pred) {
6975   default:
6976     break;
6977   case ICmpInst::ICMP_EQ:
6978   case ICmpInst::ICMP_NE: {
6979     // If all bits are known zero except for one, then we know at most one bit
6980     // is set. If the comparison is against zero, then this is a check to see if
6981     // *that* bit is set.
6982     APInt Op0KnownZeroInverted = ~Op0Known.Zero;
6983     if (Op1Known.isZero()) {
6984       // If the LHS is an AND with the same constant, look through it.
6985       Value *LHS = nullptr;
6986       const APInt *LHSC;
6987       if (!match(Op0, m_And(m_Value(LHS), m_APInt(LHSC))) ||
6988           *LHSC != Op0KnownZeroInverted)
6989         LHS = Op0;
6990 
6991       Value *X;
6992       const APInt *C1;
6993       if (match(LHS, m_Shl(m_Power2(C1), m_Value(X)))) {
6994         Type *XTy = X->getType();
6995         unsigned Log2C1 = C1->countr_zero();
6996         APInt C2 = Op0KnownZeroInverted;
6997         APInt C2Pow2 = (C2 & ~(*C1 - 1)) + *C1;
6998         if (C2Pow2.isPowerOf2()) {
6999           // iff (C1 is pow2) & (((C2 & ~(C1-1)) + C1) is pow2):
7000           // ((C1 << X) & C2) == 0 -> X >= (Log2(C2+C1) - Log2(C1))
7001           // ((C1 << X) & C2) != 0 -> X  < (Log2(C2+C1) - Log2(C1))
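          // E.g. with C1 == 1 and C2 == 7: ((1 << X) & 7) == 0 --> X u>= 3.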
7002           unsigned Log2C2 = C2Pow2.countr_zero();
7003           auto *CmpC = ConstantInt::get(XTy, Log2C2 - Log2C1);
7004           auto NewPred =
7005               Pred == CmpInst::ICMP_EQ ? CmpInst::ICMP_UGE : CmpInst::ICMP_ULT;
7006           return new ICmpInst(NewPred, X, CmpC);
7007         }
7008       }
7009     }
7010 
7011     // Op0 eq C_Pow2 -> Op0 ne 0 if Op0 is known to be C_Pow2 or zero.
7012     if (Op1Known.isConstant() && Op1Known.getConstant().isPowerOf2() &&
7013         (Op0Known & Op1Known) == Op0Known)
7014       return new ICmpInst(CmpInst::getInversePredicate(Pred), Op0,
7015                           ConstantInt::getNullValue(Op1->getType()));
7016     break;
7017   }
7018   case ICmpInst::ICMP_SGE:
7019     if (Op1Min == Op0Max) // A >=s B -> A == B if max(A) == min(B)
7020       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7021     break;
7022   case ICmpInst::ICMP_SLE:
7023     if (Op1Max == Op0Min) // A <=s B -> A == B if min(A) == max(B)
7024       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7025     break;
7026   case ICmpInst::ICMP_UGE:
7027     if (Op1Min == Op0Max) // A >=u B -> A == B if max(A) == min(B)
7028       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7029     break;
7030   case ICmpInst::ICMP_ULE:
7031     if (Op1Max == Op0Min) // A <=u B -> A == B if min(A) == max(B)
7032       return new ICmpInst(ICmpInst::ICMP_EQ, Op0, Op1);
7033     break;
7034   }
7035 
7036   // Turn a signed comparison into an unsigned one if both operands are known to
7037   // have the same sign. Set samesign if possible (except for equality
7038   // predicates).
7039   if ((I.isSigned() || (I.isUnsigned() && !I.hasSameSign())) &&
7040       ((Op0Known.Zero.isNegative() && Op1Known.Zero.isNegative()) ||
7041        (Op0Known.One.isNegative() && Op1Known.One.isNegative()))) {
7042     I.setPredicate(I.getUnsignedPredicate());
7043     I.setSameSign();
7044     return &I;
7045   }
7046 
7047   return nullptr;
7048 }
7049 
7050 /// If one operand of an icmp is effectively a bool (value range of {0,1}),
7051 /// then try to reduce patterns based on that limit.
foldICmpUsingBoolRange(ICmpInst & I)7052 Instruction *InstCombinerImpl::foldICmpUsingBoolRange(ICmpInst &I) {
7053   Value *X, *Y;
7054   CmpPredicate Pred;
7055 
7056   // X must be 0 and bool must be true for "ULT":
7057   // X <u (zext i1 Y) --> (X == 0) & Y
7058   if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_ZExt(m_Value(Y))))) &&
7059       Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULT)
7060     return BinaryOperator::CreateAnd(Builder.CreateIsNull(X), Y);
7061 
7062   // X must be 0 or bool must be true for "ULE":
7063   // X <=u (sext i1 Y) --> (X == 0) | Y
7064   if (match(&I, m_c_ICmp(Pred, m_Value(X), m_OneUse(m_SExt(m_Value(Y))))) &&
7065       Y->getType()->isIntOrIntVectorTy(1) && Pred == ICmpInst::ICMP_ULE)
7066     return BinaryOperator::CreateOr(Builder.CreateIsNull(X), Y);
7067 
7068   // icmp eq/ne X, (zext/sext (icmp eq/ne X, C))
7069   CmpPredicate Pred1, Pred2;
7070   const APInt *C;
7071   Instruction *ExtI;
7072   if (match(&I, m_c_ICmp(Pred1, m_Value(X),
7073                          m_CombineAnd(m_Instruction(ExtI),
7074                                       m_ZExtOrSExt(m_ICmp(Pred2, m_Deferred(X),
7075                                                           m_APInt(C)))))) &&
7076       ICmpInst::isEquality(Pred1) && ICmpInst::isEquality(Pred2)) {
7077     bool IsSExt = ExtI->getOpcode() == Instruction::SExt;
7078     bool HasOneUse = ExtI->hasOneUse() && ExtI->getOperand(0)->hasOneUse();
7079     auto CreateRangeCheck = [&] {
7080       Value *CmpV1 =
7081           Builder.CreateICmp(Pred1, X, Constant::getNullValue(X->getType()));
7082       Value *CmpV2 = Builder.CreateICmp(
7083           Pred1, X, ConstantInt::getSigned(X->getType(), IsSExt ? -1 : 1));
7084       return BinaryOperator::Create(
7085           Pred1 == ICmpInst::ICMP_EQ ? Instruction::Or : Instruction::And,
7086           CmpV1, CmpV2);
7087     };
7088     if (C->isZero()) {
7089       if (Pred2 == ICmpInst::ICMP_EQ) {
7090         // icmp eq X, (zext/sext (icmp eq X, 0)) --> false
7091         // icmp ne X, (zext/sext (icmp eq X, 0)) --> true
7092         return replaceInstUsesWith(
7093             I, ConstantInt::getBool(I.getType(), Pred1 == ICmpInst::ICMP_NE));
7094       } else if (!IsSExt || HasOneUse) {
7095         // icmp eq X, (zext (icmp ne X, 0)) --> X == 0 || X == 1
7096         // icmp ne X, (zext (icmp ne X, 0)) --> X != 0 && X != 1
7097         // icmp eq X, (sext (icmp ne X, 0)) --> X == 0 || X == -1
7098         // icmp ne X, (sext (icmp ne X, 0)) --> X != 0 && X != -1
7099         return CreateRangeCheck();
7100       }
7101     } else if (IsSExt ? C->isAllOnes() : C->isOne()) {
7102       if (Pred2 == ICmpInst::ICMP_NE) {
7103         // icmp eq X, (zext (icmp ne X, 1)) --> false
7104         // icmp ne X, (zext (icmp ne X, 1)) --> true
7105         // icmp eq X, (sext (icmp ne X, -1)) --> false
7106         // icmp ne X, (sext (icmp ne X, -1)) --> true
7107         return replaceInstUsesWith(
7108             I, ConstantInt::getBool(I.getType(), Pred1 == ICmpInst::ICMP_NE));
7109       } else if (!IsSExt || HasOneUse) {
7110         // icmp eq X, (zext (icmp eq X, 1)) --> X == 0 || X == 1
7111         // icmp ne X, (zext (icmp eq X, 1)) --> X != 0 && X != 1
7112         // icmp eq X, (sext (icmp eq X, -1)) --> X == 0 || X == -1
7113         // icmp ne X, (sext (icmp eq X, -1)) --> X != 0 && X != -1
7114         return CreateRangeCheck();
7115       }
7116     } else {
7117       // when C != 0 && C != 1:
7118       //   icmp eq X, (zext (icmp eq X, C)) --> icmp eq X, 0
7119       //   icmp eq X, (zext (icmp ne X, C)) --> icmp eq X, 1
7120       //   icmp ne X, (zext (icmp eq X, C)) --> icmp ne X, 0
7121       //   icmp ne X, (zext (icmp ne X, C)) --> icmp ne X, 1
7122       // when C != 0 && C != -1:
7123       //   icmp eq X, (sext (icmp eq X, C)) --> icmp eq X, 0
7124       //   icmp eq X, (sext (icmp ne X, C)) --> icmp eq X, -1
7125       //   icmp ne X, (sext (icmp eq X, C)) --> icmp ne X, 0
7126       //   icmp ne X, (sext (icmp ne X, C)) --> icmp ne X, -1
7127       return ICmpInst::Create(
7128           Instruction::ICmp, Pred1, X,
7129           ConstantInt::getSigned(X->getType(), Pred2 == ICmpInst::ICMP_NE
7130                                                    ? (IsSExt ? -1 : 1)
7131                                                    : 0));
7132     }
7133   }
7134 
7135   return nullptr;
7136 }
7137 
7138 /// If we have an icmp le or icmp ge instruction with a constant operand, turn
7139 /// it into the appropriate icmp lt or icmp gt instruction. This transform
7140 /// allows them to be folded in visitICmpInst.
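/// For example (assuming the constant can be adjusted without wrapping):
///   icmp sle X, 5 --> icmp slt X, 6
///   icmp uge X, 5 --> icmp ugt X, 4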
canonicalizeCmpWithConstant(ICmpInst & I)7141 static ICmpInst *canonicalizeCmpWithConstant(ICmpInst &I) {
7142   ICmpInst::Predicate Pred = I.getPredicate();
7143   if (ICmpInst::isEquality(Pred) || !ICmpInst::isIntPredicate(Pred) ||
7144       InstCombiner::isCanonicalPredicate(Pred))
7145     return nullptr;
7146 
7147   Value *Op0 = I.getOperand(0);
7148   Value *Op1 = I.getOperand(1);
7149   auto *Op1C = dyn_cast<Constant>(Op1);
7150   if (!Op1C)
7151     return nullptr;
7152 
7153   auto FlippedStrictness = getFlippedStrictnessPredicateAndConstant(Pred, Op1C);
7154   if (!FlippedStrictness)
7155     return nullptr;
7156 
7157   return new ICmpInst(FlippedStrictness->first, Op0, FlippedStrictness->second);
7158 }
7159 
7160 /// If we have a comparison with a non-canonical predicate, if we can update
7161 /// all the users, invert the predicate and adjust all the users.
canonicalizeICmpPredicate(CmpInst & I)7162 CmpInst *InstCombinerImpl::canonicalizeICmpPredicate(CmpInst &I) {
7163   // Is the predicate already canonical?
7164   CmpInst::Predicate Pred = I.getPredicate();
7165   if (InstCombiner::isCanonicalPredicate(Pred))
7166     return nullptr;
7167 
7168   // Can all users be adjusted to predicate inversion?
7169   if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
7170     return nullptr;
7171 
7172   // Ok, we can canonicalize comparison!
7173   // Let's first invert the comparison's predicate.
7174   I.setPredicate(CmpInst::getInversePredicate(Pred));
7175   I.setName(I.getName() + ".not");
7176 
7177   // And, adapt users.
7178   freelyInvertAllUsersOf(&I);
7179 
7180   return &I;
7181 }
7182 
7183 /// Integer compare with boolean values can always be turned into bitwise ops.
canonicalizeICmpBool(ICmpInst & I,InstCombiner::BuilderTy & Builder)7184 static Instruction *canonicalizeICmpBool(ICmpInst &I,
7185                                          InstCombiner::BuilderTy &Builder) {
7186   Value *A = I.getOperand(0), *B = I.getOperand(1);
7187   assert(A->getType()->isIntOrIntVectorTy(1) && "Bools only");
7188 
7189   // A boolean compared to true/false can be simplified to Op0/true/false in
7190   // 14 out of the 20 (10 predicates * 2 constants) possible combinations.
7191   // Cases not handled by InstSimplify are always 'not' of Op0.
7192   if (match(B, m_Zero())) {
7193     switch (I.getPredicate()) {
7194     case CmpInst::ICMP_EQ:  // A ==   0 -> !A
7195     case CmpInst::ICMP_ULE: // A <=u  0 -> !A
7196     case CmpInst::ICMP_SGE: // A >=s  0 -> !A
7197       return BinaryOperator::CreateNot(A);
7198     default:
7199       llvm_unreachable("ICmp i1 X, C not simplified as expected.");
7200     }
7201   } else if (match(B, m_One())) {
7202     switch (I.getPredicate()) {
7203     case CmpInst::ICMP_NE:  // A !=  1 -> !A
7204     case CmpInst::ICMP_ULT: // A <u  1 -> !A
7205     case CmpInst::ICMP_SGT: // A >s -1 -> !A
7206       return BinaryOperator::CreateNot(A);
7207     default:
7208       llvm_unreachable("ICmp i1 X, C not simplified as expected.");
7209     }
7210   }
7211 
7212   switch (I.getPredicate()) {
7213   default:
7214     llvm_unreachable("Invalid icmp instruction!");
7215   case ICmpInst::ICMP_EQ:
7216     // icmp eq i1 A, B -> ~(A ^ B)
7217     return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
7218 
7219   case ICmpInst::ICMP_NE:
7220     // icmp ne i1 A, B -> A ^ B
7221     return BinaryOperator::CreateXor(A, B);
7222 
7223   case ICmpInst::ICMP_UGT:
7224     // icmp ugt -> icmp ult
7225     std::swap(A, B);
7226     [[fallthrough]];
7227   case ICmpInst::ICMP_ULT:
7228     // icmp ult i1 A, B -> ~A & B
7229     return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
7230 
7231   case ICmpInst::ICMP_SGT:
7232     // icmp sgt -> icmp slt
7233     std::swap(A, B);
7234     [[fallthrough]];
7235   case ICmpInst::ICMP_SLT:
7236     // icmp slt i1 A, B -> A & ~B
7237     return BinaryOperator::CreateAnd(Builder.CreateNot(B), A);
7238 
7239   case ICmpInst::ICMP_UGE:
7240     // icmp uge -> icmp ule
7241     std::swap(A, B);
7242     [[fallthrough]];
7243   case ICmpInst::ICMP_ULE:
7244     // icmp ule i1 A, B -> ~A | B
7245     return BinaryOperator::CreateOr(Builder.CreateNot(A), B);
7246 
7247   case ICmpInst::ICMP_SGE:
7248     // icmp sge -> icmp sle
7249     std::swap(A, B);
7250     [[fallthrough]];
7251   case ICmpInst::ICMP_SLE:
7252     // icmp sle i1 A, B -> A | ~B
7253     return BinaryOperator::CreateOr(Builder.CreateNot(B), A);
7254   }
7255 }
7256 
7257 // Transform pattern like:
7258 //   (1 << Y) u<= X  or  ~(-1 << Y) u<  X  or  ((1 << Y)+(-1)) u<  X
7259 //   (1 << Y) u>  X  or  ~(-1 << Y) u>= X  or  ((1 << Y)+(-1)) u>= X
7260 // Into:
7261 //   (X l>> Y) != 0
7262 //   (X l>> Y) == 0
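// For example, for unsigned i8 values X and Y (with Y < 8):
//   (1 << Y) u<= X  <=>  X u>= (1 << Y)  <=>  (X l>> Y) != 0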
foldICmpWithHighBitMask(ICmpInst & Cmp,InstCombiner::BuilderTy & Builder)7263 static Instruction *foldICmpWithHighBitMask(ICmpInst &Cmp,
7264                                             InstCombiner::BuilderTy &Builder) {
7265   CmpPredicate Pred, NewPred;
7266   Value *X, *Y;
7267   if (match(&Cmp,
7268             m_c_ICmp(Pred, m_OneUse(m_Shl(m_One(), m_Value(Y))), m_Value(X)))) {
7269     switch (Pred) {
7270     case ICmpInst::ICMP_ULE:
7271       NewPred = ICmpInst::ICMP_NE;
7272       break;
7273     case ICmpInst::ICMP_UGT:
7274       NewPred = ICmpInst::ICMP_EQ;
7275       break;
7276     default:
7277       return nullptr;
7278     }
7279   } else if (match(&Cmp, m_c_ICmp(Pred,
7280                                   m_OneUse(m_CombineOr(
7281                                       m_Not(m_Shl(m_AllOnes(), m_Value(Y))),
7282                                       m_Add(m_Shl(m_One(), m_Value(Y)),
7283                                             m_AllOnes()))),
7284                                   m_Value(X)))) {
7285     // The variant with 'add' is not canonical (the variant with 'not' is);
7286     // we only see it here because it has extra uses and can't be canonicalized.
7287 
7288     switch (Pred) {
7289     case ICmpInst::ICMP_ULT:
7290       NewPred = ICmpInst::ICMP_NE;
7291       break;
7292     case ICmpInst::ICMP_UGE:
7293       NewPred = ICmpInst::ICMP_EQ;
7294       break;
7295     default:
7296       return nullptr;
7297     }
7298   } else
7299     return nullptr;
7300 
7301   Value *NewX = Builder.CreateLShr(X, Y, X->getName() + ".highbits");
7302   Constant *Zero = Constant::getNullValue(NewX->getType());
7303   return CmpInst::Create(Instruction::ICmp, NewPred, NewX, Zero);
7304 }
7305 
foldVectorCmp(CmpInst & Cmp,InstCombiner::BuilderTy & Builder)7306 static Instruction *foldVectorCmp(CmpInst &Cmp,
7307                                   InstCombiner::BuilderTy &Builder) {
7308   const CmpInst::Predicate Pred = Cmp.getPredicate();
7309   Value *LHS = Cmp.getOperand(0), *RHS = Cmp.getOperand(1);
7310   Value *V1, *V2;
7311 
7312   auto createCmpReverse = [&](CmpInst::Predicate Pred, Value *X, Value *Y) {
7313     Value *V = Builder.CreateCmp(Pred, X, Y, Cmp.getName());
7314     if (auto *I = dyn_cast<Instruction>(V))
7315       I->copyIRFlags(&Cmp);
7316     Module *M = Cmp.getModule();
7317     Function *F = Intrinsic::getOrInsertDeclaration(
7318         M, Intrinsic::vector_reverse, V->getType());
7319     return CallInst::Create(F, V);
7320   };
7321 
7322   if (match(LHS, m_VecReverse(m_Value(V1)))) {
7323     // cmp Pred, rev(V1), rev(V2) --> rev(cmp Pred, V1, V2)
7324     if (match(RHS, m_VecReverse(m_Value(V2))) &&
7325         (LHS->hasOneUse() || RHS->hasOneUse()))
7326       return createCmpReverse(Pred, V1, V2);
7327 
7328     // cmp Pred, rev(V1), RHSSplat --> rev(cmp Pred, V1, RHSSplat)
7329     if (LHS->hasOneUse() && isSplatValue(RHS))
7330       return createCmpReverse(Pred, V1, RHS);
7331   }
7332   // cmp Pred, LHSSplat, rev(V2) --> rev(cmp Pred, LHSSplat, V2)
7333   else if (isSplatValue(LHS) && match(RHS, m_OneUse(m_VecReverse(m_Value(V2)))))
7334     return createCmpReverse(Pred, LHS, V2);
7335 
7336   ArrayRef<int> M;
7337   if (!match(LHS, m_Shuffle(m_Value(V1), m_Undef(), m_Mask(M))))
7338     return nullptr;
7339 
7340   // If both arguments of the cmp are shuffles that use the same mask and
7341   // shuffle within a single vector, move the shuffle after the cmp:
7342   // cmp (shuffle V1, M), (shuffle V2, M) --> shuffle (cmp V1, V2), M
7343   Type *V1Ty = V1->getType();
7344   if (match(RHS, m_Shuffle(m_Value(V2), m_Undef(), m_SpecificMask(M))) &&
7345       V1Ty == V2->getType() && (LHS->hasOneUse() || RHS->hasOneUse())) {
7346     Value *NewCmp = Builder.CreateCmp(Pred, V1, V2);
7347     return new ShuffleVectorInst(NewCmp, M);
7348   }
7349 
7350   // Try to canonicalize compare with splatted operand and splat constant.
7351   // TODO: We could generalize this for more than splats. See/use the code in
7352   //       InstCombiner::foldVectorBinop().
7353   Constant *C;
7354   if (!LHS->hasOneUse() || !match(RHS, m_Constant(C)))
7355     return nullptr;
7356 
7357   // Length-changing splats are ok, so adjust the constants as needed:
7358   // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
7359   Constant *ScalarC = C->getSplatValue(/* AllowPoison */ true);
7360   int MaskSplatIndex;
7361   if (ScalarC && match(M, m_SplatOrPoisonMask(MaskSplatIndex))) {
7362     // We allow poison in matching, but this transform removes it for safety.
7363     // Demanded elements analysis should be able to recover some/all of that.
7364     C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
7365                                  ScalarC);
7366     SmallVector<int, 8> NewM(M.size(), MaskSplatIndex);
7367     Value *NewCmp = Builder.CreateCmp(Pred, V1, C);
7368     return new ShuffleVectorInst(NewCmp, NewM);
7369   }
7370 
7371   return nullptr;
7372 }
7373 
7374 // extract(uadd.with.overflow(A, B), 0) ult A
7375 //  -> extract(uadd.with.overflow(A, B), 1)
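// (An unsigned add overflows exactly when the wrapped sum is smaller than
// either operand, so the compare below is just the overflow bit.)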
foldICmpOfUAddOv(ICmpInst & I)7376 static Instruction *foldICmpOfUAddOv(ICmpInst &I) {
7377   CmpInst::Predicate Pred = I.getPredicate();
7378   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7379 
7380   Value *UAddOv;
7381   Value *A, *B;
7382   auto UAddOvResultPat = m_ExtractValue<0>(
7383       m_Intrinsic<Intrinsic::uadd_with_overflow>(m_Value(A), m_Value(B)));
7384   if (match(Op0, UAddOvResultPat) &&
7385       ((Pred == ICmpInst::ICMP_ULT && (Op1 == A || Op1 == B)) ||
7386        (Pred == ICmpInst::ICMP_EQ && match(Op1, m_ZeroInt()) &&
7387         (match(A, m_One()) || match(B, m_One()))) ||
7388        (Pred == ICmpInst::ICMP_NE && match(Op1, m_AllOnes()) &&
7389         (match(A, m_AllOnes()) || match(B, m_AllOnes())))))
7390     // extract(uadd.with.overflow(A, B), 0) < A
7391     // extract(uadd.with.overflow(A, 1), 0) == 0
7392     // extract(uadd.with.overflow(A, -1), 0) != -1
7393     UAddOv = cast<ExtractValueInst>(Op0)->getAggregateOperand();
7394   else if (match(Op1, UAddOvResultPat) && Pred == ICmpInst::ICMP_UGT &&
7395            (Op0 == A || Op0 == B))
7396     // A > extract(uadd.with.overflow(A, B), 0)
7397     UAddOv = cast<ExtractValueInst>(Op1)->getAggregateOperand();
7398   else
7399     return nullptr;
7400 
7401   return ExtractValueInst::Create(UAddOv, 1);
7402 }
7403 
foldICmpInvariantGroup(ICmpInst & I)7404 static Instruction *foldICmpInvariantGroup(ICmpInst &I) {
7405   if (!I.getOperand(0)->getType()->isPointerTy() ||
7406       NullPointerIsDefined(
7407           I.getParent()->getParent(),
7408           I.getOperand(0)->getType()->getPointerAddressSpace())) {
7409     return nullptr;
7410   }
7411   Instruction *Op;
7412   if (match(I.getOperand(0), m_Instruction(Op)) &&
7413       match(I.getOperand(1), m_Zero()) &&
7414       Op->isLaunderOrStripInvariantGroup()) {
7415     return ICmpInst::Create(Instruction::ICmp, I.getPredicate(),
7416                             Op->getOperand(0), I.getOperand(1));
7417   }
7418   return nullptr;
7419 }
7420 
7421 /// This function folds patterns produced by lowering of reduce idioms, such as
7422 /// llvm.vector.reduce.and which are lowered into instruction chains. This code
7423 /// attempts to generate fewer scalar comparisons instead of vector
7424 /// comparisons when possible.
foldReductionIdiom(ICmpInst & I,InstCombiner::BuilderTy & Builder,const DataLayout & DL)7425 static Instruction *foldReductionIdiom(ICmpInst &I,
7426                                        InstCombiner::BuilderTy &Builder,
7427                                        const DataLayout &DL) {
7428   if (I.getType()->isVectorTy())
7429     return nullptr;
7430   CmpPredicate OuterPred, InnerPred;
7431   Value *LHS, *RHS;
7432 
7433   // Match lowering of @llvm.vector.reduce.and. Turn
7434   ///   %vec_ne = icmp ne <8 x i8> %lhs, %rhs
7435   ///   %scalar_ne = bitcast <8 x i1> %vec_ne to i8
7436   ///   %res = icmp <pred> i8 %scalar_ne, 0
7437   ///
7438   /// into
7439   ///
7440   ///   %lhs.scalar = bitcast <8 x i8> %lhs to i64
7441   ///   %rhs.scalar = bitcast <8 x i8> %rhs to i64
7442   ///   %res = icmp <pred> i64 %lhs.scalar, %rhs.scalar
7443   ///
7444   /// for <pred> in {ne, eq}.
7445   if (!match(&I, m_ICmp(OuterPred,
7446                         m_OneUse(m_BitCast(m_OneUse(
7447                             m_ICmp(InnerPred, m_Value(LHS), m_Value(RHS))))),
7448                         m_Zero())))
7449     return nullptr;
7450   auto *LHSTy = dyn_cast<FixedVectorType>(LHS->getType());
7451   if (!LHSTy || !LHSTy->getElementType()->isIntegerTy())
7452     return nullptr;
7453   unsigned NumBits =
7454       LHSTy->getNumElements() * LHSTy->getElementType()->getIntegerBitWidth();
7455   // TODO: Relax this to "not wider than max legal integer type"?
7456   if (!DL.isLegalInteger(NumBits))
7457     return nullptr;
7458 
7459   if (ICmpInst::isEquality(OuterPred) && InnerPred == ICmpInst::ICMP_NE) {
7460     auto *ScalarTy = Builder.getIntNTy(NumBits);
7461     LHS = Builder.CreateBitCast(LHS, ScalarTy, LHS->getName() + ".scalar");
7462     RHS = Builder.CreateBitCast(RHS, ScalarTy, RHS->getName() + ".scalar");
7463     return ICmpInst::Create(Instruction::ICmp, OuterPred, LHS, RHS,
7464                             I.getName());
7465   }
7466 
7467   return nullptr;
7468 }
7469 
7470 // This helper will be called with icmp operands in both orders.
foldICmpCommutative(CmpPredicate Pred,Value * Op0,Value * Op1,ICmpInst & CxtI)7471 Instruction *InstCombinerImpl::foldICmpCommutative(CmpPredicate Pred,
7472                                                    Value *Op0, Value *Op1,
7473                                                    ICmpInst &CxtI) {
7474   // Try to optimize 'icmp GEP, P' or 'icmp P, GEP'.
7475   if (auto *GEP = dyn_cast<GEPOperator>(Op0))
7476     if (Instruction *NI = foldGEPICmp(GEP, Op1, Pred, CxtI))
7477       return NI;
7478 
7479   if (auto *SI = dyn_cast<SelectInst>(Op0))
7480     if (Instruction *NI = foldSelectICmp(Pred, SI, Op1, CxtI))
7481       return NI;
7482 
7483   if (auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op0))
7484     if (Instruction *Res = foldICmpWithMinMax(CxtI, MinMax, Op1, Pred))
7485       return Res;
7486 
7487   {
7488     Value *X;
7489     const APInt *C;
7490     // icmp X+Cst, X
7491     if (match(Op0, m_Add(m_Value(X), m_APInt(C))) && Op1 == X)
7492       return foldICmpAddOpConst(X, *C, Pred);
7493   }
7494 
7495   // abs(X) >=  X --> true
7496   // abs(X) u<= X --> true
7497   // abs(X) <   X --> false
7498   // abs(X) u>  X --> false
7499   // abs(X) u>= X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7500   // abs(X) <=  X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7501   // abs(X) ==  X --> IsIntMinPoison ? `X > -1` : `X u<= INTMIN`
7502   // abs(X) u<  X --> IsIntMinPoison ? `X < 0` : `X >   INTMIN`
7503   // abs(X) >   X --> IsIntMinPoison ? `X < 0` : `X >   INTMIN`
7504   // abs(X) !=  X --> IsIntMinPoison ? `X < 0` : `X >   INTMIN`
7505   {
7506     Value *X;
7507     Constant *C;
7508     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X), m_Constant(C))) &&
7509         match(Op1, m_Specific(X))) {
7510       Value *NullValue = Constant::getNullValue(X->getType());
7511       Value *AllOnesValue = Constant::getAllOnesValue(X->getType());
7512       const APInt SMin =
7513           APInt::getSignedMinValue(X->getType()->getScalarSizeInBits());
7514       bool IsIntMinPoison = C->isAllOnesValue();
7515       switch (Pred) {
7516       case CmpInst::ICMP_ULE:
7517       case CmpInst::ICMP_SGE:
7518         return replaceInstUsesWith(CxtI, ConstantInt::getTrue(CxtI.getType()));
7519       case CmpInst::ICMP_UGT:
7520       case CmpInst::ICMP_SLT:
7521         return replaceInstUsesWith(CxtI, ConstantInt::getFalse(CxtI.getType()));
7522       case CmpInst::ICMP_UGE:
7523       case CmpInst::ICMP_SLE:
7524       case CmpInst::ICMP_EQ: {
7525         return replaceInstUsesWith(
7526             CxtI, IsIntMinPoison
7527                       ? Builder.CreateICmpSGT(X, AllOnesValue)
7528                       : Builder.CreateICmpULT(
7529                             X, ConstantInt::get(X->getType(), SMin + 1)));
7530       }
7531       case CmpInst::ICMP_ULT:
7532       case CmpInst::ICMP_SGT:
7533       case CmpInst::ICMP_NE: {
7534         return replaceInstUsesWith(
7535             CxtI, IsIntMinPoison
7536                       ? Builder.CreateICmpSLT(X, NullValue)
7537                       : Builder.CreateICmpUGT(
7538                             X, ConstantInt::get(X->getType(), SMin)));
7539       }
7540       default:
7541         llvm_unreachable("Invalid predicate!");
7542       }
7543     }
7544   }
7545 
7546   const SimplifyQuery Q = SQ.getWithInstruction(&CxtI);
7547   if (Value *V = foldICmpWithLowBitMaskedVal(Pred, Op0, Op1, Q, *this))
7548     return replaceInstUsesWith(CxtI, V);
7549 
7550   // Folding (X / Y) pred X => X swap(pred) 0 for constant Y other than 0 or 1
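       // e.g. (icmp ult (udiv X, 3), X) --> (icmp ugt X, 0)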
7551   auto CheckUGT1 = [](const APInt &Divisor) { return Divisor.ugt(1); };
7552   {
7553     if (match(Op0, m_UDiv(m_Specific(Op1), m_CheckedInt(CheckUGT1)))) {
7554       return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7555                           Constant::getNullValue(Op1->getType()));
7556     }
7557 
7558     if (!ICmpInst::isUnsigned(Pred) &&
7559         match(Op0, m_SDiv(m_Specific(Op1), m_CheckedInt(CheckUGT1)))) {
7560       return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7561                           Constant::getNullValue(Op1->getType()));
7562     }
7563   }
7564 
7565   // Another case of this fold is (X >> Y) pred X => X swap(pred) 0 if Y != 0
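       // e.g. (icmp ult (lshr X, 2), X) --> (icmp ugt X, 0)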
7566   auto CheckNE0 = [](const APInt &Shift) { return !Shift.isZero(); };
7567   {
7568     if (match(Op0, m_LShr(m_Specific(Op1), m_CheckedInt(CheckNE0)))) {
7569       return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7570                           Constant::getNullValue(Op1->getType()));
7571     }
7572 
7573     if ((Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SGE) &&
7574         match(Op0, m_AShr(m_Specific(Op1), m_CheckedInt(CheckNE0)))) {
7575       return new ICmpInst(ICmpInst::getSwappedPredicate(Pred), Op1,
7576                           Constant::getNullValue(Op1->getType()));
7577     }
7578   }
7579 
7580   return nullptr;
7581 }
7582 
7583 Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
7584   bool Changed = false;
7585   const SimplifyQuery Q = SQ.getWithInstruction(&I);
7586   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
7587   unsigned Op0Cplxity = getComplexity(Op0);
7588   unsigned Op1Cplxity = getComplexity(Op1);
7589 
7590   /// Orders the operands of the compare so that they are listed from most
7591   /// complex to least complex. In the complexity ranking, constants come
7592   /// before unary operators, which come before binary operators.
7593   if (Op0Cplxity < Op1Cplxity) {
7594     I.swapOperands();
7595     std::swap(Op0, Op1);
7596     Changed = true;
7597   }
7598 
7599   if (Value *V = simplifyICmpInst(I.getCmpPredicate(), Op0, Op1, Q))
7600     return replaceInstUsesWith(I, V);
7601 
7602   // Comparing a select of val and -val against zero is the same as just
7603   // comparing val, i.e. abs(val) != 0 -> val != 0
7604   if (I.getPredicate() == ICmpInst::ICMP_NE && match(Op1, m_Zero())) {
7605     Value *Cond, *SelectTrue, *SelectFalse;
7606     if (match(Op0, m_Select(m_Value(Cond), m_Value(SelectTrue),
7607                             m_Value(SelectFalse)))) {
7608       if (Value *V = dyn_castNegVal(SelectTrue)) {
7609         if (V == SelectFalse)
7610           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
7611       } else if (Value *V = dyn_castNegVal(SelectFalse)) {
7612         if (V == SelectTrue)
7613           return CmpInst::Create(Instruction::ICmp, I.getPredicate(), V, Op1);
7614       }
7615     }
7616   }
7617 
7618   if (Instruction *Res = foldICmpTruncWithTruncOrExt(I, Q))
7619     return Res;
7620 
7621   if (Op0->getType()->isIntOrIntVectorTy(1))
7622     if (Instruction *Res = canonicalizeICmpBool(I, Builder))
7623       return Res;
7624 
7625   if (Instruction *Res = canonicalizeCmpWithConstant(I))
7626     return Res;
7627 
7628   if (Instruction *Res = canonicalizeICmpPredicate(I))
7629     return Res;
7630 
7631   if (Instruction *Res = foldICmpWithConstant(I))
7632     return Res;
7633 
7634   if (Instruction *Res = foldICmpWithDominatingICmp(I))
7635     return Res;
7636 
7637   if (Instruction *Res = foldICmpUsingBoolRange(I))
7638     return Res;
7639 
7640   if (Instruction *Res = foldICmpUsingKnownBits(I))
7641     return Res;
7642 
7643   // Test if the ICmpInst instruction is used exclusively by a select as
7644   // part of a minimum or maximum operation. If so, refrain from doing
7645   // any other folding. This helps out other analyses which understand
7646   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
7647   // and CodeGen. And in this case, at least one of the comparison
7648   // operands has at least one user besides the compare (the select),
7649   // which would often largely negate the benefit of folding anyway.
7650   //
7651   // Do the same for the other patterns recognized by matchSelectPattern.
7652   if (I.hasOneUse())
7653     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
7654       Value *A, *B;
7655       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
7656       if (SPR.Flavor != SPF_UNKNOWN)
7657         return nullptr;
7658     }
7659 
7660   // Do this after checking for min/max to prevent infinite looping.
7661   if (Instruction *Res = foldICmpWithZero(I))
7662     return Res;
7663 
7664   // FIXME: We only do this after checking for min/max to prevent infinite
7665   // looping caused by a reverse canonicalization of these patterns for min/max.
7666   // FIXME: The organization of folds is a mess. These would naturally go into
7667   // canonicalizeCmpWithConstant(), but we can't move all of the above folds
7668   // down here after the min/max restriction.
7669   ICmpInst::Predicate Pred = I.getPredicate();
7670   const APInt *C;
7671   if (match(Op1, m_APInt(C))) {
7672     // For i32: x >u 2147483647 -> x <s 0  -> true if sign bit set
7673     if (Pred == ICmpInst::ICMP_UGT && C->isMaxSignedValue()) {
7674       Constant *Zero = Constant::getNullValue(Op0->getType());
7675       return new ICmpInst(ICmpInst::ICMP_SLT, Op0, Zero);
7676     }
7677 
7678     // For i32: x <u 2147483648 -> x >s -1  -> true if sign bit clear
7679     if (Pred == ICmpInst::ICMP_ULT && C->isMinSignedValue()) {
7680       Constant *AllOnes = Constant::getAllOnesValue(Op0->getType());
7681       return new ICmpInst(ICmpInst::ICMP_SGT, Op0, AllOnes);
7682     }
7683   }
7684 
7685   // The folds in here may rely on wrapping flags and special constants, so
7686   // they can break up min/max idioms in some cases but not seemingly similar
7687   // patterns.
7688   // FIXME: It may be possible to enhance select folding to make this
7689   //        unnecessary. It may also be moot if we canonicalize to min/max
7690   //        intrinsics.
7691   if (Instruction *Res = foldICmpBinOp(I, Q))
7692     return Res;
7693 
7694   if (Instruction *Res = foldICmpInstWithConstant(I))
7695     return Res;
7696 
7697   // Try to match comparison as a sign bit test. Intentionally do this after
7698   // foldICmpInstWithConstant() to potentially let other folds happen first.
7699   if (Instruction *New = foldSignBitTest(I))
7700     return New;
7701 
7702   if (Instruction *Res = foldICmpInstWithConstantNotInt(I))
7703     return Res;
7704 
7705   if (Instruction *Res = foldICmpCommutative(I.getCmpPredicate(), Op0, Op1, I))
7706     return Res;
7707   if (Instruction *Res =
7708           foldICmpCommutative(I.getSwappedCmpPredicate(), Op1, Op0, I))
7709     return Res;
7710 
7711   if (I.isCommutative()) {
7712     if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
7713       replaceOperand(I, 0, Pair->first);
7714       replaceOperand(I, 1, Pair->second);
7715       return &I;
7716     }
7717   }
7718 
7719   // In case of a comparison with two select instructions having the same
7720   // condition, check whether one of the resulting branches can be simplified.
7721   // If so, just compare the other branch and select the appropriate result.
7722   // For example:
7723   //   %tmp1 = select i1 %cmp, i32 %y, i32 %x
7724   //   %tmp2 = select i1 %cmp, i32 %z, i32 %x
7725   //   %cmp2 = icmp slt i32 %tmp2, %tmp1
7726   // When %cmp is false both selects yield %x, so the icmp is false; when %cmp
7727   // is true the result depends on comparing the true values of the selects.
7728   // Thus, transform this into:
7729   //   %cmp3 = icmp slt i32 %z, %y
7730   //   %cmp2 = select i1 %cmp, i1 %cmp3, i1 false
7731   // The symmetric case, where the true values simplify, is handled the same way.
7732   {
7733     Value *Cond, *A, *B, *C, *D;
7734     if (match(Op0, m_Select(m_Value(Cond), m_Value(A), m_Value(B))) &&
7735         match(Op1, m_Select(m_Specific(Cond), m_Value(C), m_Value(D))) &&
7736         (Op0->hasOneUse() || Op1->hasOneUse())) {
7737       // Check whether comparison of TrueValues can be simplified
7738       if (Value *Res = simplifyICmpInst(Pred, A, C, SQ)) {
7739         Value *NewICMP = Builder.CreateICmp(Pred, B, D);
7740         return SelectInst::Create(Cond, Res, NewICMP);
7741       }
7742       // Check whether comparison of FalseValues can be simplified
7743       if (Value *Res = simplifyICmpInst(Pred, B, D, SQ)) {
7744         Value *NewICMP = Builder.CreateICmp(Pred, A, C);
7745         return SelectInst::Create(Cond, NewICMP, Res);
7746       }
7747     }
7748   }
7749 
7750   // icmp slt (sub nsw x, y), (add nsw x, y)  -->  icmp sgt y, 0
7751   // icmp ult (sub nuw x, y), (add nuw x, y)  -->  icmp ugt y, 0
7752   // icmp eq (sub nsw/nuw x, y), (add nsw/nuw x, y)   -->  icmp eq y, 0
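       // (With the required no-wrap flags the arithmetic cannot wrap, so each of
       // these compares reduces to (-y) pred y, i.e. comparing y against 0 with
       // the swapped predicate.)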
7753   {
7754     Value *A, *B;
7755     CmpPredicate CmpPred;
7756     if (match(&I, m_c_ICmp(CmpPred, m_Sub(m_Value(A), m_Value(B)),
7757                            m_c_Add(m_Deferred(A), m_Deferred(B))))) {
7758       auto *I0 = cast<OverflowingBinaryOperator>(Op0);
7759       auto *I1 = cast<OverflowingBinaryOperator>(Op1);
7760       bool I0NUW = I0->hasNoUnsignedWrap();
7761       bool I1NUW = I1->hasNoUnsignedWrap();
7762       bool I0NSW = I0->hasNoSignedWrap();
7763       bool I1NSW = I1->hasNoSignedWrap();
7764       if ((ICmpInst::isUnsigned(Pred) && I0NUW && I1NUW) ||
7765           (ICmpInst::isSigned(Pred) && I0NSW && I1NSW) ||
7766           (ICmpInst::isEquality(Pred) &&
7767            ((I0NUW || I0NSW) && (I1NUW || I1NSW)))) {
7768         return new ICmpInst(CmpPredicate::getSwapped(CmpPred), B,
7769                             ConstantInt::get(Op0->getType(), 0));
7770       }
7771     }
7772   }
7773 
7774   // Try to optimize equality comparisons against alloca-based pointers.
7775   if (Op0->getType()->isPointerTy() && I.isEquality()) {
7776     assert(Op1->getType()->isPointerTy() &&
7777            "Comparing pointer with non-pointer?");
7778     if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op0)))
7779       if (foldAllocaCmp(Alloca))
7780         return nullptr;
7781     if (auto *Alloca = dyn_cast<AllocaInst>(getUnderlyingObject(Op1)))
7782       if (foldAllocaCmp(Alloca))
7783         return nullptr;
7784   }
7785 
7786   if (Instruction *Res = foldICmpBitCast(I))
7787     return Res;
7788 
7789   // TODO: Hoist this above the min/max bailout.
7790   if (Instruction *R = foldICmpWithCastOp(I))
7791     return R;
7792 
7793   {
7794     Value *X, *Y;
7795     // Transform (X & ~Y) == 0 --> (X & Y) != 0
7796     // and       (X & ~Y) != 0 --> (X & Y) == 0
7797     // if X is a power of 2.
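         // (X has a single set bit; that bit is cleared in ~Y exactly when
         // it is set in Y.)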
7798     if (match(Op0, m_And(m_Value(X), m_Not(m_Value(Y)))) &&
7799         match(Op1, m_Zero()) && isKnownToBeAPowerOfTwo(X, false, &I) &&
7800         I.isEquality())
7801       return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(X, Y),
7802                           Op1);
7803 
7804     // Op0 pred Op1 -> ~Op1 pred ~Op0, if this allows us to drop an instruction.
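         // e.g. (icmp ult (xor X, -1), (xor Y, -1)) --> (icmp ugt X, Y)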
7805     if (Op0->getType()->isIntOrIntVectorTy()) {
7806       bool ConsumesOp0, ConsumesOp1;
7807       if (isFreeToInvert(Op0, Op0->hasOneUse(), ConsumesOp0) &&
7808           isFreeToInvert(Op1, Op1->hasOneUse(), ConsumesOp1) &&
7809           (ConsumesOp0 || ConsumesOp1)) {
7810         Value *InvOp0 = getFreelyInverted(Op0, Op0->hasOneUse(), &Builder);
7811         Value *InvOp1 = getFreelyInverted(Op1, Op1->hasOneUse(), &Builder);
7812         assert(InvOp0 && InvOp1 &&
7813                "Mismatch between isFreeToInvert and getFreelyInverted");
7814         return new ICmpInst(I.getSwappedPredicate(), InvOp0, InvOp1);
7815       }
7816     }
7817 
7818     Instruction *AddI = nullptr;
7819     if (match(&I, m_UAddWithOverflow(m_Value(X), m_Value(Y),
7820                                      m_Instruction(AddI))) &&
7821         isa<IntegerType>(X->getType())) {
7822       Value *Result;
7823       Constant *Overflow;
7824       // m_UAddWithOverflow can match patterns that do not include an explicit
7825       // "add" instruction, so check the opcode of the matched op.
7826       if (AddI->getOpcode() == Instruction::Add &&
7827           OptimizeOverflowCheck(Instruction::Add, /*Signed*/ false, X, Y, *AddI,
7828                                 Result, Overflow)) {
7829         replaceInstUsesWith(*AddI, Result);
7830         eraseInstFromFunction(*AddI);
7831         return replaceInstUsesWith(I, Overflow);
7832       }
7833     }
7834 
7835     // (zext X) * (zext Y)  --> llvm.umul.with.overflow.
7836     if (match(Op0, m_NUWMul(m_ZExt(m_Value(X)), m_ZExt(m_Value(Y)))) &&
7837         match(Op1, m_APInt(C))) {
7838       if (Instruction *R = processUMulZExtIdiom(I, Op0, C, *this))
7839         return R;
7840     }
7841 
7842     // Signbit test folds
7843     // Fold (X u>> BitWidth - 1 Pred ZExt(i1))  -->  X s< 0 Pred i1
7844     // Fold (X s>> BitWidth - 1 Pred SExt(i1))  -->  X s< 0 Pred i1
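         // e.g. for i8: (icmp eq (lshr X, 7), (zext i1 Y))
         //                --> (icmp eq (icmp slt X, 0), Y)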
7845     Instruction *ExtI;
7846     if ((I.isUnsigned() || I.isEquality()) &&
7847         match(Op1,
7848               m_CombineAnd(m_Instruction(ExtI), m_ZExtOrSExt(m_Value(Y)))) &&
7849         Y->getType()->getScalarSizeInBits() == 1 &&
7850         (Op0->hasOneUse() || Op1->hasOneUse())) {
7851       unsigned OpWidth = Op0->getType()->getScalarSizeInBits();
7852       Instruction *ShiftI;
7853       if (match(Op0, m_CombineAnd(m_Instruction(ShiftI),
7854                                   m_Shr(m_Value(X), m_SpecificIntAllowPoison(
7855                                                         OpWidth - 1))))) {
7856         unsigned ExtOpc = ExtI->getOpcode();
7857         unsigned ShiftOpc = ShiftI->getOpcode();
7858         if ((ExtOpc == Instruction::ZExt && ShiftOpc == Instruction::LShr) ||
7859             (ExtOpc == Instruction::SExt && ShiftOpc == Instruction::AShr)) {
7860           Value *SLTZero =
7861               Builder.CreateICmpSLT(X, Constant::getNullValue(X->getType()));
7862           Value *Cmp = Builder.CreateICmp(Pred, SLTZero, Y, I.getName());
7863           return replaceInstUsesWith(I, Cmp);
7864         }
7865       }
7866     }
7867   }
7868 
7869   if (Instruction *Res = foldICmpEquality(I))
7870     return Res;
7871 
7872   if (Instruction *Res = foldICmpPow2Test(I, Builder))
7873     return Res;
7874 
7875   if (Instruction *Res = foldICmpOfUAddOv(I))
7876     return Res;
7877 
7878   // The 'cmpxchg' instruction returns an aggregate containing the old value and
7879   // an i1 which indicates whether or not we successfully did the swap.
7880   //
7881   // Replace comparisons between the old value and the expected value with the
7882   // indicator that 'cmpxchg' returns.
7883   //
7884   // N.B.  This transform is only valid when the 'cmpxchg' is not permitted to
7885   // spuriously fail.  In those cases, the old value may equal the expected
7886   // value but it is possible for the swap to not occur.
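       // For example:
       //   %pair = cmpxchg ptr %p, i32 %expected, i32 %new seq_cst seq_cst
       //   %old  = extractvalue { i32, i1 } %pair, 0
       //   %res  = icmp eq i32 %old, %expected
       // -->
       //   %res  = extractvalue { i32, i1 } %pair, 1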
7887   if (I.getPredicate() == ICmpInst::ICMP_EQ)
7888     if (auto *EVI = dyn_cast<ExtractValueInst>(Op0))
7889       if (auto *ACXI = dyn_cast<AtomicCmpXchgInst>(EVI->getAggregateOperand()))
7890         if (EVI->getIndices()[0] == 0 && ACXI->getCompareOperand() == Op1 &&
7891             !ACXI->isWeak())
7892           return ExtractValueInst::Create(ACXI, 1);
7893 
7894   if (Instruction *Res = foldICmpWithHighBitMask(I, Builder))
7895     return Res;
7896 
7897   if (I.getType()->isVectorTy())
7898     if (Instruction *Res = foldVectorCmp(I, Builder))
7899       return Res;
7900 
7901   if (Instruction *Res = foldICmpInvariantGroup(I))
7902     return Res;
7903 
7904   if (Instruction *Res = foldReductionIdiom(I, Builder, DL))
7905     return Res;
7906 
7907   {
7908     Value *A;
7909     const APInt *C1, *C2;
7910     ICmpInst::Predicate Pred = I.getPredicate();
7911     if (ICmpInst::isEquality(Pred)) {
7912       // sext(a) & c1 == c2 --> a & c3 == trunc(c2)
7913       // sext(a) & c1 != c2 --> a & c3 != trunc(c2)
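           // e.g. for i8 a: (sext(a) & 0x130) == 0x30 --> (a & 0xB0) == 0x30
           // (the test of bit 8, a copy of a's sign bit, moves onto bit 7)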
7914       if (match(Op0, m_And(m_SExt(m_Value(A)), m_APInt(C1))) &&
7915           match(Op1, m_APInt(C2))) {
7916         Type *InputTy = A->getType();
7917         unsigned InputBitWidth = InputTy->getScalarSizeInBits();
7918         // c2 must be non-negative at the bitwidth of a.
7919         if (C2->getActiveBits() < InputBitWidth) {
7920           APInt TruncC1 = C1->trunc(InputBitWidth);
7921           // Bits of C1 at or above InputBitWidth test copies of a's sign bit.
7922           if (C1->uge(APInt::getOneBitSet(C1->getBitWidth(), InputBitWidth)))
7923             TruncC1.setBit(InputBitWidth - 1);
7924           Value *AndInst = Builder.CreateAnd(A, TruncC1);
7925           return new ICmpInst(
7926               Pred, AndInst,
7927               ConstantInt::get(InputTy, C2->trunc(InputBitWidth)));
7928         }
7929       }
7930     }
7931   }
7932 
7933   return Changed ? &I : nullptr;
7934 }
7935 
7936 /// Fold fcmp ([us]itofp x, cst) if possible.
7937 Instruction *InstCombinerImpl::foldFCmpIntToFPConst(FCmpInst &I,
7938                                                     Instruction *LHSI,
7939                                                     Constant *RHSC) {
7940   const APFloat *RHS;
7941   if (!match(RHSC, m_APFloat(RHS)))
7942     return nullptr;
7943 
7944   // Get the width of the mantissa.  We don't want to hack on conversions that
7945   // might lose information from the integer, e.g. "i64 -> float"
7946   int MantissaWidth = LHSI->getType()->getFPMantissaWidth();
7947   if (MantissaWidth == -1)
7948     return nullptr; // Unknown.
7949 
7950   Type *IntTy = LHSI->getOperand(0)->getType();
7951   unsigned IntWidth = IntTy->getScalarSizeInBits();
7952   bool LHSUnsigned = isa<UIToFPInst>(LHSI);
7953 
7954   if (I.isEquality()) {
7955     FCmpInst::Predicate P = I.getPredicate();
7956     bool IsExact = false;
7957     APSInt RHSCvt(IntWidth, LHSUnsigned);
7958     RHS->convertToInteger(RHSCvt, APFloat::rmNearestTiesToEven, &IsExact);
7959 
7960     // If the floating point constant isn't an integer value, we know the
7961     // result of an equality / inequality comparison with it.
7962     if (!IsExact) {
7963       // TODO: Can never be -0.0 and other non-representable values
7964       APFloat RHSRoundInt(*RHS);
7965       RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
7966       if (*RHS != RHSRoundInt) {
7967         if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
7968           return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
7969 
7970         assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
7971         return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
7972       }
7973     }
7974 
7975     // TODO: If the constant is exactly representable, is it always OK to do
7976     // equality compares as integer?
7977   }
7978 
7979   // Check that the input is converted from an integer type that is small enough
7980   // to preserve all bits.  TODO: check here for "known" sign bits. This would
7981   // allow us to handle e.g. (fptosi (x >>s 62) to float) if x is i64.
7982 
7983   // Following test does NOT adjust IntWidth downwards for signed inputs,
7984   // because the most negative value still requires all the mantissa bits
7985   // to distinguish it from one less than that value.
7986   if ((int)IntWidth > MantissaWidth) {
7987     // Conversion would lose accuracy. Check if loss can impact comparison.
7988     int Exp = ilogb(*RHS);
7989     if (Exp == APFloat::IEK_Inf) {
7990       int MaxExponent = ilogb(APFloat::getLargest(RHS->getSemantics()));
7991       if (MaxExponent < (int)IntWidth - !LHSUnsigned)
7992         // Conversion could create infinity.
7993         return nullptr;
7994     } else {
7995       // Note that if RHS is zero or NaN, then Exp is negative
7996       // and the first condition is trivially false.
7997       if (MantissaWidth <= Exp && Exp <= (int)IntWidth - !LHSUnsigned)
7998         // Conversion could affect comparison.
7999         return nullptr;
8000     }
8001   }
8002 
8003   // Otherwise, we can potentially simplify the comparison.  We know that it
8004   // will always come through as an integer value and we know the constant is
8005   // not a NAN (it would have been previously simplified).
8006   assert(!RHS->isNaN() && "NaN comparison not already folded!");
8007 
8008   ICmpInst::Predicate Pred;
8009   switch (I.getPredicate()) {
8010   default:
8011     llvm_unreachable("Unexpected predicate!");
8012   case FCmpInst::FCMP_UEQ:
8013   case FCmpInst::FCMP_OEQ:
8014     Pred = ICmpInst::ICMP_EQ;
8015     break;
8016   case FCmpInst::FCMP_UGT:
8017   case FCmpInst::FCMP_OGT:
8018     Pred = LHSUnsigned ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_SGT;
8019     break;
8020   case FCmpInst::FCMP_UGE:
8021   case FCmpInst::FCMP_OGE:
8022     Pred = LHSUnsigned ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_SGE;
8023     break;
8024   case FCmpInst::FCMP_ULT:
8025   case FCmpInst::FCMP_OLT:
8026     Pred = LHSUnsigned ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_SLT;
8027     break;
8028   case FCmpInst::FCMP_ULE:
8029   case FCmpInst::FCMP_OLE:
8030     Pred = LHSUnsigned ? ICmpInst::ICMP_ULE : ICmpInst::ICMP_SLE;
8031     break;
8032   case FCmpInst::FCMP_UNE:
8033   case FCmpInst::FCMP_ONE:
8034     Pred = ICmpInst::ICMP_NE;
8035     break;
8036   case FCmpInst::FCMP_ORD:
8037     return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8038   case FCmpInst::FCMP_UNO:
8039     return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8040   }
8041 
8042   // Now we know that the APFloat is a normal number, zero or inf.
8043 
8044   // See if the FP constant is too large for the integer.  For example,
8045   // comparing an i8 to 300.0.
8046   if (!LHSUnsigned) {
8047     // If the RHS value is > SignedMax, fold the comparison.  This handles +INF
8048     // and large values.
8049     APFloat SMax(RHS->getSemantics());
8050     SMax.convertFromAPInt(APInt::getSignedMaxValue(IntWidth), true,
8051                           APFloat::rmNearestTiesToEven);
8052     if (SMax < *RHS) { // smax < 13123.0
8053       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
8054           Pred == ICmpInst::ICMP_SLE)
8055         return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8056       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8057     }
8058   } else {
8059     // If the RHS value is > UnsignedMax, fold the comparison. This handles
8060     // +INF and large values.
8061     APFloat UMax(RHS->getSemantics());
8062     UMax.convertFromAPInt(APInt::getMaxValue(IntWidth), false,
8063                           APFloat::rmNearestTiesToEven);
8064     if (UMax < *RHS) { // umax < 13123.0
8065       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
8066           Pred == ICmpInst::ICMP_ULE)
8067         return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8068       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8069     }
8070   }
8071 
8072   if (!LHSUnsigned) {
8073     // See if the RHS value is < SignedMin.
8074     APFloat SMin(RHS->getSemantics());
8075     SMin.convertFromAPInt(APInt::getSignedMinValue(IntWidth), true,
8076                           APFloat::rmNearestTiesToEven);
8077     if (SMin > *RHS) { // smin > 12312.0
8078       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
8079           Pred == ICmpInst::ICMP_SGE)
8080         return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8081       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8082     }
8083   } else {
8084     // See if the RHS value is < UnsignedMin.
8085     APFloat UMin(RHS->getSemantics());
8086     UMin.convertFromAPInt(APInt::getMinValue(IntWidth), false,
8087                           APFloat::rmNearestTiesToEven);
8088     if (UMin > *RHS) { // umin > 12312.0
8089       if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
8090           Pred == ICmpInst::ICMP_UGE)
8091         return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8092       return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8093     }
8094   }
8095 
8096   // Okay, now we know that the FP constant fits in the range [SMIN, SMAX] or
8097   // [0, UMAX], but it may still be fractional. Check whether this is the case
8098   // using the IsExact flag.
8099   // Don't do this for zero, because -0.0 is not fractional.
8100   APSInt RHSInt(IntWidth, LHSUnsigned);
8101   bool IsExact;
8102   RHS->convertToInteger(RHSInt, APFloat::rmTowardZero, &IsExact);
8103   if (!RHS->isZero()) {
8104     if (!IsExact) {
8105       // If we had a comparison against a fractional value, we have to adjust
8106       // the compare predicate and sometimes the value.  RHSC is rounded towards
8107       // zero at this point.
8108       switch (Pred) {
8109       default:
8110         llvm_unreachable("Unexpected integer comparison!");
8111       case ICmpInst::ICMP_NE: // (float)int != 4.4   --> true
8112         return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8113       case ICmpInst::ICMP_EQ: // (float)int == 4.4   --> false
8114         return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8115       case ICmpInst::ICMP_ULE:
8116         // (float)int <= 4.4   --> int <= 4
8117         // (float)int <= -4.4  --> false
8118         if (RHS->isNegative())
8119           return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8120         break;
8121       case ICmpInst::ICMP_SLE:
8122         // (float)int <= 4.4   --> int <= 4
8123         // (float)int <= -4.4  --> int < -4
8124         if (RHS->isNegative())
8125           Pred = ICmpInst::ICMP_SLT;
8126         break;
8127       case ICmpInst::ICMP_ULT:
8128         // (float)int < -4.4   --> false
8129         // (float)int < 4.4    --> int <= 4
8130         if (RHS->isNegative())
8131           return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8132         Pred = ICmpInst::ICMP_ULE;
8133         break;
8134       case ICmpInst::ICMP_SLT:
8135         // (float)int < -4.4   --> int < -4
8136         // (float)int < 4.4    --> int <= 4
8137         if (!RHS->isNegative())
8138           Pred = ICmpInst::ICMP_SLE;
8139         break;
8140       case ICmpInst::ICMP_UGT:
8141         // (float)int > 4.4    --> int > 4
8142         // (float)int > -4.4   --> true
8143         if (RHS->isNegative())
8144           return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8145         break;
8146       case ICmpInst::ICMP_SGT:
8147         // (float)int > 4.4    --> int > 4
8148         // (float)int > -4.4   --> int >= -4
8149         if (RHS->isNegative())
8150           Pred = ICmpInst::ICMP_SGE;
8151         break;
8152       case ICmpInst::ICMP_UGE:
8153         // (float)int >= -4.4   --> true
8154         // (float)int >= 4.4    --> int > 4
8155         if (RHS->isNegative())
8156           return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8157         Pred = ICmpInst::ICMP_UGT;
8158         break;
8159       case ICmpInst::ICMP_SGE:
8160         // (float)int >= -4.4   --> int >= -4
8161         // (float)int >= 4.4    --> int > 4
8162         if (!RHS->isNegative())
8163           Pred = ICmpInst::ICMP_SGT;
8164         break;
8165       }
8166     }
8167   }
8168 
8169   // Lower this FP comparison into an appropriate integer version of the
8170   // comparison.
8171   return new ICmpInst(Pred, LHSI->getOperand(0),
8172                       ConstantInt::get(LHSI->getOperand(0)->getType(), RHSInt));
8173 }
8174 
8175 /// Fold (C / X) < 0.0 --> X < 0.0 if possible. Swap predicate if necessary.
8176 static Instruction *foldFCmpReciprocalAndZero(FCmpInst &I, Instruction *LHSI,
8177                                               Constant *RHSC) {
8178   // When C is not 0.0 and infinities are not allowed:
8179   // (C / X) < 0.0 is a sign-bit test of X
8180   // (C / X) < 0.0 --> X < 0.0 (if C is positive)
8181   // (C / X) < 0.0 --> X > 0.0 (if C is negative, swap the predicate)
8182   //
8183   // Proof:
8184   // Multiply (C / X) < 0.0 by X * X / C.
8185   // - X is non-zero; if it were zero, the 'ninf' flag would be violated.
8186   // - C determines the sign of X * X / C and thus whether the predicate must be
8187   //   swapped. C is also non-zero by definition.
8188   //
8189   // Thus X * X / C is non zero and the transformation is valid. [qed]
8190 
8191   FCmpInst::Predicate Pred = I.getPredicate();
8192 
8193   // Check that predicates are valid.
8194   if ((Pred != FCmpInst::FCMP_OGT) && (Pred != FCmpInst::FCMP_OLT) &&
8195       (Pred != FCmpInst::FCMP_OGE) && (Pred != FCmpInst::FCMP_OLE))
8196     return nullptr;
8197 
8198   // Check that RHS operand is zero.
8199   if (!match(RHSC, m_AnyZeroFP()))
8200     return nullptr;
8201 
8202   // Check fastmath flags ('ninf').
8203   if (!LHSI->hasNoInfs() || !I.hasNoInfs())
8204     return nullptr;
8205 
8206   // Check the properties of the dividend. It must not be zero to avoid a
8207   // division by zero (see Proof).
8208   const APFloat *C;
8209   if (!match(LHSI->getOperand(0), m_APFloat(C)))
8210     return nullptr;
8211 
8212   if (C->isZero())
8213     return nullptr;
8214 
8215   // Get swapped predicate if necessary.
8216   if (C->isNegative())
8217     Pred = I.getSwappedPredicate();
8218 
8219   return new FCmpInst(Pred, LHSI->getOperand(1), RHSC, "", &I);
8220 }
8221 
8222 /// Optimize fabs(X) compared with zero.
8223 static Instruction *foldFabsWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
8224   Value *X;
8225   if (!match(I.getOperand(0), m_FAbs(m_Value(X))))
8226     return nullptr;
8227 
8228   const APFloat *C;
8229   if (!match(I.getOperand(1), m_APFloat(C)))
8230     return nullptr;
8231 
8232   if (!C->isPosZero()) {
8233     if (!C->isSmallestNormalized())
8234       return nullptr;
8235 
8236     const Function *F = I.getFunction();
8237     DenormalMode Mode = F->getDenormalMode(C->getSemantics());
8238     if (Mode.Input == DenormalMode::PreserveSign ||
8239         Mode.Input == DenormalMode::PositiveZero) {
8240 
8241       auto replaceFCmp = [](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
8242         Constant *Zero = ConstantFP::getZero(X->getType());
8243         return new FCmpInst(P, X, Zero, "", I);
8244       };
8245 
8246       switch (I.getPredicate()) {
8247       case FCmpInst::FCMP_OLT:
8248         // fcmp olt fabs(x), smallest_normalized_number -> fcmp oeq x, 0.0
8249         return replaceFCmp(&I, FCmpInst::FCMP_OEQ, X);
8250       case FCmpInst::FCMP_UGE:
8251         // fcmp uge fabs(x), smallest_normalized_number -> fcmp une x, 0.0
8252         return replaceFCmp(&I, FCmpInst::FCMP_UNE, X);
8253       case FCmpInst::FCMP_OGE:
8254         // fcmp oge fabs(x), smallest_normalized_number -> fcmp one x, 0.0
8255         return replaceFCmp(&I, FCmpInst::FCMP_ONE, X);
8256       case FCmpInst::FCMP_ULT:
8257         // fcmp ult fabs(x), smallest_normalized_number -> fcmp ueq x, 0.0
8258         return replaceFCmp(&I, FCmpInst::FCMP_UEQ, X);
8259       default:
8260         break;
8261       }
8262     }
8263 
8264     return nullptr;
8265   }
8266 
8267   auto replacePredAndOp0 = [&IC](FCmpInst *I, FCmpInst::Predicate P, Value *X) {
8268     I->setPredicate(P);
8269     return IC.replaceOperand(*I, 0, X);
8270   };
8271 
8272   switch (I.getPredicate()) {
8273   case FCmpInst::FCMP_UGE:
8274   case FCmpInst::FCMP_OLT:
8275     // fabs(X) >= 0.0 --> true
8276     // fabs(X) <  0.0 --> false
8277     llvm_unreachable("fcmp should have simplified");
8278 
8279   case FCmpInst::FCMP_OGT:
8280     // fabs(X) > 0.0 --> X != 0.0
8281     return replacePredAndOp0(&I, FCmpInst::FCMP_ONE, X);
8282 
8283   case FCmpInst::FCMP_UGT:
8284     // fabs(X) u> 0.0 --> X u!= 0.0
8285     return replacePredAndOp0(&I, FCmpInst::FCMP_UNE, X);
8286 
8287   case FCmpInst::FCMP_OLE:
8288     // fabs(X) <= 0.0 --> X == 0.0
8289     return replacePredAndOp0(&I, FCmpInst::FCMP_OEQ, X);
8290 
8291   case FCmpInst::FCMP_ULE:
8292     // fabs(X) u<= 0.0 --> X u== 0.0
8293     return replacePredAndOp0(&I, FCmpInst::FCMP_UEQ, X);
8294 
8295   case FCmpInst::FCMP_OGE:
8296     // fabs(X) >= 0.0 --> !isnan(X)
8297     assert(!I.hasNoNaNs() && "fcmp should have simplified");
8298     return replacePredAndOp0(&I, FCmpInst::FCMP_ORD, X);
8299 
8300   case FCmpInst::FCMP_ULT:
8301     // fabs(X) u< 0.0 --> isnan(X)
8302     assert(!I.hasNoNaNs() && "fcmp should have simplified");
8303     return replacePredAndOp0(&I, FCmpInst::FCMP_UNO, X);
8304 
8305   case FCmpInst::FCMP_OEQ:
8306   case FCmpInst::FCMP_UEQ:
8307   case FCmpInst::FCMP_ONE:
8308   case FCmpInst::FCMP_UNE:
8309   case FCmpInst::FCMP_ORD:
8310   case FCmpInst::FCMP_UNO:
8311     // Look through the fabs() because it doesn't change anything but the sign.
8312     // fabs(X) == 0.0 --> X == 0.0,
8313     // fabs(X) != 0.0 --> X != 0.0
8314     // isnan(fabs(X)) --> isnan(X)
8315     // !isnan(fabs(X)) --> !isnan(X)
8316     return replacePredAndOp0(&I, I.getPredicate(), X);
8317 
8318   default:
8319     return nullptr;
8320   }
8321 }
8322 
8323 /// Optimize sqrt(X) compared with zero.
8324 static Instruction *foldSqrtWithFcmpZero(FCmpInst &I, InstCombinerImpl &IC) {
8325   Value *X;
8326   if (!match(I.getOperand(0), m_Sqrt(m_Value(X))))
8327     return nullptr;
8328 
8329   if (!match(I.getOperand(1), m_PosZeroFP()))
8330     return nullptr;
8331 
8332   auto ReplacePredAndOp0 = [&](FCmpInst::Predicate P) {
8333     I.setPredicate(P);
8334     return IC.replaceOperand(I, 0, X);
8335   };
8336 
8337   // Clear ninf flag if sqrt doesn't have it.
8338   if (!cast<Instruction>(I.getOperand(0))->hasNoInfs())
8339     I.setHasNoInfs(false);
8340 
8341   switch (I.getPredicate()) {
8342   case FCmpInst::FCMP_OLT:
8343   case FCmpInst::FCMP_UGE:
8344     // sqrt(X) < 0.0 --> false
8345     // sqrt(X) u>= 0.0 --> true
8346     llvm_unreachable("fcmp should have simplified");
8347   case FCmpInst::FCMP_ULT:
8348   case FCmpInst::FCMP_ULE:
8349   case FCmpInst::FCMP_OGT:
8350   case FCmpInst::FCMP_OGE:
8351   case FCmpInst::FCMP_OEQ:
8352   case FCmpInst::FCMP_UNE:
8353     // sqrt(X) u< 0.0 --> X u< 0.0
8354     // sqrt(X) u<= 0.0 --> X u<= 0.0
8355     // sqrt(X) > 0.0 --> X > 0.0
8356     // sqrt(X) >= 0.0 --> X >= 0.0
8357     // sqrt(X) == 0.0 --> X == 0.0
8358     // sqrt(X) u!= 0.0 --> X u!= 0.0
8359     return IC.replaceOperand(I, 0, X);
8360 
8361   case FCmpInst::FCMP_OLE:
8362     // sqrt(X) <= 0.0 --> X == 0.0
8363     return ReplacePredAndOp0(FCmpInst::FCMP_OEQ);
8364   case FCmpInst::FCMP_UGT:
8365     // sqrt(X) u> 0.0 --> X u!= 0.0
8366     return ReplacePredAndOp0(FCmpInst::FCMP_UNE);
8367   case FCmpInst::FCMP_UEQ:
8368     // sqrt(X) u== 0.0 --> X u<= 0.0
8369     return ReplacePredAndOp0(FCmpInst::FCMP_ULE);
8370   case FCmpInst::FCMP_ONE:
8371     // sqrt(X) != 0.0 --> X > 0.0
8372     return ReplacePredAndOp0(FCmpInst::FCMP_OGT);
8373   case FCmpInst::FCMP_ORD:
8374     // !isnan(sqrt(X)) --> X >= 0.0
8375     return ReplacePredAndOp0(FCmpInst::FCMP_OGE);
8376   case FCmpInst::FCMP_UNO:
8377     // isnan(sqrt(X)) --> X u< 0.0
8378     return ReplacePredAndOp0(FCmpInst::FCMP_ULT);
8379   default:
8380     llvm_unreachable("Unexpected predicate!");
8381   }
8382 }
8383 
8384 static Instruction *foldFCmpFNegCommonOp(FCmpInst &I) {
8385   CmpInst::Predicate Pred = I.getPredicate();
8386   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
8387 
8388   // Canonicalize fneg as Op1.
8389   if (match(Op0, m_FNeg(m_Value())) && !match(Op1, m_FNeg(m_Value()))) {
8390     std::swap(Op0, Op1);
8391     Pred = I.getSwappedPredicate();
8392   }
8393 
8394   if (!match(Op1, m_FNeg(m_Specific(Op0))))
8395     return nullptr;
8396 
8397   // Replace the negated operand with 0.0:
8398   // fcmp Pred Op0, -Op0 --> fcmp Pred Op0, 0.0
8399   Constant *Zero = ConstantFP::getZero(Op0->getType());
8400   return new FCmpInst(Pred, Op0, Zero, "", &I);
8401 }
8402 
8403 static Instruction *foldFCmpFSubIntoFCmp(FCmpInst &I, Instruction *LHSI,
8404                                          Constant *RHSC, InstCombinerImpl &CI) {
8405   const CmpInst::Predicate Pred = I.getPredicate();
8406   Value *X = LHSI->getOperand(0);
8407   Value *Y = LHSI->getOperand(1);
8408   switch (Pred) {
8409   default:
8410     break;
8411   case FCmpInst::FCMP_UGT:
8412   case FCmpInst::FCMP_ULT:
8413   case FCmpInst::FCMP_UNE:
8414   case FCmpInst::FCMP_OEQ:
8415   case FCmpInst::FCMP_OGE:
8416   case FCmpInst::FCMP_OLE:
8417     // The optimization is not valid if X and Y are infinities of the same
8418     // sign, i.e. the inf - inf = nan case. If the fsub has the ninf or nnan
8419     // flag then we can assume we do not have that case. Otherwise we might be
8420     // able to prove that either X or Y is not infinity.
8421     if (!LHSI->hasNoNaNs() && !LHSI->hasNoInfs() &&
8422         !isKnownNeverInfinity(Y,
8423                               CI.getSimplifyQuery().getWithInstruction(&I)) &&
8424         !isKnownNeverInfinity(X, CI.getSimplifyQuery().getWithInstruction(&I)))
8425       break;
8426 
8427     [[fallthrough]];
8428   case FCmpInst::FCMP_OGT:
8429   case FCmpInst::FCMP_OLT:
8430   case FCmpInst::FCMP_ONE:
8431   case FCmpInst::FCMP_UEQ:
8432   case FCmpInst::FCMP_UGE:
8433   case FCmpInst::FCMP_ULE:
8434     // fcmp pred (x - y), 0 --> fcmp pred x, y
8435     if (match(RHSC, m_AnyZeroFP()) &&
8436         I.getFunction()->getDenormalMode(
8437             LHSI->getType()->getScalarType()->getFltSemantics()) ==
8438             DenormalMode::getIEEE()) {
8439       CI.replaceOperand(I, 0, X);
8440       CI.replaceOperand(I, 1, Y);
8441       return &I;
8442     }
8443     break;
8444   }
8445 
8446   return nullptr;
8447 }
8448 
8449 static Instruction *foldFCmpWithFloorAndCeil(FCmpInst &I,
8450                                              InstCombinerImpl &IC) {
8451   Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
8452   Type *OpType = LHS->getType();
8453   CmpInst::Predicate Pred = I.getPredicate();
8454 
8455   bool FloorX = match(LHS, m_Intrinsic<Intrinsic::floor>(m_Specific(RHS)));
8456   bool CeilX = match(LHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(RHS)));
8457 
8458   if (!FloorX && !CeilX) {
8459     if ((FloorX = match(RHS, m_Intrinsic<Intrinsic::floor>(m_Specific(LHS)))) ||
8460         (CeilX = match(RHS, m_Intrinsic<Intrinsic::ceil>(m_Specific(LHS))))) {
8461       std::swap(LHS, RHS);
8462       Pred = I.getSwappedPredicate();
8463     }
8464   }
8465 
8466   switch (Pred) {
8467   case FCmpInst::FCMP_OLE:
8468     // fcmp ole floor(x), x => fcmp ord x, 0
8469     if (FloorX)
8470       return new FCmpInst(FCmpInst::FCMP_ORD, RHS, ConstantFP::getZero(OpType),
8471                           "", &I);
8472     break;
8473   case FCmpInst::FCMP_OGT:
8474     // fcmp ogt floor(x), x => false
8475     if (FloorX)
8476       return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8477     break;
8478   case FCmpInst::FCMP_OGE:
8479     // fcmp oge ceil(x), x => fcmp ord x, 0
8480     if (CeilX)
8481       return new FCmpInst(FCmpInst::FCMP_ORD, RHS, ConstantFP::getZero(OpType),
8482                           "", &I);
8483     break;
8484   case FCmpInst::FCMP_OLT:
8485     // fcmp olt ceil(x), x => false
8486     if (CeilX)
8487       return IC.replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8488     break;
8489   case FCmpInst::FCMP_ULE:
8490     // fcmp ule floor(x), x => true
8491     if (FloorX)
8492       return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8493     break;
8494   case FCmpInst::FCMP_UGT:
8495     // fcmp ugt floor(x), x => fcmp uno x, 0
8496     if (FloorX)
8497       return new FCmpInst(FCmpInst::FCMP_UNO, RHS, ConstantFP::getZero(OpType),
8498                           "", &I);
8499     break;
8500   case FCmpInst::FCMP_UGE:
8501     // fcmp uge ceil(x), x => true
8502     if (CeilX)
8503       return IC.replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8504     break;
8505   case FCmpInst::FCMP_ULT:
8506     // fcmp ult ceil(x), x => fcmp uno x, 0
8507     if (CeilX)
8508       return new FCmpInst(FCmpInst::FCMP_UNO, RHS, ConstantFP::getZero(OpType),
8509                           "", &I);
8510     break;
8511   default:
8512     break;
8513   }
8514 
8515   return nullptr;
8516 }
8517 
8518 Instruction *InstCombinerImpl::visitFCmpInst(FCmpInst &I) {
8519   bool Changed = false;
8520 
8521   /// Orders the operands of the compare so that they are listed from most
8522   /// complex to least complex. In the complexity ranking, constants come
8523   /// before unary operators, which come before binary operators.
8524   if (getComplexity(I.getOperand(0)) < getComplexity(I.getOperand(1))) {
8525     I.swapOperands();
8526     Changed = true;
8527   }
8528 
8529   const CmpInst::Predicate Pred = I.getPredicate();
8530   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
8531   if (Value *V = simplifyFCmpInst(Pred, Op0, Op1, I.getFastMathFlags(),
8532                                   SQ.getWithInstruction(&I)))
8533     return replaceInstUsesWith(I, V);
8534 
8535   // Simplify 'fcmp pred X, X'
8536   Type *OpType = Op0->getType();
8537   assert(OpType == Op1->getType() && "fcmp with different-typed operands?");
8538   if (Op0 == Op1) {
8539     switch (Pred) {
8540     default:
8541       break;
8542     case FCmpInst::FCMP_UNO: // True if unordered: isnan(X) | isnan(Y)
8543     case FCmpInst::FCMP_ULT: // True if unordered or less than
8544     case FCmpInst::FCMP_UGT: // True if unordered or greater than
8545     case FCmpInst::FCMP_UNE: // True if unordered or not equal
8546       // Canonicalize these to be 'fcmp uno %X, 0.0'.
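           // (With identical operands each of these is true exactly when X is NaN.)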
8547       I.setPredicate(FCmpInst::FCMP_UNO);
8548       I.setOperand(1, Constant::getNullValue(OpType));
8549       return &I;
8550 
8551     case FCmpInst::FCMP_ORD: // True if ordered (no nans)
8552     case FCmpInst::FCMP_OEQ: // True if ordered and equal
8553     case FCmpInst::FCMP_OGE: // True if ordered and greater than or equal
8554     case FCmpInst::FCMP_OLE: // True if ordered and less than or equal
8555       // Canonicalize these to be 'fcmp ord %X, 0.0'.
8556       I.setPredicate(FCmpInst::FCMP_ORD);
8557       I.setOperand(1, Constant::getNullValue(OpType));
8558       return &I;
8559     }
8560   }
8561 
8562   if (I.isCommutative()) {
8563     if (auto Pair = matchSymmetricPair(I.getOperand(0), I.getOperand(1))) {
8564       replaceOperand(I, 0, Pair->first);
8565       replaceOperand(I, 1, Pair->second);
8566       return &I;
8567     }
8568   }
8569 
8570   // If we're just checking for a NaN (ORD/UNO) and have a non-NaN operand,
8571   // then canonicalize the operand to 0.0.
8572   if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
8573     if (!match(Op0, m_PosZeroFP()) &&
8574         isKnownNeverNaN(Op0, getSimplifyQuery().getWithInstruction(&I)))
8575       return replaceOperand(I, 0, ConstantFP::getZero(OpType));
8576 
8577     if (!match(Op1, m_PosZeroFP()) &&
8578         isKnownNeverNaN(Op1, getSimplifyQuery().getWithInstruction(&I)))
8579       return replaceOperand(I, 1, ConstantFP::getZero(OpType));
8580   }
8581 
8582   // fcmp pred (fneg X), (fneg Y) -> fcmp swap(pred) X, Y
8583   Value *X, *Y;
8584   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
8585     return new FCmpInst(I.getSwappedPredicate(), X, Y, "", &I);
8586 
8587   if (Instruction *R = foldFCmpFNegCommonOp(I))
8588     return R;
8589 
8590   // Test if the FCmpInst instruction is used exclusively by a select as
8591   // part of a minimum or maximum operation. If so, refrain from doing
8592   // any other folding. This helps out other analyses which understand
8593   // non-obfuscated minimum and maximum idioms, such as ScalarEvolution
8594   // and CodeGen. And in this case, at least one of the comparison
8595   // operands has at least one user besides the compare (the select),
8596   // which would often largely negate the benefit of folding anyway.
8597   if (I.hasOneUse())
8598     if (SelectInst *SI = dyn_cast<SelectInst>(I.user_back())) {
8599       Value *A, *B;
8600       SelectPatternResult SPR = matchSelectPattern(SI, A, B);
8601       if (SPR.Flavor != SPF_UNKNOWN)
8602         return nullptr;
8603     }
8604 
8605   // The sign of 0.0 is ignored by fcmp, so canonicalize to +0.0:
8606   // fcmp Pred X, -0.0 --> fcmp Pred X, 0.0
8607   if (match(Op1, m_AnyZeroFP()) && !match(Op1, m_PosZeroFP()))
8608     return replaceOperand(I, 1, ConstantFP::getZero(OpType));
8609 
8610   // Canonicalize:
8611   // fcmp olt X, +inf -> fcmp one X, +inf
8612   // fcmp ole X, +inf -> fcmp ord X, 0
8613   // fcmp ogt X, +inf -> false
8614   // fcmp oge X, +inf -> fcmp oeq X, +inf
8615   // fcmp ult X, +inf -> fcmp une X, +inf
8616   // fcmp ule X, +inf -> true
8617   // fcmp ugt X, +inf -> fcmp uno X, 0
8618   // fcmp uge X, +inf -> fcmp ueq X, +inf
8619   // fcmp olt X, -inf -> false
8620   // fcmp ole X, -inf -> fcmp oeq X, -inf
8621   // fcmp ogt X, -inf -> fcmp one X, -inf
8622   // fcmp oge X, -inf -> fcmp ord X, 0
8623   // fcmp ult X, -inf -> fcmp uno X, 0
8624   // fcmp ule X, -inf -> fcmp ueq X, -inf
8625   // fcmp ugt X, -inf -> fcmp une X, -inf
8626   // fcmp uge X, -inf -> true
8627   const APFloat *C;
8628   if (match(Op1, m_APFloat(C)) && C->isInfinity()) {
8629     switch (C->isNegative() ? FCmpInst::getSwappedPredicate(Pred) : Pred) {
8630     default:
8631       break;
8632     case FCmpInst::FCMP_ORD:
8633     case FCmpInst::FCMP_UNO:
8634     case FCmpInst::FCMP_TRUE:
8635     case FCmpInst::FCMP_FALSE:
8636     case FCmpInst::FCMP_OGT:
8637     case FCmpInst::FCMP_ULE:
8638       llvm_unreachable("Should be simplified by InstSimplify");
8639     case FCmpInst::FCMP_OLT:
8640       return new FCmpInst(FCmpInst::FCMP_ONE, Op0, Op1, "", &I);
8641     case FCmpInst::FCMP_OLE:
8642       return new FCmpInst(FCmpInst::FCMP_ORD, Op0, ConstantFP::getZero(OpType),
8643                           "", &I);
8644     case FCmpInst::FCMP_OGE:
8645       return new FCmpInst(FCmpInst::FCMP_OEQ, Op0, Op1, "", &I);
8646     case FCmpInst::FCMP_ULT:
8647       return new FCmpInst(FCmpInst::FCMP_UNE, Op0, Op1, "", &I);
8648     case FCmpInst::FCMP_UGT:
8649       return new FCmpInst(FCmpInst::FCMP_UNO, Op0, ConstantFP::getZero(OpType),
8650                           "", &I);
8651     case FCmpInst::FCMP_UGE:
8652       return new FCmpInst(FCmpInst::FCMP_UEQ, Op0, Op1, "", &I);
8653     }
8654   }
8655 
8656   // Ignore signbit of bitcasted int when comparing equality to FP 0.0:
8657   // fcmp oeq/une (bitcast X), 0.0 --> (and X, SignMaskC) ==/!= 0
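       // e.g. float: fcmp oeq (bitcast i32 X), 0.0 --> (X & 0x7fffffff) == 0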
8658   if (match(Op1, m_PosZeroFP()) &&
8659       match(Op0, m_OneUse(m_ElementWiseBitCast(m_Value(X))))) {
8660     ICmpInst::Predicate IntPred = ICmpInst::BAD_ICMP_PREDICATE;
8661     if (Pred == FCmpInst::FCMP_OEQ)
8662       IntPred = ICmpInst::ICMP_EQ;
8663     else if (Pred == FCmpInst::FCMP_UNE)
8664       IntPred = ICmpInst::ICMP_NE;
8665 
8666     if (IntPred != ICmpInst::BAD_ICMP_PREDICATE) {
8667       Type *IntTy = X->getType();
8668       const APInt &SignMask = ~APInt::getSignMask(IntTy->getScalarSizeInBits());
8669       Value *MaskX = Builder.CreateAnd(X, ConstantInt::get(IntTy, SignMask));
8670       return new ICmpInst(IntPred, MaskX, ConstantInt::getNullValue(IntTy));
8671     }
8672   }
8673 
8674   // Handle fcmp with instruction LHS and constant RHS.
8675   Instruction *LHSI;
8676   Constant *RHSC;
8677   if (match(Op0, m_Instruction(LHSI)) && match(Op1, m_Constant(RHSC))) {
8678     switch (LHSI->getOpcode()) {
8679     case Instruction::Select:
8680       // fcmp eq (cond ? x : -x), 0 --> fcmp eq x, 0
8681       if (FCmpInst::isEquality(Pred) && match(RHSC, m_AnyZeroFP()) &&
8682           match(LHSI, m_c_Select(m_FNeg(m_Value(X)), m_Deferred(X))))
8683         return replaceOperand(I, 0, X);
8684       if (Instruction *NV = FoldOpIntoSelect(I, cast<SelectInst>(LHSI)))
8685         return NV;
8686       break;
8687     case Instruction::FSub:
8688       if (LHSI->hasOneUse())
8689         if (Instruction *NV = foldFCmpFSubIntoFCmp(I, LHSI, RHSC, *this))
8690           return NV;
8691       break;
8692     case Instruction::PHI:
8693       if (Instruction *NV = foldOpIntoPhi(I, cast<PHINode>(LHSI)))
8694         return NV;
8695       break;
8696     case Instruction::SIToFP:
8697     case Instruction::UIToFP:
8698       if (Instruction *NV = foldFCmpIntToFPConst(I, LHSI, RHSC))
8699         return NV;
8700       break;
8701     case Instruction::FDiv:
8702       if (Instruction *NV = foldFCmpReciprocalAndZero(I, LHSI, RHSC))
8703         return NV;
8704       break;
8705     case Instruction::Load:
8706       if (auto *GEP = dyn_cast<GetElementPtrInst>(LHSI->getOperand(0)))
8707         if (auto *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0)))
8708           if (Instruction *Res = foldCmpLoadFromIndexedGlobal(
8709                   cast<LoadInst>(LHSI), GEP, GV, I))
8710             return Res;
8711       break;
8712     }
8713   }
8714 
8715   if (Instruction *R = foldFabsWithFcmpZero(I, *this))
8716     return R;
8717 
8718   if (Instruction *R = foldSqrtWithFcmpZero(I, *this))
8719     return R;
8720 
8721   if (Instruction *R = foldFCmpWithFloorAndCeil(I, *this))
8722     return R;
8723 
8724   if (match(Op0, m_FNeg(m_Value(X)))) {
8725     // fcmp pred (fneg X), C --> fcmp swap(pred) X, -C
8726     Constant *C;
8727     if (match(Op1, m_Constant(C)))
8728       if (Constant *NegC = ConstantFoldUnaryOpOperand(Instruction::FNeg, C, DL))
8729         return new FCmpInst(I.getSwappedPredicate(), X, NegC, "", &I);
8730   }
8731 
8732   // fcmp (fadd X, 0.0), Y --> fcmp X, Y
8733   if (match(Op0, m_FAdd(m_Value(X), m_AnyZeroFP())))
8734     return new FCmpInst(Pred, X, Op1, "", &I);
8735 
8736   // fcmp X, (fadd Y, 0.0) --> fcmp X, Y
8737   if (match(Op1, m_FAdd(m_Value(Y), m_AnyZeroFP())))
8738     return new FCmpInst(Pred, Op0, Y, "", &I);
8739 
8740   if (match(Op0, m_FPExt(m_Value(X)))) {
8741     // fcmp (fpext X), (fpext Y) -> fcmp X, Y
8742     if (match(Op1, m_FPExt(m_Value(Y))) && X->getType() == Y->getType())
8743       return new FCmpInst(Pred, X, Y, "", &I);
8744 
8745     const APFloat *C;
8746     if (match(Op1, m_APFloat(C))) {
8747       const fltSemantics &FPSem =
8748           X->getType()->getScalarType()->getFltSemantics();
8749       bool Lossy;
8750       APFloat TruncC = *C;
8751       TruncC.convert(FPSem, APFloat::rmNearestTiesToEven, &Lossy);
8752 
8753       if (Lossy) {
8754         // X can't possibly equal the higher-precision constant, so reduce any
8755         // equality comparison.
8756         // TODO: Other predicates can be handled via getFCmpCode().
8757         switch (Pred) {
8758         case FCmpInst::FCMP_OEQ:
8759           // X is ordered and equal to an impossible constant --> false
8760           return replaceInstUsesWith(I, ConstantInt::getFalse(I.getType()));
8761         case FCmpInst::FCMP_ONE:
8762           // X is ordered and not equal to an impossible constant --> ordered
8763           return new FCmpInst(FCmpInst::FCMP_ORD, X,
8764                               ConstantFP::getZero(X->getType()));
8765         case FCmpInst::FCMP_UEQ:
8766           // X is unordered or equal to an impossible constant --> unordered
8767           return new FCmpInst(FCmpInst::FCMP_UNO, X,
8768                               ConstantFP::getZero(X->getType()));
8769         case FCmpInst::FCMP_UNE:
8770           // X is unordered or not equal to an impossible constant --> true
8771           return replaceInstUsesWith(I, ConstantInt::getTrue(I.getType()));
8772         default:
8773           break;
8774         }
8775       }
8776 
8777       // fcmp (fpext X), C -> fcmp X, (fptrunc C) if fptrunc is lossless
8778       // Avoid lossy conversions and denormals.
8779       // Zero is a special case that's OK to convert.
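           // e.g. fcmp olt (fpext float X to double), 2.0 --> fcmp olt X, 2.0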
8780       APFloat Fabs = TruncC;
8781       Fabs.clearSign();
8782       if (!Lossy &&
8783           (Fabs.isZero() || !(Fabs < APFloat::getSmallestNormalized(FPSem)))) {
8784         Constant *NewC = ConstantFP::get(X->getType(), TruncC);
8785         return new FCmpInst(Pred, X, NewC, "", &I);
8786       }
8787     }
8788   }
8789 
8790   // Convert a sign-bit test of an FP value into a cast and integer compare.
8791   // TODO: Simplify if the copysign constant is 0.0 or NaN.
8792   // TODO: Handle non-zero compare constants.
8793   // TODO: Handle other predicates.
8794   if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::copysign>(m_APFloat(C),
8795                                                            m_Value(X)))) &&
8796       match(Op1, m_AnyZeroFP()) && !C->isZero() && !C->isNaN()) {
8797     Type *IntType = Builder.getIntNTy(X->getType()->getScalarSizeInBits());
8798     if (auto *VecTy = dyn_cast<VectorType>(OpType))
8799       IntType = VectorType::get(IntType, VecTy->getElementCount());
8800 
8801     // copysign(non-zero constant, X) < 0.0 --> (bitcast X) < 0
8802     if (Pred == FCmpInst::FCMP_OLT) {
8803       Value *IntX = Builder.CreateBitCast(X, IntType);
8804       return new ICmpInst(ICmpInst::ICMP_SLT, IntX,
8805                           ConstantInt::getNullValue(IntType));
8806     }
8807   }
8808 
8809   {
8810     Value *CanonLHS = nullptr, *CanonRHS = nullptr;
8811     match(Op0, m_Intrinsic<Intrinsic::canonicalize>(m_Value(CanonLHS)));
8812     match(Op1, m_Intrinsic<Intrinsic::canonicalize>(m_Value(CanonRHS)));
8813 
8814     // (canonicalize(x) == x) => (x == x)
8815     if (CanonLHS == Op1)
8816       return new FCmpInst(Pred, Op1, Op1, "", &I);
8817 
8818     // (x == canonicalize(x)) => (x == x)
8819     if (CanonRHS == Op0)
8820       return new FCmpInst(Pred, Op0, Op0, "", &I);
8821 
8822     // (canonicalize(x) == canonicalize(y)) => (x == y)
8823     if (CanonLHS && CanonRHS)
8824       return new FCmpInst(Pred, CanonLHS, CanonRHS, "", &I);
8825   }
8826 
8827   if (I.getType()->isVectorTy())
8828     if (Instruction *Res = foldVectorCmp(I, Builder))
8829       return Res;
8830 
8831   return Changed ? &I : nullptr;
8832 }
8833