//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Similar to getICmpCode but for FCmpInst. This encodes an fcmp predicate
/// into a four-bit mask.
static unsigned getFCmpCode(FCmpInst::Predicate CC) {
  assert(FCmpInst::FCMP_FALSE <= CC && CC <= FCmpInst::FCMP_TRUE &&
         "Unexpected FCmp predicate!");
  // Take advantage of the bit pattern of FCmpInst::Predicate here.
  //                                                 U L G E
  static_assert(FCmpInst::FCMP_FALSE ==  0, "");  // 0 0 0 0
  static_assert(FCmpInst::FCMP_OEQ   ==  1, "");  // 0 0 0 1
  static_assert(FCmpInst::FCMP_OGT   ==  2, "");  // 0 0 1 0
  static_assert(FCmpInst::FCMP_OGE   ==  3, "");  // 0 0 1 1
  static_assert(FCmpInst::FCMP_OLT   ==  4, "");  // 0 1 0 0
  static_assert(FCmpInst::FCMP_OLE   ==  5, "");  // 0 1 0 1
  static_assert(FCmpInst::FCMP_ONE   ==  6, "");  // 0 1 1 0
  static_assert(FCmpInst::FCMP_ORD   ==  7, "");  // 0 1 1 1
  static_assert(FCmpInst::FCMP_UNO   ==  8, "");  // 1 0 0 0
  static_assert(FCmpInst::FCMP_UEQ   ==  9, "");  // 1 0 0 1
  static_assert(FCmpInst::FCMP_UGT   == 10, "");  // 1 0 1 0
  static_assert(FCmpInst::FCMP_UGE   == 11, "");  // 1 0 1 1
  static_assert(FCmpInst::FCMP_ULT   == 12, "");  // 1 1 0 0
  static_assert(FCmpInst::FCMP_ULE   == 13, "");  // 1 1 0 1
  static_assert(FCmpInst::FCMP_UNE   == 14, "");  // 1 1 1 0
  static_assert(FCmpInst::FCMP_TRUE  == 15, "");  // 1 1 1 1
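  // With this encoding, combining two fcmp predicates reduces to a bitwise
  // operation on their codes; for example:
  //   FCMP_OLT | FCMP_OGT == 6 == FCMP_ONE
  //   FCMP_OLE & FCMP_OGE == 1 == FCMP_OEQ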
  return CC;
}

/// This is the complement of getICmpCode: it turns a predicate code and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode: it turns a predicate code and two
/// operands into either an FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {
  const auto Pred = static_cast<FCmpInst::Predicate>(Code);
  assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE &&
         "Unexpected FCmp predicate!");
  if (Pred == FCmpInst::FCMP_FALSE)
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
  if (Pred == FCmpInst::FCMP_TRUE)
    return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
  return Builder.CreateFCmp(Pred, LHS, RHS);
}

/// Transform BITWISE_OP(BSWAP(A), BSWAP(B)) or
/// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B)).
/// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
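/// For example, a sketch of the rewrite (the IR value names are illustrative):
///   %l = call i32 @llvm.bswap.i32(i32 %x)
///   %r = and i32 %l, 255
/// becomes
///   %t = and i32 %x, -16777216   ; bswap(i32 255) == 0xFF000000
///   %r = call i32 @llvm.bswap.i32(i32 %t)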
static Value *SimplifyBSwap(BinaryOperator &I,
                            InstCombiner::BuilderTy &Builder) {
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");

  Value *OldLHS = I.getOperand(0);
  Value *OldRHS = I.getOperand(1);

  Value *NewLHS;
  if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
    return nullptr;

  Value *NewRHS;
  const APInt *C;

  if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
    // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
    if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
      return nullptr;
    // NewRHS initialized by the matcher.
  } else if (match(OldRHS, m_APInt(C))) {
    // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
    if (!OldLHS->hasOneUse())
      return nullptr;
    NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
  } else
    return nullptr;

  Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
                                          I.getType());
  return Builder.CreateCall(F, BinOp);
}

/// This handles expressions of the form ((val OP C1) & C2), where the Op
/// parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'.
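/// For illustration (with hypothetical constants), when OP is 'add':
///   ((x + 8) & 4) --> (x & 4)        (bit 2 of the add constant is clear)
///   ((x + 4) & 4) --> (x & 4) ^ 4    (bit 2 of the add constant is set)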
Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
                                    ConstantInt *OpRHS,
                                    ConstantInt *AndRHS,
                                    BinaryOperator &TheAnd) {
  Value *X = Op->getOperand(0);

  switch (Op->getOpcode()) {
  default: break;
  case Instruction::Add:
    if (Op->hasOneUse()) {
      // Adding a one to a single-bit bit-field should be turned into an XOR
      // of the bit.  First thing to check is to see if this AND is with a
      // single bit constant.
      const APInt &AndRHSV = AndRHS->getValue();

      // If there is only one bit set.
      if (AndRHSV.isPowerOf2()) {
        // Ok, at this point, we know that we are masking the result of the
        // ADD down to exactly one bit.  If the constant we are adding has
        // no bits set below this bit, then we can eliminate the ADD.
        const APInt &AddRHS = OpRHS->getValue();

        // Check to see if any bits below the one bit set in AndRHSV are set.
        if ((AddRHS & (AndRHSV - 1)).isNullValue()) {
          // If not, the only thing that can affect the output of the AND is
          // the bit specified by AndRHSV.  If that bit is set, the effect of
          // the XOR is to toggle the bit.  If it is clear, then the ADD has
          // no effect.
          if ((AddRHS & AndRHSV).isNullValue()) { // Bit is not set, noop
            TheAnd.setOperand(0, X);
            return &TheAnd;
          } else {
            // Pull the XOR out of the AND.
            Value *NewAnd = Builder.CreateAnd(X, AndRHS);
            NewAnd->takeName(Op);
            return BinaryOperator::CreateXor(NewAnd, AndRHS);
          }
        }
      }
    }
    break;
  }
  return nullptr;
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
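/// For example, with isSigned == false and Inside == true, Lo == 5 and
/// Hi == 10 emit a single compare (value names illustrative):
///   %v.off = sub i32 %v, 5
///   %r = icmp ult i32 %v.off, 5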
Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
                                     bool isSigned, bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V <  Hi --> V <  Hi
  // V <  Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V <  Hi --> V - Lo u<  Hi - Lo
  // V <  Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A, i.e.
/// all bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0, i.e.
/// all bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in the above descriptions "==" should be replaced by "!=".
///   Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///    (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///    (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes           =     1,
  AMask_NotAllOnes        =     2,
  BMask_AllOnes           =     4,
  BMask_NotAllOnes        =     8,
  Mask_AllZeros           =    16,
  Mask_NotAllZeros        =    32,
  AMask_Mixed             =    64,
  AMask_NotMixed          =   128,
  BMask_Mixed             =   256,
  BMask_NotMixed          =   512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp Pred (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  ConstantInt *ACst = dyn_cast<ConstantInt>(A);
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = (ACst && !ACst->isZero() && ACst->getValue().isPowerOf2());
  bool IsBPow2 = (BCst && !BCst->isZero() && BCst->getValue().isPowerOf2());
  unsigned MaskVal = 0;
  if (CCst && CCst->isZero()) {
    // If C is zero, then both A and B qualify as masks.
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ACst && CCst && ConstantExpr::getAnd(ACst, CCst) == CCst) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (BCst && CCst && ConstantExpr::getAnd(BCst, CCst) == CCst) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
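/// For example, conjugateICmpMask(AMask_AllOnes | BMask_Mixed) yields
/// AMask_NotAllOnes | BMask_NotMixed: each "==" flag moves into the adjacent
/// "!=" flag and vice versa.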
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}

// Adapts the external decomposeBitTestICmp for local use.
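// For example, (icmp slt %x, 0) decomposes with Pred == ICMP_NE, Y == the
// sign-bit mask, and Z == 0, i.e. it is treated as (icmp ne (%x & SignMask), 0).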
static bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred,
                                 Value *&X, Value *&Y, Value *&Z) {
  APInt Mask;
  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
    return false;

  Y = ConstantInt::get(X->getType(), Mask);
  Z = ConstantInt::get(X->getType(), 0);
  return true;
}

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
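/// For example, given LHS = (icmp eq (and %p, 7), 0) and
/// RHS = (icmp eq (and %p, 12), 8) (value names illustrative), this selects
/// A = %p, B = 7, C = 0, D = 12, E = 8, and returns the pattern classes of
/// both sides.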
static
Optional<std::pair<unsigned, unsigned>>
getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C,
                         Value *&D, Value *&E, ICmpInst *LHS,
                         ICmpInst *RHS,
                         ICmpInst::Predicate &PredL,
                         ICmpInst::Predicate &PredR) {
  // Vectors are not (yet?) supported. Don't support pointers either.
  if (!LHS->getOperand(0)->getType()->isIntegerTy() ||
      !RHS->getOperand(0)->getType()->isIntegerTy())
    return None;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // or L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // form above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return None;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool Ok = false;
  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return None;
    }
    E = R2;
    R1 = nullptr;
    Ok = true;
  } else {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return None;

  // Look for ANDs on the right side of the RHS icmp.
  if (!Ok) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R1;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R1;
      Ok = true;
    } else {
      return None;
    }
  }
  if (!Ok)
    return None;

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return Optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right-hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
    Value *A, Value *B, Value *C, Value *D, Value *E,
    ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    llvm::InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E)
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //      !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle only the case where B, C, D, and E are constants.
  //
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  if (!BCst)
    return nullptr;
  ConstantInt *CCst = dyn_cast<ConstantInt>(C);
  if (!CCst)
    return nullptr;
  ConstantInt *DCst = dyn_cast<ConstantInt>(D);
  if (!DCst)
    return nullptr;
  ConstantInt *ECst = dyn_cast<ConstantInt>(E);
  if (!ECst)
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as:
  // (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  // (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  if (PredR != NewCC)
    ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));

  // If B or D is zero, skip: LHS or RHS can then be trivially folded by other
  // rules, and this pattern no longer applies.
  if (BCst->getValue() == 0 || DCst->getValue() == 0)
    return nullptr;

  // If B and D don't intersect, i.e. (B & D) == 0, don't fold because we can't
  // deduce anything from it.
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding.
  if ((BCst->getValue() & DCst->getValue()) == 0)
    return nullptr;

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  // (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection of
  // B and D has only one bit set) and,
  //
  // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
  // words, the intersection of B and D is zero), that is, ((B & D) & E) == 0
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  // (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((BCst->getValue() & DCst->getValue()) & ECst->getValue()) == 0) &&
      (BCst->getValue() & (BCst->getValue() ^ DCst->getValue())).isPowerOf2()) {
    APInt BorD = BCst->getValue() | DCst->getValue();
    APInt BandBxorDorE =
        (BCst->getValue() & (BCst->getValue() ^ DCst->getValue())) |
        ECst->getValue();
    Value *NewMask = ConstantInt::get(BCst->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(BCst->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](ConstantInt *C1, ConstantInt *C2) {
    return (C1->getValue() & C2->getValue()) == C1->getValue();
  };
  auto IsSuperSetOrEqual = [](ConstantInt *C1, ConstantInt *C2) {
    return (C1->getValue() & C2->getValue()) == C2->getValue();
  };

  // In the following, we consider only the cases where B is a superset of D, B
  // is a subset of D, or B == D, because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above.)
  // For example,
  // (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D, or B == D.

  // If E is zero and B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated); otherwise, no
  // folding.
  // For example,
  // (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  // (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst->isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, and E aren't zero, and (B & D) == B, (B & D) == D, or
  // B == D. If B is a superset of (or equal to) D, since E is not zero, LHS is
  // subsumed by RHS (RHS implies LHS.) So the whole expression becomes
  // RHS. For example,
  // (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  // (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst))
    return RHS;
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // i.e. (B & E) != 0, then LHS is subsumed by RHS. For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((BCst->getValue() & ECst->getValue()) != 0)
    return RHS;
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated.) For example,
  // (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  // (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right-hand side
/// aren't of the common mask pattern type.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
    Value *A, Value *B, Value *C, Value *D, Value *E,
    ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask,
    llvm::InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  //    which gets swapped to
  //    (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, C, D, E,
            PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, E, B, C,
            PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     llvm::InstCombiner::BuilderTy &Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D having a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  // Remaining cases assume at least that B and D are constant, and depend on
  // their actual values. This isn't strictly necessary, just a "handle the
  // easy cases for now" decision.
  ConstantInt *BCst = dyn_cast<ConstantInt>(B);
  if (!BCst)
    return nullptr;
  ConstantInt *DCst = dyn_cast<ConstantInt>(D);
  if (!DCst)
    return nullptr;

  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    //     -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D" is
    // the same as either B or D).
    APInt NewMask = BCst->getValue() & DCst->getValue();

    if (NewMask == BCst->getValue())
      return LHS;
    else if (NewMask == DCst->getValue())
      return RHS;
  }

  if (Mask & AMask_NotAllOnes) {
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    //     -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D" is
    // the same as either B or D).
    APInt NewMask = BCst->getValue() | DCst->getValue();

    if (NewMask == BCst->getValue())
      return LHS;
    else if (NewMask == DCst->getValue())
      return RHS;
  }

  if (Mask & BMask_Mixed) {
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // We can't simply use C and E because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D having a single bit set.
    ConstantInt *CCst = dyn_cast<ConstantInt>(C);
    if (!CCst)
      return nullptr;
    ConstantInt *ECst = dyn_cast<ConstantInt>(E);
    if (!ECst)
      return nullptr;
    if (PredL != NewCC)
      CCst = cast<ConstantInt>(ConstantExpr::getXor(BCst, CCst));
    if (PredR != NewCC)
      ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst));

    // If there is a conflict, we should actually return a false for the
    // whole construct.
    if (((BCst->getValue() & DCst->getValue()) &
         (CCst->getValue() ^ ECst->getValue())).getBoolValue())
      return ConstantInt::get(LHS->getType(), !IsAnd);

    Value *NewOr1 = Builder.CreateOr(B, D);
    Value *NewOr2 = ConstantExpr::getOr(CCst, ECst);
    Value *NewAnd = Builder.CreateAnd(A, NewOr1);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
  }

  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                        bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
                               Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
                               Cmp1->getPredicate());

  Value *Input = Cmp0->getOperand(0);
  Value *RangeEnd;
  if (Cmp1->getOperand(0) == Input) {
    // For the upper range compare we have: icmp x, n
    RangeEnd = Cmp1->getOperand(1);
  } else if (Cmp1->getOperand(1) == Input) {
    // For the upper range compare we have: icmp n, x
    RangeEnd = Cmp1->getOperand(0);
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
    case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
    case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
    default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}

static Value *
foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
                                     bool JoinedByAnd,
                                     InstCombiner::BuilderTy &Builder) {
  Value *X = LHS->getOperand(0);
  if (X != RHS->getOperand(0))
    return nullptr;

  const APInt *C1, *C2;
  if (!match(LHS->getOperand(1), m_APInt(C1)) ||
      !match(RHS->getOperand(1), m_APInt(C2)))
    return nullptr;

  // We only handle (X != C1 && X != C2) and (X == C1 || X == C2).
  ICmpInst::Predicate Pred = LHS->getPredicate();
  if (Pred != RHS->getPredicate())
    return nullptr;
  if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
    return nullptr;
  if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
    return nullptr;

  // The larger unsigned constant goes on the right.
  if (C1->ugt(*C2))
    std::swap(C1, C2);

  APInt Xor = *C1 ^ *C2;
  if (Xor.isPowerOf2()) {
    // If C1 and C2 differ by only one bit, then set that bit in X and
    // compare against the larger constant:
    // (X == C1 || X == C2) --> (X | (C1 ^ C2)) == C2
    // (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2
    // We choose an 'or' with a Pow2 constant rather than the inverse mask with
    // 'and' because that may lead to smaller codegen from a smaller constant.
    Value *Or = Builder.CreateOr(X, ConstantInt::get(X->getType(), Xor));
    return Builder.CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
  }

  // Special case: get the ordering right when the values wrap around zero.
  // I.e., we assumed the constants were unsigned when swapping earlier.
  if (C1->isNullValue() && C2->isAllOnesValue())
    std::swap(C1, C2);

  if (*C1 == *C2 - 1) {
    // (X == 13 || X == 14) --> X - 13 <=u 1
    // (X != 13 && X != 14) --> X - 13  >u 1
    // An 'add' is the canonical IR form, so favor that over a 'sub'.
    Value *Add = Builder.CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
    auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
    return Builder.CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
  }

  return nullptr;
}

// Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
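// For example, with K1 = 1 and K2 = 8 (two one-bit masks):
//   (icmp ne (and %a, 1), 0) & (icmp ne (and %a, 8), 0)
//     --> (icmp eq (and %a, 9), 9)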
Value *InstCombiner::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
                                                   bool JoinedByAnd,
                                                   Instruction &CxtI) {
  ICmpInst::Predicate Pred = LHS->getPredicate();
  if (Pred != RHS->getPredicate())
    return nullptr;
  if (JoinedByAnd && Pred != ICmpInst::ICMP_NE)
    return nullptr;
  if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ)
    return nullptr;

  // TODO support vector splats
  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (!LHSC || !RHSC || !LHSC->isZero() || !RHSC->isZero())
    return nullptr;

  Value *A, *B, *C, *D;
  if (match(LHS->getOperand(0), m_And(m_Value(A), m_Value(B))) &&
      match(RHS->getOperand(0), m_And(m_Value(C), m_Value(D)))) {
    if (A == D || B == D)
      std::swap(C, D);
    if (B == C)
      std::swap(A, B);

    if (A == C &&
        isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) &&
        isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) {
      Value *Mask = Builder.CreateOr(B, D);
      Value *Masked = Builder.CreateAnd(A, Mask);
      auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
      return Builder.CreateICmp(NewPred, Masked, Mask);
    }
  }

  return nullptr;
}

/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e.  %arg & 4294967168  can be either  4294967168  or  0
/// Pattern can be one of:
///   %t = add        i32 %arg,    128
///   %r = icmp   ult i32 %t,      256
/// Or
///   %t0 = shl       i32 %arg,    24
///   %t1 = ashr      i32 %t0,     24
///   %r  = icmp  eq  i32 %t1,     %arg
/// Or
///   %t0 = trunc     i32 %arg  to i8
///   %t1 = sext      i8  %t0   to i32
///   %r  = icmp  eq  i32 %t1,     %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32   %arg,    -1
/// Or
///   %t = and      i32   %arg,    2147483648
///   %r = icmp eq  i32   %t,      0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  assert(CxtI.getOpcode() == Instruction::And);

  // Match  icmp ult (add %arg, C01), C1   (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    CmpInst::Predicate Pred;
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(ICmp,
                m_ICmp(Pred, m_Add(m_Value(X), m_Power2(I01)), m_Power2(I1))) &&
          Pred == ICmpInst::ICMP_ULT && I1->ugt(*I01) && I01->shl(1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be the 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into:  icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpInst::Predicate Pred = ICmp->getPredicate();
    // Can it be decomposed into  icmp eq (X & Mask), 0  ?
    if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1),
                                   Pred, X, UnsetBitsMask,
                                   /*LookThroughTrunc=*/false) &&
        Pred == ICmpInst::ICMP_EQ)
      return true;
    // Is it  icmp eq (X & Mask), 0  already?
    const APInt *Mask;
    if (match(ICmp, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) &&
        Pred == ICmpInst::ICMP_EQ) {
      UnsetBitsMask = *Mask;
      return true;
    }
    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isNullValue() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(X0, m_Trunc(m_Specific(X1)))) {
    UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask.
  if (!UnsetBitsMask.intersects(SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.

  // %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
                               CxtI.getName() + ".simplified");
}

/// Reduce a pair of compares that check if a value has exactly 1 bit set.
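/// For example:
///   (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
///   (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1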
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder) {
  // Handle 'and' / 'or' commutation: make the equality check the first operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(Cmp0, Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(Cmp0, Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(2))) &&
      Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(1))) &&
      Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_UGT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  Value *ZeroCmpOp;
  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  auto IsKnownNonZero = [&](Value *V) {
    return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
  };

  ICmpInst::Predicate UnsignedPred;

  Value *A, *B;
  if (match(UnsignedICmp,
            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    if (UnsignedICmp->getOperand(0) != ZeroCmpOp)
      UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);

    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!IsKnownNonZero(NonZero))
        std::swap(NonZero, Other);
      return IsKnownNonZero(NonZero);
    };

    // Given  ZeroCmpOp = (A + B)
    //   ZeroCmpOp <= A && ZeroCmpOp != 0  -->  (0-B) <  A
    //   ZeroCmpOp >  A || ZeroCmpOp == 0  -->  (0-B) >= A
    //
    //   ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-X) <  Y
    //   ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-X) >= Y
    //     where X is whichever of A and B is known to be non-zero,
    //     and Y is the other value.
    if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
        IsAnd)
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd)
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
  }

  Value *Base, *Offset;
  if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset))))
    return nullptr;

  if (!match(UnsignedICmp,
             m_c_ICmp(UnsignedPred, m_Specific(Base), m_Specific(Offset))) ||
      !ICmpInst::isUnsigned(UnsignedPred))
    return nullptr;
  if (UnsignedICmp->getOperand(0) != Base)
    UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);

  // Base >=/> Offset && (Base - Offset) != 0  <-->  Base > Offset
  // (no overflow and not null)
  if ((UnsignedPred == ICmpInst::ICMP_UGE ||
       UnsignedPred == ICmpInst::ICMP_UGT) &&
      EqPred == ICmpInst::ICMP_NE && IsAnd)
    return Builder.CreateICmpUGT(Base, Offset);

  // Base <=/< Offset || (Base - Offset) == 0  <-->  Base <= Offset
  // (overflow or null)
  if ((UnsignedPred == ICmpInst::ICMP_ULE ||
       UnsignedPred == ICmpInst::ICMP_ULT) &&
      EqPred == ICmpInst::ICMP_EQ && !IsAnd)
    return Builder.CreateICmpULE(Base, Offset);

  // Base <= Offset && (Base - Offset) != 0  -->  Base < Offset
  if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
      IsAnd)
    return Builder.CreateICmpULT(Base, Offset);

  // Base > Offset || (Base - Offset) == 0  -->  Base >= Offset
  if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
      !IsAnd)
    return Builder.CreateICmpUGE(Base, Offset);

  return nullptr;
}

/// Fold (icmp)&(icmp) if possible.
Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                    Instruction &CxtI) {
  const SimplifyQuery Q = SQ.getWithInstruction(&CxtI);

  // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
  // if K1 and K2 are one-bit masks.
  if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, true, CxtI))
    return V;

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
  if (predicatesFoldable(PredL, PredR)) {
    if (LHS->getOperand(0) == RHS->getOperand(1) &&
        LHS->getOperand(1) == RHS->getOperand(0))
      LHS->swapOperands();
    if (LHS->getOperand(0) == RHS->getOperand(0) &&
        LHS->getOperand(1) == RHS->getOperand(1)) {
      Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
      unsigned Code = getICmpCode(LHS) & getICmpCode(RHS);
      bool IsSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder);
    }
  }

  // Handle (roughly):  (icmp eq (A & B), C) & (icmp eq (A & D), E)
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder))
    return V;

  // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
  if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false))
    return V;

  // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
  if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false))
    return V;

  if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, true, Builder))
    return V;

  if (Value *V = foldSignedTruncationCheck(LHS, RHS, CxtI, Builder))
    return V;

  if (Value *V = foldIsPowerOf2(LHS, RHS, true /* JoinedByAnd */, Builder))
    return V;

  if (Value *X =
          foldUnsignedUnderflowCheck(LHS, RHS, /*IsAnd=*/true, Q, Builder))
    return X;
  if (Value *X =
          foldUnsignedUnderflowCheck(RHS, LHS, /*IsAnd=*/true, Q, Builder))
    return X;

  // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2).
  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
  ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
  ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
  if (!LHSC || !RHSC)
    return nullptr;

  if (LHSC == RHSC && PredL == PredR) {
    // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C)
    // where C is a power of 2 or
    // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
    if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) ||
        (PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) {
      Value *NewOr = Builder.CreateOr(LHS0, RHS0);
      return Builder.CreateICmp(PredL, NewOr, LHSC);
    }
  }

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // where CMAX is the all ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
  if (PredL == ICmpInst::ICMP_EQ && PredL == PredR && LHS->hasOneUse() &&
      RHS->hasOneUse()) {
    Value *V;
    ConstantInt *AndC, *SmallC = nullptr, *BigC = nullptr;

    // (trunc x) == C1 & (and x, CA) == C2
    // (and x, CA) == C2 & (trunc x) == C1
    if (match(RHS0, m_Trunc(m_Value(V))) &&
        match(LHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
      SmallC = RHSC;
      BigC = LHSC;
    } else if (match(LHS0, m_Trunc(m_Value(V))) &&
               match(RHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) {
      SmallC = LHSC;
      BigC = RHSC;
    }

    if (SmallC && BigC) {
      unsigned BigBitSize = BigC->getType()->getBitWidth();
      unsigned SmallBitSize = SmallC->getType()->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
      if ((Low & AndC->getValue()).isNullValue() &&
          (Low & BigC->getValue()).isNullValue()) {
        Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue());
        APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue();
        Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N);
        return Builder.CreateICmp(PredL, NewAnd, NewVal);
      }
    }
  }

  // From here on, we only handle:
  //    (icmp1 A, C1) & (icmp2 A, C2) --> something simpler.
  if (LHS0 != RHS0)
    return nullptr;

  // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
  if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
      PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
      PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
      PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
    return nullptr;

  // We can't fold (ugt x, C) & (sgt x, C2).
  if (!predicatesFoldable(PredL, PredR))
    return nullptr;

  // Ensure that the larger constant is on the RHS.
  bool ShouldSwap;
  if (CmpInst::isSigned(PredL) ||
      (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
    ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
  else
    ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());

  if (ShouldSwap) {
    std::swap(LHS, RHS);
    std::swap(LHSC, RHSC);
    std::swap(PredL, PredR);
  }

  // At this point, we know we have two icmp instructions
  // comparing a value against two constants and and'ing the result
  // together.  Because of the above check, we know that we only have
  // icmp eq, icmp ne, icmp [su]lt, and icmp [su]gt here. We also know
  // (from the icmp folding check above) that the two constants
  // are not equal and that the larger constant is on the RHS.
  assert(LHSC != RHSC && "Compares not folded above?");

  switch (PredL) {
  default:
    llvm_unreachable("Unknown integer condition code!");
  case ICmpInst::ICMP_NE:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_ULT:
      // (X != 13 & X u< 14) -> X u< 13
      if (LHSC->getValue() == (RHSC->getValue() - 1))
        return Builder.CreateICmpULT(LHS0, LHSC);
      if (LHSC->isZero()) // (X != 0 & X u< C) -> X-1 u< C-1
        return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                               false, true);
      break; // (X != 13 & X u< 15) -> no change
    case ICmpInst::ICMP_SLT:
      // (X != 13 & X s< 14) -> X s< 13
      if (LHSC->getValue() == (RHSC->getValue() - 1))
        return Builder.CreateICmpSLT(LHS0, LHSC);
      // (X != INT_MIN & X s< C) -> X-(INT_MIN+1) u< (C-(INT_MIN+1))
      if (LHSC->isMinValue(true))
        return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                               true, true);
      break; // (X != 13 & X s< 15) -> no change
    case ICmpInst::ICMP_NE:
      // Potential folds for this case should already be handled.
      break;
    }
    break;
  case ICmpInst::ICMP_UGT:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:
      // (X u> 13 & X != 14) -> X u> 14
      if (RHSC->getValue() == (LHSC->getValue() + 1))
        return Builder.CreateICmp(PredL, LHS0, RHSC);
      // X u> C & X != UINT_MAX -> (X-(C+1)) u< UINT_MAX-(C+1)
      if (RHSC->isMaxValue(false))
        return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                               false, true);
      break;                 // (X u> 13 & X != 15) -> no change
    case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) u< 1
      return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                             false, true);
    }
    break;
  case ICmpInst::ICMP_SGT:
    switch (PredR) {
    default:
      llvm_unreachable("Unknown integer condition code!");
    case ICmpInst::ICMP_NE:
      // (X s> 13 & X != 14) -> X s> 14
      if (RHSC->getValue() == (LHSC->getValue() + 1))
        return Builder.CreateICmp(PredL, LHS0, RHSC);
      // X s> C & X != INT_MAX -> (X-(C+1)) u< INT_MAX-(C+1)
      if (RHSC->isMaxValue(true))
        return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
                               true, true);
      break;                 // (X s> 13 & X != 15) -> no change
    case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) u< 1
      return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true,
                             true);
    }
    break;
  }

  return nullptr;
}
1359 
1360 Value *InstCombiner::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
1361   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1362   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1363   FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1364 
1365   if (LHS0 == RHS1 && RHS0 == LHS1) {
1366     // Swap RHS operands to match LHS.
1367     PredR = FCmpInst::getSwappedPredicate(PredR);
1368     std::swap(RHS0, RHS1);
1369   }
1370 
1371   // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
1372   // Suppose the relation between x and y is R, where R is one of
1373   // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
1374   // testing the desired relations.
1375   //
1376   // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1377   //    bool(R & CC0) && bool(R & CC1)
1378   //  = bool((R & CC0) & (R & CC1))
1379   //  = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
1380   //
1381   // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1382   //    bool(R & CC0) || bool(R & CC1)
1383   //  = bool((R & CC0) | (R & CC1))
1384   //  = bool(R & (CC0 | CC1)) <= by reversed distribution
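       //
       // e.g. using the predicate masks above (OLT = 0100, OEQ = 0001):
       //   (fcmp olt x, y) & (fcmp oeq x, y) --> 0100 & 0001 = 0000 --> false
       //   (fcmp olt x, y) | (fcmp oeq x, y) --> 0100 | 0001 = 0101 --> fcmp ole x, y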
1385   if (LHS0 == RHS0 && LHS1 == RHS1) {
1386     unsigned FCmpCodeL = getFCmpCode(PredL);
1387     unsigned FCmpCodeR = getFCmpCode(PredR);
1388     unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1389     return getFCmpValue(NewPred, LHS0, LHS1, Builder);
1390   }
1391 
1392   if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1393       (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1394     if (LHS0->getType() != RHS0->getType())
1395       return nullptr;
1396 
1397     // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
1398     // (fcmp ord/uno X, C) will be transformed to (fcmp ord/uno X, +0.0).
1399     if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP()))
1400       // Ignore the constants because they are obviously not NANs:
1401       // (fcmp ord x, 0.0) & (fcmp ord y, 0.0)  -> (fcmp ord x, y)
1402       // (fcmp uno x, 0.0) | (fcmp uno y, 0.0)  -> (fcmp uno x, y)
1403       return Builder.CreateFCmp(PredL, LHS0, RHS0);
1404   }
1405 
1406   return nullptr;
1407 }
1408 
1409 /// This is a limited reassociation for a special case (see above) where we
1410 /// are checking if two values are either both NAN (unordered) or not-NAN
1411 /// (ordered). This could be handled more generally in '-reassociate', but it
1412 /// seems like an unlikely pattern for a large number of logic ops and fcmps.
1413 static Instruction *reassociateFCmps(BinaryOperator &BO,
1414                                      InstCombiner::BuilderTy &Builder) {
1415   Instruction::BinaryOps Opcode = BO.getOpcode();
1416   assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1417          "Expecting and/or op for fcmp transform");
1418 
1419   // There are 4 commuted variants of the pattern. Canonicalize operands of this
1420   // logic op so an fcmp is operand 0 and a matching logic op is operand 1.
1421   Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X;
1422   FCmpInst::Predicate Pred;
1423   if (match(Op1, m_FCmp(Pred, m_Value(), m_AnyZeroFP())))
1424     std::swap(Op0, Op1);
1425 
1426   // Match inner binop and the predicate for combining 2 NAN checks into 1.
1427   BinaryOperator *BO1;
1428   FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
1429                                                            : FCmpInst::FCMP_UNO;
1430   if (!match(Op0, m_FCmp(Pred, m_Value(X), m_AnyZeroFP())) || Pred != NanPred ||
1431       !match(Op1, m_BinOp(BO1)) || BO1->getOpcode() != Opcode)
1432     return nullptr;
1433 
1434   // The inner logic op must have a matching fcmp operand.
1435   Value *BO10 = BO1->getOperand(0), *BO11 = BO1->getOperand(1), *Y;
1436   if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
1437       Pred != NanPred || X->getType() != Y->getType())
1438     std::swap(BO10, BO11);
1439 
1440   if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
1441       Pred != NanPred || X->getType() != Y->getType())
1442     return nullptr;
1443 
1444   // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
1445   // or  (fcmp uno X, 0), (or  (fcmp uno Y, 0), Z) --> or  (fcmp uno X, Y), Z
1446   Value *NewFCmp = Builder.CreateFCmp(Pred, X, Y);
1447   if (auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) {
1448     // Intersect FMF from the 2 source fcmps.
1449     NewFCmpInst->copyIRFlags(Op0);
1450     NewFCmpInst->andIRFlags(BO10);
1451   }
1452   return BinaryOperator::Create(Opcode, NewFCmp, BO11);
1453 }
1454 
1455 /// Match De Morgan's Laws:
1456 /// (~A & ~B) == (~(A | B))
1457 /// (~A | ~B) == (~(A & B))
1458 static Instruction *matchDeMorgansLaws(BinaryOperator &I,
1459                                        InstCombiner::BuilderTy &Builder) {
1460   auto Opcode = I.getOpcode();
1461   assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1462          "Trying to match De Morgan's Laws with something other than and/or");
1463 
1464   // Flip the logic operation.
1465   Opcode = (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1466 
1467   Value *A, *B;
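       // Only transform when neither A nor B is cheap to invert; if one is,
       // the existing 'not' ops are likely better removed by other folds.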
1468   if (match(I.getOperand(0), m_OneUse(m_Not(m_Value(A)))) &&
1469       match(I.getOperand(1), m_OneUse(m_Not(m_Value(B)))) &&
1470       !isFreeToInvert(A, A->hasOneUse()) &&
1471       !isFreeToInvert(B, B->hasOneUse())) {
1472     Value *AndOr = Builder.CreateBinOp(Opcode, A, B, I.getName() + ".demorgan");
1473     return BinaryOperator::CreateNot(AndOr);
1474   }
1475 
1476   return nullptr;
1477 }
1478 
1479 bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
1480   Value *CastSrc = CI->getOperand(0);
1481 
1482   // Noop casts and casts of constants should be eliminated trivially.
1483   if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
1484     return false;
1485 
1486   // If this cast is paired with another cast that can be eliminated, we prefer
1487   // to have it eliminated.
1488   if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
1489     if (isEliminableCastPair(PrecedingCI, CI))
1490       return false;
1491 
1492   return true;
1493 }
1494 
1495 /// Fold {and,or,xor} (cast X), C.
1496 static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
1497                                           InstCombiner::BuilderTy &Builder) {
1498   Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
1499   if (!C)
1500     return nullptr;
1501 
1502   auto LogicOpc = Logic.getOpcode();
1503   Type *DestTy = Logic.getType();
1504   Type *SrcTy = Cast->getSrcTy();
1505 
1506   // Move the logic operation ahead of a zext or sext if the constant is
1507   // unchanged in the smaller source type. Performing the logic in a smaller
1508   // type may provide more information to later folds, and the smaller logic
1509   // instruction may be cheaper (particularly in the case of vectors).
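       // e.g. and (zext i8 %x to i32), 7 --> zext (and i8 %x, 7) to i32
       // (7 survives the trunc/zext round-trip checked below).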
1510   Value *X;
1511   if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
1512     Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
1513     Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
1514     if (ZextTruncC == C) {
1515       // LogicOpc (zext X), C --> zext (LogicOpc X, C)
1516       Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
1517       return new ZExtInst(NewOp, DestTy);
1518     }
1519   }
1520 
1521   if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) {
1522     Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
1523     Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy);
1524     if (SextTruncC == C) {
1525       // LogicOpc (sext X), C --> sext (LogicOpc X, C)
1526       Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
1527       return new SExtInst(NewOp, DestTy);
1528     }
1529   }
1530 
1531   return nullptr;
1532 }
1533 
1534 /// Fold {and,or,xor} (cast X), Y.
1535 Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
1536   auto LogicOpc = I.getOpcode();
1537   assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1538 
1539   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1540   CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1541   if (!Cast0)
1542     return nullptr;
1543 
1544   // This must be a cast from an integer or integer vector source type to allow
1545   // transformation of the logic operation to the source type.
1546   Type *DestTy = I.getType();
1547   Type *SrcTy = Cast0->getSrcTy();
1548   if (!SrcTy->isIntOrIntVectorTy())
1549     return nullptr;
1550 
1551   if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder))
1552     return Ret;
1553 
1554   CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1555   if (!Cast1)
1556     return nullptr;
1557 
1558   // Both operands of the logic operation are casts. The casts must have the
1559   // same opcode and source type for the reduction.
1560   auto CastOpcode = Cast0->getOpcode();
1561   if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy())
1562     return nullptr;
1563 
1564   Value *Cast0Src = Cast0->getOperand(0);
1565   Value *Cast1Src = Cast1->getOperand(0);
1566 
1567   // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
1568   if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1569     Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1570                                         I.getName());
1571     return CastInst::Create(CastOpcode, NewOp, DestTy);
1572   }
1573 
1574   // For now, only 'and'/'or' have optimizations after this.
1575   if (LogicOpc == Instruction::Xor)
1576     return nullptr;
1577 
1578   // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the
1579   // cast is otherwise not optimizable.  This happens for vector sexts.
1580   ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
1581   ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
1582   if (ICmp0 && ICmp1) {
1583     Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1, I)
1584                                               : foldOrOfICmps(ICmp0, ICmp1, I);
1585     if (Res)
1586       return CastInst::Create(CastOpcode, Res, DestTy);
1587     return nullptr;
1588   }
1589 
1590   // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the
1591   // cast is otherwise not optimizable.  This happens for vector sexts.
1592   FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
1593   FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
1594   if (FCmp0 && FCmp1)
1595     if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And))
1596       return CastInst::Create(CastOpcode, R, DestTy);
1597 
1598   return nullptr;
1599 }
1600 
1601 static Instruction *foldAndToXor(BinaryOperator &I,
1602                                  InstCombiner::BuilderTy &Builder) {
1603   assert(I.getOpcode() == Instruction::And);
1604   Value *Op0 = I.getOperand(0);
1605   Value *Op1 = I.getOperand(1);
1606   Value *A, *B;
1607 
1608   // Operand complexity canonicalization guarantees that the 'or' is Op0.
1609   // (A | B) & ~(A & B) --> A ^ B
1610   // (A | B) & ~(B & A) --> A ^ B
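       // ('or' sets the bits where at least one of A, B is set; clearing the
       // bits where both are set leaves exactly the differing bits, A ^ B.)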
1611   if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)),
1612                         m_Not(m_c_And(m_Deferred(A), m_Deferred(B))))))
1613     return BinaryOperator::CreateXor(A, B);
1614 
1615   // (A | ~B) & (~A | B) --> ~(A ^ B)
1616   // (A | ~B) & (B | ~A) --> ~(A ^ B)
1617   // (~B | A) & (~A | B) --> ~(A ^ B)
1618   // (~B | A) & (B | ~A) --> ~(A ^ B)
1619   if (Op0->hasOneUse() || Op1->hasOneUse())
1620     if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))),
1621                           m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
1622       return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1623 
1624   return nullptr;
1625 }
1626 
1627 static Instruction *foldOrToXor(BinaryOperator &I,
1628                                 InstCombiner::BuilderTy &Builder) {
1629   assert(I.getOpcode() == Instruction::Or);
1630   Value *Op0 = I.getOperand(0);
1631   Value *Op1 = I.getOperand(1);
1632   Value *A, *B;
1633 
1634   // Operand complexity canonicalization guarantees that the 'and' is Op0.
1635   // (A & B) | ~(A | B) --> ~(A ^ B)
1636   // (A & B) | ~(B | A) --> ~(A ^ B)
1637   if (Op0->hasOneUse() || Op1->hasOneUse())
1638     if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1639         match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1640       return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1641 
1642   // (A & ~B) | (~A & B) --> A ^ B
1643   // (A & ~B) | (B & ~A) --> A ^ B
1644   // (~B & A) | (~A & B) --> A ^ B
1645   // (~B & A) | (B & ~A) --> A ^ B
1646   if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
1647       match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
1648     return BinaryOperator::CreateXor(A, B);
1649 
1650   return nullptr;
1651 }
1652 
1653 /// Return true if a constant shift amount is always less than the specified
1654 /// bit-width. If not, the shift could create poison in the narrower type.
1655 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
1656   if (auto *ScalarC = dyn_cast<ConstantInt>(C))
1657     return ScalarC->getZExtValue() < BitWidth;
1658 
1659   if (C->getType()->isVectorTy()) {
1660     // Check each element of a constant vector.
1661     unsigned NumElts = C->getType()->getVectorNumElements();
1662     for (unsigned i = 0; i != NumElts; ++i) {
1663       Constant *Elt = C->getAggregateElement(i);
1664       if (!Elt)
1665         return false;
1666       if (isa<UndefValue>(Elt))
1667         continue;
1668       auto *CI = dyn_cast<ConstantInt>(Elt);
1669       if (!CI || CI->getZExtValue() >= BitWidth)
1670         return false;
1671     }
1672     return true;
1673   }
1674 
1675   // The constant is a constant expression or unknown.
1676   return false;
1677 }
1678 
1679 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
1680 /// a common zext operand: and (binop (zext X), C), (zext X).
1681 Instruction *InstCombiner::narrowMaskedBinOp(BinaryOperator &And) {
1682   // This transform could also apply to {or, and, xor}, but there are better
1683   // folds for those cases, so we don't expect those patterns here. AShr is not
1684   // handled because it should always be transformed to LShr in this sequence.
1685   // The subtract transform is different because it has a constant on the left.
1686   // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
1687   Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
1688   Constant *C;
1689   if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) &&
1690       !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) &&
1691       !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) &&
1692       !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) &&
1693       !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1)))))
1694     return nullptr;
1695 
1696   Value *X;
1697   if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3))
1698     return nullptr;
1699 
1700   Type *Ty = And.getType();
1701   if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
1702     return nullptr;
1703 
1704   // If we're narrowing a shift, the shift amount must be safe (less than the
1705   // width) in the narrower type. If the shift amount is greater, instsimplify
1706   // usually handles that case, but we can't guarantee/assert it.
1707   Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode();
1708   if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1709     if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits()))
1710       return nullptr;
1711 
1712   // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
1713   // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
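       // e.g. and (mul (zext i8 %x to i32), 300), (zext i8 %x to i32)
       //        --> zext (and (mul i8 %x, 44), %x) to i32  ; trunc(300) == 44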
1714   Value *NewC = ConstantExpr::getTrunc(C, X->getType());
1715   Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
1716                                          : Builder.CreateBinOp(Opc, X, NewC);
1717   return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty);
1718 }
1719 
1720 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
1721 // here. We should standardize that construct where it is needed or choose some
1722 // other way to ensure that commutated variants of patterns are not missed.
1723 Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
1724   if (Value *V = SimplifyAndInst(I.getOperand(0), I.getOperand(1),
1725                                  SQ.getWithInstruction(&I)))
1726     return replaceInstUsesWith(I, V);
1727 
1728   if (SimplifyAssociativeOrCommutative(I))
1729     return &I;
1730 
1731   if (Instruction *X = foldVectorBinop(I))
1732     return X;
1733 
1734   // See if we can simplify any instructions used by the instruction whose sole
1735   // purpose is to compute bits we don't care about.
1736   if (SimplifyDemandedInstructionBits(I))
1737     return &I;
1738 
1739   // Do this before using distributive laws to catch simple and/or/not patterns.
1740   if (Instruction *Xor = foldAndToXor(I, Builder))
1741     return Xor;
1742 
1743   // (A|B)&(A|C) -> A|(B&C) etc
1744   if (Value *V = SimplifyUsingDistributiveLaws(I))
1745     return replaceInstUsesWith(I, V);
1746 
1747   if (Value *V = SimplifyBSwap(I, Builder))
1748     return replaceInstUsesWith(I, V);
1749 
1750   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1751   const APInt *C;
1752   if (match(Op1, m_APInt(C))) {
1753     Value *X, *Y;
1754     if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) &&
1755         C->isOneValue()) {
1756       // (1 << X) & 1 --> zext(X == 0)
1757       // (1 >> X) & 1 --> zext(X == 0)
1758       Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(I.getType(), 0));
1759       return new ZExtInst(IsZero, I.getType());
1760     }
1761 
1762     const APInt *XorC;
1763     if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
1764       // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
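           // e.g. ((X ^ 12) & 10) --> (X & 10) ^ 8, since 12 & 10 == 8.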
1765       Constant *NewC = ConstantInt::get(I.getType(), *C & *XorC);
1766       Value *And = Builder.CreateAnd(X, Op1);
1767       And->takeName(Op0);
1768       return BinaryOperator::CreateXor(And, NewC);
1769     }
1770 
1771     const APInt *OrC;
1772     if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
1773       // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
1774       // NOTE: This reduces the number of bits set in the & mask, which
1775       // can expose opportunities for store narrowing for scalars.
1776       // NOTE: SimplifyDemandedBits should have already removed bits from C1
1777       // that aren't set in C2, meaning we could replace (C1&C2) with C1
1778       // above, but this form feels safer.
1779       APInt Together = *C & *OrC;
1780       Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(),
1781                                                          Together ^ *C));
1782       And->takeName(Op0);
1783       return BinaryOperator::CreateOr(And, ConstantInt::get(I.getType(),
1784                                                             Together));
1785     }
1786 
1787     // If the mask is only needed on one incoming arm, push the 'and' op up.
1788     if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
1789         match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
1790       APInt NotAndMask(~(*C));
1791       BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
1792       if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
1793         // Not masking anything out for the LHS, move mask to RHS.
1794         // and ({x}or X, Y), C --> {x}or X, (and Y, C)
1795         Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
1796         return BinaryOperator::Create(BinOp, X, NewRHS);
1797       }
1798       if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
1799         // Not masking anything out for the RHS, move mask to LHS.
1800         // and ({x}or X, Y), C --> {x}or (and X, C), Y
1801         Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
1802         return BinaryOperator::Create(BinOp, NewLHS, Y);
1803       }
1804     }
1805 
1806   }
1807 
1808   if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) {
1809     const APInt &AndRHSMask = AndRHS->getValue();
1810 
1811     // Optimize a variety of ((val OP C1) & C2) combinations...
1812     if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
1813       // ((C1 OP zext(X)) & C2) -> zext((trunc(C1) OP X) & C2) if C2 fits in
1814       // the bitwidth of X and OP behaves well when given trunc(C1) and X.
1815       // TODO: Do this for vectors by using m_APInt instead of m_ConstantInt.
1816       switch (Op0I->getOpcode()) {
1817       default:
1818         break;
1819       case Instruction::Xor:
1820       case Instruction::Or:
1821       case Instruction::Mul:
1822       case Instruction::Add:
1823       case Instruction::Sub:
1824         Value *X;
1825         ConstantInt *C1;
1826         // TODO: The one use restrictions could be relaxed a little if the AND
1827         // is going to be removed.
1828         if (match(Op0I, m_OneUse(m_c_BinOp(m_OneUse(m_ZExt(m_Value(X))),
1829                                            m_ConstantInt(C1))))) {
1830           if (AndRHSMask.isIntN(X->getType()->getScalarSizeInBits())) {
1831             auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType());
1832             Value *BinOp;
1833             Value *Op0LHS = Op0I->getOperand(0);
1834             if (isa<ZExtInst>(Op0LHS))
1835               BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1);
1836             else
1837               BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X);
1838             auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType());
1839             auto *And = Builder.CreateAnd(BinOp, TruncC2);
1840             return new ZExtInst(And, I.getType());
1841           }
1842         }
1843       }
1844 
1845       if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
1846         if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
1847           return Res;
1848     }
1849 
1850     // If this is an integer truncation, and if the source is an 'and' with
1851     // immediate, transform it.  This frequently occurs for bitfield accesses.
1852     {
1853       Value *X = nullptr; ConstantInt *YC = nullptr;
1854       if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
1855         // Change: and (trunc (and X, YC) to T), C2
1856         // into  : and (trunc X to T), trunc(YC) & C2
1857         // This will fold the two constants together, which may allow
1858         // other simplifications.
1859         Value *NewCast = Builder.CreateTrunc(X, I.getType(), "and.shrunk");
1860         Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
1861         C3 = ConstantExpr::getAnd(C3, AndRHS);
1862         return BinaryOperator::CreateAnd(NewCast, C3);
1863       }
1864     }
1865   }
1866 
1867   if (Instruction *Z = narrowMaskedBinOp(I))
1868     return Z;
1869 
1870   if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
1871     return FoldedLogic;
1872 
1873   if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
1874     return DeMorgan;
1875 
1876   {
1877     Value *A, *B, *C;
1878     // A & (A ^ B) --> A & ~B
1879     if (match(Op1, m_OneUse(m_c_Xor(m_Specific(Op0), m_Value(B)))))
1880       return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(B));
1881     // (A ^ B) & A --> A & ~B
1882     if (match(Op0, m_OneUse(m_c_Xor(m_Specific(Op1), m_Value(B)))))
1883       return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(B));
1884 
1885     // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
1886     if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
1887       if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
1888         if (Op1->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
1889           return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));
1890 
1891     // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
1892     if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
1893       if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
1894         if (Op0->hasOneUse() || isFreeToInvert(C, C->hasOneUse()))
1895           return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
1896 
1897     // (A | B) & ((~A) ^ B) -> (A & B)
1898     // (A | B) & (B ^ (~A)) -> (A & B)
1899     // (B | A) & ((~A) ^ B) -> (A & B)
1900     // (B | A) & (B ^ (~A)) -> (A & B)
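         // (~A ^ B is bitwise 'A equals B'; restricting it to the bits where
         // at least one of A, B is set leaves the bits where both are set.)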
1901     if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
1902         match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
1903       return BinaryOperator::CreateAnd(A, B);
1904 
1905     // ((~A) ^ B) & (A | B) -> (A & B)
1906     // ((~A) ^ B) & (B | A) -> (A & B)
1907     // (B ^ (~A)) & (A | B) -> (A & B)
1908     // (B ^ (~A)) & (B | A) -> (A & B)
1909     if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
1910         match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
1911       return BinaryOperator::CreateAnd(A, B);
1912   }
1913 
1914   {
1915     ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
1916     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
1917     if (LHS && RHS)
1918       if (Value *Res = foldAndOfICmps(LHS, RHS, I))
1919         return replaceInstUsesWith(I, Res);
1920 
1921     // TODO: Make this recursive; it's a little tricky because an arbitrary
1922     // number of 'and' instructions might have to be created.
1923     Value *X, *Y;
1924     if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1925       if (auto *Cmp = dyn_cast<ICmpInst>(X))
1926         if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1927           return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1928       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1929         if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
1930           return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1931     }
1932     if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
1933       if (auto *Cmp = dyn_cast<ICmpInst>(X))
1934         if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1935           return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
1936       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
1937         if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
1938           return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
1939     }
1940   }
1941 
1942   if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
1943     if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
1944       if (Value *Res = foldLogicOfFCmps(LHS, RHS, true))
1945         return replaceInstUsesWith(I, Res);
1946 
1947   if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
1948     return FoldedFCmps;
1949 
1950   if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
1951     return CastedAnd;
1952 
1953   // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
1954   Value *A;
1955   if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
1956       A->getType()->isIntOrIntVectorTy(1))
1957     return SelectInst::Create(A, Op1, Constant::getNullValue(I.getType()));
1958   if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
1959       A->getType()->isIntOrIntVectorTy(1))
1960     return SelectInst::Create(A, Op0, Constant::getNullValue(I.getType()));
1961 
1962   // and(ashr(subNSW(Y, X), ScalarSizeInBits(Y)-1), X) --> X s> Y ? X : 0.
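       // (The 'ashr' broadcasts the sign bit of Y - X, which is negative iff
       // Y s< X since the NSW subtract cannot wrap; the mask is then all-ones.)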
1963   {
1964     Value *X, *Y;
1965     const APInt *ShAmt;
1966     Type *Ty = I.getType();
1967     if (match(&I, m_c_And(m_OneUse(m_AShr(m_NSWSub(m_Value(Y), m_Value(X)),
1968                                           m_APInt(ShAmt))),
1969                           m_Deferred(X))) &&
1970         *ShAmt == Ty->getScalarSizeInBits() - 1) {
1971       Value *NewICmpInst = Builder.CreateICmpSGT(X, Y);
1972       return SelectInst::Create(NewICmpInst, X, ConstantInt::getNullValue(Ty));
1973     }
1974   }
1975 
1976   return nullptr;
1977 }
1978 
1979 Instruction *InstCombiner::matchBSwap(BinaryOperator &Or) {
1980   assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
1981   Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1);
1982 
1983   // Look through zero extends.
1984   if (Instruction *Ext = dyn_cast<ZExtInst>(Op0))
1985     Op0 = Ext->getOperand(0);
1986 
1987   if (Instruction *Ext = dyn_cast<ZExtInst>(Op1))
1988     Op1 = Ext->getOperand(0);
1989 
1990   // (A | B) | C  and  A | (B | C)                  -> bswap if possible.
1991   bool OrOfOrs = match(Op0, m_Or(m_Value(), m_Value())) ||
1992                  match(Op1, m_Or(m_Value(), m_Value()));
1993 
1994   // (A >> B) | (C << D)  and  (A << B) | (C >> D)  -> bswap if possible.
1995   bool OrOfShifts = match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
1996                     match(Op1, m_LogicalShift(m_Value(), m_Value()));
1997 
1998   // (A & B) | (C & D)                              -> bswap if possible.
1999   bool OrOfAnds = match(Op0, m_And(m_Value(), m_Value())) &&
2000                   match(Op1, m_And(m_Value(), m_Value()));
2001 
2002   // (A << B) | (C & D)                              -> bswap if possible.
2003   // The bigger pattern here is ((A & C1) << C2) | ((B >> C2) & C1), which is a
2004   // part of the bswap idiom for specific values of C1, C2 (e.g. C1 = 16711935,
2005   // C2 = 8 for i32).
2006   // This pattern can occur when the operands of the 'or' are not canonicalized
2007   // for some reason (not having only one use, for example).
2008   bool OrOfAndAndSh = (match(Op0, m_LogicalShift(m_Value(), m_Value())) &&
2009                        match(Op1, m_And(m_Value(), m_Value()))) ||
2010                       (match(Op0, m_And(m_Value(), m_Value())) &&
2011                        match(Op1, m_LogicalShift(m_Value(), m_Value())));
2012 
2013   if (!OrOfOrs && !OrOfShifts && !OrOfAnds && !OrOfAndAndSh)
2014     return nullptr;
2015 
2016   SmallVector<Instruction*, 4> Insts;
2017   if (!recognizeBSwapOrBitReverseIdiom(&Or, true, false, Insts))
2018     return nullptr;
2019   Instruction *LastInst = Insts.pop_back_val();
2020   LastInst->removeFromParent();
2021 
2022   for (auto *Inst : Insts)
2023     Worklist.Add(Inst);
2024   return LastInst;
2025 }
2026 
2027 /// Transform UB-safe variants of bitwise rotate to the funnel shift intrinsic.
2028 static Instruction *matchRotate(Instruction &Or) {
2029   // TODO: Can we reduce the code duplication between this and the related
2030   // rotate matching code under visitSelect and visitTrunc?
2031   unsigned Width = Or.getType()->getScalarSizeInBits();
2032   if (!isPowerOf2_32(Width))
2033     return nullptr;
2034 
2035   // First, find an or'd pair of opposite shifts with the same shifted operand:
2036   // or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1)
2037   BinaryOperator *Or0, *Or1;
2038   if (!match(Or.getOperand(0), m_BinOp(Or0)) ||
2039       !match(Or.getOperand(1), m_BinOp(Or1)))
2040     return nullptr;
2041 
2042   Value *ShVal, *ShAmt0, *ShAmt1;
2043   if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
2044       !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
2045     return nullptr;
2046 
2047   BinaryOperator::BinaryOps ShiftOpcode0 = Or0->getOpcode();
2048   BinaryOperator::BinaryOps ShiftOpcode1 = Or1->getOpcode();
2049   if (ShiftOpcode0 == ShiftOpcode1)
2050     return nullptr;
2051 
2052   // Match the shift amount operands for a rotate pattern. This always matches
2053   // a (masked) negation of the shift amount on the R operand.
2054   auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * {
2055     // The shift amount may be masked with negation:
2056     // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
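         // e.g. the UB-safe C rotate-left idiom for Width == 32:
         //   (v << (r & 31)) | (v >> (-r & 31))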
2057     Value *X;
2058     unsigned Mask = Width - 1;
2059     if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
2060         match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
2061       return X;
2062 
2063     // Similar to above, but the shift amount may be extended after masking,
2064     // so return the extended value as the parameter for the intrinsic.
2065     if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
2066         match(R, m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))),
2067                        m_SpecificInt(Mask))))
2068       return L;
2069 
2070     return nullptr;
2071   };
2072 
2073   Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
2074   bool SubIsOnLHS = false;
2075   if (!ShAmt) {
2076     ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
2077     SubIsOnLHS = true;
2078   }
2079   if (!ShAmt)
2080     return nullptr;
2081 
2082   bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) ||
2083                 (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl);
2084   Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
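       // A funnel shift with both inputs equal to ShVal is a rotate.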
2085   Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
2086   return IntrinsicInst::Create(F, { ShVal, ShVal, ShAmt });
2087 }
2088 
2089 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
2090 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
2091   unsigned NumElts = C1->getType()->getVectorNumElements();
2092   for (unsigned i = 0; i != NumElts; ++i) {
2093     Constant *EltC1 = C1->getAggregateElement(i);
2094     Constant *EltC2 = C2->getAggregateElement(i);
2095     if (!EltC1 || !EltC2)
2096       return false;
2097 
2098     // One element must be all ones, and the other must be all zeros.
2099     if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
2100           (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
2101       return false;
2102   }
2103   return true;
2104 }
2105 
2106 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or
2107 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
2108 /// B, it can be used as the condition operand of a select instruction.
2109 Value *InstCombiner::getSelectCondition(Value *A, Value *B) {
2110   // Step 1: We may have peeked through bitcasts in the caller.
2111   // Exit immediately if we don't have (vector) integer types.
2112   Type *Ty = A->getType();
2113   if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
2114     return nullptr;
2115 
2116   // Step 2: We need 0 or all-1's bitmasks.
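       // (A value with as many sign bits as its bit width is 0 or -1 in every
       // lane.)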
2117   if (ComputeNumSignBits(A) != Ty->getScalarSizeInBits())
2118     return nullptr;
2119 
2120   // Step 3: If B is the 'not' value of A, we have our answer.
2121   if (match(A, m_Not(m_Specific(B)))) {
2122     // If these are scalars or vectors of i1, A can be used directly.
2123     if (Ty->isIntOrIntVectorTy(1))
2124       return A;
2125     return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(Ty));
2126   }
2127 
2128   // If both operands are constants, see if the constants are inverse bitmasks.
2129   Constant *AConst, *BConst;
2130   if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst)))
2131     if (AConst == ConstantExpr::getNot(BConst))
2132       return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty));
2133 
2134   // Look for more complex patterns. The 'not' op may be hidden behind various
2135   // casts. Look through sexts and bitcasts to find the booleans.
2136   Value *Cond;
2137   Value *NotB;
2138   if (match(A, m_SExt(m_Value(Cond))) &&
2139       Cond->getType()->isIntOrIntVectorTy(1) &&
2140       match(B, m_OneUse(m_Not(m_Value(NotB))))) {
2141     NotB = peekThroughBitcast(NotB, true);
2142     if (match(NotB, m_SExt(m_Specific(Cond))))
2143       return Cond;
2144   }
2145 
2146   // All scalar (and most vector) possibilities should be handled now.
2147   // Try more matches that only apply to non-splat constant vectors.
2148   if (!Ty->isVectorTy())
2149     return nullptr;
2150 
2151   // If both operands are xor'd with constants using the same sexted boolean
2152   // operand, see if the constants are inverse bitmasks.
2153   // TODO: Use ConstantExpr::getNot()?
2154   if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) &&
2155       match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) &&
2156       Cond->getType()->isIntOrIntVectorTy(1) &&
2157       areInverseVectorBitmasks(AConst, BConst)) {
2158     AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty));
2159     return Builder.CreateXor(Cond, AConst);
2160   }
2161   return nullptr;
2162 }
2163 
2164 /// We have an expression of the form (A & C) | (B & D). Try to simplify this
2165 /// to "A' ? C : D", where A' is a boolean or vector of booleans.
2166 Value *InstCombiner::matchSelectFromAndOr(Value *A, Value *C, Value *B,
2167                                           Value *D) {
2168   // The potential condition of the select may be bitcasted. In that case, look
2169   // through its bitcast and the corresponding bitcast of the 'not' condition.
2170   Type *OrigType = A->getType();
2171   A = peekThroughBitcast(A, true);
2172   B = peekThroughBitcast(B, true);
2173   if (Value *Cond = getSelectCondition(A, B)) {
2174     // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
2175     // The bitcasts will either all exist or all not exist. The builder will
2176     // not create unnecessary casts if the types already match.
2177     Value *BitcastC = Builder.CreateBitCast(C, A->getType());
2178     Value *BitcastD = Builder.CreateBitCast(D, A->getType());
2179     Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
2180     return Builder.CreateBitCast(Select, OrigType);
2181   }
2182 
2183   return nullptr;
2184 }
2185 
2186 /// Fold (icmp)|(icmp) if possible.
2187 Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
2188                                    Instruction &CxtI) {
2189   const SimplifyQuery Q = SQ.getWithInstruction(&CxtI);
2190 
2191   // Fold (iszero(A & K1) | iszero(A & K2)) ->  (A & (K1 | K2)) != (K1 | K2)
2192   // if K1 and K2 are a one-bit mask.
2193   if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, false, CxtI))
2194     return V;
2195 
2196   ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
2197 
2198   ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1));
2199   ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1));
2200 
2201   // Fold (icmp ult/ule (A + C1), C3) | (icmp ult/ule (A + C2), C3)
2202   //                   -->  (icmp ult/ule ((A & ~(C1 ^ C2)) + max(C1, C2)), C3)
2203   // The original condition actually refers to the following two ranges:
2204   // [MAX_UINT-C1+1, MAX_UINT-C1+1+C3] and [MAX_UINT-C2+1, MAX_UINT-C2+1+C3]
2205   // We can fold these two ranges if:
2206   // 1) C1 and C2 are both unsigned greater than C3.
2207   // 2) The two ranges are separated.
2208   // 3) C1 ^ C2 is a one-bit mask.
2209   // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are one-bit masks.
2210   // This implies all values in the two ranges differ by exactly one bit.
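       //
       // e.g. (illustrative i8 constants C1 = 8, C2 = 24, C3 = 4):
       //   (icmp ult (A + 8), 4) | (icmp ult (A + 24), 4)
       //     --> (icmp ult ((A & ~16) + 24), 4)   ; C1 ^ C2 == 16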
2211 
2212   if ((PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) &&
2213       PredL == PredR && LHSC && RHSC && LHS->hasOneUse() && RHS->hasOneUse() &&
2214       LHSC->getType() == RHSC->getType() &&
2215       LHSC->getValue() == RHSC->getValue()) {
2216 
2217     Value *LAdd = LHS->getOperand(0);
2218     Value *RAdd = RHS->getOperand(0);
2219 
2220     Value *LAddOpnd, *RAddOpnd;
2221     ConstantInt *LAddC, *RAddC;
2222     if (match(LAdd, m_Add(m_Value(LAddOpnd), m_ConstantInt(LAddC))) &&
2223         match(RAdd, m_Add(m_Value(RAddOpnd), m_ConstantInt(RAddC))) &&
2224         LAddC->getValue().ugt(LHSC->getValue()) &&
2225         RAddC->getValue().ugt(LHSC->getValue())) {
2226 
2227       APInt DiffC = LAddC->getValue() ^ RAddC->getValue();
2228       if (LAddOpnd == RAddOpnd && DiffC.isPowerOf2()) {
2229         ConstantInt *MaxAddC = nullptr;
2230         if (LAddC->getValue().ult(RAddC->getValue()))
2231           MaxAddC = RAddC;
2232         else
2233           MaxAddC = LAddC;
2234 
2235         APInt RRangeLow = -RAddC->getValue();
2236         APInt RRangeHigh = RRangeLow + LHSC->getValue();
2237         APInt LRangeLow = -LAddC->getValue();
2238         APInt LRangeHigh = LRangeLow + LHSC->getValue();
2239         APInt LowRangeDiff = RRangeLow ^ LRangeLow;
2240         APInt HighRangeDiff = RRangeHigh ^ LRangeHigh;
2241         APInt RangeDiff = LRangeLow.sgt(RRangeLow) ? LRangeLow - RRangeLow
2242                                                    : RRangeLow - LRangeLow;
2243 
2244         if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff &&
2245             RangeDiff.ugt(LHSC->getValue())) {
2246           Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC);
2247 
2248           Value *NewAnd = Builder.CreateAnd(LAddOpnd, MaskC);
2249           Value *NewAdd = Builder.CreateAdd(NewAnd, MaxAddC);
2250           return Builder.CreateICmp(LHS->getPredicate(), NewAdd, LHSC);
2251         }
2252       }
2253     }
2254   }
2255 
2256   // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
2257   if (predicatesFoldable(PredL, PredR)) {
2258     if (LHS->getOperand(0) == RHS->getOperand(1) &&
2259         LHS->getOperand(1) == RHS->getOperand(0))
2260       LHS->swapOperands();
2261     if (LHS->getOperand(0) == RHS->getOperand(0) &&
2262         LHS->getOperand(1) == RHS->getOperand(1)) {
2263       Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2264       unsigned Code = getICmpCode(LHS) | getICmpCode(RHS);
2265       bool IsSigned = LHS->isSigned() || RHS->isSigned();
2266       return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder);
2267     }
2268   }
2269 
2270   // handle (roughly):
2271   // (icmp ne (A & B), C) | (icmp ne (A & D), E)
2272   if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder))
2273     return V;
2274 
2275   Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
2276   if (LHS->hasOneUse() || RHS->hasOneUse()) {
2277     // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1)
2278     // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1)
2279     Value *A = nullptr, *B = nullptr;
2280     if (PredL == ICmpInst::ICMP_EQ && LHSC && LHSC->isZero()) {
2281       B = LHS0;
2282       if (PredR == ICmpInst::ICMP_ULT && LHS0 == RHS->getOperand(1))
2283         A = RHS0;
2284       else if (PredR == ICmpInst::ICMP_UGT && LHS0 == RHS0)
2285         A = RHS->getOperand(1);
2286     }
2287     // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1)
2288     // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1)
2289     else if (PredR == ICmpInst::ICMP_EQ && RHSC && RHSC->isZero()) {
2290       B = RHS0;
2291       if (PredL == ICmpInst::ICMP_ULT && RHS0 == LHS->getOperand(1))
2292         A = LHS0;
2293       else if (PredL == ICmpInst::ICMP_UGT && LHS0 == RHS0)
2294         A = LHS->getOperand(1);
2295     }
2296     if (A && B)
2297       return Builder.CreateICmp(
2298           ICmpInst::ICMP_UGE,
2299           Builder.CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
2300   }
2301 
2302   // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
2303   if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true))
2304     return V;
2305 
2306   // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
2307   if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true))
2308     return V;
2309 
2310   if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, false, Builder))
2311     return V;
2312 
2313   if (Value *V = foldIsPowerOf2(LHS, RHS, false /* JoinedByAnd */, Builder))
2314     return V;
2315 
2316   if (Value *X =
2317           foldUnsignedUnderflowCheck(LHS, RHS, /*IsAnd=*/false, Q, Builder))
2318     return X;
2319   if (Value *X =
2320           foldUnsignedUnderflowCheck(RHS, LHS, /*IsAnd=*/false, Q, Builder))
2321     return X;
2322 
2323   // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
2324   if (!LHSC || !RHSC)
2325     return nullptr;
2326 
2327   if (LHSC == RHSC && PredL == PredR) {
2328     // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
2329     if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) {
2330       Value *NewOr = Builder.CreateOr(LHS0, RHS0);
2331       return Builder.CreateICmp(PredL, NewOr, LHSC);
2332     }
2333   }
2334 
2335   // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1)
2336   //   iff C2 + CA == C1.
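       // e.g. (icmp ult (X + 5), 10) | (icmp eq X, 5) --> (icmp ule (X + 5), 10)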
2337   if (PredL == ICmpInst::ICMP_ULT && PredR == ICmpInst::ICMP_EQ) {
2338     ConstantInt *AddC;
2339     if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC))))
2340       if (RHSC->getValue() + AddC->getValue() == LHSC->getValue())
2341         return Builder.CreateICmpULE(LHS0, LHSC);
2342   }
2343 
2344   // From here on, we only handle:
2345   //    (icmp1 A, C1) | (icmp2 A, C2) --> something simpler.
2346   if (LHS0 != RHS0)
2347     return nullptr;
2348 
2349   // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere.
2350   if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE ||
2351       PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE ||
2352       PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE ||
2353       PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE)
2354     return nullptr;
2355 
2356   // We can't fold (ugt x, C) | (sgt x, C2).
2357   if (!predicatesFoldable(PredL, PredR))
2358     return nullptr;
2359 
2360   // Ensure that the larger constant is on the RHS.
2361   bool ShouldSwap;
2362   if (CmpInst::isSigned(PredL) ||
2363       (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR)))
2364     ShouldSwap = LHSC->getValue().sgt(RHSC->getValue());
2365   else
2366     ShouldSwap = LHSC->getValue().ugt(RHSC->getValue());
2367 
2368   if (ShouldSwap) {
2369     std::swap(LHS, RHS);
2370     std::swap(LHSC, RHSC);
2371     std::swap(PredL, PredR);
2372   }
2373 
2374   // At this point, we know we have two icmp instructions
2375   // comparing a value against two constants and or'ing the result
2376   // together.  Because of the above check, we know that we only have
2377   // ICMP_EQ, ICMP_NE, ICMP_[SU]LT, and ICMP_[SU]GT here. We also know
2378   // (from the icmp folding check above) that the two constants are not
2379   // equal.
2380   assert(LHSC != RHSC && "Compares not folded above?");
2381 
2382   switch (PredL) {
2383   default:
2384     llvm_unreachable("Unknown integer condition code!");
2385   case ICmpInst::ICMP_EQ:
2386     switch (PredR) {
2387     default:
2388       llvm_unreachable("Unknown integer condition code!");
2389     case ICmpInst::ICMP_EQ:
2390       // Potential folds for this case should already be handled.
2391       break;
2392     case ICmpInst::ICMP_UGT:
2393       // (X == 0 || X u> C) -> (X-1) u>= C
2394       if (LHSC->isMinValue(false))
2395         return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue() + 1,
2396                                false, false);
2397       // (X == 13 | X u> 14) -> no change
2398       break;
2399     case ICmpInst::ICMP_SGT:
2400       // (X == INT_MIN || X s> C) -> (X-(INT_MIN+1)) u>= C-INT_MIN
2401       if (LHSC->isMinValue(true))
2402         return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue() + 1,
2403                                true, false);
2404       // (X == 13 | X s> 14) -> no change
2405       break;
2406     }
2407     break;
2408   case ICmpInst::ICMP_ULT:
2409     switch (PredR) {
2410     default:
2411       llvm_unreachable("Unknown integer condition code!");
2412     case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change
2413       // (X u< C || X == UINT_MAX) => (X-C) u>= UINT_MAX-C
2414       if (RHSC->isMaxValue(false))
2415         return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue(),
2416                                false, false);
2417       break;
2418     case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2
2419       assert(!RHSC->isMaxValue(false) && "Missed icmp simplification");
2420       return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1,
2421                              false, false);
2422     }
2423     break;
2424   case ICmpInst::ICMP_SLT:
2425     switch (PredR) {
2426     default:
2427       llvm_unreachable("Unknown integer condition code!");
2428     case ICmpInst::ICMP_EQ:
2429       // (X s< C || X == INT_MAX) => (X-C) u>= INT_MAX-C
2430       if (RHSC->isMaxValue(true))
2431         return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue(),
2432                                true, false);
2433       // (X s< 13 | X == 14) -> no change
2434       break;
2435     case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) u> 2
2436       assert(!RHSC->isMaxValue(true) && "Missed icmp simplification");
2437       return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true,
2438                              false);
2439     }
2440     break;
2441   }
2442   return nullptr;
2443 }
2444 
2445 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2446 // here. We should standardize that construct where it is needed or choose some
2447 // other way to ensure that commutated variants of patterns are not missed.
2448 Instruction *InstCombiner::visitOr(BinaryOperator &I) {
2449   if (Value *V = SimplifyOrInst(I.getOperand(0), I.getOperand(1),
2450                                 SQ.getWithInstruction(&I)))
2451     return replaceInstUsesWith(I, V);
2452 
2453   if (SimplifyAssociativeOrCommutative(I))
2454     return &I;
2455 
2456   if (Instruction *X = foldVectorBinop(I))
2457     return X;
2458 
2459   // See if we can simplify any instructions used by the instruction whose sole
2460   // purpose is to compute bits we don't care about.
2461   if (SimplifyDemandedInstructionBits(I))
2462     return &I;
2463 
2464   // Do this before using distributive laws to catch simple and/or/not patterns.
2465   if (Instruction *Xor = foldOrToXor(I, Builder))
2466     return Xor;
2467 
2468   // (A&B)|(A&C) -> A&(B|C) etc
2469   if (Value *V = SimplifyUsingDistributiveLaws(I))
2470     return replaceInstUsesWith(I, V);
2471 
2472   if (Value *V = SimplifyBSwap(I, Builder))
2473     return replaceInstUsesWith(I, V);
2474 
2475   if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
2476     return FoldedLogic;
2477 
2478   if (Instruction *BSwap = matchBSwap(I))
2479     return BSwap;
2480 
2481   if (Instruction *Rotate = matchRotate(I))
2482     return Rotate;
2483 
2484   Value *X, *Y;
2485   const APInt *CV;
2486   if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
2487       !CV->isAllOnesValue() && MaskedValueIsZero(Y, *CV, 0, &I)) {
2488     // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
2489     // (skip C == -1: then Y & C == 0 forces Y == 0 and Op0 is already ~X)
2490     Value *Or = Builder.CreateOr(X, Y);
2491     return BinaryOperator::CreateXor(Or, ConstantInt::get(I.getType(), *CV));
2492   }
2493 
2494   // (A & C)|(B & D)
2495   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2496   Value *A, *B, *C, *D;
2497   if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
2498       match(Op1, m_And(m_Value(B), m_Value(D)))) {
2499     ConstantInt *C1 = dyn_cast<ConstantInt>(C);
2500     ConstantInt *C2 = dyn_cast<ConstantInt>(D);
2501     if (C1 && C2) {  // (A & C1)|(B & C2)
2502       Value *V1 = nullptr, *V2 = nullptr;
2503       if ((C1->getValue() & C2->getValue()).isNullValue()) {
2504         // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2)
2505         // iff (C1&C2) == 0 and (N&~C1) == 0
2506         if (match(A, m_Or(m_Value(V1), m_Value(V2))) &&
2507             ((V1 == B &&
2508               MaskedValueIsZero(V2, ~C1->getValue(), 0, &I)) || // (V|N)
2509              (V2 == B &&
2510               MaskedValueIsZero(V1, ~C1->getValue(), 0, &I))))  // (N|V)
2511           return BinaryOperator::CreateAnd(A,
2512                                 Builder.getInt(C1->getValue()|C2->getValue()));
2513         // Or commutes, try both ways.
2514         if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
2515             ((V1 == A &&
2516               MaskedValueIsZero(V2, ~C2->getValue(), 0, &I)) || // (V|N)
2517              (V2 == A &&
2518               MaskedValueIsZero(V1, ~C2->getValue(), 0, &I))))  // (N|V)
2519           return BinaryOperator::CreateAnd(B,
2520                                  Builder.getInt(C1->getValue()|C2->getValue()));
2521 
2522         // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
2523         // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
2524         ConstantInt *C3 = nullptr, *C4 = nullptr;
2525         if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) &&
2526             (C3->getValue() & ~C1->getValue()).isNullValue() &&
2527             match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
2528             (C4->getValue() & ~C2->getValue()).isNullValue()) {
2529           V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
2530           return BinaryOperator::CreateAnd(V2,
2531                                  Builder.getInt(C1->getValue()|C2->getValue()));
2532         }
2533       }
2534 
2535       if (C1->getValue() == ~C2->getValue()) {
2536         Value *X;
2537 
2538         // ((X|B)&C1)|(B&C2) -> (X&C1) | B iff C1 == ~C2
2539         if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
2540           return BinaryOperator::CreateOr(Builder.CreateAnd(X, C1), B);
2541         // (A&C2)|((X|A)&C1) -> (X&C2) | A iff C1 == ~C2
2542         if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
2543           return BinaryOperator::CreateOr(Builder.CreateAnd(X, C2), A);
2544 
2545         // ((X^B)&C1)|(B&C2) -> (X&C1) ^ B iff C1 == ~C2
2546         if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
2547           return BinaryOperator::CreateXor(Builder.CreateAnd(X, C1), B);
2548         // (A&C2)|((X^A)&C1) -> (X&C2) ^ A iff C1 == ~C2
2549         if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
2550           return BinaryOperator::CreateXor(Builder.CreateAnd(X, C2), A);
2551       }
2552     }
2553 
2554     // Don't try to form a select if it's unlikely that we'll get rid of at
2555     // least one of the operands. A select is generally more expensive than the
2556     // 'or' that it is replacing.
2557     if (Op0->hasOneUse() || Op1->hasOneUse()) {
2558       // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
2559       if (Value *V = matchSelectFromAndOr(A, C, B, D))
2560         return replaceInstUsesWith(I, V);
2561       if (Value *V = matchSelectFromAndOr(A, C, D, B))
2562         return replaceInstUsesWith(I, V);
2563       if (Value *V = matchSelectFromAndOr(C, A, B, D))
2564         return replaceInstUsesWith(I, V);
2565       if (Value *V = matchSelectFromAndOr(C, A, D, B))
2566         return replaceInstUsesWith(I, V);
2567       if (Value *V = matchSelectFromAndOr(B, D, A, C))
2568         return replaceInstUsesWith(I, V);
2569       if (Value *V = matchSelectFromAndOr(B, D, C, A))
2570         return replaceInstUsesWith(I, V);
2571       if (Value *V = matchSelectFromAndOr(D, B, A, C))
2572         return replaceInstUsesWith(I, V);
2573       if (Value *V = matchSelectFromAndOr(D, B, C, A))
2574         return replaceInstUsesWith(I, V);
2575     }
2576   }
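       // A typical case matched above (illustrative; the 'not' of the condition
       // is usually expressed as an xor with -1):
       //   %sc = sext i1 %cond to i32
       //   %nc = xor i32 %sc, -1
       //   (%sc & %c) | (%nc & %d)  -->  select i1 %cond, i32 %c, i32 %d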
2577 
2578   // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
2579   if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
2580     if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
2581       return BinaryOperator::CreateOr(Op0, C);
2582 
2583   // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
2584   if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
2585     if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
2586       return BinaryOperator::CreateOr(Op1, C);
2587 
2588   // ((B | C) & A) | B -> B | (A & C)
2589   if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
2590     return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
2591 
2592   if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
2593     return DeMorgan;
2594 
2595   // Canonicalize xor to the RHS.
2596   bool SwappedForXor = false;
2597   if (match(Op0, m_Xor(m_Value(), m_Value()))) {
2598     std::swap(Op0, Op1);
2599     SwappedForXor = true;
2600   }
2601 
2602   // A | ( A ^ B) -> A |  B
2603   // A | (~A ^ B) -> A | ~B
2604   // (A & B) | (A ^ B) -> A | B
2605   if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
2606     if (Op0 == A || Op0 == B)
2607       return BinaryOperator::CreateOr(A, B);
2608 
2609     if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
2610         match(Op0, m_And(m_Specific(B), m_Specific(A))))
2611       return BinaryOperator::CreateOr(A, B);
2612 
2613     if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
2614       Value *Not = Builder.CreateNot(B, B->getName() + ".not");
2615       return BinaryOperator::CreateOr(Not, Op0);
2616     }
2617     if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
2618       Value *Not = Builder.CreateNot(A, A->getName() + ".not");
2619       return BinaryOperator::CreateOr(Not, Op0);
2620     }
2621   }
2622 
2623   // A | ~(A | B) -> A | ~B
2624   // A | ~(A ^ B) -> A | ~B
2625   if (match(Op1, m_Not(m_Value(A))))
2626     if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
2627       if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
2628           Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
2629                                B->getOpcode() == Instruction::Xor)) {
2630         Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
2631                                                  B->getOperand(0);
2632         Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not");
2633         return BinaryOperator::CreateOr(Not, Op0);
2634       }
2635 
2636   if (SwappedForXor)
2637     std::swap(Op0, Op1);
2638 
2639   {
2640     ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
2641     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
2642     if (LHS && RHS)
2643       if (Value *Res = foldOrOfICmps(LHS, RHS, I))
2644         return replaceInstUsesWith(I, Res);
2645 
2646     // TODO: Make this recursive; it's a little tricky because an arbitrary
2647     // number of 'or' instructions might have to be created.
2648     Value *X, *Y;
2649     if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2650       if (auto *Cmp = dyn_cast<ICmpInst>(X))
2651         if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2652           return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
2653       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2654         if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
2655           return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
2656     }
2657     if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2658       if (auto *Cmp = dyn_cast<ICmpInst>(X))
2659         if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2660           return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
2661       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2662         if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
2663           return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
2664     }
2665   }
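       // As an illustration of the reassociation above (hypothetical values,
       // assuming foldOrOfICmps merges the two compares of %x):
       //   %c1 = icmp ult i32 %x, 4
       //   %c2 = icmp ult i32 %x, 9
       //   %o  = or i1 %c2, %z
       //   %r  = or i1 %c1, %o   -->   %r = or i1 %c2, %z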
2666 
2667   if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
2668     if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
2669       if (Value *Res = foldLogicOfFCmps(LHS, RHS, false))
2670         return replaceInstUsesWith(I, Res);
2671 
2672   if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
2673     return FoldedFCmps;
2674 
2675   if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
2676     return CastedOr;
2677 
2678   // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
2679   if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) &&
2680       A->getType()->isIntOrIntVectorTy(1))
2681     return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1);
2682   if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) &&
2683       A->getType()->isIntOrIntVectorTy(1))
2684     return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0);
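       // For example:
       //   %s = sext i1 %a to i32
       //   %r = or i32 %s, %b   -->   %r = select i1 %a, i32 -1, i32 %b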
2685 
2686   // Note: If we've gotten to the point of visiting the outer OR, then the
2687   // inner one couldn't be simplified.  If it was a constant, then it won't
2688   // be simplified by a later pass either, so we try swapping the inner/outer
2689   // ORs in the hopes that we'll be able to simplify it this way.
2690   // (X|C) | V --> (X|V) | C
2691   ConstantInt *CI;
2692   if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
2693       match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) {
2694     Value *Inner = Builder.CreateOr(A, Op1);
2695     Inner->takeName(Op0);
2696     return BinaryOperator::CreateOr(Inner, CI);
2697   }
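       // For example (%v is not a constant, so the inner 'or' cannot fold):
       //   %i = or i32 %x, 7
       //   %r = or i32 %i, %v
       // -->
       //   %i2 = or i32 %x, %v
       //   %r  = or i32 %i2, 7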
2698 
2699   // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
2700   // Since this OR instruction hasn't been optimized further yet, we hope
2701   // that this transformation will allow the new ORs to be optimized.
2702   {
2703     Value *X = nullptr, *Y = nullptr;
2704     if (Op0->hasOneUse() && Op1->hasOneUse() &&
2705         match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
2706         match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
2707       Value *orTrue = Builder.CreateOr(A, C);
2708       Value *orFalse = Builder.CreateOr(B, D);
2709       return SelectInst::Create(X, orTrue, orFalse);
2710     }
2711   }
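       // For example:
       //   %s1 = select i1 %cond, i32 %a, i32 %b
       //   %s2 = select i1 %cond, i32 %c, i32 %d
       //   %r  = or i32 %s1, %s2
       // -->
       //   %t = or i32 %a, %c
       //   %f = or i32 %b, %d
       //   %r = select i1 %cond, i32 %t, i32 %f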
2712 
2713   // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y)-1), X)  --> X s> Y ? -1 : X.
2714   {
2715     Value *X, *Y;
2716     const APInt *ShAmt;
2717     Type *Ty = I.getType();
2718     if (match(&I, m_c_Or(m_OneUse(m_AShr(m_NSWSub(m_Value(Y), m_Value(X)),
2719                                          m_APInt(ShAmt))),
2720                          m_Deferred(X))) &&
2721         *ShAmt == Ty->getScalarSizeInBits() - 1) {
2722       Value *NewICmpInst = Builder.CreateICmpSGT(X, Y);
2723       return SelectInst::Create(NewICmpInst, ConstantInt::getAllOnesValue(Ty),
2724                                 X);
2725     }
2726   }
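       // For example, for i32 (the shift amount is the sign-bit index, 31):
       //   %sub = sub nsw i32 %y, %x
       //   %sh  = ashr i32 %sub, 31
       //   %r   = or i32 %sh, %x
       // -->
       //   %cmp = icmp sgt i32 %x, %y
       //   %r   = select i1 %cmp, i32 -1, i32 %x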
2727 
2728   if (Instruction *V =
2729           canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
2730     return V;
2731 
2732   return nullptr;
2733 }
2734 
2735 /// A ^ B can be specified using other logic ops in a variety of patterns. We
2736 /// can fold these early and efficiently by morphing an existing instruction.
2737 static Instruction *foldXorToXor(BinaryOperator &I,
2738                                  InstCombiner::BuilderTy &Builder) {
2739   assert(I.getOpcode() == Instruction::Xor);
2740   Value *Op0 = I.getOperand(0);
2741   Value *Op1 = I.getOperand(1);
2742   Value *A, *B;
2743 
2744   // There are 4 commuted variants for each of the basic patterns.
2745 
2746   // (A & B) ^ (A | B) -> A ^ B
2747   // (A & B) ^ (B | A) -> A ^ B
2748   // (A | B) ^ (A & B) -> A ^ B
2749   // (A | B) ^ (B & A) -> A ^ B
2750   if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)),
2751                         m_c_Or(m_Deferred(A), m_Deferred(B))))) {
2752     I.setOperand(0, A);
2753     I.setOperand(1, B);
2754     return &I;
2755   }
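       // Rationale for the fold above: where A == B, the 'and' and 'or' bits
       // agree, so the xor yields 0; where A != B, they are 0 and 1, so the
       // xor yields 1. That is exactly A ^ B.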
2756 
2757   // (A | ~B) ^ (~A | B) -> A ^ B
2758   // (~B | A) ^ (~A | B) -> A ^ B
2759   // (~A | B) ^ (A | ~B) -> A ^ B
2760   // (B | ~A) ^ (A | ~B) -> A ^ B
2761   if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))),
2762                       m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B))))) {
2763     I.setOperand(0, A);
2764     I.setOperand(1, B);
2765     return &I;
2766   }
2767 
2768   // (A & ~B) ^ (~A & B) -> A ^ B
2769   // (~B & A) ^ (~A & B) -> A ^ B
2770   // (~A & B) ^ (A & ~B) -> A ^ B
2771   // (B & ~A) ^ (A & ~B) -> A ^ B
2772   if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))),
2773                       m_c_And(m_Not(m_Deferred(A)), m_Deferred(B))))) {
2774     I.setOperand(0, A);
2775     I.setOperand(1, B);
2776     return &I;
2777   }
2778 
2779   // For the remaining cases we need to get rid of one of the operands.
2780   if (!Op0->hasOneUse() && !Op1->hasOneUse())
2781     return nullptr;
2782 
2783   // (A | B) ^ ~(A & B) -> ~(A ^ B)
2784   // (A | B) ^ ~(B & A) -> ~(A ^ B)
2785   // (A & B) ^ ~(A | B) -> ~(A ^ B)
2786   // (A & B) ^ ~(B | A) -> ~(A ^ B)
2787   // Complexity sorting ensures the not will be on the right side.
2788   if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
2789        match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
2790       (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2791        match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
2792     return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
2793 
2794   return nullptr;
2795 }
2796 
2797 Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
2798                                     BinaryOperator &I) {
2799   assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
2800          I.getOperand(1) == RHS && "Should be 'xor' with these operands");
2801 
2802   if (predicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) {
2803     if (LHS->getOperand(0) == RHS->getOperand(1) &&
2804         LHS->getOperand(1) == RHS->getOperand(0))
2805       LHS->swapOperands();
2806     if (LHS->getOperand(0) == RHS->getOperand(0) &&
2807         LHS->getOperand(1) == RHS->getOperand(1)) {
2808       // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
2809       Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1);
2810       unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS);
2811       bool IsSigned = LHS->isSigned() || RHS->isSigned();
2812       return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder);
2813     }
2814   }
2815 
2816   // TODO: This can be generalized to compares of non-signbits using
2817   // decomposeBitTestICmp(). It could be enhanced more by using (something like)
2818   // foldLogOpOfMaskedICmps().
2819   ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
2820   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
2821   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
2822   if ((LHS->hasOneUse() || RHS->hasOneUse()) &&
2823       LHS0->getType() == RHS0->getType() &&
2824       LHS0->getType()->isIntOrIntVectorTy()) {
2825     // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
2826     // (X <  0) ^ (Y <  0) --> (X ^ Y) < 0
2827     if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_AllOnes()) &&
2828          PredR == CmpInst::ICMP_SGT && match(RHS1, m_AllOnes())) ||
2829         (PredL == CmpInst::ICMP_SLT && match(LHS1, m_Zero()) &&
2830          PredR == CmpInst::ICMP_SLT && match(RHS1, m_Zero()))) {
2831       Value *Zero = ConstantInt::getNullValue(LHS0->getType());
2832       return Builder.CreateICmpSLT(Builder.CreateXor(LHS0, RHS0), Zero);
2833     }
2834     // (X > -1) ^ (Y <  0) --> (X ^ Y) > -1
2835     // (X <  0) ^ (Y > -1) --> (X ^ Y) > -1
2836     if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_AllOnes()) &&
2837          PredR == CmpInst::ICMP_SLT && match(RHS1, m_Zero())) ||
2838         (PredL == CmpInst::ICMP_SLT && match(LHS1, m_Zero()) &&
2839          PredR == CmpInst::ICMP_SGT && match(RHS1, m_AllOnes()))) {
2840       Value *MinusOne = ConstantInt::getAllOnesValue(LHS0->getType());
2841       return Builder.CreateICmpSGT(Builder.CreateXor(LHS0, RHS0), MinusOne);
2842     }
2843   }
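       // For example, with the first pair of predicates:
       //   %a = icmp sgt i32 %x, -1
       //   %b = icmp sgt i32 %y, -1
       //   %r = xor i1 %a, %b
       // -->
       //   %t = xor i32 %x, %y
       //   %r = icmp slt i32 %t, 0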
2844 
2845   // Instead of trying to imitate the folds for and/or, decompose this 'xor'
2846   // into those logic ops. That is, try to turn this into an and-of-icmps
2847   // because we have many folds for that pattern.
2848   //
2849   // This is based on a truth table definition of xor:
2850   // X ^ Y --> (X | Y) & !(X & Y)
2851   if (Value *OrICmp = SimplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
2852     // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
2853     // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
2854     if (Value *AndICmp = SimplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
2855       // TODO: Independently handle cases where the 'and' side is a constant.
2856       ICmpInst *X = nullptr, *Y = nullptr;
2857       if (OrICmp == LHS && AndICmp == RHS) {
2858         // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS  --> X & !Y
2859         X = LHS;
2860         Y = RHS;
2861       }
2862       if (OrICmp == RHS && AndICmp == LHS) {
2863         // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS  --> !Y & X
2864         X = RHS;
2865         Y = LHS;
2866       }
2867       if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) {
2868         // Invert the predicate of 'Y', thus inverting its output.
2869         Y->setPredicate(Y->getInversePredicate());
2870         // So, are there other uses of Y?
2871         if (!Y->hasOneUse()) {
2872           // We need to adapt other uses of Y though. Get a value that matches
2873           // the original value of Y before inversion. While this immediately
2874           // increases the instruction count, we have just ensured that all the
2875           // users are freely invertible, so that 'not' *will* get folded away.
2876           BuilderTy::InsertPointGuard Guard(Builder);
2877           // Set the insertion point to right after Y.
2878           Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
2879           Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
2880           // Replace all uses of Y (excluding the one in NotY!) with NotY.
2881           Y->replaceUsesWithIf(NotY,
2882                                [NotY](Use &U) { return U.getUser() != NotY; });
2883         }
2884         // All done.
2885         return Builder.CreateAnd(LHS, RHS);
2886       }
2887     }
2888   }
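       // Sketch of the decomposition above with hypothetical operands: if
       // LHS = (icmp ult %a, 20) and RHS = (icmp ult %a, 10), InstSimplify
       // proves LHS|RHS == LHS and LHS&RHS == RHS, so the xor becomes
       // LHS & !RHS, i.e. (icmp ult %a, 20) & (icmp uge %a, 10).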
2889 
2890   return nullptr;
2891 }
2892 
2893 /// If we have a masked merge, in the canonical form of:
2894 /// (assuming that A only has one use.)
2895 ///   |        A  |  |B|
2896 ///   ((x ^ y) & M) ^ y
2897 ///    |  D  |
2898 /// * If M is inverted:
2899 ///      |  D  |
2900 ///     ((x ^ y) & ~M) ^ y
2901 ///   We can canonicalize by swapping the final xor operand
2902 ///   to eliminate the 'not' of the mask.
2903 ///     ((x ^ y) & M) ^ x
2904 /// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
2905 ///   because that shortens the dependency chain and improves analysis:
2906 ///     (x & M) | (y & ~M)
2907 static Instruction *visitMaskedMerge(BinaryOperator &I,
2908                                      InstCombiner::BuilderTy &Builder) {
2909   Value *B, *X, *D;
2910   Value *M;
2911   if (!match(&I, m_c_Xor(m_Value(B),
2912                          m_OneUse(m_c_And(
2913                              m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)),
2914                                           m_Value(D)),
2915                              m_Value(M))))))
2916     return nullptr;
2917 
2918   Value *NotM;
2919   if (match(M, m_Not(m_Value(NotM)))) {
2920     // De-invert the mask and swap the final xor operand from B to X.
2921     Value *NewA = Builder.CreateAnd(D, NotM);
2922     return BinaryOperator::CreateXor(NewA, X);
2923   }
2924 
2925   Constant *C;
2926   if (D->hasOneUse() && match(M, m_Constant(C))) {
2927     // Unfold.
2928     Value *LHS = Builder.CreateAnd(X, C);
2929     Value *NotC = Builder.CreateNot(C);
2930     Value *RHS = Builder.CreateAnd(B, NotC);
2931     return BinaryOperator::CreateOr(LHS, RHS);
2932   }
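       // Illustration of the unfold with a hypothetical constant mask:
       //   %d = xor i32 %x, %b
       //   %a = and i32 %d, 255
       //   %r = xor i32 %a, %b
       // -->
       //   %lhs = and i32 %x, 255
       //   %rhs = and i32 %b, -256       ; %b & ~255
       //   %r   = or i32 %lhs, %rhs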
2933 
2934   return nullptr;
2935 }
2936 
2937 // Transform
2938 //   ~(x ^ y)
2939 // into:
2940 //   (~x) ^ y
2941 // or into
2942 //   x ^ (~y)
2943 static Instruction *sinkNotIntoXor(BinaryOperator &I,
2944                                    InstCombiner::BuilderTy &Builder) {
2945   Value *X, *Y;
2946   // FIXME: the one-use check is not needed in general, but currently we are
2947   // unable to fold a 'not' into an 'icmp' that has multiple uses. (D35182)
2948   if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y))))))
2949     return nullptr;
2950 
2951   // We only want to do the transform if it is free to do so.
2952   if (isFreeToInvert(X, X->hasOneUse())) {
2953     // Ok, good.
2954   } else if (isFreeToInvert(Y, Y->hasOneUse())) {
2955     std::swap(X, Y);
2956   } else
2957     return nullptr;
2958 
2959   Value *NotX = Builder.CreateNot(X, X->getName() + ".not");
2960   return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan");
2961 }
2962 
2963 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2964 // here. We should standardize that construct where it is needed or choose some
2965 // other way to ensure that commutated variants of patterns are not missed.
2966 Instruction *InstCombiner::visitXor(BinaryOperator &I) {
2967   if (Value *V = SimplifyXorInst(I.getOperand(0), I.getOperand(1),
2968                                  SQ.getWithInstruction(&I)))
2969     return replaceInstUsesWith(I, V);
2970 
2971   if (SimplifyAssociativeOrCommutative(I))
2972     return &I;
2973 
2974   if (Instruction *X = foldVectorBinop(I))
2975     return X;
2976 
2977   if (Instruction *NewXor = foldXorToXor(I, Builder))
2978     return NewXor;
2979 
2980   // (A&B)^(A&C) -> A&(B^C) etc
2981   if (Value *V = SimplifyUsingDistributiveLaws(I))
2982     return replaceInstUsesWith(I, V);
2983 
2984   // See if we can simplify any instructions used by the instruction whose sole
2985   // purpose is to compute bits we don't care about.
2986   if (SimplifyDemandedInstructionBits(I))
2987     return &I;
2988 
2989   if (Value *V = SimplifyBSwap(I, Builder))
2990     return replaceInstUsesWith(I, V);
2991 
2992   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2993 
2994   // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
2995   // This is a special case in haveNoCommonBitsSet, but the computeKnownBits
2996   // calls in there are unnecessary as SimplifyDemandedInstructionBits should
2997   // have already taken care of those cases.
2998   Value *M;
2999   if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
3000                         m_c_And(m_Deferred(M), m_Value()))))
3001     return BinaryOperator::CreateOr(Op0, Op1);
3002 
3003   // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
3004   Value *X, *Y;
3005 
3006   // We must eliminate the and/or (one-use) for these transforms to not increase
3007   // the instruction count.
3008   // ~(~X & Y) --> (X | ~Y)
3009   // ~(Y & ~X) --> (X | ~Y)
3010   if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) {
3011     Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
3012     return BinaryOperator::CreateOr(X, NotY);
3013   }
3014   // ~(~X | Y) --> (X & ~Y)
3015   // ~(Y | ~X) --> (X & ~Y)
3016   if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) {
3017     Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
3018     return BinaryOperator::CreateAnd(X, NotY);
3019   }
3020 
3021   if (Instruction *Xor = visitMaskedMerge(I, Builder))
3022     return Xor;
3023 
3024   // Is this a 'not' (~) fed by a binary operator?
3025   BinaryOperator *NotVal;
3026   if (match(&I, m_Not(m_BinOp(NotVal)))) {
3027     if (NotVal->getOpcode() == Instruction::And ||
3028         NotVal->getOpcode() == Instruction::Or) {
3029       // Apply DeMorgan's Law when inverts are free:
3030       // ~(X & Y) --> (~X | ~Y)
3031       // ~(X | Y) --> (~X & ~Y)
3032       if (isFreeToInvert(NotVal->getOperand(0),
3033                          NotVal->getOperand(0)->hasOneUse()) &&
3034           isFreeToInvert(NotVal->getOperand(1),
3035                          NotVal->getOperand(1)->hasOneUse())) {
3036         Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs");
3037         Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs");
3038         if (NotVal->getOpcode() == Instruction::And)
3039           return BinaryOperator::CreateOr(NotX, NotY);
3040         return BinaryOperator::CreateAnd(NotX, NotY);
3041       }
3042     }
3043 
3044     // ~(X - Y) --> ~X + Y
3045     if (match(NotVal, m_Sub(m_Value(X), m_Value(Y))))
3046       if (isa<Constant>(X) || NotVal->hasOneUse())
3047         return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y);
3048 
3049     // ~(~X >>s Y) --> (X >>s Y)
3050     if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
3051       return BinaryOperator::CreateAShr(X, Y);
3052 
3053     // If we are inverting a right-shifted constant, we may be able to eliminate
3054     // the 'not' by inverting the constant and using the opposite shift type.
3055     // Canonicalization rules ensure that only a negative constant uses 'ashr',
3056     // but we must check that in case that transform has not fired yet.
3057 
3058     // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
3059     Constant *C;
3060     if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) &&
3061         match(C, m_Negative()))
3062       return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y);
3063 
3064     // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
3065     if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) &&
3066         match(C, m_NonNegative()))
3067       return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y);
3068 
3069     // ~(X + C) --> -(X + C) - 1 --> -(C + 1) - X
3070     if (match(Op0, m_Add(m_Value(X), m_Constant(C))))
3071       return BinaryOperator::CreateSub(ConstantExpr::getNeg(AddOne(C)), X);
3072   }
3073 
3074   // Use DeMorgan and reassociation to eliminate a 'not' op.
3075   Constant *C1;
3076   if (match(Op1, m_Constant(C1))) {
3077     Constant *C2;
3078     if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) {
3079       // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
3080       Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2));
3081       return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1));
3082     }
3083     if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) {
3084       // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
3085       Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2));
3086       return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1));
3087     }
3088   }
3089 
3090   // not (cmp A, B) = !cmp A, B
3091   CmpInst::Predicate Pred;
3092   if (match(&I, m_Not(m_OneUse(m_Cmp(Pred, m_Value(), m_Value()))))) {
3093     cast<CmpInst>(Op0)->setPredicate(CmpInst::getInversePredicate(Pred));
3094     return replaceInstUsesWith(I, Op0);
3095   }
3096 
3097   {
3098     const APInt *RHSC;
3099     if (match(Op1, m_APInt(RHSC))) {
3100       Value *X;
3101       const APInt *C;
3102       if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X)))) {
3103         // (C - X) ^ signmask -> (C + signmask - X)
3104         Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
3105         return BinaryOperator::CreateSub(NewC, X);
3106       }
3107       if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C)))) {
3108         // (X + C) ^ signmask -> (X + C + signmask)
3109         Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC);
3110         return BinaryOperator::CreateAdd(X, NewC);
3111       }
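           // In both signmask folds above, xor with the sign mask is the same
           // as adding it (a carry out of the top bit is discarded), which is
           // why the constants can be merged.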
3112 
3113       // (X|C1)^C2 -> X^(C1^C2) iff X&C1 == 0
3114       if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
3115           MaskedValueIsZero(X, *C, 0, &I)) {
3116         Constant *NewC = ConstantInt::get(I.getType(), *C ^ *RHSC);
3117         Worklist.Add(cast<Instruction>(Op0));
3118         I.setOperand(0, X);
3119         I.setOperand(1, NewC);
3120         return &I;
3121       }
3122     }
3123   }
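       // As an illustration of the last fold, if known bits prove %x & 240 == 0:
       //   %o = or i32 %x, 240
       //   %r = xor i32 %o, 255   -->   %r = xor i32 %x, 15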
3124 
3125   if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) {
3126     if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
3127       if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) {
3128         if (Op0I->getOpcode() == Instruction::LShr) {
3129           // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
3130           // E1 = "X ^ C1"
3131           BinaryOperator *E1;
3132           ConstantInt *C1;
3133           if (Op0I->hasOneUse() &&
3134               (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) &&
3135               E1->getOpcode() == Instruction::Xor &&
3136               (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) {
3137             // fold (C1 >> C2) ^ C3
3138             ConstantInt *C2 = Op0CI, *C3 = RHSC;
3139             APInt FoldConst = C1->getValue().lshr(C2->getValue());
3140             FoldConst ^= C3->getValue();
3141             // Prepare the two operands.
3142             Value *Opnd0 = Builder.CreateLShr(E1->getOperand(0), C2);
3143             Opnd0->takeName(Op0I);
3144             cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc());
3145             Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst);
3146 
3147             return BinaryOperator::CreateXor(Opnd0, FoldVal);
3148           }
3149         }
3150       }
3151     }
3152   }
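       // Sketch of the fold above with hypothetical constants:
       //   %e = xor i32 %x, 240          ; X ^ C1
       //   %s = lshr i32 %e, 4           ; >> C2
       //   %r = xor i32 %s, 1            ; ^ C3
       // -->
       //   %s2 = lshr i32 %x, 4
       //   %r  = xor i32 %s2, 14         ; (240 >> 4) ^ 1 == 14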
3153 
3154   if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
3155     return FoldedLogic;
3156 
3157   // Y ^ (X | Y) --> X & ~Y
3158   // Y ^ (Y | X) --> X & ~Y
3159   if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0)))))
3160     return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0));
3161   // (X | Y) ^ Y --> X & ~Y
3162   // (Y | X) ^ Y --> X & ~Y
3163   if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1)))))
3164     return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1));
3165 
3166   // Y ^ (X & Y) --> ~X & Y
3167   // Y ^ (Y & X) --> ~X & Y
3168   if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0)))))
3169     return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X));
3170   // (X & Y) ^ Y --> ~X & Y
3171   // (Y & X) ^ Y --> ~X & Y
3172   // Canonical form is (X & C) ^ C; don't touch that.
3173   // TODO: A 'not' op is better for analysis and codegen, but demanded bits must
3174   //       be fixed to prefer that (otherwise we get infinite looping).
3175   if (!match(Op1, m_Constant()) &&
3176       match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1)))))
3177     return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X));
3178 
3179   Value *A, *B, *C;
3180   // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
3181   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
3182                         m_OneUse(m_c_Or(m_Deferred(A), m_Value(C))))))
3183     return BinaryOperator::CreateXor(
3184         Builder.CreateAnd(Builder.CreateNot(A), C), B);
3185 
3186   // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
3187   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
3188                         m_OneUse(m_c_Or(m_Deferred(B), m_Value(C))))))
3189     return BinaryOperator::CreateXor(
3190         Builder.CreateAnd(Builder.CreateNot(B), C), A);
3191 
3192   // (A & B) ^ (A ^ B) -> (A | B)
3193   if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
3194       match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
3195     return BinaryOperator::CreateOr(A, B);
3196   // (A ^ B) ^ (A & B) -> (A | B)
3197   if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
3198       match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
3199     return BinaryOperator::CreateOr(A, B);
3200 
3201   // (A & ~B) ^ ~A -> ~(A & B)
3202   // (~B & A) ^ ~A -> ~(A & B)
3203   if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
3204       match(Op1, m_Not(m_Specific(A))))
3205     return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
3206 
3207   if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
3208     if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
3209       if (Value *V = foldXorOfICmps(LHS, RHS, I))
3210         return replaceInstUsesWith(I, V);
3211 
3212   if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
3213     return CastedXor;
3214 
3215   // Canonicalize a shifty way to code absolute value to the common pattern.
3216   // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
3217   // We're relying on the fact that we only do this transform when the shift has
3218   // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
3219   // instructions).
3220   if (Op0->hasNUses(2))
3221     std::swap(Op0, Op1);
3222 
3223   const APInt *ShAmt;
3224   Type *Ty = I.getType();
3225   if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
3226       Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
3227       match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
3228     // B = ashr i32 A, 31 ; smear the sign bit
3229     // xor (add A, B), B  ; add -1 and flip bits if negative
3230     // --> (A < 0) ? -A : A
3231     Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty));
3232     // Copy the nuw/nsw flags from the add to the negate.
3233     auto *Add = cast<BinaryOperator>(Op0);
3234     Value *Neg = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(),
3235                                    Add->hasNoSignedWrap());
3236     return SelectInst::Create(Cmp, Neg, A);
3237   }
3238 
3239   // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
3240   //
3241   //   %notx = xor i32 %x, -1
3242   //   %cmp1 = icmp sgt i32 %notx, %y
3243   //   %smax = select i1 %cmp1, i32 %notx, i32 %y
3244   //   %res = xor i32 %smax, -1
3245   // =>
3246   //   %noty = xor i32 %y, -1
3247   //   %cmp2 = icmp slt %x, %noty
3248   //   %res = select i1 %cmp2, i32 %x, i32 %noty
3249   //
3250   // Same is applicable for smin/umax/umin.
3251   if (match(Op1, m_AllOnes()) && Op0->hasOneUse()) {
3252     Value *LHS, *RHS;
3253     SelectPatternFlavor SPF = matchSelectPattern(Op0, LHS, RHS).Flavor;
3254     if (SelectPatternResult::isMinOrMax(SPF)) {
3255       // It's possible we get here before the not has been simplified, so make
3256       // sure the input to the not isn't freely invertible.
3257       if (match(LHS, m_Not(m_Value(X))) && !isFreeToInvert(X, X->hasOneUse())) {
3258         Value *NotY = Builder.CreateNot(RHS);
3259         return SelectInst::Create(
3260             Builder.CreateICmp(getInverseMinMaxPred(SPF), X, NotY), X, NotY);
3261       }
3262 
3263       // It's possible we get here before the not has been simplified, so make
3264       // sure the input to the not isn't freely invertible.
3265       if (match(RHS, m_Not(m_Value(Y))) && !isFreeToInvert(Y, Y->hasOneUse())) {
3266         Value *NotX = Builder.CreateNot(LHS);
3267         return SelectInst::Create(
3268             Builder.CreateICmp(getInverseMinMaxPred(SPF), NotX, Y), NotX, Y);
3269       }
3270 
3271       // If both sides are freely invertible, then we can get rid of the xor
3272       // completely.
3273       if (isFreeToInvert(LHS, !LHS->hasNUsesOrMore(3)) &&
3274           isFreeToInvert(RHS, !RHS->hasNUsesOrMore(3))) {
3275         Value *NotLHS = Builder.CreateNot(LHS);
3276         Value *NotRHS = Builder.CreateNot(RHS);
3277         return SelectInst::Create(
3278             Builder.CreateICmp(getInverseMinMaxPred(SPF), NotLHS, NotRHS),
3279             NotLHS, NotRHS);
3280       }
3281     }
3282   }
3283 
3284   if (Instruction *NewXor = sinkNotIntoXor(I, Builder))
3285     return NewXor;
3286 
3287   return nullptr;
3288 }
3289