//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// This is the complement of getICmpCode: it turns a predicate code and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate
/// (signed or unsigned) to use in the new icmp instruction.
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode: it turns a predicate code and two
/// operands into either an FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {
  FCmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForFCmpCode(Code, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateFCmp(NewPred, LHS, RHS);
}

/// Transform BITWISE_OP(BSWAP(A), BSWAP(B)) or
/// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B))
/// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
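/// For example (an illustrative sketch; the constants are chosen here):
///   or (bswap X), (bswap Y)  --> bswap (or X, Y)
///   and (bswap X), 255       --> bswap (and X, 0xFF000000)
/// since byte-swapping the i32 constant 255 yields 0xFF000000.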
static Value *SimplifyBSwap(BinaryOperator &I,
                            InstCombiner::BuilderTy &Builder) {
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");

  Value *OldLHS = I.getOperand(0);
  Value *OldRHS = I.getOperand(1);

  Value *NewLHS;
  if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
    return nullptr;

  Value *NewRHS;
  const APInt *C;

  if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
    // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
    if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
      return nullptr;
    // NewRHS initialized by the matcher.
  } else if (match(OldRHS, m_APInt(C))) {
    // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
    if (!OldLHS->hasOneUse())
      return nullptr;
    NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
  } else
    return nullptr;

  Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
                                          I.getType());
  return Builder.CreateCall(F, BinOp);
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
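/// For example (an illustrative call, not from the source): Lo = 5, Hi = 10,
/// isSigned = false, Inside = true emits
///   (V - 5) u< 5
/// which holds exactly when 5 <= V < 10.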
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V <  Hi --> V <  Hi
  // V <  Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V <  Hi --> V - Lo u<  Hi - Lo
  // V <  Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
123 /// "AllOnes" declares that the comparison is true only if (A & B) == A or all
124 /// bits of A are set in B.
125 ///   Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
126 ///
127 /// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
128 /// bits of A are cleared in B.
129 ///   Example: (icmp eq (A & 3), 0) -> Mask_AllZeroes
130 ///
131 /// "Mixed" declares that (A & B) == C and C might or might not contain any
132 /// number of one bits and zero bits.
133 ///   Example: (icmp eq (A & 3), 1) -> AMask_Mixed
134 ///
135 /// "Not" means that in above descriptions "==" should be replaced by "!=".
///   Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///    (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///    (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes           =     1,
  AMask_NotAllOnes        =     2,
  BMask_AllOnes           =     4,
  BMask_NotAllOnes        =     8,
  Mask_AllZeros           =    16,
  Mask_NotAllZeros        =    32,
  AMask_Mixed             =    64,
  AMask_NotMixed          =   128,
  BMask_Mixed             =   256,
  BMask_NotMixed          =   512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  const APInt *ConstA = nullptr, *ConstB = nullptr, *ConstC = nullptr;
  match(A, m_APInt(ConstA));
  match(B, m_APInt(ConstB));
  match(C, m_APInt(ConstC));
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = ConstA && ConstA->isPowerOf2();
  bool IsBPow2 = ConstB && ConstB->isPowerOf2();
  unsigned MaskVal = 0;
  if (ConstC && ConstC->isZero()) {
    // If C is zero, then both A and B qualify as masks.
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ConstA && ConstC && ConstC->isSubsetOf(*ConstA)) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
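/// For example, conjugateICmpMask(AMask_AllOnes | BMask_Mixed) yields
/// AMask_NotAllOnes | BMask_NotMixed, and vice versa.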
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}

// Adapts the external decomposeBitTestICmp for local use.
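// For example (an illustrative decomposition): (icmp sgt X, -1) becomes
// (icmp eq (X & SignMask), 0), i.e. Pred = ICMP_EQ, Y = SignMask, Z = 0.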
static bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred,
                                 Value *&X, Value *&Y, Value *&Z) {
  APInt Mask;
  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
    return false;

  Y = ConstantInt::get(X->getType(), Mask);
  Z = ConstantInt::get(X->getType(), 0);
  return true;
}

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
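/// As an illustrative sketch: for LHS = (icmp eq (X & 12), 0) and
/// RHS = (icmp eq (X & 3), 3), this extracts A = X, B = 12, C = 0, D = 3,
/// E = 3; the left-hand class then contains Mask_AllZeros and the right-hand
/// class contains BMask_AllOnes.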
static std::optional<std::pair<unsigned, unsigned>> getMaskedTypeForICmpPair(
    Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, ICmpInst *LHS,
    ICmpInst *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR) {
  // Don't allow pointers. Splat vectors are fine.
  if (!LHS->getOperand(0)->getType()->isIntOrIntVectorTy() ||
      !RHS->getOperand(0)->getType()->isIntOrIntVectorTy())
    return std::nullopt;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // or L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // pattern above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return std::nullopt;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool Ok = false;
  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return std::nullopt;
    }
    E = R2;
    R1 = nullptr;
    Ok = true;
  } else {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return std::nullopt;

  // Look for ANDs on the right side of the RHS icmp.
  if (!Ok) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R1;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R1;
      Ok = true;
    } else {
      return std::nullopt;
    }

    assert(Ok && "Failed to find AND on the right side of the RHS icmp.");
  }

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return std::optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E).
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //      !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently only handle the case where B, C, D, and E are constants.
  //
  const APInt *BCst, *CCst, *DCst, *OrigECst;
  if (!match(B, m_APInt(BCst)) || !match(C, m_APInt(CCst)) ||
      !match(D, m_APInt(DCst)) || !match(E, m_APInt(OrigECst)))
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  // (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  // (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  APInt ECst = *OrigECst;
  if (PredR != NewCC)
    ECst ^= *DCst;

  // If B or D is zero, skip: LHS or RHS can then be trivially folded by
  // other folding rules, and this pattern won't apply anymore.
  if (*BCst == 0 || *DCst == 0)
    return nullptr;

  // If B and D don't intersect, i.e. (B & D) == 0, there is no folding because
  // we can't deduce anything from it.
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding.
  if ((*BCst & *DCst) == 0)
    return nullptr;

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  // (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection of
  // B and D has only one bit set) and,
  //
  // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
  // words, the intersection of B and D is zero), that is, ((B & D) & E) == 0
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  // (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((*BCst & *DCst) & ECst) == 0) &&
      (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
    APInt BorD = *BCst | *DCst;
    APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
    Value *NewMask = ConstantInt::get(A->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(A->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C1;
  };
  auto IsSuperSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C2;
  };

  // In the following, we consider only the cases where B is a superset of D, B
  // is a subset of D, or B == D because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above.)
  // For example,
  // (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D or B == D.

  // If E is zero, if B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated), otherwise, no
  // folding.
  // For example,
  // (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  // (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst.isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, E aren't zero and (B & D) == B, (B & D) == D or B ==
  // D. If B is a superset of (or equal to) D, since E is not zero, LHS is
  // subsumed by RHS (RHS implies LHS.) So the whole expression becomes
  // RHS. For example,
  // (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  // (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst))
    return RHS;
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // i.e. (B & E) != 0, then LHS is subsumed by RHS. For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((*BCst & ECst) != 0)
    return RHS;
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated.) For example,
  // (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  // (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  //    which gets swapped to
  //    (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, C, D, E,
            PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, E, B, C,
            PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
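/// For example (an illustrative instance of the Mask_AllZeros case below):
///   (icmp eq (A & 12), 0) & (icmp eq (A & 3), 0) -> (icmp eq (A & 15), 0)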
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // where B and D each have a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  // Remaining cases assume at least that B and D are constant, and depend on
  // their actual values. This isn't strictly necessary, just a "handle the
  // easy cases for now" decision.
  const APInt *ConstB, *ConstD;
  if (!match(B, m_APInt(ConstB)) || !match(D, m_APInt(ConstD)))
    return nullptr;

  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    //     -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D" is
    // the same as either B or D).
    APInt NewMask = *ConstB & *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & AMask_NotAllOnes) {
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    //     -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D" is
    // the same as either B or D).
    APInt NewMask = *ConstB | *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & (BMask_Mixed | BMask_NotMixed)) {
    // Mixed:
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // We can't simply use C and E because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // where B and D each have a single bit set.

    // NotMixed:
    // (icmp ne (A & B), C) & (icmp ne (A & D), E)
    // -> (icmp ne (A & (B & D)), (C & E))
    // Check the intersection (B & D) for inequality.
    // Assume that (B & D) == B || (B & D) == D, i.e. B/D is a subset of D/B,
    // and (B & D) & (C ^ E) == 0, i.e. the bits of C and E that are shared by
    // both masks B and D don't contradict.
    // Note that we can assume (~B & C) == 0 && (~D & E) == 0; earlier folds
    // should already have removed icmps for which that doesn't hold.

    const APInt *OldConstC, *OldConstE;
    if (!match(C, m_APInt(OldConstC)) || !match(E, m_APInt(OldConstE)))
      return nullptr;

    auto FoldBMixed = [&](ICmpInst::Predicate CC, bool IsNot) -> Value * {
      CC = IsNot ? CmpInst::getInversePredicate(CC) : CC;
      const APInt ConstC = PredL != CC ? *ConstB ^ *OldConstC : *OldConstC;
      const APInt ConstE = PredR != CC ? *ConstD ^ *OldConstE : *OldConstE;

      if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
        return IsNot ? nullptr : ConstantInt::get(LHS->getType(), !IsAnd);

      if (IsNot && !ConstB->isSubsetOf(*ConstD) && !ConstD->isSubsetOf(*ConstB))
        return nullptr;

      APInt BD, CE;
      if (IsNot) {
        BD = *ConstB & *ConstD;
        CE = ConstC & ConstE;
      } else {
        BD = *ConstB | *ConstD;
        CE = ConstC | ConstE;
      }
      Value *NewAnd = Builder.CreateAnd(A, BD);
      Value *CEVal = ConstantInt::get(A->getType(), CE);
      return Builder.CreateICmp(CC, CEVal, NewAnd);
    };

    if (Mask & BMask_Mixed)
      return FoldBMixed(NewCC, false);
    if (Mask & BMask_NotMixed) // can be else also
      return FoldBMixed(NewCC, true);
  }
  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
                               Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
                               Cmp1->getPredicate());

  Value *Input = Cmp0->getOperand(0);
  Value *RangeEnd;
  if (Cmp1->getOperand(0) == Input) {
    // For the upper range compare we have: icmp x, n
    RangeEnd = Cmp1->getOperand(1);
  } else if (Cmp1->getOperand(1) == Input) {
    // For the upper range compare we have: icmp n, x
    RangeEnd = Cmp1->getOperand(0);
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
    case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
    case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
    default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}

// Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
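// For example, with K1 = 1 and K2 = 8 (constants chosen for illustration):
//   ((A & 1) == 0) | ((A & 8) == 0)  -->  (A & 9) != 9
//   ((A & 1) != 0) & ((A & 8) != 0)  -->  (A & 9) == 9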
Value *InstCombinerImpl::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS,
                                                       ICmpInst *RHS,
                                                       Instruction *CxtI,
                                                       bool IsAnd,
                                                       bool IsLogical) {
  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  if (!match(LHS->getOperand(1), m_Zero()) ||
      !match(RHS->getOperand(1), m_Zero()))
    return nullptr;

  Value *L1, *L2, *R1, *R2;
  if (match(LHS->getOperand(0), m_And(m_Value(L1), m_Value(L2))) &&
      match(RHS->getOperand(0), m_And(m_Value(R1), m_Value(R2)))) {
    if (L1 == R2 || L2 == R2)
      std::swap(R1, R2);
    if (L2 == R1)
      std::swap(L1, L2);

    if (L1 == R1 &&
        isKnownToBeAPowerOfTwo(L2, false, 0, CxtI) &&
        isKnownToBeAPowerOfTwo(R2, false, 0, CxtI)) {
      // If this is a logical and/or, then we must prevent propagation of a
      // poison value from the RHS by inserting freeze.
      if (IsLogical)
        R2 = Builder.CreateFreeze(R2);
      Value *Mask = Builder.CreateOr(L2, R2);
      Value *Masked = Builder.CreateAnd(L1, Mask);
      auto NewPred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
      return Builder.CreateICmp(NewPred, Masked, Mask);
    }
  }

  return nullptr;
}

/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e.  %arg & 4294967168  can be either  4294967168  or  0
/// Pattern can be one of:
///   %t = add        i32 %arg,    128
///   %r = icmp   ult i32 %t,      256
/// Or
///   %t0 = shl       i32 %arg,    24
///   %t1 = ashr      i32 %t0,     24
///   %r  = icmp  eq  i32 %t1,     %arg
/// Or
///   %t0 = trunc     i32 %arg  to i8
///   %t1 = sext      i8  %t0   to i32
///   %r  = icmp  eq  i32 %t1,     %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32   %arg,    -1
/// Or
///   %t = and      i32   %arg,    2147483648
///   %r = icmp eq  i32   %t,      0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
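///
/// A sketch of the full pattern in IR (illustrative, i32 with an i8-sized
/// truncation check):
///   %t = add i32 %arg, 128
///   %a = icmp ult i32 %t, 256      ; high bits of %arg are uniform
///   %b = icmp sgt i32 %arg, -1     ; and the sign bit is zero
///   %r = and i1 %a, %b
/// simplifies to
///   %r = icmp ult i32 %arg, 128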
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  assert(CxtI.getOpcode() == Instruction::And);

  // Match  icmp ult (add %arg, C01), C1   (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    CmpInst::Predicate Pred;
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(ICmp,
                m_ICmp(Pred, m_Add(m_Value(X), m_Power2(I01)), m_Power2(I1))) &&
          Pred == ICmpInst::ICMP_ULT && I1->ugt(*I01) && I01->shl(1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into:  icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpInst::Predicate Pred = ICmp->getPredicate();
    // Can it be decomposed into  icmp eq (X & Mask), 0  ?
    if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1),
                                   Pred, X, UnsetBitsMask,
                                   /*LookThroughTrunc=*/false) &&
        Pred == ICmpInst::ICMP_EQ)
      return true;
    // Is it  icmp eq (X & Mask), 0  already?
    const APInt *Mask;
    if (match(ICmp, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) &&
        Pred == ICmpInst::ICMP_EQ) {
      UnsetBitsMask = *Mask;
      return true;
    }
    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isZero() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(X0, m_Trunc(m_Specific(X1)))) {
    UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask,
  if (!UnsetBitsMask.intersects(SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.

  // %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
                               CxtI.getName() + ".simplified");
}

/// Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and
/// fold (icmp ne ctpop(X) 1) & (icmp ne X 0) into (icmp ugt ctpop(X) 1).
/// Also used for logical and/or, must be poison safe.
static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
                                   InstCombiner::BuilderTy &Builder) {
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
                          m_SpecificInt(1))) ||
      !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())))
    return nullptr;

  Value *CtPop = Cmp0->getOperand(0);
  if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE)
    return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
  if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ)
    return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));

  return nullptr;
}

/// Reduce a pair of compares that check if a value has exactly 1 bit set.
/// Also used for logical and/or, must be poison safe.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder) {
  // Handle 'and' / 'or' commutation: make the equality check the first operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(Cmp0, Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(Cmp0, Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(2))) &&
      Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(1))) &&
      Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_UGT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff
/// B is a contiguous set of ones starting from the most significant bit
/// (negative power of 2), D and E are equal, and D is a contiguous set of ones
/// starting at the most significant zero bit in B. Parameter B supports masking
/// using undef/poison in either scalar or vector values.
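/// For example, with i8 constants chosen for illustration, B = -8 (0xF8) and
/// D = E = 6 (0x06):
///   (icmp eq (A & -8), 0) & (icmp ne (A & 6), 6)  -->  icmp ult A, 6
/// since B has five leading ones and D has five leading zeros.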
static Value *foldNegativePower2AndShiftedMask(
    Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL,
    ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  if (PredL != ICmpInst::ICMP_EQ || PredR != ICmpInst::ICMP_NE)
    return nullptr;

  if (!match(B, m_NegatedPower2()) || !match(D, m_ShiftedMask()) ||
      !match(E, m_ShiftedMask()))
    return nullptr;

  // Test scalar arguments for conversion. B has been validated earlier to be a
  // negative power of two and thus is guaranteed to have one or more contiguous
  // ones starting from the MSB followed by zero or more contiguous zeros. D has
  // been validated earlier to be a shifted set of one or more contiguous ones.
  // In order to match, B leading ones and D leading zeros should be equal. The
  // predicate that B be a negative power of 2 prevents the condition of there
  // ever being zero leading ones. Thus 0 == 0 cannot occur. The predicate that
  // D always be a shifted mask prevents the condition of D equaling 0. This
  // prevents matching the condition where B contains the maximum number of
  // leading one bits (-1) and D contains the maximum number of leading zero
  // bits (0).
  auto isReducible = [](const Value *B, const Value *D, const Value *E) {
    const APInt *BCst, *DCst, *ECst;
    return match(B, m_APIntAllowUndef(BCst)) && match(D, m_APInt(DCst)) &&
           match(E, m_APInt(ECst)) && *DCst == *ECst &&
           (isa<UndefValue>(B) ||
            (BCst->countLeadingOnes() == DCst->countLeadingZeros()));
  };

  // Test vector type arguments for conversion.
  if (const auto *BVTy = dyn_cast<VectorType>(B->getType())) {
    const auto *BFVTy = dyn_cast<FixedVectorType>(BVTy);
    const auto *BConst = dyn_cast<Constant>(B);
    const auto *DConst = dyn_cast<Constant>(D);
    const auto *EConst = dyn_cast<Constant>(E);

    if (!BFVTy || !BConst || !DConst || !EConst)
      return nullptr;

    for (unsigned I = 0; I != BFVTy->getNumElements(); ++I) {
      const auto *BElt = BConst->getAggregateElement(I);
      const auto *DElt = DConst->getAggregateElement(I);
      const auto *EElt = EConst->getAggregateElement(I);

      if (!BElt || !DElt || !EElt)
        return nullptr;
      if (!isReducible(BElt, DElt, EElt))
        return nullptr;
    }
  } else {
    // Test scalar type arguments for conversion.
    if (!isReducible(B, D, E))
      return nullptr;
  }
  return Builder.CreateICmp(ICmpInst::ICMP_ULT, A, D);
}

/// Try to fold ((icmp X u< P) & (icmp (X & M) != M)) or ((icmp X s> -1) &
/// (icmp (X & M) != M)) into (icmp X u< M), where P is a power of 2, M < P,
/// and M is a contiguous shifted mask starting at the rightmost significant
/// zero bit in P. SGT is supported because when P is the largest representable
/// power of 2, an earlier optimization converts the expression into
/// (icmp X s> -1). Parameter P supports masking using undef/poison in either
/// scalar or vector values.
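/// For example (an illustrative i8 instance), P = 8 and M = 6 give
///   (icmp ult X, 8) & (icmp ne (X & 6), 6)  -->  icmp ult X, 6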
static Value *foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                         bool JoinedByAnd,
                                         InstCombiner::BuilderTy &Builder) {
  if (!JoinedByAnd)
    return nullptr;
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate CmpPred0 = Cmp0->getPredicate(),
                      CmpPred1 = Cmp1->getPredicate();
  // Assuming P is a 2^n, getMaskedTypeForICmpPair will normalize (icmp X u<
  // 2^n) into (icmp (X & ~(2^n-1)) == 0) and (icmp X s> -1) into (icmp (X &
  // SignMask) == 0).
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, Cmp0, Cmp1, CmpPred0, CmpPred1);
  if (!MaskPair)
    return nullptr;

  const auto compareBMask = BMask_NotMixed | BMask_NotAllOnes;
  unsigned CmpMask0 = MaskPair->first;
  unsigned CmpMask1 = MaskPair->second;
  if ((CmpMask0 & Mask_AllZeros) && (CmpMask1 == compareBMask)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, B, D, E, CmpPred0,
                                                    CmpPred1, Builder))
      return V;
  } else if ((CmpMask0 == compareBMask) && (CmpMask1 & Mask_AllZeros)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, D, B, C, CmpPred1,
                                                    CmpPred0, Builder))
      return V;
  }
  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
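/// For example (a sketch of the first fold below), writing S for the add
/// (A + B) with B known to be non-zero:
///   (icmp ult S, A) & (icmp ne S, 0)  -->  icmp ult (sub 0, B), A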
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  Value *ZeroCmpOp;
  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  auto IsKnownNonZero = [&](Value *V) {
    return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
  };

  ICmpInst::Predicate UnsignedPred;

  Value *A, *B;
  if (match(UnsignedICmp,
            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!IsKnownNonZero(NonZero))
        std::swap(NonZero, Other);
      return IsKnownNonZero(NonZero);
    };

    // Given  ZeroCmpOp = (A + B):
    //   ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-X) <  Y,
    //   ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-X) >= Y,
    // with X being the one of A/B that is known to be non-zero,
    // and Y being the remaining value.
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
  }

  return nullptr;
}

struct IntPart {
  Value *From;
  unsigned StartBit;
  unsigned NumBits;
};

/// Match an extraction of bits from an integer.
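/// For example, (trunc (lshr i32 %y, 8) to i8) extracts bits [8, 16) of %y
/// and yields {From = %y, StartBit = 8, NumBits = 8}.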
static std::optional<IntPart> matchIntPart(Value *V) {
  Value *X;
  if (!match(V, m_OneUse(m_Trunc(m_Value(X)))))
    return std::nullopt;

  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
  Value *Y;
  const APInt *Shift;
  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
  // from Y, not any shifted-in zeroes.
  if (match(X, m_OneUse(m_LShr(m_Value(Y), m_APInt(Shift)))) &&
      Shift->ule(NumOriginalBits - NumExtractedBits))
    return {{Y, (unsigned)Shift->getZExtValue(), NumExtractedBits}};
  return {{X, 0, NumExtractedBits}};
}

/// Materialize an extraction of bits from an integer in IR.
static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
  Value *V = P.From;
  if (P.StartBit)
    V = Builder.CreateLShr(V, P.StartBit);
  Type *TruncTy = V->getType()->getWithNewBitWidth(P.NumBits);
  if (TruncTy != V->getType())
    V = Builder.CreateTrunc(V, TruncTy);
  return V;
}

/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
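/// For example (an illustrative i32 instance split into i8 parts):
///   (icmp eq (trunc i32 %x to i8), (trunc i32 %y to i8)) &
///   (icmp eq (trunc (lshr i32 %x, 8) to i8), (trunc (lshr i32 %y, 8) to i8))
/// --> icmp eq (trunc i32 %x to i16), (trunc i32 %y to i16)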
Value *InstCombinerImpl::foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                       bool IsAnd) {
  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
  auto GetMatchPart = [&](ICmpInst *Cmp,
                          unsigned OpNo) -> std::optional<IntPart> {
    if (Pred == Cmp->getPredicate())
      return matchIntPart(Cmp->getOperand(OpNo));

    const APInt *C;
    // (icmp eq (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ult (xor x, y), 1 << C) so also look for that.
    if (Pred == CmpInst::ICMP_EQ && Cmp->getPredicate() == CmpInst::ICMP_ULT) {
      if (!match(Cmp->getOperand(1), m_Power2(C)) ||
          !match(Cmp->getOperand(0), m_Xor(m_Value(), m_Value())))
        return std::nullopt;
    }

    // (icmp ne (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ugt (xor x, y), (1 << C) - 1) so also look for that.
    else if (Pred == CmpInst::ICMP_NE &&
             Cmp->getPredicate() == CmpInst::ICMP_UGT) {
      if (!match(Cmp->getOperand(1), m_LowBitMask(C)) ||
          !match(Cmp->getOperand(0), m_Xor(m_Value(), m_Value())))
        return std::nullopt;
    } else {
      return std::nullopt;
    }

    unsigned From = Pred == CmpInst::ICMP_NE ? C->popcount() : C->countr_zero();
    Instruction *I = cast<Instruction>(Cmp->getOperand(0));
    return {{I->getOperand(OpNo), From, C->getBitWidth() - From}};
  };

  std::optional<IntPart> L0 = GetMatchPart(Cmp0, 0);
  std::optional<IntPart> R0 = GetMatchPart(Cmp0, 1);
  std::optional<IntPart> L1 = GetMatchPart(Cmp1, 0);
  std::optional<IntPart> R1 = GetMatchPart(Cmp1, 1);
  if (!L0 || !R0 || !L1 || !R1)
    return nullptr;

  // Make sure the LHS/RHS compare a part of the same value, possibly after
  // an operand swap.
  if (L0->From != L1->From || R0->From != R1->From) {
    if (L0->From != R1->From || R0->From != L1->From)
      return nullptr;
    std::swap(L1, R1);
  }

  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
  // the low part and L1/R1 being the high part.
  if (L0->StartBit + L0->NumBits != L1->StartBit ||
      R0->StartBit + R0->NumBits != R1->StartBit) {
    if (L1->StartBit + L1->NumBits != L0->StartBit ||
        R1->StartBit + R1->NumBits != R0->StartBit)
      return nullptr;
    std::swap(L0, L1);
    std::swap(R0, R1);
  }

  // We can simplify to a comparison of these larger parts of the integers.
  IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
  IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
  Value *LValue = extractIntPart(L, Builder);
  Value *RValue = extractIntPart(R, Builder);
  return Builder.CreateICmp(Pred, LValue, RValue);
}

/// Reduce logic-of-compares with equality to a constant by substituting a
/// common operand with the constant. Callers are expected to call this with
/// Cmp0/Cmp1 switched to handle logic op commutativity.
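/// For example (an illustrative instance of the substitution below):
///   (X == 42) && (Y u< X)  -->  (X == 42) && (Y u< 42)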
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd, bool IsLogical,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q) {
  // Match an equality compare with a non-poison constant as Cmp0.
  // Also, give up if the compare can be constant-folded to avoid looping.
  ICmpInst::Predicate Pred0;
  Value *X;
  Constant *C;
  if (!match(Cmp0, m_ICmp(Pred0, m_Value(X), m_Constant(C))) ||
      !isGuaranteedNotToBeUndefOrPoison(C) || isa<Constant>(X))
    return nullptr;
  if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) ||
      (!IsAnd && Pred0 != ICmpInst::ICMP_NE))
    return nullptr;

  // The other compare must include a common operand (X). Canonicalize the
  // common operand as operand 1 (Pred1 is swapped if the common operand was
  // operand 0).
  Value *Y;
  ICmpInst::Predicate Pred1;
  if (!match(Cmp1, m_c_ICmp(Pred1, m_Value(Y), m_Deferred(X))))
    return nullptr;

  // Replace variable with constant value equivalence to remove a variable use:
  // (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C)
  // (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
  // Can think of the 'or' substitution with the 'and' bool equivalent:
  // A || B --> A || (!A && B)
  Value *SubstituteCmp = simplifyICmpInst(Pred1, Y, C, Q);
  if (!SubstituteCmp) {
    // If we need to create a new instruction, require that the old compare can
    // be removed.
    if (!Cmp1->hasOneUse())
      return nullptr;
    SubstituteCmp = Builder.CreateICmp(Pred1, Y, C);
  }
  if (IsLogical)
    return IsAnd ? Builder.CreateLogicalAnd(Cmp0, SubstituteCmp)
                 : Builder.CreateLogicalOr(Cmp0, SubstituteCmp);
  return Builder.CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
                             SubstituteCmp);
}

/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
/// or   (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// NOTE: This is also used for logical and/or, must be poison-safe!
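/// For example (an illustrative union of adjacent ranges):
///   (icmp ult X, 5) | (icmp eq X, 5)  -->  icmp ult X, 6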
1264 Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
1265                                                      ICmpInst *ICmp2,
1266                                                      bool IsAnd) {
1267   ICmpInst::Predicate Pred1, Pred2;
1268   Value *V1, *V2;
1269   const APInt *C1, *C2;
1270   if (!match(ICmp1, m_ICmp(Pred1, m_Value(V1), m_APInt(C1))) ||
1271       !match(ICmp2, m_ICmp(Pred2, m_Value(V2), m_APInt(C2))))
1272     return nullptr;
1273 
1274   // Look through add of a constant offset on V1, V2, or both operands. This
1275   // allows us to interpret the V + C' < C'' range idiom into a proper range.
1276   const APInt *Offset1 = nullptr, *Offset2 = nullptr;
1277   if (V1 != V2) {
1278     Value *X;
1279     if (match(V1, m_Add(m_Value(X), m_APInt(Offset1))))
1280       V1 = X;
1281     if (match(V2, m_Add(m_Value(X), m_APInt(Offset2))))
1282       V2 = X;
1283   }
1284 
1285   if (V1 != V2)
1286     return nullptr;
1287 
1288   ConstantRange CR1 = ConstantRange::makeExactICmpRegion(
1289       IsAnd ? ICmpInst::getInversePredicate(Pred1) : Pred1, *C1);
1290   if (Offset1)
1291     CR1 = CR1.subtract(*Offset1);
1292 
1293   ConstantRange CR2 = ConstantRange::makeExactICmpRegion(
1294       IsAnd ? ICmpInst::getInversePredicate(Pred2) : Pred2, *C2);
1295   if (Offset2)
1296     CR2 = CR2.subtract(*Offset2);
1297 
1298   Type *Ty = V1->getType();
1299   Value *NewV = V1;
1300   std::optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
1301   if (!CR) {
1302     if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
1303         CR2.isWrappedSet())
1304       return nullptr;
1305 
1306     // Check whether we have equal-size ranges that only differ by one bit.
1307     // In that case we can apply a mask to map one range onto the other.
1308     APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
1309     APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
1310     APInt CR1Size = CR1.getUpper() - CR1.getLower();
1311     if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
1312         CR1Size != CR2.getUpper() - CR2.getLower())
1313       return nullptr;
1314 
1315     CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
1316     NewV = Builder.CreateAnd(NewV, ConstantInt::get(Ty, ~LowerDiff));
1317   }
1318 
1319   if (IsAnd)
1320     CR = CR->inverse();
1321 
1322   CmpInst::Predicate NewPred;
1323   APInt NewC, Offset;
1324   CR->getEquivalentICmp(NewPred, NewC, Offset);
1325 
1326   if (Offset != 0)
1327     NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
1328   return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
1329 }
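
// Worked example of the range fold, assuming i8 operands:
//   (X u>= 5) & (X u< 10)
// IsAnd inverts both predicates, giving CR1 = [0, 5) and CR2 = [10, 0).
// Their exact union is the wrapped set [10, 5); inverting that back yields
// [5, 10), whose equivalent icmp is (X + -5) u< 5.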
1330 
1331 /// Peel off operations that only change the sign of a value (at most one
1332 /// fneg, then one fabs, then one copysign), returning the magnitude value.
1333 static Value *stripSignOnlyFPOps(Value *Val) {
1334   match(Val, m_FNeg(m_Value(Val)));
1335   match(Val, m_FAbs(m_Value(Val)));
1336   match(Val, m_CopySign(m_Value(Val), m_Value()));
1337   return Val;
1338 }
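
// e.g. stripSignOnlyFPOps(fneg(fabs(X))) returns X. Note the peeling order
// is fixed: at most one fneg, then one fabs, then one copysign is removed.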
1339 
1340 /// Matches the canonical form of the not-NaN check: fcmp ord x, 0.0
1341 static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS) {
1342   return P == FCmpInst::FCMP_ORD && match(RHS, m_AnyZeroFP());
1343 }
1344 
1345 /// Matches fcmp u__ x, +/-inf
1346 static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS,
1347                                      Value *RHS) {
1348   return FCmpInst::isUnordered(P) && match(RHS, m_Inf());
1349 }
1350 
1351 /// and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1352 ///
1353 /// Clang emits this pattern when doing an isfinite check in __builtin_isnormal.
1354 static Value *matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS,
1355                                 FCmpInst *RHS) {
1356   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1357   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1358   FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1359 
1360   if (!matchIsNotNaN(PredL, LHS0, LHS1) ||
1361       !matchUnorderedInfCompare(PredR, RHS0, RHS1))
1362     return nullptr;
1363 
1364   IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1365   FastMathFlags FMF = LHS->getFastMathFlags();
1366   FMF &= RHS->getFastMathFlags();
1367   Builder.setFastMathFlags(FMF);
1368 
1369   return Builder.CreateFCmp(FCmpInst::getOrderedPredicate(PredR), RHS0, RHS1);
1370 }
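
// Illustrative fold performed by this helper:
//   (fcmp ord X, 0.0) & (fcmp ult X, +inf) --> fcmp olt X, +inf
// The ord check discharges the NaN case, so the unordered compare against
// infinity can be strengthened to its ordered counterpart.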
1371 
1372 Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
1373                                           bool IsAnd, bool IsLogicalSelect) {
1374   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1375   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1376   FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1377 
1378   if (LHS0 == RHS1 && RHS0 == LHS1) {
1379     // Swap RHS operands to match LHS.
1380     PredR = FCmpInst::getSwappedPredicate(PredR);
1381     std::swap(RHS0, RHS1);
1382   }
1383 
1384   // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
1385   // Suppose the relation between x and y is R, where R is one of
1386   // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
1387   // testing the desired relations.
1388   //
1389   // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1390   //    bool(R & CC0) && bool(R & CC1)
1391   //  = bool((R & CC0) & (R & CC1))
1392   //  = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
1393   //
1394   // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1395   //    bool(R & CC0) || bool(R & CC1)
1396   //  = bool((R & CC0) | (R & CC1))
1397   //  = bool(R & (CC0 | CC1)) <= by distributivity, applied in reverse
1398   if (LHS0 == RHS0 && LHS1 == RHS1) {
1399     unsigned FCmpCodeL = getFCmpCode(PredL);
1400     unsigned FCmpCodeR = getFCmpCode(PredR);
1401     unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1402 
1403     // Intersect the fast math flags.
1404     // TODO: We can union the fast math flags unless this is a logical select.
1405     IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1406     FastMathFlags FMF = LHS->getFastMathFlags();
1407     FMF &= RHS->getFastMathFlags();
1408     Builder.setFastMathFlags(FMF);
1409 
1410     return getFCmpValue(NewPred, LHS0, LHS1, Builder);
1411   }
1412 
1413   // This transform is not valid for a logical select.
1414   if (!IsLogicalSelect &&
1415       ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1416        (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO &&
1417         !IsAnd))) {
1418     if (LHS0->getType() != RHS0->getType())
1419       return nullptr;
1420 
1421     // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
1422     // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
1423     if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP()))
1424       // Ignore the constants because they are obviously not NANs:
1425       // (fcmp ord x, 0.0) & (fcmp ord y, 0.0)  -> (fcmp ord x, y)
1426       // (fcmp uno x, 0.0) | (fcmp uno y, 0.0)  -> (fcmp uno x, y)
1427       return Builder.CreateFCmp(PredL, LHS0, RHS0);
1428   }
1429 
1430   if (IsAnd && stripSignOnlyFPOps(LHS0) == stripSignOnlyFPOps(RHS0)) {
1431     // and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1432     // and (fcmp ord x, 0), (fcmp u* fabs(x), inf) -> fcmp o* x, inf
1433     if (Value *Left = matchIsFiniteTest(Builder, LHS, RHS))
1434       return Left;
1435     if (Value *Right = matchIsFiniteTest(Builder, RHS, LHS))
1436       return Right;
1437   }
1438 
1439   // Turn at least two fcmps with constants into llvm.is.fpclass.
1440   //
1441   // If we can represent a combined value test with one class call, we can
1442   // potentially eliminate 4-6 instructions. If we can represent a test with a
1443   // single fcmp with fneg and fabs, that's likely a better canonical form.
1444   if (LHS->hasOneUse() && RHS->hasOneUse()) {
1445     auto [ClassValRHS, ClassMaskRHS] =
1446         fcmpToClassTest(PredR, *RHS->getFunction(), RHS0, RHS1);
1447     if (ClassValRHS) {
1448       auto [ClassValLHS, ClassMaskLHS] =
1449           fcmpToClassTest(PredL, *LHS->getFunction(), LHS0, LHS1);
1450       if (ClassValLHS == ClassValRHS) {
1451         unsigned CombinedMask = IsAnd ? (ClassMaskLHS & ClassMaskRHS)
1452                                       : (ClassMaskLHS | ClassMaskRHS);
1453         return Builder.CreateIntrinsic(
1454             Intrinsic::is_fpclass, {ClassValLHS->getType()},
1455             {ClassValLHS, Builder.getInt32(CombinedMask)});
1456       }
1457     }
1458   }
1459 
1460   return nullptr;
1461 }
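
// Concrete instance of the predicate-bitmask merge above:
//   (fcmp olt X, Y) | (fcmp ogt X, Y) --> fcmp one X, Y  ; 0100 | 0010 = 0110
//   (fcmp olt X, Y) & (fcmp ogt X, Y) --> false          ; 0100 & 0010 = 0000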
1462 
1463 /// Match an fcmp against a special value, where the comparison performs a
1464 /// class test expressible by llvm.is.fpclass.
1465 static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal,
1466                                    uint64_t &ClassMask) {
1467   auto *FCmp = dyn_cast<FCmpInst>(Op);
1468   if (!FCmp || !FCmp->hasOneUse())
1469     return false;
1470 
1471   std::tie(ClassVal, ClassMask) =
1472       fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
1473                       FCmp->getOperand(0), FCmp->getOperand(1));
1474   return ClassVal != nullptr;
1475 }
1476 
1477 /// or (is_fpclass x, mask0), (is_fpclass x, mask1)
1478 ///     -> is_fpclass x, (mask0 | mask1)
1479 /// and (is_fpclass x, mask0), (is_fpclass x, mask1)
1480 ///     -> is_fpclass x, (mask0 & mask1)
1481 /// xor (is_fpclass x, mask0), (is_fpclass x, mask1)
1482 ///     -> is_fpclass x, (mask0 ^ mask1)
1483 Instruction *InstCombinerImpl::foldLogicOfIsFPClass(BinaryOperator &BO,
1484                                                     Value *Op0, Value *Op1) {
1485   Value *ClassVal0 = nullptr;
1486   Value *ClassVal1 = nullptr;
1487   uint64_t ClassMask0, ClassMask1;
1488 
1489   // Restrict to folding one fcmp into one is.fpclass for now, don't introduce a
1490   // new class.
1491   //
1492   // TODO: Support forming is.fpclass out of 2 separate fcmps when codegen is
1493   // better.
1494 
1495   bool IsLHSClass =
1496       match(Op0, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>(
1497                      m_Value(ClassVal0), m_ConstantInt(ClassMask0))));
1498   bool IsRHSClass =
1499       match(Op1, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>(
1500                      m_Value(ClassVal1), m_ConstantInt(ClassMask1))));
1501   if ((((IsLHSClass || matchIsFPClassLikeFCmp(Op0, ClassVal0, ClassMask0)) &&
1502         (IsRHSClass || matchIsFPClassLikeFCmp(Op1, ClassVal1, ClassMask1)))) &&
1503       ClassVal0 == ClassVal1) {
1504     unsigned NewClassMask;
1505     switch (BO.getOpcode()) {
1506     case Instruction::And:
1507       NewClassMask = ClassMask0 & ClassMask1;
1508       break;
1509     case Instruction::Or:
1510       NewClassMask = ClassMask0 | ClassMask1;
1511       break;
1512     case Instruction::Xor:
1513       NewClassMask = ClassMask0 ^ ClassMask1;
1514       break;
1515     default:
1516       llvm_unreachable("not a binary logic operator");
1517     }
1518 
1519     if (IsLHSClass) {
1520       auto *II = cast<IntrinsicInst>(Op0);
1521       II->setArgOperand(
1522           1, ConstantInt::get(II->getArgOperand(1)->getType(), NewClassMask));
1523       return replaceInstUsesWith(BO, II);
1524     }
1525 
1526     if (IsRHSClass) {
1527       auto *II = cast<IntrinsicInst>(Op1);
1528       II->setArgOperand(
1529           1, ConstantInt::get(II->getArgOperand(1)->getType(), NewClassMask));
1530       return replaceInstUsesWith(BO, II);
1531     }
1532 
1533     CallInst *NewClass =
1534         Builder.CreateIntrinsic(Intrinsic::is_fpclass, {ClassVal0->getType()},
1535                                 {ClassVal0, Builder.getInt32(NewClassMask)});
1536     return replaceInstUsesWith(BO, NewClass);
1537   }
1538 
1539   return nullptr;
1540 }
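
// Example using the standard FPClassTest mask values (fcNan == 0x3,
// fcInf == 0x204):
//   or (is_fpclass X, 0x3), (is_fpclass X, 0x204) --> is_fpclass X, 0x207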
1541 
1542 /// Look for the pattern that conditionally negates a value via math operations:
1543 ///   cond.splat = sext i1 cond
1544 ///   sub = add cond.splat, x
1545 ///   xor = xor sub, cond.splat
1546 /// and rewrite it to do the same, but via logical operations:
1547 ///   x.neg = sub 0, x
1548 ///   res = select i1 cond, x.neg, x
1549 Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
1550     BinaryOperator &I) {
1551   assert(I.getOpcode() == BinaryOperator::Xor && "Only for xor!");
1552   Value *Cond, *X;
1553   // Operand complexity ordering guarantees the sext is operand 1 of the xor.
1554   if (!match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())) ||
1555       !match(I.getOperand(1), m_SExt(m_Value(Cond))) ||
1556       !Cond->getType()->isIntOrIntVectorTy(1) ||
1557       !match(I.getOperand(0), m_c_Add(m_SExt(m_Deferred(Cond)), m_Value(X))))
1558     return nullptr;
1559   return SelectInst::Create(Cond, Builder.CreateNeg(X, X->getName() + ".neg"),
1560                             X);
1561 }
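
// Worked example of the rewrite (illustrative IR):
//   %splat = sext i1 %cond to i32
//   %sub   = add i32 %splat, %x     ; %x - 1 when %cond, else %x
//   %xor   = xor i32 %sub, %splat   ; ~(%x - 1) == -%x when %cond, else %x
// becomes:
//   %x.neg = sub i32 0, %x
//   %res   = select i1 %cond, i32 %x.neg, i32 %x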
1562 
1563 /// This is a limited reassociation for a special case (see above) where we are
1564 /// checking if two values are either both NAN (unordered) or not-NAN (ordered).
1565 /// This could be handled more generally in '-reassociate', but it seems like
1566 /// an unlikely pattern for a large number of logic ops and fcmps.
1567 static Instruction *reassociateFCmps(BinaryOperator &BO,
1568                                      InstCombiner::BuilderTy &Builder) {
1569   Instruction::BinaryOps Opcode = BO.getOpcode();
1570   assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1571          "Expecting and/or op for fcmp transform");
1572 
1573   // There are 4 commuted variants of the pattern. Canonicalize operands of this
1574   // logic op so an fcmp is operand 0 and a matching logic op is operand 1.
1575   Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X;
1576   FCmpInst::Predicate Pred;
1577   if (match(Op1, m_FCmp(Pred, m_Value(), m_AnyZeroFP())))
1578     std::swap(Op0, Op1);
1579 
1580   // Match inner binop and the predicate for combining 2 NAN checks into 1.
1581   Value *BO10, *BO11;
1582   FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
1583                                                            : FCmpInst::FCMP_UNO;
1584   if (!match(Op0, m_FCmp(Pred, m_Value(X), m_AnyZeroFP())) || Pred != NanPred ||
1585       !match(Op1, m_BinOp(Opcode, m_Value(BO10), m_Value(BO11))))
1586     return nullptr;
1587 
1588   // The inner logic op must have a matching fcmp operand.
1589   Value *Y;
1590   if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
1591       Pred != NanPred || X->getType() != Y->getType())
1592     std::swap(BO10, BO11);
1593 
1594   if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
1595       Pred != NanPred || X->getType() != Y->getType())
1596     return nullptr;
1597 
1598   // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
1599   // or  (fcmp uno X, 0), (or  (fcmp uno Y, 0), Z) --> or  (fcmp uno X, Y), Z
1600   Value *NewFCmp = Builder.CreateFCmp(Pred, X, Y);
1601   if (auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) {
1602     // Intersect FMF from the 2 source fcmps.
1603     NewFCmpInst->copyIRFlags(Op0);
1604     NewFCmpInst->andIRFlags(BO10);
1605   }
1606   return BinaryOperator::Create(Opcode, NewFCmp, BO11);
1607 }
1608 
1609 /// Match variations of De Morgan's Laws:
1610 /// (~A & ~B) == (~(A | B))
1611 /// (~A | ~B) == (~(A & B))
1612 static Instruction *matchDeMorgansLaws(BinaryOperator &I,
1613                                        InstCombiner &IC) {
1614   const Instruction::BinaryOps Opcode = I.getOpcode();
1615   assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1616          "Trying to match De Morgan's Laws with something other than and/or");
1617 
1618   // Flip the logic operation.
1619   const Instruction::BinaryOps FlippedOpcode =
1620       (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1621 
1622   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1623   Value *A, *B;
1624   if (match(Op0, m_OneUse(m_Not(m_Value(A)))) &&
1625       match(Op1, m_OneUse(m_Not(m_Value(B)))) &&
1626       !IC.isFreeToInvert(A, A->hasOneUse()) &&
1627       !IC.isFreeToInvert(B, B->hasOneUse())) {
1628     Value *AndOr =
1629         IC.Builder.CreateBinOp(FlippedOpcode, A, B, I.getName() + ".demorgan");
1630     return BinaryOperator::CreateNot(AndOr);
1631   }
1632 
1633   // The 'not' ops may require reassociation.
1634   // (A & ~B) & ~C --> A & ~(B | C)
1635   // (~B & A) & ~C --> A & ~(B | C)
1636   // (A | ~B) | ~C --> A | ~(B & C)
1637   // (~B | A) | ~C --> A | ~(B & C)
1638   Value *C;
1639   if (match(Op0, m_OneUse(m_c_BinOp(Opcode, m_Value(A), m_Not(m_Value(B))))) &&
1640       match(Op1, m_Not(m_Value(C)))) {
1641     Value *FlippedBO = IC.Builder.CreateBinOp(FlippedOpcode, B, C);
1642     return BinaryOperator::Create(Opcode, A, IC.Builder.CreateNot(FlippedBO));
1643   }
1644 
1645   return nullptr;
1646 }
1647 
1648 bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
1649   Value *CastSrc = CI->getOperand(0);
1650 
1651   // Noop casts and casts of constants should be eliminated trivially.
1652   if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
1653     return false;
1654 
1655   // If this cast is paired with another cast that can be eliminated, we prefer
1656   // to have it eliminated.
1657   if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
1658     if (isEliminableCastPair(PrecedingCI, CI))
1659       return false;
1660 
1661   return true;
1662 }
1663 
1664 /// Fold {and,or,xor} (cast X), C.
1665 static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
1666                                           InstCombinerImpl &IC) {
1667   Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
1668   if (!C)
1669     return nullptr;
1670 
1671   auto LogicOpc = Logic.getOpcode();
1672   Type *DestTy = Logic.getType();
1673   Type *SrcTy = Cast->getSrcTy();
1674 
1675   // Move the logic operation ahead of a zext or sext if the constant is
1676   // unchanged in the smaller source type. Performing the logic in a smaller
1677   // type may provide more information to later folds, and the smaller logic
1678   // instruction may be cheaper (particularly in the case of vectors).
1679   Value *X;
1680   if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
1681     if (Constant *TruncC = IC.getLosslessUnsignedTrunc(C, SrcTy)) {
1682       // LogicOpc (zext X), C --> zext (LogicOpc X, C)
1683       Value *NewOp = IC.Builder.CreateBinOp(LogicOpc, X, TruncC);
1684       return new ZExtInst(NewOp, DestTy);
1685     }
1686   }
1687 
1688   if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) {
1689     if (Constant *TruncC = IC.getLosslessSignedTrunc(C, SrcTy)) {
1690       // LogicOpc (sext X), C --> sext (LogicOpc X, C)
1691       Value *NewOp = IC.Builder.CreateBinOp(LogicOpc, X, TruncC);
1692       return new SExtInst(NewOp, DestTy);
1693     }
1694   }
1695 
1696   return nullptr;
1697 }
1698 
1699 /// Fold {and,or,xor} (cast X), Y.
1700 Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
1701   auto LogicOpc = I.getOpcode();
1702   assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1703 
1704   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1705 
1706   // fold bitwise(A >> (BW - 1), zext(icmp)), where BW is the scalar bit
1707   // width of the type of A:
1708   //   -> bitwise(zext(A < 0), zext(icmp))
1709   //   -> zext(bitwise(A < 0, icmp))
1710   auto FoldBitwiseICmpZeroWithICmp = [&](Value *Op0,
1711                                          Value *Op1) -> Instruction * {
1712     ICmpInst::Predicate Pred;
1713     Value *A;
1714     bool IsMatched =
1715         match(Op0,
1716               m_OneUse(m_LShr(
1717                   m_Value(A),
1718                   m_SpecificInt(Op0->getType()->getScalarSizeInBits() - 1)))) &&
1719         match(Op1, m_OneUse(m_ZExt(m_ICmp(Pred, m_Value(), m_Value()))));
1720 
1721     if (!IsMatched)
1722       return nullptr;
1723 
1724     auto *ICmpL =
1725         Builder.CreateICmpSLT(A, Constant::getNullValue(A->getType()));
1726     auto *ICmpR = cast<ZExtInst>(Op1)->getOperand(0);
1727     auto *BitwiseOp = Builder.CreateBinOp(LogicOpc, ICmpL, ICmpR);
1728 
1729     return new ZExtInst(BitwiseOp, Op0->getType());
1730   };
1731 
1732   if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op0, Op1))
1733     return Ret;
1734 
1735   if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op1, Op0))
1736     return Ret;
1737 
1738   CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1739   if (!Cast0)
1740     return nullptr;
1741 
1742   // This must be a cast from an integer or integer vector source type to allow
1743   // transformation of the logic operation to the source type.
1744   Type *DestTy = I.getType();
1745   Type *SrcTy = Cast0->getSrcTy();
1746   if (!SrcTy->isIntOrIntVectorTy())
1747     return nullptr;
1748 
1749   if (Instruction *Ret = foldLogicCastConstant(I, Cast0, *this))
1750     return Ret;
1751 
1752   CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1753   if (!Cast1)
1754     return nullptr;
1755 
1756   // Both operands of the logic operation are casts. The casts must be the
1757   // same kind for reduction.
1758   Instruction::CastOps CastOpcode = Cast0->getOpcode();
1759   if (CastOpcode != Cast1->getOpcode())
1760     return nullptr;
1761 
1762   // If the source types do not match, but the casts are matching extends, we
1763   // can still narrow the logic op.
1764   if (SrcTy != Cast1->getSrcTy()) {
1765     Value *X, *Y;
1766     if (match(Cast0, m_OneUse(m_ZExtOrSExt(m_Value(X)))) &&
1767         match(Cast1, m_OneUse(m_ZExtOrSExt(m_Value(Y))))) {
1768       // Cast the narrower source to the wider source type.
1769       unsigned XNumBits = X->getType()->getScalarSizeInBits();
1770       unsigned YNumBits = Y->getType()->getScalarSizeInBits();
1771       if (XNumBits < YNumBits)
1772         X = Builder.CreateCast(CastOpcode, X, Y->getType());
1773       else
1774         Y = Builder.CreateCast(CastOpcode, Y, X->getType());
1775       // Do the logic op in the intermediate width, then widen more.
1776       Value *NarrowLogic = Builder.CreateBinOp(LogicOpc, X, Y);
1777       return CastInst::Create(CastOpcode, NarrowLogic, DestTy);
1778     }
1779 
1780     // Give up for other cast opcodes.
1781     return nullptr;
1782   }
1783 
1784   Value *Cast0Src = Cast0->getOperand(0);
1785   Value *Cast1Src = Cast1->getOperand(0);
1786 
1787   // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
1788   if ((Cast0->hasOneUse() || Cast1->hasOneUse()) &&
1789       shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1790     Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1791                                        I.getName());
1792     return CastInst::Create(CastOpcode, NewOp, DestTy);
1793   }
1794 
1795   return nullptr;
1796 }
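
// Two illustrative folds from above:
//   or (lshr i32 %a, 31), (zext (icmp eq i32 %p, %q) to i32)
//     --> zext (or (icmp slt i32 %a, 0), (icmp eq i32 %p, %q)) to i32
//   and (zext i8 %x to i32), (zext i16 %y to i32)
//     --> zext (and (zext i8 %x to i16), %y) to i32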
1797 
1798 static Instruction *foldAndToXor(BinaryOperator &I,
1799                                  InstCombiner::BuilderTy &Builder) {
1800   assert(I.getOpcode() == Instruction::And);
1801   Value *Op0 = I.getOperand(0);
1802   Value *Op1 = I.getOperand(1);
1803   Value *A, *B;
1804 
1805   // Operand complexity canonicalization guarantees that the 'or' is Op0.
1806   // (A | B) & ~(A & B) --> A ^ B
1807   // (A | B) & ~(B & A) --> A ^ B
1808   if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)),
1809                         m_Not(m_c_And(m_Deferred(A), m_Deferred(B))))))
1810     return BinaryOperator::CreateXor(A, B);
1811 
1812   // (A | ~B) & (~A | B) --> ~(A ^ B)
1813   // (A | ~B) & (B | ~A) --> ~(A ^ B)
1814   // (~B | A) & (~A | B) --> ~(A ^ B)
1815   // (~B | A) & (B | ~A) --> ~(A ^ B)
1816   if (Op0->hasOneUse() || Op1->hasOneUse())
1817     if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))),
1818                           m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
1819       return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1820 
1821   return nullptr;
1822 }
1823 
1824 static Instruction *foldOrToXor(BinaryOperator &I,
1825                                 InstCombiner::BuilderTy &Builder) {
1826   assert(I.getOpcode() == Instruction::Or);
1827   Value *Op0 = I.getOperand(0);
1828   Value *Op1 = I.getOperand(1);
1829   Value *A, *B;
1830 
1831   // Operand complexity canonicalization guarantees that the 'and' is Op0.
1832   // (A & B) | ~(A | B) --> ~(A ^ B)
1833   // (A & B) | ~(B | A) --> ~(A ^ B)
1834   if (Op0->hasOneUse() || Op1->hasOneUse())
1835     if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1836         match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1837       return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1838 
1839   // Operand complexity canonicalization guarantees that the 'xor' is Op0.
1840   // (A ^ B) | ~(A | B) --> ~(A & B)
1841   // (A ^ B) | ~(B | A) --> ~(A & B)
1842   if (Op0->hasOneUse() || Op1->hasOneUse())
1843     if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
1844         match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1845       return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
1846 
1847   // (A & ~B) | (~A & B) --> A ^ B
1848   // (A & ~B) | (B & ~A) --> A ^ B
1849   // (~B & A) | (~A & B) --> A ^ B
1850   // (~B & A) | (B & ~A) --> A ^ B
1851   if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
1852       match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
1853     return BinaryOperator::CreateXor(A, B);
1854 
1855   return nullptr;
1856 }
1857 
1858 /// Return true if a constant shift amount is always less than the specified
1859 /// bit-width. If not, the shift could create poison in the narrower type.
1860 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
1861   APInt Threshold(C->getType()->getScalarSizeInBits(), BitWidth);
1862   return match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold));
1863 }
1864 
1865 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
1866 /// a common zext operand: and (binop (zext X), C), (zext X).
1867 Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) {
1868   // This transform could also apply to {or, and, xor}, but there are better
1869   // folds for those cases, so we don't expect those patterns here. AShr is not
1870   // handled because it should always be transformed to LShr in this sequence.
1871   // The subtract transform is different because it has a constant on the left.
1872   // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
1873   Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
1874   Constant *C;
1875   if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) &&
1876       !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) &&
1877       !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) &&
1878       !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) &&
1879       !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1)))))
1880     return nullptr;
1881 
1882   Value *X;
1883   if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3))
1884     return nullptr;
1885 
1886   Type *Ty = And.getType();
1887   if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
1888     return nullptr;
1889 
1890   // If we're narrowing a shift, the shift amount must be safe (less than the
1891   // width) in the narrower type. If the shift amount is greater, instsimplify
1892   // usually handles that case, but we can't guarantee/assert it.
1893   Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode();
1894   if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1895     if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits()))
1896       return nullptr;
1897 
1898   // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
1899   // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
1900   Value *NewC = ConstantExpr::getTrunc(C, X->getType());
1901   Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
1902                                          : Builder.CreateBinOp(Opc, X, NewC);
1903   return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty);
1904 }
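
// Worked example (i8 source; assumes shouldChangeType approves i32 -> i8):
//   and (mul (zext i8 %x to i32), 3), (zext i8 %x to i32)
//     --> zext (and (mul i8 %x, 3), %x) to i32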
1905 
1906 /// Try folding relatively complex patterns for both And and Or operations
1907 /// with all And and Or swapped.
1908 static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
1909                                              InstCombiner::BuilderTy &Builder) {
1910   const Instruction::BinaryOps Opcode = I.getOpcode();
1911   assert(Opcode == Instruction::And || Opcode == Instruction::Or);
1912 
1913   // Flip the logic operation.
1914   const Instruction::BinaryOps FlippedOpcode =
1915       (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1916 
1917   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1918   Value *A, *B, *C, *X, *Y, *Dummy;
1919 
1920   // Match following expressions:
1921   // (~(A | B) & C)
1922   // (~(A & B) | C)
1923   // Captures X = ~(A | B) or ~(A & B)
1924   const auto matchNotOrAnd =
1925       [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C,
1926                               Value *&X, bool CountUses = false) -> bool {
1927     if (CountUses && !Op->hasOneUse())
1928       return false;
1929 
1930     if (match(Op, m_c_BinOp(FlippedOpcode,
1931                             m_CombineAnd(m_Value(X),
1932                                          m_Not(m_c_BinOp(Opcode, m_A, m_B))),
1933                             m_C)))
1934       return !CountUses || X->hasOneUse();
1935 
1936     return false;
1937   };
1938 
1939   // (~(A | B) & C) | ... --> ...
1940   // (~(A & B) | C) & ... --> ...
1941   // TODO: The one-use checks are conservative. We only need to check that
1942   //       the total number of multiply-used values does not exceed the
1943   //       reduction in operations.
1944   if (matchNotOrAnd(Op0, m_Value(A), m_Value(B), m_Value(C), X)) {
1945     // (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A
1946     // (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A)
1947     if (matchNotOrAnd(Op1, m_Specific(A), m_Specific(C), m_Specific(B), Dummy,
1948                       true)) {
1949       Value *Xor = Builder.CreateXor(B, C);
1950       return (Opcode == Instruction::Or)
1951                  ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(A))
1952                  : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, A));
1953     }
1954 
1955     // (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B
1956     // (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B)
1957     if (matchNotOrAnd(Op1, m_Specific(B), m_Specific(C), m_Specific(A), Dummy,
1958                       true)) {
1959       Value *Xor = Builder.CreateXor(A, C);
1960       return (Opcode == Instruction::Or)
1961                  ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(B))
1962                  : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, B));
1963     }
1964 
1965     // (~(A | B) & C) | ~(A | C) --> ~((B & C) | A)
1966     // (~(A & B) | C) & ~(A & C) --> ~((B | C) & A)
1967     if (match(Op1, m_OneUse(m_Not(m_OneUse(
1968                        m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
1969       return BinaryOperator::CreateNot(Builder.CreateBinOp(
1970           Opcode, Builder.CreateBinOp(FlippedOpcode, B, C), A));
1971 
1972     // (~(A | B) & C) | ~(B | C) --> ~((A & C) | B)
1973     // (~(A & B) | C) & ~(B & C) --> ~((A | C) & B)
1974     if (match(Op1, m_OneUse(m_Not(m_OneUse(
1975                        m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)))))))
1976       return BinaryOperator::CreateNot(Builder.CreateBinOp(
1977           Opcode, Builder.CreateBinOp(FlippedOpcode, A, C), B));
1978 
1979     // (~(A | B) & C) | ~(C | (A ^ B)) --> ~((A | B) & (C | (A ^ B)))
1980     // Note, the pattern with swapped and/or is not handled because the
1981     // result is more undefined than a source:
1982     // (~(A & B) | C) & ~(C & (A ^ B)) --> (A ^ B ^ C) | ~(A | C) is invalid.
1983     if (Opcode == Instruction::Or && Op0->hasOneUse() &&
1984         match(Op1, m_OneUse(m_Not(m_CombineAnd(
1985                        m_Value(Y),
1986                        m_c_BinOp(Opcode, m_Specific(C),
1987                                  m_c_Xor(m_Specific(A), m_Specific(B)))))))) {
1988       // X = ~(A | B)
1989       // Y = C | (A ^ B)
1990       Value *Or = cast<BinaryOperator>(X)->getOperand(0);
1991       return BinaryOperator::CreateNot(Builder.CreateAnd(Or, Y));
1992     }
1993   }
1994 
1995   // (~A & B & C) | ... --> ...
1996   // (~A | B | C) | ... --> ...
1997   // TODO: The one-use checks are conservative. We only need to check that
1998   //       the total number of multiply-used values does not exceed the
1999   //       reduction in operations.
2000   if (match(Op0,
2001             m_OneUse(m_c_BinOp(FlippedOpcode,
2002                                m_BinOp(FlippedOpcode, m_Value(B), m_Value(C)),
2003                                m_CombineAnd(m_Value(X), m_Not(m_Value(A)))))) ||
2004       match(Op0, m_OneUse(m_c_BinOp(
2005                      FlippedOpcode,
2006                      m_c_BinOp(FlippedOpcode, m_Value(C),
2007                                m_CombineAnd(m_Value(X), m_Not(m_Value(A)))),
2008                      m_Value(B))))) {
2009     // X = ~A
2010     // (~A & B & C) | ~(A | B | C) --> ~(A | (B ^ C))
2011     // (~A | B | C) & ~(A & B & C) --> (~A | (B ^ C))
2012     if (match(Op1, m_OneUse(m_Not(m_c_BinOp(
2013                        Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)),
2014                        m_Specific(C))))) ||
2015         match(Op1, m_OneUse(m_Not(m_c_BinOp(
2016                        Opcode, m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)),
2017                        m_Specific(A))))) ||
2018         match(Op1, m_OneUse(m_Not(m_c_BinOp(
2019                        Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)),
2020                        m_Specific(B)))))) {
2021       Value *Xor = Builder.CreateXor(B, C);
2022       return (Opcode == Instruction::Or)
2023                  ? BinaryOperator::CreateNot(Builder.CreateOr(Xor, A))
2024                  : BinaryOperator::CreateOr(Xor, X);
2025     }
2026 
2027     // (~A & B & C) | ~(A | B) --> (C | ~B) & ~A
2028     // (~A | B | C) & ~(A & B) --> (C & ~B) | ~A
2029     if (match(Op1, m_OneUse(m_Not(m_OneUse(
2030                        m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)))))))
2031       return BinaryOperator::Create(
2032           FlippedOpcode, Builder.CreateBinOp(Opcode, C, Builder.CreateNot(B)),
2033           X);
2034 
2035     // (~A & B & C) | ~(A | C) --> (B | ~C) & ~A
2036     // (~A | B | C) & ~(A & C) --> (B & ~C) | ~A
2037     if (match(Op1, m_OneUse(m_Not(m_OneUse(
2038                        m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
2039       return BinaryOperator::Create(
2040           FlippedOpcode, Builder.CreateBinOp(Opcode, B, Builder.CreateNot(C)),
2041           X);
2042   }
2043 
2044   return nullptr;
2045 }
2046 
2047 /// Try to reassociate a pair of binops so that values with one use only are
2048 /// part of the same instruction. This may enable folds that are limited with
2049 /// multi-use restrictions and makes it more likely to match other patterns that
2050 /// are looking for a common operand.
2051 static Instruction *reassociateForUses(BinaryOperator &BO,
2052                                        InstCombinerImpl::BuilderTy &Builder) {
2053   Instruction::BinaryOps Opcode = BO.getOpcode();
2054   Value *X, *Y, *Z;
2055   if (match(&BO,
2056             m_c_BinOp(Opcode, m_OneUse(m_BinOp(Opcode, m_Value(X), m_Value(Y))),
2057                       m_OneUse(m_Value(Z))))) {
2058     if (!isa<Constant>(X) && !isa<Constant>(Y) && !isa<Constant>(Z)) {
2059       // (X op Y) op Z --> (Y op Z) op X
2060       if (!X->hasOneUse()) {
2061         Value *YZ = Builder.CreateBinOp(Opcode, Y, Z);
2062         return BinaryOperator::Create(Opcode, YZ, X);
2063       }
2064       // (X op Y) op Z --> (X op Z) op Y
2065       if (!Y->hasOneUse()) {
2066         Value *XZ = Builder.CreateBinOp(Opcode, X, Z);
2067         return BinaryOperator::Create(Opcode, XZ, Y);
2068       }
2069     }
2070   }
2071 
2072   return nullptr;
2073 }
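
// e.g. when %x has extra uses but the inner op and %z are single-use:
//   or (or %x, %y), %z --> or (or %y, %z), %x
// so the single-use values end up in the same instruction.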
2074 
2075 // Match
2076 // (X + C2) | C
2077 // (X + C2) ^ C
2078 // (X + C2) & C
2079 // and convert to do the bitwise logic first:
2080 // (X | C) + C2
2081 // (X ^ C) + C2
2082 // (X & C) + C2
2083 // iff the bits affected by the logic op are all below the lowest bit affected by the math op
2084 static Instruction *canonicalizeLogicFirst(BinaryOperator &I,
2085                                            InstCombiner::BuilderTy &Builder) {
2086   Type *Ty = I.getType();
2087   Instruction::BinaryOps OpC = I.getOpcode();
2088   Value *Op0 = I.getOperand(0);
2089   Value *Op1 = I.getOperand(1);
2090   Value *X;
2091   const APInt *C, *C2;
2092 
2093   if (!(match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C2)))) &&
2094         match(Op1, m_APInt(C))))
2095     return nullptr;
2096 
2097   unsigned Width = Ty->getScalarSizeInBits();
2098   unsigned LastOneMath = Width - C2->countr_zero();
2099 
2100   switch (OpC) {
2101   case Instruction::And:
2102     if (C->countl_one() < LastOneMath)
2103       return nullptr;
2104     break;
2105   case Instruction::Xor:
2106   case Instruction::Or:
2107     if (C->countl_zero() < LastOneMath)
2108       return nullptr;
2109     break;
2110   default:
2111     llvm_unreachable("Unexpected BinaryOp!");
2112   }
2113 
2114   Value *NewBinOp = Builder.CreateBinOp(OpC, X, ConstantInt::get(Ty, *C));
2115   return BinaryOperator::CreateWithCopiedFlags(Instruction::Add, NewBinOp,
2116                                                ConstantInt::get(Ty, *C2), Op0);
2117 }
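
// Worked example with i32 values:
//   (X + 32) | 7 --> (X | 7) + 32
// C2 == 32 has 5 trailing zeros, so the add only changes bits 5 and above,
// while C == 7 only sets bits 0..2; the two operations therefore commute.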
2118 
2119 // binop(shift(ShiftedC1, ShAmt), shift(ShiftedC2, add(ShAmt, AddC))) ->
2120 // shift(binop(ShiftedC1, shift(ShiftedC2, AddC)), ShAmt)
2121 // where both shifts are the same and AddC is a valid shift amount.
2122 Instruction *InstCombinerImpl::foldBinOpOfDisplacedShifts(BinaryOperator &I) {
2123   assert((I.isBitwiseLogicOp() || I.getOpcode() == Instruction::Add) &&
2124          "Unexpected opcode");
2125 
2126   Value *ShAmt;
2127   Constant *ShiftedC1, *ShiftedC2, *AddC;
2128   Type *Ty = I.getType();
2129   unsigned BitWidth = Ty->getScalarSizeInBits();
2130   if (!match(&I, m_c_BinOp(m_Shift(m_ImmConstant(ShiftedC1), m_Value(ShAmt)),
2131                            m_Shift(m_ImmConstant(ShiftedC2),
2132                                    m_AddLike(m_Deferred(ShAmt),
2133                                              m_ImmConstant(AddC))))))
2134     return nullptr;
2135 
2136   // Make sure the add constant is a valid shift amount.
2137   if (!match(AddC,
2138              m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(BitWidth, BitWidth))))
2139     return nullptr;
2140 
2141   // Avoid constant expressions.
2142   auto *Op0Inst = dyn_cast<Instruction>(I.getOperand(0));
2143   auto *Op1Inst = dyn_cast<Instruction>(I.getOperand(1));
2144   if (!Op0Inst || !Op1Inst)
2145     return nullptr;
2146 
2147   // Both shifts must be the same.
2148   Instruction::BinaryOps ShiftOp =
2149       static_cast<Instruction::BinaryOps>(Op0Inst->getOpcode());
2150   if (ShiftOp != Op1Inst->getOpcode())
2151     return nullptr;
2152 
2153   // For adds, only left shifts are supported.
2154   if (I.getOpcode() == Instruction::Add && ShiftOp != Instruction::Shl)
2155     return nullptr;
2156 
2157   Value *NewC = Builder.CreateBinOp(
2158       I.getOpcode(), ShiftedC1, Builder.CreateBinOp(ShiftOp, ShiftedC2, AddC));
2159   return BinaryOperator::Create(ShiftOp, NewC, ShAmt);
2160 }
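
// e.g. (1 << %sh) | (3 << (%sh + 2)) --> 13 << %sh
//      since 1 | (3 << 2) == 13 and 2 is a valid shift amount.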
2161 
2162 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2163 // here. We should standardize that construct where it is needed or choose some
2164 // other way to ensure that commutated variants of patterns are not missed.
2165 Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
2166   Type *Ty = I.getType();
2167 
2168   if (Value *V = simplifyAndInst(I.getOperand(0), I.getOperand(1),
2169                                  SQ.getWithInstruction(&I)))
2170     return replaceInstUsesWith(I, V);
2171 
2172   if (SimplifyAssociativeOrCommutative(I))
2173     return &I;
2174 
2175   if (Instruction *X = foldVectorBinop(I))
2176     return X;
2177 
2178   if (Instruction *Phi = foldBinopWithPhiOperands(I))
2179     return Phi;
2180 
2181   // See if we can simplify any instructions used by the instruction whose sole
2182   // purpose is to compute bits we don't care about.
2183   if (SimplifyDemandedInstructionBits(I))
2184     return &I;
2185 
2186   // Do this before using distributive laws to catch simple and/or/not patterns.
2187   if (Instruction *Xor = foldAndToXor(I, Builder))
2188     return Xor;
2189 
2190   if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
2191     return X;
2192 
2193   // (A|B)&(A|C) -> A|(B&C) etc
2194   if (Value *V = foldUsingDistributiveLaws(I))
2195     return replaceInstUsesWith(I, V);
2196 
2197   if (Value *V = SimplifyBSwap(I, Builder))
2198     return replaceInstUsesWith(I, V);
2199 
2200   if (Instruction *R = foldBinOpShiftWithShift(I))
2201     return R;
2202 
2203   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2204 
2205   Value *X, *Y;
2206   if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) &&
2207       match(Op1, m_One())) {
2208     // (1 << X) & 1 --> zext(X == 0)
2209     // (1 >> X) & 1 --> zext(X == 0)
2210     Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, 0));
2211     return new ZExtInst(IsZero, Ty);
2212   }
2213 
2214   // (-(X & 1)) & Y --> (X & 1) == 0 ? 0 : Y
2215   Value *Neg;
2216   if (match(&I,
2217             m_c_And(m_CombineAnd(m_Value(Neg),
2218                                  m_OneUse(m_Neg(m_And(m_Value(), m_One())))),
2219                     m_Value(Y)))) {
2220     Value *Cmp = Builder.CreateIsNull(Neg);
2221     return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Y);
2222   }
2223 
2224   // Canonicalize:
2225   // (X +/- Y) & Y --> ~X & Y when Y is a power of 2.
2226   if (match(&I, m_c_And(m_Value(Y), m_OneUse(m_CombineOr(
2227                                         m_c_Add(m_Value(X), m_Deferred(Y)),
2228                                         m_Sub(m_Value(X), m_Deferred(Y)))))) &&
2229       isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, /*Depth*/ 0, &I))
2230     return BinaryOperator::CreateAnd(Builder.CreateNot(X), Y);
2231 
2232   const APInt *C;
2233   if (match(Op1, m_APInt(C))) {
2234     const APInt *XorC;
2235     if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
2236       // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
2237       Constant *NewC = ConstantInt::get(Ty, *C & *XorC);
2238       Value *And = Builder.CreateAnd(X, Op1);
2239       And->takeName(Op0);
2240       return BinaryOperator::CreateXor(And, NewC);
2241     }
2242 
2243     const APInt *OrC;
2244     if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
2245       // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
2246       // NOTE: This reduces the number of bits set in the & mask, which
2247       // can expose opportunities for store narrowing for scalars.
2248       // NOTE: SimplifyDemandedBits should have already removed bits from C1
2249       // that aren't set in C2, meaning we could replace (C1&C2) with C1
2250       // above, but this feels safer.
2251       APInt Together = *C & *OrC;
2252       Value *And = Builder.CreateAnd(X, ConstantInt::get(Ty, Together ^ *C));
2253       And->takeName(Op0);
2254       return BinaryOperator::CreateOr(And, ConstantInt::get(Ty, Together));
2255     }
2256 
2257     unsigned Width = Ty->getScalarSizeInBits();
2258     const APInt *ShiftC;
2259     if (match(Op0, m_OneUse(m_SExt(m_AShr(m_Value(X), m_APInt(ShiftC))))) &&
2260         ShiftC->ult(Width)) {
2261       if (*C == APInt::getLowBitsSet(Width, Width - ShiftC->getZExtValue())) {
2262         // We are clearing high bits that were potentially set by sext+ashr:
2263         // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC
2264         Value *Sext = Builder.CreateSExt(X, Ty);
2265         Constant *ShAmtC = ConstantInt::get(Ty, ShiftC->zext(Width));
2266         return BinaryOperator::CreateLShr(Sext, ShAmtC);
2267       }
2268     }
2269 
2270     // If this 'and' clears the sign-bits added by ashr, replace with lshr:
2271     // and (ashr X, ShiftC), C --> lshr X, ShiftC
2272     if (match(Op0, m_AShr(m_Value(X), m_APInt(ShiftC))) && ShiftC->ult(Width) &&
2273         C->isMask(Width - ShiftC->getZExtValue()))
2274       return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, *ShiftC));
2275 
2276     const APInt *AddC;
2277     if (match(Op0, m_Add(m_Value(X), m_APInt(AddC)))) {
2278       // If we are masking the result of the add down to exactly one bit and
2279       // the constant we are adding has no bits set below that bit, then the
2280       // add is flipping a single bit. Example:
2281       // (X + 4) & 4 --> (X & 4) ^ 4
2282       if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) {
2283         assert((*C & *AddC) != 0 && "Expected common bit");
2284         Value *NewAnd = Builder.CreateAnd(X, Op1);
2285         return BinaryOperator::CreateXor(NewAnd, Op1);
2286       }
2287     }
2288 
2289     // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the
2290     // bitwidth of X and OP behaves well when given trunc(C1) and X.
2291     auto isNarrowableBinOpcode = [](BinaryOperator *B) {
2292       switch (B->getOpcode()) {
2293       case Instruction::Xor:
2294       case Instruction::Or:
2295       case Instruction::Mul:
2296       case Instruction::Add:
2297       case Instruction::Sub:
2298         return true;
2299       default:
2300         return false;
2301       }
2302     };
2303     BinaryOperator *BO;
2304     if (match(Op0, m_OneUse(m_BinOp(BO))) && isNarrowableBinOpcode(BO)) {
2305       Instruction::BinaryOps BOpcode = BO->getOpcode();
2306       Value *X;
2307       const APInt *C1;
2308       // TODO: The one-use restrictions could be relaxed a little if the AND
2309       // is going to be removed.
2310       // Try to narrow the 'and' and a binop with constant operand:
2311       // and (bo (zext X), C1), C --> zext (and (bo X, TruncC1), TruncC)
2312       if (match(BO, m_c_BinOp(m_OneUse(m_ZExt(m_Value(X))), m_APInt(C1))) &&
2313           C->isIntN(X->getType()->getScalarSizeInBits())) {
2314         unsigned XWidth = X->getType()->getScalarSizeInBits();
2315         Constant *TruncC1 = ConstantInt::get(X->getType(), C1->trunc(XWidth));
2316         Value *BinOp = isa<ZExtInst>(BO->getOperand(0))
2317                            ? Builder.CreateBinOp(BOpcode, X, TruncC1)
2318                            : Builder.CreateBinOp(BOpcode, TruncC1, X);
2319         Constant *TruncC = ConstantInt::get(X->getType(), C->trunc(XWidth));
2320         Value *And = Builder.CreateAnd(BinOp, TruncC);
2321         return new ZExtInst(And, Ty);
2322       }
2323 
2324       // Similar to above: if the mask matches the zext input width, then the
2325       // 'and' can be eliminated, so we can truncate the other variable op:
2326       // and (bo (zext X), Y), C --> zext (bo X, (trunc Y))
2327       if (isa<Instruction>(BO->getOperand(0)) &&
2328           match(BO->getOperand(0), m_OneUse(m_ZExt(m_Value(X)))) &&
2329           C->isMask(X->getType()->getScalarSizeInBits())) {
2330         Y = BO->getOperand(1);
2331         Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr");
2332         Value *NewBO =
2333             Builder.CreateBinOp(BOpcode, X, TrY, BO->getName() + ".narrow");
2334         return new ZExtInst(NewBO, Ty);
2335       }
2336       // and (bo Y, (zext X)), C --> zext (bo (trunc Y), X)
2337       if (isa<Instruction>(BO->getOperand(1)) &&
2338           match(BO->getOperand(1), m_OneUse(m_ZExt(m_Value(X)))) &&
2339           C->isMask(X->getType()->getScalarSizeInBits())) {
2340         Y = BO->getOperand(0);
2341         Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr");
2342         Value *NewBO =
2343             Builder.CreateBinOp(BOpcode, TrY, X, BO->getName() + ".narrow");
2344         return new ZExtInst(NewBO, Ty);
2345       }
2346     }
2347 
2348     // This is intentionally placed after the narrowing transforms for
2349     // efficiency (transform directly to the narrow logic op if possible).
2350     // If the mask is only needed on one incoming arm, push the 'and' op up.
2351     if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
2352         match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2353       APInt NotAndMask(~(*C));
2354       BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
2355       if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
2356         // Not masking anything out for the LHS, move mask to RHS.
2357         // and ({x}or X, Y), C --> {x}or X, (and Y, C)
2358         Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
2359         return BinaryOperator::Create(BinOp, X, NewRHS);
2360       }
2361       if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
2362         // Not masking anything out for the RHS, move mask to LHS.
2363         // and ({x}or X, Y), C --> {x}or (and X, C), Y
2364         Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
2365         return BinaryOperator::Create(BinOp, NewLHS, Y);
2366       }
2367     }
2368 
2369     // When the mask is a power-of-2 constant and op0 is a shifted-power-of-2
2370     // constant, test if the shift amount equals the offset bit index:
2371     // (ShiftC << X) & C --> X == (log2(C) - log2(ShiftC)) ? C : 0
2372     // (ShiftC >> X) & C --> X == (log2(ShiftC) - log2(C)) ? C : 0
2373     if (C->isPowerOf2() &&
2374         match(Op0, m_OneUse(m_LogicalShift(m_Power2(ShiftC), m_Value(X))))) {
2375       int Log2ShiftC = ShiftC->exactLogBase2();
2376       int Log2C = C->exactLogBase2();
2377       bool IsShiftLeft =
2378          cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl;
2379       int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
2380       assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask");
2381       Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, BitNum));
2382       return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C),
2383                                 ConstantInt::getNullValue(Ty));
2384     }
2385 
2386     Constant *C1, *C2;
2387     const APInt *C3 = C;
2388     Value *X;
2389     if (C3->isPowerOf2()) {
2390       Constant *Log2C3 = ConstantInt::get(Ty, C3->countr_zero());
2391       if (match(Op0, m_OneUse(m_LShr(m_Shl(m_ImmConstant(C1), m_Value(X)),
2392                                      m_ImmConstant(C2)))) &&
2393           match(C1, m_Power2())) {
2394         Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
2395         Constant *LshrC = ConstantExpr::getAdd(C2, Log2C3);
2396         KnownBits KnownLShrc = computeKnownBits(LshrC, 0, nullptr);
2397         if (KnownLShrc.getMaxValue().ult(Width)) {
2398           // iff C1 and C3 are pow2 and C2 + cttz(C3) < BitWidth:
2399           // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0
2400           Constant *CmpC = ConstantExpr::getSub(LshrC, Log2C1);
2401           Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
2402           return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
2403                                     ConstantInt::getNullValue(Ty));
2404         }
2405       }
2406 
2407       if (match(Op0, m_OneUse(m_Shl(m_LShr(m_ImmConstant(C1), m_Value(X)),
2408                                     m_ImmConstant(C2)))) &&
2409           match(C1, m_Power2())) {
2410         Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
2411         Constant *Cmp =
2412             ConstantExpr::getCompare(ICmpInst::ICMP_ULT, Log2C3, C2);
2413         if (Cmp->isZeroValue()) {
2414           // iff C1 and C3 are pow2 and Log2(C3) >= C2:
2415           // ((C1 >> X) << C2) & C3 -> X == (cttz(C1)+C2-cttz(C3)) ? C3 : 0
2416           Constant *ShlC = ConstantExpr::getAdd(C2, Log2C1);
2417           Constant *CmpC = ConstantExpr::getSub(ShlC, Log2C3);
2418           Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
2419           return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
2420                                     ConstantInt::getNullValue(Ty));
2421         }
2422       }
2423     }
2424   }
2425 
2426   // If we are clearing the sign bit of a floating-point value, convert this to
2427   // fabs, then cast back to integer.
2428   //
2429   // This is a generous interpretation for noimplicitfloat, this is not a true
2430   // floating-point operation.
2431   //
2432   // Assumes any IEEE-represented type has the sign bit in the high bit.
2433   // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
2434   Value *CastOp;
2435   if (match(Op0, m_BitCast(m_Value(CastOp))) &&
2436       match(Op1, m_MaxSignedValue()) &&
2437       !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
2438         Attribute::NoImplicitFloat)) {
2439     Type *EltTy = CastOp->getType()->getScalarType();
2440     if (EltTy->isFloatingPointTy() && EltTy->isIEEE() &&
2441         EltTy->getPrimitiveSizeInBits() ==
2442         I.getType()->getScalarType()->getPrimitiveSizeInBits()) {
2443       Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
2444       return new BitCastInst(FAbs, I.getType());
2445     }
2446   }
2447 
2448   if (match(&I, m_And(m_OneUse(m_Shl(m_ZExt(m_Value(X)), m_Value(Y))),
2449                       m_SignMask())) &&
2450       match(Y, m_SpecificInt_ICMP(
2451                    ICmpInst::Predicate::ICMP_EQ,
2452                    APInt(Ty->getScalarSizeInBits(),
2453                          Ty->getScalarSizeInBits() -
2454                              X->getType()->getScalarSizeInBits())))) {
2455     auto *SExt = Builder.CreateSExt(X, Ty, X->getName() + ".signext");
2456     auto *SanitizedSignMask = cast<Constant>(Op1);
2457     // We must be careful with undef elements of the sign-bit mask: a mask
2458     // element may be undef only if the shift amount for that lane was undef;
2459     // otherwise, undef mask elements must be sanitized to zero.
2460     SanitizedSignMask = Constant::replaceUndefsWith(
2461         SanitizedSignMask, ConstantInt::getNullValue(Ty->getScalarType()));
2462     SanitizedSignMask =
2463         Constant::mergeUndefsWith(SanitizedSignMask, cast<Constant>(Y));
2464     return BinaryOperator::CreateAnd(SExt, SanitizedSignMask);
2465   }
2466 
2467   if (Instruction *Z = narrowMaskedBinOp(I))
2468     return Z;
2469 
2470   if (I.getType()->isIntOrIntVectorTy(1)) {
2471     if (auto *SI0 = dyn_cast<SelectInst>(Op0)) {
2472       if (auto *R =
2473               foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ true))
2474         return R;
2475     }
2476     if (auto *SI1 = dyn_cast<SelectInst>(Op1)) {
2477       if (auto *R =
2478               foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ true))
2479         return R;
2480     }
2481   }
2482 
2483   if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
2484     return FoldedLogic;
2485 
2486   if (Instruction *DeMorgan = matchDeMorgansLaws(I, *this))
2487     return DeMorgan;
2488 
2489   {
2490     Value *A, *B, *C;
2491     // A & (A ^ B) --> A & ~B
2492     if (match(Op1, m_OneUse(m_c_Xor(m_Specific(Op0), m_Value(B)))))
2493       return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(B));
2494     // (A ^ B) & A --> A & ~B
2495     if (match(Op0, m_OneUse(m_c_Xor(m_Specific(Op1), m_Value(B)))))
2496       return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(B));
2497 
2498     // A & ~(A ^ B) --> A & B
2499     if (match(Op1, m_Not(m_c_Xor(m_Specific(Op0), m_Value(B)))))
2500       return BinaryOperator::CreateAnd(Op0, B);
2501     // ~(A ^ B) & A --> A & B
2502     if (match(Op0, m_Not(m_c_Xor(m_Specific(Op1), m_Value(B)))))
2503       return BinaryOperator::CreateAnd(Op1, B);
2504 
2505     // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
2506     if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2507         match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) {
2508       Value *NotC = Op1->hasOneUse()
2509                         ? Builder.CreateNot(C)
2510                         : getFreelyInverted(C, C->hasOneUse(), &Builder);
2511       if (NotC != nullptr)
2512         return BinaryOperator::CreateAnd(Op0, NotC);
2513     }
2514 
2515     // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
2516     if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))) &&
2517         match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) {
2518       Value *NotC = Op0->hasOneUse()
2519                         ? Builder.CreateNot(C)
2520                         : getFreelyInverted(C, C->hasOneUse(), &Builder);
2521       if (NotC != nullptr)
2522         return BinaryOperator::CreateAnd(Op1, NotC);
2523     }
2524 
2525     // (A | B) & (~A ^ B) -> A & B
2526     // (A | B) & (B ^ ~A) -> A & B
2527     // (B | A) & (~A ^ B) -> A & B
2528     // (B | A) & (B ^ ~A) -> A & B
2529     if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2530         match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
2531       return BinaryOperator::CreateAnd(A, B);
2532 
2533     // (~A ^ B) & (A | B) -> A & B
2534     // (~A ^ B) & (B | A) -> A & B
2535     // (B ^ ~A) & (A | B) -> A & B
2536     // (B ^ ~A) & (B | A) -> A & B
2537     if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2538         match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
2539       return BinaryOperator::CreateAnd(A, B);
2540 
2541     // (~A | B) & (A ^ B) -> ~A & B
2542     // (~A | B) & (B ^ A) -> ~A & B
2543     // (B | ~A) & (A ^ B) -> ~A & B
2544     // (B | ~A) & (B ^ A) -> ~A & B
2545     if (match(Op0, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2546         match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
2547       return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
2548 
2549     // (A ^ B) & (~A | B) -> ~A & B
2550     // (B ^ A) & (~A | B) -> ~A & B
2551     // (A ^ B) & (B | ~A) -> ~A & B
2552     // (B ^ A) & (B | ~A) -> ~A & B
2553     if (match(Op1, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2554         match(Op0, m_c_Xor(m_Specific(A), m_Specific(B))))
2555       return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
2556   }
2557 
2558   {
2559     ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
2560     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
2561     if (LHS && RHS)
2562       if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ true))
2563         return replaceInstUsesWith(I, Res);
2564 
2565     // TODO: Make this recursive; it's a little tricky because an arbitrary
2566     // number of 'and' instructions might have to be created.
2567     if (LHS && match(Op1, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) {
2568       bool IsLogical = isa<SelectInst>(Op1);
2569       // LHS & (X && Y) --> (LHS && X) && Y
2570       if (auto *Cmp = dyn_cast<ICmpInst>(X))
2571         if (Value *Res =
2572                 foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true, IsLogical))
2573           return replaceInstUsesWith(I, IsLogical
2574                                             ? Builder.CreateLogicalAnd(Res, Y)
2575                                             : Builder.CreateAnd(Res, Y));
2576       // LHS & (X && Y) --> X && (LHS & Y)
2577       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2578         if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true,
2579                                           /* IsLogical */ false))
2580           return replaceInstUsesWith(I, IsLogical
2581                                             ? Builder.CreateLogicalAnd(X, Res)
2582                                             : Builder.CreateAnd(X, Res));
2583     }
2584     if (RHS && match(Op0, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) {
2585       bool IsLogical = isa<SelectInst>(Op0);
2586       // (X && Y) & RHS --> (X && RHS) && Y
2587       if (auto *Cmp = dyn_cast<ICmpInst>(X))
2588         if (Value *Res =
2589                 foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true, IsLogical))
2590           return replaceInstUsesWith(I, IsLogical
2591                                             ? Builder.CreateLogicalAnd(Res, Y)
2592                                             : Builder.CreateAnd(Res, Y));
2593       // (X && Y) & RHS --> X && (Y & RHS)
2594       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2595         if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true,
2596                                           /* IsLogical */ false))
2597           return replaceInstUsesWith(I, IsLogical
2598                                             ? Builder.CreateLogicalAnd(X, Res)
2599                                             : Builder.CreateAnd(X, Res));
2600     }
2601   }
2602 
2603   if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
2604     if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
2605       if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ true))
2606         return replaceInstUsesWith(I, Res);
2607 
2608   if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
2609     return FoldedFCmps;
2610 
2611   if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
2612     return CastedAnd;
2613 
2614   if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
2615     return Sel;
2616 
2617   // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
2618   // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
2619   //       with binop identity constant. But creating a select with non-constant
2620   //       arm may not be reversible due to poison semantics. Is that a good
2621   //       canonicalization?
2622   Value *A, *B;
2623   if (match(&I, m_c_And(m_OneUse(m_SExt(m_Value(A))), m_Value(B))) &&
2624       A->getType()->isIntOrIntVectorTy(1))
2625     return SelectInst::Create(A, B, Constant::getNullValue(Ty));
2626 
2627   // Similarly, a 'not' of the bool translates to a swap of the select arms:
2628   // ~sext(A) & B / B & ~sext(A) --> A ? 0 : B
2629   if (match(&I, m_c_And(m_Not(m_SExt(m_Value(A))), m_Value(B))) &&
2630       A->getType()->isIntOrIntVectorTy(1))
2631     return SelectInst::Create(A, Constant::getNullValue(Ty), B);
2632 
2633   // and(zext(A), B) -> A ? (B & 1) : 0
2634   if (match(&I, m_c_And(m_OneUse(m_ZExt(m_Value(A))), m_Value(B))) &&
2635       A->getType()->isIntOrIntVectorTy(1))
2636     return SelectInst::Create(A, Builder.CreateAnd(B, ConstantInt::get(Ty, 1)),
2637                               Constant::getNullValue(Ty));
2638 
2639   // (-1 + A) & B --> A ? 0 : B where A is 0/1.
2640   if (match(&I, m_c_And(m_OneUse(m_Add(m_ZExtOrSelf(m_Value(A)), m_AllOnes())),
2641                         m_Value(B)))) {
2642     if (A->getType()->isIntOrIntVectorTy(1))
2643       return SelectInst::Create(A, Constant::getNullValue(Ty), B);
2644     if (computeKnownBits(A, /* Depth */ 0, &I).countMaxActiveBits() <= 1) {
2645       return SelectInst::Create(
2646           Builder.CreateICmpEQ(A, Constant::getNullValue(A->getType())), B,
2647           Constant::getNullValue(Ty));
2648     }
2649   }
2650 
2651   // (iN X s>> (N-1)) & Y --> (X s< 0) ? Y : 0 -- with optional sext
2652   if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
2653                             m_AShr(m_Value(X), m_APIntAllowUndef(C)))),
2654                         m_Value(Y))) &&
2655       *C == X->getType()->getScalarSizeInBits() - 1) {
2656     Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
2657     return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
2658   }
2659   // If there's a 'not' of the shifted value, swap the select operands:
2660   // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
2661   if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
2662                             m_Not(m_AShr(m_Value(X), m_APIntAllowUndef(C))))),
2663                         m_Value(Y))) &&
2664       *C == X->getType()->getScalarSizeInBits() - 1) {
2665     Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
2666     return SelectInst::Create(IsNeg, ConstantInt::getNullValue(Ty), Y);
2667   }
2668 
2669   // (~x) & y  -->  ~(x | (~y))  iff that gets rid of inversions
2670   if (sinkNotIntoOtherHandOfLogicalOp(I))
2671     return &I;
2672 
2673   // An 'and' recurrence w/loop-invariant step is equivalent to (and start, step).
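       // (On entry the recurrence value is 'start'; on every later iteration it
       // is (start & step), and (start & step) & step == (start & step), so the
       // 'and' always produces (start & step).)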
2674   PHINode *PN = nullptr;
2675   Value *Start = nullptr, *Step = nullptr;
2676   if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
2677     return replaceInstUsesWith(I, Builder.CreateAnd(Start, Step));
2678 
2679   if (Instruction *R = reassociateForUses(I, Builder))
2680     return R;
2681 
2682   if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
2683     return Canonicalized;
2684 
2685   if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
2686     return Folded;
2687 
2688   if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
2689     return Res;
2690 
2691   return nullptr;
2692 }
2693 
2694 Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I,
2695                                                       bool MatchBSwaps,
2696                                                       bool MatchBitReversals) {
2697   SmallVector<Instruction *, 4> Insts;
2698   if (!recognizeBSwapOrBitReverseIdiom(&I, MatchBSwaps, MatchBitReversals,
2699                                        Insts))
2700     return nullptr;
2701   Instruction *LastInst = Insts.pop_back_val();
2702   LastInst->removeFromParent();
2703 
2704   for (auto *Inst : Insts)
2705     Worklist.push(Inst);
2706   return LastInst;
2707 }
2708 
2709 /// Match UB-safe variants of the funnel shift intrinsic.
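     /// An illustrative instance (assuming the shift amounts are in range):
     ///   or (shl X, C), (lshr Y, (Width - C))  -->  fshl(X, Y, C)
     /// With X == Y this is a rotate-left by C.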
2710 static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC,
2711                                      const DominatorTree &DT) {
2712   // TODO: Can we reduce the code duplication between this and the related
2713   // rotate matching code under visitSelect and visitTrunc?
2714   unsigned Width = Or.getType()->getScalarSizeInBits();
2715 
2716   Instruction *Or0, *Or1;
2717   if (!match(Or.getOperand(0), m_Instruction(Or0)) ||
2718       !match(Or.getOperand(1), m_Instruction(Or1)))
2719     return nullptr;
2720 
2721   bool IsFshl = true; // Sub on LSHR.
2722   SmallVector<Value *, 3> FShiftArgs;
2723 
2724   // First, find an or'd pair of opposite shifts:
2725   // or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1)
2726   if (isa<BinaryOperator>(Or0) && isa<BinaryOperator>(Or1)) {
2727     Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
2728     if (!match(Or0,
2729                m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
2730         !match(Or1,
2731                m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
2732         Or0->getOpcode() == Or1->getOpcode())
2733       return nullptr;
2734 
2735     // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
2736     if (Or0->getOpcode() == BinaryOperator::LShr) {
2737       std::swap(Or0, Or1);
2738       std::swap(ShVal0, ShVal1);
2739       std::swap(ShAmt0, ShAmt1);
2740     }
2741     assert(Or0->getOpcode() == BinaryOperator::Shl &&
2742            Or1->getOpcode() == BinaryOperator::LShr &&
2743            "Illegal or(shift,shift) pair");
2744 
2745     // Match the shift amount operands for a funnel shift pattern. This always
2746     // matches a subtraction on the R operand.
2747     auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
2748       // Check for constant shift amounts that sum to the bitwidth.
2749       const APInt *LI, *RI;
2750       if (match(L, m_APIntAllowUndef(LI)) && match(R, m_APIntAllowUndef(RI)))
2751         if (LI->ult(Width) && RI->ult(Width) && (*LI + *RI) == Width)
2752           return ConstantInt::get(L->getType(), *LI);
2753 
2754       Constant *LC, *RC;
2755       if (match(L, m_Constant(LC)) && match(R, m_Constant(RC)) &&
2756           match(L,
2757                 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
2758           match(R,
2759                 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
2760           match(ConstantExpr::getAdd(LC, RC), m_SpecificIntAllowUndef(Width)))
2761         return ConstantExpr::mergeUndefsWith(LC, RC);
2762 
2763       // (shl ShVal, X) | (lshr ShVal, (Width - X)) iff X < Width.
2764       // We limit this to X < Width in case the backend re-expands the
2765       // intrinsic, and has to reintroduce a shift modulo operation (InstCombine
2766       // might remove it after this fold). This still doesn't guarantee that the
2767       // final codegen will match this original pattern.
2768       if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) {
2769         KnownBits KnownL = IC.computeKnownBits(L, /*Depth*/ 0, &Or);
2770         return KnownL.getMaxValue().ult(Width) ? L : nullptr;
2771       }
2772 
2773       // For non-constant cases, the following matches currently only handle
2774       // rotation patterns.
2775       // TODO: Add general funnel-shift compatible patterns.
2776       if (ShVal0 != ShVal1)
2777         return nullptr;
2778 
2779       // For non-constant cases we don't support non-pow2 shift masks.
2780       // TODO: Is it worth matching urem as well?
2781       if (!isPowerOf2_32(Width))
2782         return nullptr;
2783 
2784       // The shift amount may be masked with negation:
2785       // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
2786       Value *X;
2787       unsigned Mask = Width - 1;
2788       if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
2789           match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
2790         return X;
2791 
2792       // Similar to above, but the shift amount may be extended after masking,
2793       // so return the extended value as the parameter for the intrinsic.
2794       if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
2795           match(R,
2796                 m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))),
2797                       m_SpecificInt(Mask))))
2798         return L;
2799 
2800       if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
2801           match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
2802         return L;
2803 
2804       return nullptr;
2805     };
2806 
2807     Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
2808     if (!ShAmt) {
2809       ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
2810       IsFshl = false; // Sub on SHL.
2811     }
2812     if (!ShAmt)
2813       return nullptr;
2814 
2815     FShiftArgs = {ShVal0, ShVal1, ShAmt};
2816   } else if (isa<ZExtInst>(Or0) || isa<ZExtInst>(Or1)) {
2817     // If there are two 'or' instructions that concat variables in opposite order:
2818     //
2819     // Slot1 and Slot2 are all zero bits.
2820     // | Slot1 | Low | Slot2 | High |
2821     // LowHigh = or (shl (zext Low), ZextLowShlAmt), (zext High)
2822     // | Slot2 | High | Slot1 | Low |
2823     // HighLow = or (shl (zext High), ZextHighShlAmt), (zext Low)
2824     //
2825     // the latter 'or' can be safely converted to
2826     // -> HighLow = fshl LowHigh, LowHigh, ZextHighShlAmt
2827     // if ZextLowShlAmt + ZextHighShlAmt == Width.
2828     if (!isa<ZExtInst>(Or1))
2829       std::swap(Or0, Or1);
2830 
2831     Value *High, *ZextHigh, *Low;
2832     const APInt *ZextHighShlAmt;
2833     if (!match(Or0,
2834                m_OneUse(m_Shl(m_Value(ZextHigh), m_APInt(ZextHighShlAmt)))))
2835       return nullptr;
2836 
2837     if (!match(Or1, m_ZExt(m_Value(Low))) ||
2838         !match(ZextHigh, m_ZExt(m_Value(High))))
2839       return nullptr;
2840 
2841     unsigned HighSize = High->getType()->getScalarSizeInBits();
2842     unsigned LowSize = Low->getType()->getScalarSizeInBits();
2843     // Make sure High does not overlap with Low and the most significant bits
2844     // of High aren't shifted out.
2845     if (ZextHighShlAmt->ult(LowSize) || ZextHighShlAmt->ugt(Width - HighSize))
2846       return nullptr;
2847 
2848     for (User *U : ZextHigh->users()) {
2849       Value *X, *Y;
2850       if (!match(U, m_Or(m_Value(X), m_Value(Y))))
2851         continue;
2852 
2853       if (!isa<ZExtInst>(Y))
2854         std::swap(X, Y);
2855 
2856       const APInt *ZextLowShlAmt;
2857       if (!match(X, m_Shl(m_Specific(Or1), m_APInt(ZextLowShlAmt))) ||
2858           !match(Y, m_Specific(ZextHigh)) || !DT.dominates(U, &Or))
2859         continue;
2860 
2861       // HighLow is a good concat. If the sum of the two shift amounts equals
2862       // Width, LowHigh must also be a good concat.
2863       if (*ZextLowShlAmt + *ZextHighShlAmt != Width)
2864         continue;
2865 
2866       // Low must not overlap with High, and the most significant bits of Low
2867       // must not be shifted out.
2868       assert(ZextLowShlAmt->uge(HighSize) &&
2869              ZextLowShlAmt->ule(Width - LowSize) && "Invalid concat");
2870 
2871       FShiftArgs = {U, U, ConstantInt::get(Or0->getType(), *ZextHighShlAmt)};
2872       break;
2873     }
2874   }
2875 
2876   if (FShiftArgs.empty())
2877     return nullptr;
2878 
2879   Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
2880   Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
2881   return CallInst::Create(F, FShiftArgs);
2882 }
2883 
2884 /// Attempt to combine or(zext(x),shl(zext(y),bw/2)) concat packing patterns.
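     /// e.g. with bw == 32 this matches (value names illustrative)
     ///   or (zext i16 %x to i32), (shl (zext i16 %y to i32), 16)
     /// and, if %x and %y are themselves bswap'd or bitreverse'd, the two narrow
     /// ops are rewritten as one wide op of the swapped concat.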
2885 static Instruction *matchOrConcat(Instruction &Or,
2886                                   InstCombiner::BuilderTy &Builder) {
2887   assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
2888   Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1);
2889   Type *Ty = Or.getType();
2890 
2891   unsigned Width = Ty->getScalarSizeInBits();
2892   if ((Width & 1) != 0)
2893     return nullptr;
2894   unsigned HalfWidth = Width / 2;
2895 
2896   // Canonicalize zext (lower half) to LHS.
2897   if (!isa<ZExtInst>(Op0))
2898     std::swap(Op0, Op1);
2899 
2900   // Find lower/upper half.
2901   Value *LowerSrc, *ShlVal, *UpperSrc;
2902   const APInt *C;
2903   if (!match(Op0, m_OneUse(m_ZExt(m_Value(LowerSrc)))) ||
2904       !match(Op1, m_OneUse(m_Shl(m_Value(ShlVal), m_APInt(C)))) ||
2905       !match(ShlVal, m_OneUse(m_ZExt(m_Value(UpperSrc)))))
2906     return nullptr;
2907   if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() ||
2908       LowerSrc->getType()->getScalarSizeInBits() != HalfWidth)
2909     return nullptr;
2910 
2911   auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) {
2912     Value *NewLower = Builder.CreateZExt(Lo, Ty);
2913     Value *NewUpper = Builder.CreateZExt(Hi, Ty);
2914     NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
2915     Value *BinOp = Builder.CreateOr(NewLower, NewUpper);
2916     Function *F = Intrinsic::getDeclaration(Or.getModule(), id, Ty);
2917     return Builder.CreateCall(F, BinOp);
2918   };
2919 
2920   // BSWAP: Push the concat down, swapping the lower/upper sources.
2921   // concat(bswap(x),bswap(y)) -> bswap(concat(x,y))
2922   Value *LowerBSwap, *UpperBSwap;
2923   if (match(LowerSrc, m_BSwap(m_Value(LowerBSwap))) &&
2924       match(UpperSrc, m_BSwap(m_Value(UpperBSwap))))
2925     return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);
2926 
2927   // BITREVERSE: Push the concat down, swapping the lower/upper sources.
2928   // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y))
2929   Value *LowerBRev, *UpperBRev;
2930   if (match(LowerSrc, m_BitReverse(m_Value(LowerBRev))) &&
2931       match(UpperSrc, m_BitReverse(m_Value(UpperBRev))))
2932     return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);
2933 
2934   return nullptr;
2935 }
2936 
2937 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
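     /// e.g. C1 = <i32 -1, i32 0, i32 -1> and C2 = <i32 0, i32 -1, i32 0>.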
2938 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
2939   unsigned NumElts = cast<FixedVectorType>(C1->getType())->getNumElements();
2940   for (unsigned i = 0; i != NumElts; ++i) {
2941     Constant *EltC1 = C1->getAggregateElement(i);
2942     Constant *EltC2 = C2->getAggregateElement(i);
2943     if (!EltC1 || !EltC2)
2944       return false;
2945 
2946     // One element must be all ones, and the other must be all zeros.
2947     if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
2948           (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
2949       return false;
2950   }
2951   return true;
2952 }
2953 
2954 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or
2955 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
2956 /// B, it can be used as the condition operand of a select instruction.
2957 /// We will detect (A & C) | ~(B | D) when the flag ABIsTheSame is enabled.
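     /// e.g. if A is (sext i1 %cond) and B is (sext (xor i1 %cond, true)), the
     /// caller can turn (A & C) | (B & D) into (select i1 %cond, C, D).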
2958 Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B,
2959                                             bool ABIsTheSame) {
2960   // We may have peeked through bitcasts in the caller.
2961   // Exit immediately if we don't have (vector) integer types.
2962   Type *Ty = A->getType();
2963   if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
2964     return nullptr;
2965 
2966   // If A is the 'not' operand of B and has enough sign bits, we have our answer.
2967   if (ABIsTheSame ? (A == B) : match(B, m_Not(m_Specific(A)))) {
2968     // If these are scalars or vectors of i1, A can be used directly.
2969     if (Ty->isIntOrIntVectorTy(1))
2970       return A;
2971 
2972     // If we look through a vector bitcast, the caller will bitcast the operands
2973     // to match the condition's number of bits (N x i1).
2974     // To make this poison-safe, disallow bitcast from wide element to narrow
2975     // element. That could allow poison in lanes where it was not present in the
2976     // original code.
2977     A = peekThroughBitcast(A);
2978     if (A->getType()->isIntOrIntVectorTy()) {
2979       unsigned NumSignBits = ComputeNumSignBits(A);
2980       if (NumSignBits == A->getType()->getScalarSizeInBits() &&
2981           NumSignBits <= Ty->getScalarSizeInBits())
2982         return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(A->getType()));
2983     }
2984     return nullptr;
2985   }
2986 
2987   // TODO: add support for sext and constant case
2988   if (ABIsTheSame)
2989     return nullptr;
2990 
2991   // If both operands are constants, see if the constants are inverse bitmasks.
2992   Constant *AConst, *BConst;
2993   if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst)))
2994     if (AConst == ConstantExpr::getNot(BConst) &&
2995         ComputeNumSignBits(A) == Ty->getScalarSizeInBits())
2996       return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty));
2997 
2998   // Look for more complex patterns. The 'not' op may be hidden behind various
2999   // casts. Look through sexts and bitcasts to find the booleans.
3000   Value *Cond;
3001   Value *NotB;
3002   if (match(A, m_SExt(m_Value(Cond))) &&
3003       Cond->getType()->isIntOrIntVectorTy(1)) {
3004     // A = sext i1 Cond; B = sext (not (i1 Cond))
3005     if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
3006       return Cond;
3007 
3008     // A = sext i1 Cond; B = not ({bitcast} (sext (i1 Cond)))
3009     // TODO: The one-use checks are unnecessary or misplaced. If the caller
3010     //       checked for uses on logic ops/casts, that should be enough to
3011     //       make this transform worthwhile.
3012     if (match(B, m_OneUse(m_Not(m_Value(NotB))))) {
3013       NotB = peekThroughBitcast(NotB, true);
3014       if (match(NotB, m_SExt(m_Specific(Cond))))
3015         return Cond;
3016     }
3017   }
3018 
3019   // All scalar (and most vector) possibilities should be handled now.
3020   // Try more matches that only apply to non-splat constant vectors.
3021   if (!Ty->isVectorTy())
3022     return nullptr;
3023 
3024   // If both operands are xor'd with constants using the same sexted boolean
3025   // operand, see if the constants are inverse bitmasks.
3026   // TODO: Use ConstantExpr::getNot()?
3027   if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) &&
3028       match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) &&
3029       Cond->getType()->isIntOrIntVectorTy(1) &&
3030       areInverseVectorBitmasks(AConst, BConst)) {
3031     AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty));
3032     return Builder.CreateXor(Cond, AConst);
3033   }
3034   return nullptr;
3035 }
3036 
3037 /// We have an expression of the form (A & C) | (B & D). Try to simplify this
3038 /// to "A' ? C : D", where A' is a boolean or vector of booleans.
3039 /// When InvertFalseVal is set to true, we try to match the pattern
3040 /// where we have peeked through a 'not' op and A and B are the same:
3041 /// (A & C) | ~(A | D) --> (A & C) | (~A & ~D) --> A' ? C : ~D
3042 Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *C, Value *B,
3043                                               Value *D, bool InvertFalseVal) {
3044   // The potential condition of the select may be bitcasted. In that case, look
3045   // through its bitcast and the corresponding bitcast of the 'not' condition.
3046   Type *OrigType = A->getType();
3047   A = peekThroughBitcast(A, true);
3048   B = peekThroughBitcast(B, true);
3049   if (Value *Cond = getSelectCondition(A, B, InvertFalseVal)) {
3050     // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D))
3051     // If this is a vector, we may need to cast to match the condition's length.
3052     // The bitcasts will either all exist or all not exist. The builder will
3053     // not create unnecessary casts if the types already match.
3054     Type *SelTy = A->getType();
3055     if (auto *VecTy = dyn_cast<VectorType>(Cond->getType())) {
3056       // For a fixed or scalable vector, get N from <{vscale x} N x iM>.
3057       unsigned Elts = VecTy->getElementCount().getKnownMinValue();
3058       // For a fixed or scalable vector, get the size in bits of N x iM; for a
3059       // scalar this is just M.
3060       unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue();
3061       Type *EltTy = Builder.getIntNTy(SelEltSize / Elts);
3062       SelTy = VectorType::get(EltTy, VecTy->getElementCount());
3063     }
3064     Value *BitcastC = Builder.CreateBitCast(C, SelTy);
3065     if (InvertFalseVal)
3066       D = Builder.CreateNot(D);
3067     Value *BitcastD = Builder.CreateBitCast(D, SelTy);
3068     Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD);
3069     return Builder.CreateBitCast(Select, OrigType);
3070   }
3071 
3072   return nullptr;
3073 }
3074 
3075 // (icmp eq X, C) | (icmp ult Other, (X - C)) -> (icmp ule Other, (X - (C + 1)))
3076 // (icmp ne X, C) & (icmp uge Other, (X - C)) -> (icmp ugt Other, (X - (C + 1)))
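     // e.g. with C == 5:
     //   (X == 5) | (Other u< (X - 5))  -->  Other u<= (X - 6)
     // (when X == 5, X - 6 wraps to unsigned max, so the new compare stays true).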
3077 static Value *foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS,
3078                                                bool IsAnd, bool IsLogical,
3079                                                IRBuilderBase &Builder) {
3080   Value *LHS0 = LHS->getOperand(0);
3081   Value *RHS0 = RHS->getOperand(0);
3082   Value *RHS1 = RHS->getOperand(1);
3083 
3084   ICmpInst::Predicate LPred =
3085       IsAnd ? LHS->getInversePredicate() : LHS->getPredicate();
3086   ICmpInst::Predicate RPred =
3087       IsAnd ? RHS->getInversePredicate() : RHS->getPredicate();
3088 
3089   const APInt *CInt;
3090   if (LPred != ICmpInst::ICMP_EQ ||
3091       !match(LHS->getOperand(1), m_APIntAllowUndef(CInt)) ||
3092       !LHS0->getType()->isIntOrIntVectorTy() ||
3093       !(LHS->hasOneUse() || RHS->hasOneUse()))
3094     return nullptr;
3095 
3096   auto MatchRHSOp = [LHS0, CInt](const Value *RHSOp) {
3097     return match(RHSOp,
3098                  m_Add(m_Specific(LHS0), m_SpecificIntAllowUndef(-*CInt))) ||
3099            (CInt->isZero() && RHSOp == LHS0);
3100   };
3101 
3102   Value *Other;
3103   if (RPred == ICmpInst::ICMP_ULT && MatchRHSOp(RHS1))
3104     Other = RHS0;
3105   else if (RPred == ICmpInst::ICMP_UGT && MatchRHSOp(RHS0))
3106     Other = RHS1;
3107   else
3108     return nullptr;
3109 
3110   if (IsLogical)
3111     Other = Builder.CreateFreeze(Other);
3112 
3113   return Builder.CreateICmp(
3114       IsAnd ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE,
3115       Builder.CreateSub(LHS0, ConstantInt::get(LHS0->getType(), *CInt + 1)),
3116       Other);
3117 }
3118 
3119 /// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible.
3120 /// If IsLogical is true, then the and/or is in select form and the transform
3121 /// must be poison-safe.
3122 Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
3123                                           Instruction &I, bool IsAnd,
3124                                           bool IsLogical) {
3125   const SimplifyQuery Q = SQ.getWithInstruction(&I);
3126 
3127   // Fold (iszero(A & K1) | iszero(A & K2)) ->  (A & (K1 | K2)) != (K1 | K2)
3128   // Fold (!iszero(A & K1) & !iszero(A & K2)) ->  (A & (K1 | K2)) == (K1 | K2)
3129   // if K1 and K2 are one-bit masks.
3130   if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, &I, IsAnd, IsLogical))
3131     return V;
3132 
3133   ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
3134   Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
3135   Value *LHS1 = LHS->getOperand(1), *RHS1 = RHS->getOperand(1);
3136   const APInt *LHSC = nullptr, *RHSC = nullptr;
3137   match(LHS1, m_APInt(LHSC));
3138   match(RHS1, m_APInt(RHSC));
3139 
3140   // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
3141   // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3142   if (predicatesFoldable(PredL, PredR)) {
3143     if (LHS0 == RHS1 && LHS1 == RHS0) {
3144       PredL = ICmpInst::getSwappedPredicate(PredL);
3145       std::swap(LHS0, LHS1);
3146     }
3147     if (LHS0 == RHS0 && LHS1 == RHS1) {
3148       unsigned Code = IsAnd ? getICmpCode(PredL) & getICmpCode(PredR)
3149                             : getICmpCode(PredL) | getICmpCode(PredR);
3150       bool IsSigned = LHS->isSigned() || RHS->isSigned();
3151       return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
3152     }
3153   }
3154 
3155   // handle (roughly):
3156   // (icmp ne (A & B), C) | (icmp ne (A & D), E)
3157   // (icmp eq (A & B), C) & (icmp eq (A & D), E)
3158   if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, IsLogical, Builder))
3159     return V;
3160 
3161   if (Value *V =
3162           foldAndOrOfICmpEqConstantAndICmp(LHS, RHS, IsAnd, IsLogical, Builder))
3163     return V;
3164   // We can treat logical like bitwise here, because both operands are used on
3165   // the LHS, and as such poison from both will propagate.
3166   if (Value *V = foldAndOrOfICmpEqConstantAndICmp(RHS, LHS, IsAnd,
3167                                                   /*IsLogical*/ false, Builder))
3168     return V;
3169 
3170   if (Value *V =
3171           foldAndOrOfICmpsWithConstEq(LHS, RHS, IsAnd, IsLogical, Builder, Q))
3172     return V;
3173   // We can convert this case to bitwise and, because both operands are used
3174   // on the LHS, and as such poison from both will propagate.
3175   if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, IsAnd,
3176                                              /*IsLogical*/ false, Builder, Q))
3177     return V;
3178 
3179   if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, IsAnd, Builder))
3180     return V;
3181   if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, IsAnd, Builder))
3182     return V;
3183 
3184   // TODO: One of these directions is fine with logical and/or, the other could
3185   // be supported by inserting freeze.
3186   if (!IsLogical) {
3187     // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
3188     // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
3189     if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/!IsAnd))
3190       return V;
3191 
3192     // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
3193     // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
3194     if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/!IsAnd))
3195       return V;
3196   }
3197 
3198   // TODO: Add conjugated or fold, check whether it is safe for logical and/or.
3199   if (IsAnd && !IsLogical)
3200     if (Value *V = foldSignedTruncationCheck(LHS, RHS, I, Builder))
3201       return V;
3202 
3203   if (Value *V = foldIsPowerOf2(LHS, RHS, IsAnd, Builder))
3204     return V;
3205 
3206   if (Value *V = foldPowerOf2AndShiftedMask(LHS, RHS, IsAnd, Builder))
3207     return V;
3208 
3209   // TODO: Verify whether this is safe for logical and/or.
3210   if (!IsLogical) {
3211     if (Value *X = foldUnsignedUnderflowCheck(LHS, RHS, IsAnd, Q, Builder))
3212       return X;
3213     if (Value *X = foldUnsignedUnderflowCheck(RHS, LHS, IsAnd, Q, Builder))
3214       return X;
3215   }
3216 
3217   if (Value *X = foldEqOfParts(LHS, RHS, IsAnd))
3218     return X;
3219 
3220   // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
3221   // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
3222   // TODO: Remove this and below when foldLogOpOfMaskedICmps can handle undefs.
3223   if (!IsLogical && PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3224       PredL == PredR && match(LHS1, m_ZeroInt()) && match(RHS1, m_ZeroInt()) &&
3225       LHS0->getType() == RHS0->getType()) {
3226     Value *NewOr = Builder.CreateOr(LHS0, RHS0);
3227     return Builder.CreateICmp(PredL, NewOr,
3228                               Constant::getNullValue(NewOr->getType()));
3229   }
3230 
3231   // (icmp ne A, -1) | (icmp ne B, -1) --> (icmp ne (A&B), -1)
3232   // (icmp eq A, -1) & (icmp eq B, -1) --> (icmp eq (A&B), -1)
3233   if (!IsLogical && PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3234       PredL == PredR && match(LHS1, m_AllOnes()) && match(RHS1, m_AllOnes()) &&
3235       LHS0->getType() == RHS0->getType()) {
3236     Value *NewAnd = Builder.CreateAnd(LHS0, RHS0);
3237     return Builder.CreateICmp(PredL, NewAnd,
3238                               Constant::getAllOnesValue(LHS0->getType()));
3239   }
3240 
3241   // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
3242   if (!LHSC || !RHSC)
3243     return nullptr;
3244 
3245   // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
3246   // (trunc x) != C1 | (and x, CA) != C2 -> (and x, CA|CMAX) != C1|C2
3247   // where CMAX is the all ones value for the truncated type,
3248   // iff the lower bits of C2 and CA are zero.
3249   if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3250       PredL == PredR && LHS->hasOneUse() && RHS->hasOneUse()) {
3251     Value *V;
3252     const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;
3253 
3254     // (trunc x) == C1 & (and x, CA) == C2
3255     // (and x, CA) == C2 & (trunc x) == C1
3256     if (match(RHS0, m_Trunc(m_Value(V))) &&
3257         match(LHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
3258       SmallC = RHSC;
3259       BigC = LHSC;
3260     } else if (match(LHS0, m_Trunc(m_Value(V))) &&
3261                match(RHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
3262       SmallC = LHSC;
3263       BigC = RHSC;
3264     }
3265 
3266     if (SmallC && BigC) {
3267       unsigned BigBitSize = BigC->getBitWidth();
3268       unsigned SmallBitSize = SmallC->getBitWidth();
3269 
3270       // Check that the low bits are zero.
3271       APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
3272       if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
3273         Value *NewAnd = Builder.CreateAnd(V, Low | *AndC);
3274         APInt N = SmallC->zext(BigBitSize) | *BigC;
3275         Value *NewVal = ConstantInt::get(NewAnd->getType(), N);
3276         return Builder.CreateICmp(PredL, NewAnd, NewVal);
3277       }
3278     }
3279   }
3280 
3281   // Match the naive pattern (and its inverted form) for checking whether two
3282   // values share the same sign. An example of the pattern:
3283   // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1)
3284   // Inverted form (example):
3285   // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0)
3286   bool TrueIfSignedL, TrueIfSignedR;
3287   if (isSignBitCheck(PredL, *LHSC, TrueIfSignedL) &&
3288       isSignBitCheck(PredR, *RHSC, TrueIfSignedR) &&
3289       (RHS->hasOneUse() || LHS->hasOneUse())) {
3290     Value *X, *Y;
3291     if (IsAnd) {
3292       if ((TrueIfSignedL && !TrueIfSignedR &&
3293            match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
3294            match(RHS0, m_c_And(m_Specific(X), m_Specific(Y)))) ||
3295           (!TrueIfSignedL && TrueIfSignedR &&
3296            match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
3297            match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y))))) {
3298         Value *NewXor = Builder.CreateXor(X, Y);
3299         return Builder.CreateIsNeg(NewXor);
3300       }
3301     } else {
3302       if ((TrueIfSignedL && !TrueIfSignedR &&
3303             match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
3304             match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))) ||
3305           (!TrueIfSignedL && TrueIfSignedR &&
3306            match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
3307            match(RHS0, m_c_And(m_Specific(X), m_Specific(Y))))) {
3308         Value *NewXor = Builder.CreateXor(X, Y);
3309         return Builder.CreateIsNotNeg(NewXor);
3310       }
3311     }
3312   }
3313 
3314   return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
3315 }
3316 
3317 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
3318 // here. We should standardize that construct where it is needed or choose some
3319 // other way to ensure that commutated variants of patterns are not missed.
3320 Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
3321   if (Value *V = simplifyOrInst(I.getOperand(0), I.getOperand(1),
3322                                 SQ.getWithInstruction(&I)))
3323     return replaceInstUsesWith(I, V);
3324 
3325   if (SimplifyAssociativeOrCommutative(I))
3326     return &I;
3327 
3328   if (Instruction *X = foldVectorBinop(I))
3329     return X;
3330 
3331   if (Instruction *Phi = foldBinopWithPhiOperands(I))
3332     return Phi;
3333 
3334   // See if we can simplify any instructions used by the instruction whose sole
3335   // purpose is to compute bits we don't care about.
3336   if (SimplifyDemandedInstructionBits(I))
3337     return &I;
3338 
3339   // Do this before using distributive laws to catch simple and/or/not patterns.
3340   if (Instruction *Xor = foldOrToXor(I, Builder))
3341     return Xor;
3342 
3343   if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
3344     return X;
3345 
3346   // (A&B)|(A&C) -> A&(B|C) etc
3347   if (Value *V = foldUsingDistributiveLaws(I))
3348     return replaceInstUsesWith(I, V);
3349 
3350   if (Value *V = SimplifyBSwap(I, Builder))
3351     return replaceInstUsesWith(I, V);
3352 
3353   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3354   Type *Ty = I.getType();
3355   if (Ty->isIntOrIntVectorTy(1)) {
3356     if (auto *SI0 = dyn_cast<SelectInst>(Op0)) {
3357       if (auto *R =
3358               foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ false))
3359         return R;
3360     }
3361     if (auto *SI1 = dyn_cast<SelectInst>(Op1)) {
3362       if (auto *R =
3363               foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ false))
3364         return R;
3365     }
3366   }
3367 
3368   if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
3369     return FoldedLogic;
3370 
3371   if (Instruction *BitOp = matchBSwapOrBitReverse(I, /*MatchBSwaps*/ true,
3372                                                   /*MatchBitReversals*/ true))
3373     return BitOp;
3374 
3375   if (Instruction *Funnel = matchFunnelShift(I, *this, DT))
3376     return Funnel;
3377 
3378   if (Instruction *Concat = matchOrConcat(I, Builder))
3379     return replaceInstUsesWith(I, Concat);
3380 
3381   if (Instruction *R = foldBinOpShiftWithShift(I))
3382     return R;
3383 
3384   Value *X, *Y;
3385   const APInt *CV;
3386   if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
3387       !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, 0, &I)) {
3388     // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
3389     // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
3390     Value *Or = Builder.CreateOr(X, Y);
3391     return BinaryOperator::CreateXor(Or, ConstantInt::get(Ty, *CV));
3392   }
3393 
3394   // If the operands have no common bits set:
3395   // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1)
3396   if (match(&I, m_c_DisjointOr(m_OneUse(m_Mul(m_Value(X), m_Value(Y))),
3397                                m_Deferred(X)))) {
3398     Value *IncrementY = Builder.CreateAdd(Y, ConstantInt::get(Ty, 1));
3399     return BinaryOperator::CreateMul(X, IncrementY);
3400   }
3401 
3402   // X | (X ^ Y) --> X | Y (4 commuted patterns)
3403   if (match(&I, m_c_Or(m_Value(X), m_c_Xor(m_Deferred(X), m_Value(Y)))))
3404     return BinaryOperator::CreateOr(X, Y);
3405 
3406   // (A & C) | (B & D)
3407   Value *A, *B, *C, *D;
3408   if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
3409       match(Op1, m_And(m_Value(B), m_Value(D)))) {
3410 
3411     // (A & C0) | (B & C1)
3412     const APInt *C0, *C1;
3413     if (match(C, m_APInt(C0)) && match(D, m_APInt(C1))) {
3414       Value *X;
3415       if (*C0 == ~*C1) {
3416         // ((X | B) & MaskC) | (B & ~MaskC) -> (X & MaskC) | B
3417         if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
3418           return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C0), B);
3419         // (A & MaskC) | ((X | A) & ~MaskC) -> (X & ~MaskC) | A
3420         if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
3421           return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C1), A);
3422 
3423         // ((X ^ B) & MaskC) | (B & ~MaskC) -> (X & MaskC) ^ B
3424         if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
3425           return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C0), B);
3426         // (A & MaskC) | ((X ^ A) & ~MaskC) -> (X & ~MaskC) ^ A
3427         if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
3428           return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C1), A);
3429       }
3430 
3431       if ((*C0 & *C1).isZero()) {
3432         // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1)
3433         // iff (C0 & C1) == 0 and (X & ~C0) == 0
3434         if (match(A, m_c_Or(m_Value(X), m_Specific(B))) &&
3435             MaskedValueIsZero(X, ~*C0, 0, &I)) {
3436           Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3437           return BinaryOperator::CreateAnd(A, C01);
3438         }
3439         // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1)
3440         // iff (C0 & C1) == 0 and (X & ~C1) == 0
3441         if (match(B, m_c_Or(m_Value(X), m_Specific(A))) &&
3442             MaskedValueIsZero(X, ~*C1, 0, &I)) {
3443           Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3444           return BinaryOperator::CreateAnd(B, C01);
3445         }
3446         // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1)
3447         // iff (C0 & C1) == 0 and (C2 & ~C0) == 0 and (C3 & ~C1) == 0.
3448         const APInt *C2, *C3;
3449         if (match(A, m_Or(m_Value(X), m_APInt(C2))) &&
3450             match(B, m_Or(m_Specific(X), m_APInt(C3))) &&
3451             (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) {
3452           Value *Or = Builder.CreateOr(X, *C2 | *C3, "bitfield");
3453           Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3454           return BinaryOperator::CreateAnd(Or, C01);
3455         }
3456       }
3457     }
3458 
3459     // Don't try to form a select if it's unlikely that we'll get rid of at
3460     // least one of the operands. A select is generally more expensive than the
3461     // 'or' that it is replacing.
3462     if (Op0->hasOneUse() || Op1->hasOneUse()) {
3463       // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
3464       if (Value *V = matchSelectFromAndOr(A, C, B, D))
3465         return replaceInstUsesWith(I, V);
3466       if (Value *V = matchSelectFromAndOr(A, C, D, B))
3467         return replaceInstUsesWith(I, V);
3468       if (Value *V = matchSelectFromAndOr(C, A, B, D))
3469         return replaceInstUsesWith(I, V);
3470       if (Value *V = matchSelectFromAndOr(C, A, D, B))
3471         return replaceInstUsesWith(I, V);
3472       if (Value *V = matchSelectFromAndOr(B, D, A, C))
3473         return replaceInstUsesWith(I, V);
3474       if (Value *V = matchSelectFromAndOr(B, D, C, A))
3475         return replaceInstUsesWith(I, V);
3476       if (Value *V = matchSelectFromAndOr(D, B, A, C))
3477         return replaceInstUsesWith(I, V);
3478       if (Value *V = matchSelectFromAndOr(D, B, C, A))
3479         return replaceInstUsesWith(I, V);
3480     }
3481   }
3482 
3483   if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
3484       match(Op1, m_Not(m_Or(m_Value(B), m_Value(D)))) &&
3485       (Op0->hasOneUse() || Op1->hasOneUse())) {
3486     // (Cond & C) | ~(Cond | D) -> Cond ? C : ~D
3487     if (Value *V = matchSelectFromAndOr(A, C, B, D, true))
3488       return replaceInstUsesWith(I, V);
3489     if (Value *V = matchSelectFromAndOr(A, C, D, B, true))
3490       return replaceInstUsesWith(I, V);
3491     if (Value *V = matchSelectFromAndOr(C, A, B, D, true))
3492       return replaceInstUsesWith(I, V);
3493     if (Value *V = matchSelectFromAndOr(C, A, D, B, true))
3494       return replaceInstUsesWith(I, V);
3495   }
3496 
3497   // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
3498   if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
3499     if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
3500       return BinaryOperator::CreateOr(Op0, C);
3501 
3502   // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C
3503   if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
3504     if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
3505       return BinaryOperator::CreateOr(Op1, C);
3506 
3507   // ((A & B) ^ C) | B -> C | B
3508   if (match(Op0, m_c_Xor(m_c_And(m_Value(A), m_Specific(Op1)), m_Value(C))))
3509     return BinaryOperator::CreateOr(C, Op1);
3510 
3511   // B | ((A & B) ^ C) -> B | C
3512   if (match(Op1, m_c_Xor(m_c_And(m_Value(A), m_Specific(Op0)), m_Value(C))))
3513     return BinaryOperator::CreateOr(Op0, C);
3514 
3515   // ((B | C) & A) | B -> B | (A & C)
3516   if (match(Op0, m_c_And(m_c_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
3517     return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
3518 
3519   // B | ((B | C) & A) -> B | (A & C)
3520   if (match(Op1, m_c_And(m_c_Or(m_Specific(Op0), m_Value(C)), m_Value(A))))
3521     return BinaryOperator::CreateOr(Op0, Builder.CreateAnd(A, C));
3522 
3523   if (Instruction *DeMorgan = matchDeMorgansLaws(I, *this))
3524     return DeMorgan;
3525 
3526   // Canonicalize xor to the RHS.
3527   bool SwappedForXor = false;
3528   if (match(Op0, m_Xor(m_Value(), m_Value()))) {
3529     std::swap(Op0, Op1);
3530     SwappedForXor = true;
3531   }
3532 
3533   if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
3534     // (A | ?) | (A ^ B) --> (A | ?) | B
3535     // (B | ?) | (A ^ B) --> (B | ?) | A
3536     if (match(Op0, m_c_Or(m_Specific(A), m_Value())))
3537       return BinaryOperator::CreateOr(Op0, B);
3538     if (match(Op0, m_c_Or(m_Specific(B), m_Value())))
3539       return BinaryOperator::CreateOr(Op0, A);
3540 
3541     // (A & B) | (A ^ B) --> A | B
3542     // (B & A) | (A ^ B) --> A | B
3543     if (match(Op0, m_And(m_Specific(A), m_Specific(B))) ||
3544         match(Op0, m_And(m_Specific(B), m_Specific(A))))
3545       return BinaryOperator::CreateOr(A, B);
3546 
3547     // ~A | (A ^ B) --> ~(A & B)
3548     // ~B | (A ^ B) --> ~(A & B)
3549     // The swap above should always make Op0 the 'not'.
3550     if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3551         (match(Op0, m_Not(m_Specific(A))) || match(Op0, m_Not(m_Specific(B)))))
3552       return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
3553 
3554     // Same as above, but peek through an 'and' to the common operand:
3555     // ~(A & ?) | (A ^ B) --> ~((A & ?) & B)
3556     // ~(B & ?) | (A ^ B) --> ~((B & ?) & A)
3557     Instruction *And;
3558     if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3559         match(Op0, m_Not(m_CombineAnd(m_Instruction(And),
3560                                       m_c_And(m_Specific(A), m_Value())))))
3561       return BinaryOperator::CreateNot(Builder.CreateAnd(And, B));
3562     if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3563         match(Op0, m_Not(m_CombineAnd(m_Instruction(And),
3564                                       m_c_And(m_Specific(B), m_Value())))))
3565       return BinaryOperator::CreateNot(Builder.CreateAnd(And, A));
3566 
3567     // (~A | C) | (A ^ B) --> ~(A & B) | C
3568     // (~B | C) | (A ^ B) --> ~(A & B) | C
3569     if (Op0->hasOneUse() && Op1->hasOneUse() &&
3570         (match(Op0, m_c_Or(m_Not(m_Specific(A)), m_Value(C))) ||
3571          match(Op0, m_c_Or(m_Not(m_Specific(B)), m_Value(C))))) {
3572       Value *Nand = Builder.CreateNot(Builder.CreateAnd(A, B), "nand");
3573       return BinaryOperator::CreateOr(Nand, C);
3574     }
3575 
3576     // A | (~A ^ B) --> ~B | A
3577     // B | (A ^ ~B) --> ~A | B
3578     if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
3579       Value *NotB = Builder.CreateNot(B, B->getName() + ".not");
3580       return BinaryOperator::CreateOr(NotB, Op0);
3581     }
3582     if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
3583       Value *NotA = Builder.CreateNot(A, A->getName() + ".not");
3584       return BinaryOperator::CreateOr(NotA, Op0);
3585     }
3586   }
3587 
3588   // A | ~(A | B) -> A | ~B
3589   // A | ~(A ^ B) -> A | ~B
3590   if (match(Op1, m_Not(m_Value(A))))
3591     if (BinaryOperator *B = dyn_cast<BinaryOperator>(A))
3592       if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) &&
3593           Op1->hasOneUse() && (B->getOpcode() == Instruction::Or ||
3594                                B->getOpcode() == Instruction::Xor)) {
3595         Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
3596                                                  B->getOperand(0);
3597         Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not");
3598         return BinaryOperator::CreateOr(Not, Op0);
3599       }
3600 
3601   if (SwappedForXor)
3602     std::swap(Op0, Op1);
3603 
3604   {
3605     ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
3606     ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
3607     if (LHS && RHS)
3608       if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ false))
3609         return replaceInstUsesWith(I, Res);
3610 
3611     // TODO: Make this recursive; it's a little tricky because an arbitrary
3612     // number of 'or' instructions might have to be created.
3613     Value *X, *Y;
3614     if (LHS && match(Op1, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
3615       bool IsLogical = isa<SelectInst>(Op1);
3616       // LHS | (X || Y) --> (LHS || X) || Y
3617       if (auto *Cmp = dyn_cast<ICmpInst>(X))
3618         if (Value *Res =
3619                 foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false, IsLogical))
3620           return replaceInstUsesWith(I, IsLogical
3621                                             ? Builder.CreateLogicalOr(Res, Y)
3622                                             : Builder.CreateOr(Res, Y));
3623       // LHS | (X || Y) --> X || (LHS | Y)
3624       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
3625         if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false,
3626                                           /* IsLogical */ false))
3627           return replaceInstUsesWith(I, IsLogical
3628                                             ? Builder.CreateLogicalOr(X, Res)
3629                                             : Builder.CreateOr(X, Res));
3630     }
3631     if (RHS && match(Op0, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
3632       bool IsLogical = isa<SelectInst>(Op0);
3633       // (X || Y) | RHS --> (X || RHS) || Y
3634       if (auto *Cmp = dyn_cast<ICmpInst>(X))
3635         if (Value *Res =
3636                 foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false, IsLogical))
3637           return replaceInstUsesWith(I, IsLogical
3638                                             ? Builder.CreateLogicalOr(Res, Y)
3639                                             : Builder.CreateOr(Res, Y));
3640       // (X || Y) | RHS --> X || (Y | RHS)
3641       if (auto *Cmp = dyn_cast<ICmpInst>(Y))
3642         if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false,
3643                                           /* IsLogical */ false))
3644           return replaceInstUsesWith(I, IsLogical
3645                                             ? Builder.CreateLogicalOr(X, Res)
3646                                             : Builder.CreateOr(X, Res));
3647     }
3648   }
3649 
3650   if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
3651     if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
3652       if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ false))
3653         return replaceInstUsesWith(I, Res);
3654 
3655   if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
3656     return FoldedFCmps;
3657 
3658   if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
3659     return CastedOr;
3660 
3661   if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
3662     return Sel;
3663 
3664   // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
3665   // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
3666   //       with binop identity constant. But creating a select with non-constant
3667   //       arm may not be reversible due to poison semantics. Is that a good
3668   //       canonicalization?
3669   if (match(&I, m_c_Or(m_OneUse(m_SExt(m_Value(A))), m_Value(B))) &&
3670       A->getType()->isIntOrIntVectorTy(1))
3671     return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), B);
3672 
3673   // Note: If we've gotten to the point of visiting the outer OR, then the
3674   // inner one couldn't be simplified.  If it was a constant, then it won't
3675   // be simplified by a later pass either, so we try swapping the inner/outer
3676   // ORs in the hopes that we'll be able to simplify it this way.
3677   // (X|C) | V --> (X|V) | C
3678   ConstantInt *CI;
3679   if (Op0->hasOneUse() && !match(Op1, m_ConstantInt()) &&
3680       match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) {
3681     Value *Inner = Builder.CreateOr(A, Op1);
3682     Inner->takeName(Op0);
3683     return BinaryOperator::CreateOr(Inner, CI);
3684   }
3685 
3686   // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
3687   // Since this OR statement hasn't been optimized further yet, we hope
3688   // that this transformation will allow the new ORs to be optimized.
3689   {
3690     Value *X = nullptr, *Y = nullptr;
3691     if (Op0->hasOneUse() && Op1->hasOneUse() &&
3692         match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
3693         match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
3694       Value *orTrue = Builder.CreateOr(A, C);
3695       Value *orFalse = Builder.CreateOr(B, D);
3696       return SelectInst::Create(X, orTrue, orFalse);
3697     }
3698   }
3699 
3700   // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X)  --> X s> Y ? -1 : X.
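       // The ashr broadcasts the sign bit of the no-wrap difference: when X s> Y
       // the mask is all-ones and the 'or' yields -1; otherwise the mask is zero
       // and the 'or' yields X unchanged.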
3701   {
3702     Value *X, *Y;
3703     if (match(&I, m_c_Or(m_OneUse(m_AShr(
3704                              m_NSWSub(m_Value(Y), m_Value(X)),
3705                              m_SpecificInt(Ty->getScalarSizeInBits() - 1))),
3706                          m_Deferred(X)))) {
3707       Value *NewICmpInst = Builder.CreateICmpSGT(X, Y);
3708       Value *AllOnes = ConstantInt::getAllOnesValue(Ty);
3709       return SelectInst::Create(NewICmpInst, AllOnes, X);
3710     }
3711   }
3712 
3713   {
3714     // ((A & B) ^ A) | ((A & B) ^ B) -> A ^ B
3715     // (A ^ (A & B)) | (B ^ (A & B)) -> A ^ B
3716     // ((A & B) ^ B) | ((A & B) ^ A) -> A ^ B
3717     // (B ^ (A & B)) | (A ^ (A & B)) -> A ^ B
3718     const auto TryXorOpt = [&](Value *Lhs, Value *Rhs) -> Instruction * {
3719       if (match(Lhs, m_c_Xor(m_And(m_Value(A), m_Value(B)), m_Deferred(A))) &&
3720           match(Rhs,
3721                 m_c_Xor(m_And(m_Specific(A), m_Specific(B)), m_Deferred(B)))) {
3722         return BinaryOperator::CreateXor(A, B);
3723       }
3724       return nullptr;
3725     };
3726 
3727     if (Instruction *Result = TryXorOpt(Op0, Op1))
3728       return Result;
3729     if (Instruction *Result = TryXorOpt(Op1, Op0))
3730       return Result;
3731   }
3732 
3733   if (Instruction *V =
3734           canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
3735     return V;
3736 
3737   CmpInst::Predicate Pred;
3738   Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv;
3739   // Check if the OR weakens the overflow condition for umul.with.overflow by
3740   // treating any non-zero result as overflow. In that case, the combined
3741   // condition is true iff both umul.with.overflow operands are != 0, since for
3742   // non-zero operands the result can only be 0 if the multiplication overflows.
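       // Illustrative IR (value names hypothetical):
       //   %wo  = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
       //   %mul = extractvalue { i32, i1 } %wo, 0
       //   %ov  = extractvalue { i32, i1 } %wo, 1
       //   %nz  = icmp ne i32 %mul, 0
       //   %or  = or i1 %ov, %nz
       // folds to: and i1 (icmp ne i32 %a, 0), (icmp ne i32 %b, 0)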
3743   if (match(&I,
3744             m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_Value(UMulWithOv)),
3745                                 m_Value(Ov)),
3746                    m_CombineAnd(m_ICmp(Pred,
3747                                        m_CombineAnd(m_ExtractValue<0>(
3748                                                         m_Deferred(UMulWithOv)),
3749                                                     m_Value(Mul)),
3750                                        m_ZeroInt()),
3751                                 m_Value(MulIsNotZero)))) &&
3752       (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse())) &&
3753       Pred == CmpInst::ICMP_NE) {
3754     Value *A, *B;
3755     if (match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>(
3756                               m_Value(A), m_Value(B)))) {
3757       Value *NotNullA = Builder.CreateIsNotNull(A);
3758       Value *NotNullB = Builder.CreateIsNotNull(B);
3759       return BinaryOperator::CreateAnd(NotNullA, NotNullB);
3760     }
3761   }
3762 
3763   /// Res, Overflow = xxx_with_overflow X, C1
3764   /// Try to canonicalize the pattern "Overflow | icmp pred Res, C2" into
3765   /// "Overflow | icmp pred X, C2 +/- C1".
3766   const WithOverflowInst *WO;
3767   const Value *WOV;
3768   const APInt *C1, *C2;
3769   if (match(&I, m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_CombineAnd(
3770                                         m_WithOverflowInst(WO), m_Value(WOV))),
3771                                     m_Value(Ov)),
3772                        m_OneUse(m_ICmp(Pred, m_ExtractValue<0>(m_Deferred(WOV)),
3773                                        m_APInt(C2))))) &&
3774       (WO->getBinaryOp() == Instruction::Add ||
3775        WO->getBinaryOp() == Instruction::Sub) &&
3776       (ICmpInst::isEquality(Pred) ||
3777        WO->isSigned() == ICmpInst::isSigned(Pred)) &&
3778       match(WO->getRHS(), m_APInt(C1))) {
3779     bool Overflow;
3780     APInt NewC = WO->getBinaryOp() == Instruction::Add
3781                      ? (ICmpInst::isSigned(Pred) ? C2->ssub_ov(*C1, Overflow)
3782                                                  : C2->usub_ov(*C1, Overflow))
3783                      : (ICmpInst::isSigned(Pred) ? C2->sadd_ov(*C1, Overflow)
3784                                                  : C2->uadd_ov(*C1, Overflow));
3785     if (!Overflow || ICmpInst::isEquality(Pred)) {
3786       Value *NewCmp = Builder.CreateICmp(
3787           Pred, WO->getLHS(), ConstantInt::get(WO->getLHS()->getType(), NewC));
3788       return BinaryOperator::CreateOr(Ov, NewCmp);
3789     }
3790   }
3791 
3792   // (~x) | y  -->  ~(x & (~y))  iff that gets rid of inversions
3793   if (sinkNotIntoOtherHandOfLogicalOp(I))
3794     return &I;
3795 
3796   // Improve "get low bit mask up to and including bit X" pattern:
3797   //   (1 << X) | ((1 << X) + -1)  -->  -1 l>> (bitwidth(x) - 1 - X)
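       // e.g. (i8, X = 3): 0b00001000 | 0b00000111 == 0x0F == 0xFF l>> (7 - 3)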
3798   if (match(&I, m_c_Or(m_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()),
3799                        m_Shl(m_One(), m_Deferred(X)))) &&
3800       match(&I, m_c_Or(m_OneUse(m_Value()), m_Value()))) {
3801     Value *Sub = Builder.CreateSub(
3802         ConstantInt::get(Ty, Ty->getScalarSizeInBits() - 1), X);
3803     return BinaryOperator::CreateLShr(Constant::getAllOnesValue(Ty), Sub);
3804   }
3805 
3806   // An or recurrence w/ a loop-invariant step is equivalent to (or start, step).
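       // e.g. %a = phi [%start, ...], [%a.next, ...]; %a.next = or %a, %step
       // always computes (or %start, %step), because or is idempotent.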
3807   PHINode *PN = nullptr;
3808   Value *Start = nullptr, *Step = nullptr;
3809   if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
3810     return replaceInstUsesWith(I, Builder.CreateOr(Start, Step));
3811 
3812   // (A & B) | (C | D) or (C | D) | (A & B)
3813   // Can be combined if C or D is of type (A/B & X)
3814   if (match(&I, m_c_Or(m_OneUse(m_And(m_Value(A), m_Value(B))),
3815                        m_OneUse(m_Or(m_Value(C), m_Value(D)))))) {
3816     // (A & B) | (C | (A & X)) -> C | ((A & X) | (A & B))
3817     // (A & B) | (C | (X & A)) -> C | ((X & A) | (A & B))
3818     // (A & B) | (C | (B & X)) -> C | ((B & X) | (A & B))
3819     // (A & B) | (C | (X & B)) -> C | ((X & B) | (A & B))
3820     // (C | (A & X)) | (A & B) -> C | ((A & X) | (A & B))
3821     // (C | (X & A)) | (A & B) -> C | ((X & A) | (A & B))
3822     // (C | (B & X)) | (A & B) -> C | ((B & X) | (A & B))
3823     // (C | (X & B)) | (A & B) -> C | ((X & B) | (A & B))
3824     if (match(D, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
3825         match(D, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
3826       return BinaryOperator::CreateOr(
3827           C, Builder.CreateOr(D, Builder.CreateAnd(A, B)));
3828     // (A & B) | ((A & X) | D) -> ((A & X) | (A & B)) | D
3829     // (A & B) | ((X & A) | D) -> ((X & A) | (A & B)) | D
3830     // (A & B) | ((B & X) | D) -> ((B & X) | (A & B)) | D
3831     // (A & B) | ((X & B) | D) -> ((X & B) | (A & B)) | D
3832     // ((A & X) | D) | (A & B) -> ((A & X) | (A & B)) | D
3833     // ((X & A) | D) | (A & B) -> ((X & A) | (A & B)) | D
3834     // ((B & X) | D) | (A & B) -> ((B & X) | (A & B)) | D
3835     // ((X & B) | D) | (A & B) -> ((X & B) | (A & B)) | D
3836     if (match(C, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
3837         match(C, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
3838       return BinaryOperator::CreateOr(
3839           Builder.CreateOr(C, Builder.CreateAnd(A, B)), D);
3840   }
3841 
3842   if (Instruction *R = reassociateForUses(I, Builder))
3843     return R;
3844 
3845   if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
3846     return Canonicalized;
3847 
3848   if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
3849     return Folded;
3850 
3851   if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
3852     return Res;
3853 
3854   // If we are setting the sign bit of a floating-point value, convert
3855   // this to fneg(fabs), then cast back to integer.
3856   //
3857   // If the result isn't immediately cast back to a float, this will increase
3858   // the number of instructions. This is still probably a better canonical form
3859   // as it enables FP value tracking.
3860   //
3861   // Assumes any IEEE-represented type has the sign bit in the high bit.
3862   //
3863   // This is a generous interpretation of noimplicitfloat; this is not a true
3864   // floating-point operation.
3865   Value *CastOp;
3866   if (match(Op0, m_BitCast(m_Value(CastOp))) && match(Op1, m_SignMask()) &&
3867       !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
3868           Attribute::NoImplicitFloat)) {
3869     Type *EltTy = CastOp->getType()->getScalarType();
3870     if (EltTy->isFloatingPointTy() && EltTy->isIEEE() &&
3871         EltTy->getPrimitiveSizeInBits() ==
3872         I.getType()->getScalarType()->getPrimitiveSizeInBits()) {
3873       Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
3874       Value *FNegFAbs = Builder.CreateFNeg(FAbs);
3875       return new BitCastInst(FNegFAbs, I.getType());
3876     }
3877   }
3878 
3879   // (X & C1) | C2 -> X & (C1 | C2) iff (X & C2) == C2
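       // e.g. if X is known to have its low bit set:
       //   (X & 0xF0) | 0x01 --> X & 0xF1, since X already supplies the 0x01.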
3880   if (match(Op0, m_OneUse(m_And(m_Value(X), m_APInt(C1)))) &&
3881       match(Op1, m_APInt(C2))) {
3882     KnownBits KnownX = computeKnownBits(X, /*Depth*/ 0, &I);
3883     if ((KnownX.One & *C2) == *C2)
3884       return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *C1 | *C2));
3885   }
3886 
3887   return nullptr;
3888 }
3889 
3890 /// A ^ B can be specified using other logic ops in a variety of patterns. We
3891 /// can fold these early and efficiently by morphing an existing instruction.
3892 static Instruction *foldXorToXor(BinaryOperator &I,
3893                                  InstCombiner::BuilderTy &Builder) {
3894   assert(I.getOpcode() == Instruction::Xor);
3895   Value *Op0 = I.getOperand(0);
3896   Value *Op1 = I.getOperand(1);
3897   Value *A, *B;
3898 
3899   // There are 4 commuted variants for each of the basic patterns.
3900 
3901   // (A & B) ^ (A | B) -> A ^ B
3902   // (A & B) ^ (B | A) -> A ^ B
3903   // (A | B) ^ (A & B) -> A ^ B
3904   // (A | B) ^ (B & A) -> A ^ B
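       // (Per bit: if A == B, both sides are equal and the xor is 0; if they
       // differ, the and is 0 and the or is 1. Both results match A ^ B.)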
3905   if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)),
3906                         m_c_Or(m_Deferred(A), m_Deferred(B)))))
3907     return BinaryOperator::CreateXor(A, B);
3908 
3909   // (A | ~B) ^ (~A | B) -> A ^ B
3910   // (~B | A) ^ (~A | B) -> A ^ B
3911   // (~A | B) ^ (A | ~B) -> A ^ B
3912   // (B | ~A) ^ (A | ~B) -> A ^ B
3913   if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))),
3914                       m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
3915     return BinaryOperator::CreateXor(A, B);
3916 
3917   // (A & ~B) ^ (~A & B) -> A ^ B
3918   // (~B & A) ^ (~A & B) -> A ^ B
3919   // (~A & B) ^ (A & ~B) -> A ^ B
3920   // (B & ~A) ^ (A & ~B) -> A ^ B
3921   if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))),
3922                       m_c_And(m_Not(m_Deferred(A)), m_Deferred(B)))))
3923     return BinaryOperator::CreateXor(A, B);
3924 
3925   // For the remaining cases we need to get rid of one of the operands.
3926   if (!Op0->hasOneUse() && !Op1->hasOneUse())
3927     return nullptr;
3928 
3929   // (A | B) ^ ~(A & B) -> ~(A ^ B)
3930   // (A | B) ^ ~(B & A) -> ~(A ^ B)
3931   // (A & B) ^ ~(A | B) -> ~(A ^ B)
3932   // (A & B) ^ ~(B | A) -> ~(A ^ B)
3933   // Complexity sorting ensures the not will be on the right side.
3934   if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
3935        match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
3936       (match(Op0, m_And(m_Value(A), m_Value(B))) &&
3937        match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
3938     return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
3939 
3940   return nullptr;
3941 }
3942 
3943 Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
3944                                         BinaryOperator &I) {
3945   assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
3946          I.getOperand(1) == RHS && "Should be 'xor' with these operands");
3947 
3948   ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
3949   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
3950   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
3951 
3952   if (predicatesFoldable(PredL, PredR)) {
3953     if (LHS0 == RHS1 && LHS1 == RHS0) {
3954       std::swap(LHS0, LHS1);
3955       PredL = ICmpInst::getSwappedPredicate(PredL);
3956     }
3957     if (LHS0 == RHS0 && LHS1 == RHS1) {
3958       // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
3959       unsigned Code = getICmpCode(PredL) ^ getICmpCode(PredR);
3960       bool IsSigned = LHS->isSigned() || RHS->isSigned();
3961       return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
3962     }
3963   }
3964 
3965   // TODO: This can be generalized to compares of non-signbits using
3966   // decomposeBitTestICmp(). It could be enhanced more by using (something like)
3967   // foldLogOpOfMaskedICmps().
3968   const APInt *LC, *RC;
3969   if (match(LHS1, m_APInt(LC)) && match(RHS1, m_APInt(RC)) &&
3970       LHS0->getType() == RHS0->getType() &&
3971       LHS0->getType()->isIntOrIntVectorTy()) {
3972     // Convert xor of signbit tests to signbit test of xor'd values:
3973     // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
3974     // (X <  0) ^ (Y <  0) --> (X ^ Y) < 0
3975     // (X > -1) ^ (Y <  0) --> (X ^ Y) > -1
3976     // (X <  0) ^ (Y > -1) --> (X ^ Y) > -1
3977     bool TrueIfSignedL, TrueIfSignedR;
3978     if ((LHS->hasOneUse() || RHS->hasOneUse()) &&
3979         isSignBitCheck(PredL, *LC, TrueIfSignedL) &&
3980         isSignBitCheck(PredR, *RC, TrueIfSignedR)) {
3981       Value *XorLR = Builder.CreateXor(LHS0, RHS0);
3982       return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(XorLR) :
3983                                               Builder.CreateIsNotNeg(XorLR);
3984     }
3985 
3986     // Fold (icmp pred1 X, C1) ^ (icmp pred2 X, C2)
3987     // into a single comparison using range-based reasoning.
3988     if (LHS0 == RHS0) {
3989       ConstantRange CR1 = ConstantRange::makeExactICmpRegion(PredL, *LC);
3990       ConstantRange CR2 = ConstantRange::makeExactICmpRegion(PredR, *RC);
3991       auto CRUnion = CR1.exactUnionWith(CR2);
3992       auto CRIntersect = CR1.exactIntersectWith(CR2);
3993       if (CRUnion && CRIntersect)
3994         if (auto CR = CRUnion->exactIntersectWith(CRIntersect->inverse())) {
3995           if (CR->isFullSet())
3996             return ConstantInt::getTrue(I.getType());
3997           if (CR->isEmptySet())
3998             return ConstantInt::getFalse(I.getType());
3999 
4000           CmpInst::Predicate NewPred;
4001           APInt NewC, Offset;
4002           CR->getEquivalentICmp(NewPred, NewC, Offset);
4003 
4004           if ((Offset.isZero() && (LHS->hasOneUse() || RHS->hasOneUse())) ||
4005               (LHS->hasOneUse() && RHS->hasOneUse())) {
4006             Value *NewV = LHS0;
4007             Type *Ty = LHS0->getType();
4008             if (!Offset.isZero())
4009               NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
4010             return Builder.CreateICmp(NewPred, NewV,
4011                                       ConstantInt::get(Ty, NewC));
4012           }
4013         }
4014     }
4015   }
4016 
4017   // Instead of trying to imitate the folds for and/or, decompose this 'xor'
4018   // into those logic ops. That is, try to turn this into an and-of-icmps
4019   // because we have many folds for that pattern.
4020   //
4021   // This is based on a truth table definition of xor:
4022   // X ^ Y --> (X | Y) & !(X & Y)
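       // (Per bit: the xor is 1 iff at least one input is 1 but not both.)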
4023   if (Value *OrICmp = simplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
4024     // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
4025     // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
4026     if (Value *AndICmp = simplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
4027       // TODO: Independently handle cases where the 'and' side is a constant.
4028       ICmpInst *X = nullptr, *Y = nullptr;
4029       if (OrICmp == LHS && AndICmp == RHS) {
4030         // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS  --> X & !Y
4031         X = LHS;
4032         Y = RHS;
4033       }
4034       if (OrICmp == RHS && AndICmp == LHS) {
4035         // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS  --> !Y & X
4036         X = RHS;
4037         Y = LHS;
4038       }
4039       if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) {
4040         // Invert the predicate of 'Y', thus inverting its output.
4041         Y->setPredicate(Y->getInversePredicate());
4042         // So, are there other uses of Y?
4043         if (!Y->hasOneUse()) {
4044           // We need to adapt other uses of Y though. Get a value that matches
4045           // the original value of Y before inversion. While this increases
4046           // immediate instruction count, we have just ensured that all the
4047           // users are freely-invertible, so that 'not' *will* get folded away.
4048           BuilderTy::InsertPointGuard Guard(Builder);
4049           // Set insertion point to right after the Y.
4050           Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
4051           Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
4052           // Replace all uses of Y (excluding the one in NotY!) with NotY.
4053           Worklist.pushUsersToWorkList(*Y);
4054           Y->replaceUsesWithIf(NotY,
4055                                [NotY](Use &U) { return U.getUser() != NotY; });
4056         }
4057         // All done.
4058         return Builder.CreateAnd(LHS, RHS);
4059       }
4060     }
4061   }
4062 
4063   return nullptr;
4064 }
4065 
4066 /// If we have a masked merge, in the canonical form of:
4067 /// (assuming that A only has one use.)
4068 ///   |        A  |  |B|
4069 ///   ((x ^ y) & M) ^ y
4070 ///    |  D  |
4071 /// * If M is inverted:
4072 ///      |  D  |
4073 ///     ((x ^ y) & ~M) ^ y
4074 ///   We can canonicalize by swapping the final xor operand
4075 ///   to eliminate the 'not' of the mask.
4076 ///     ((x ^ y) & M) ^ x
4077 /// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
4078 ///   because that shortens the dependency chain and improves analysis:
4079 ///     (x & M) | (y & ~M)
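     /// e.g. with M == 0x0F: ((x ^ y) & 0x0F) ^ y keeps x's low nibble and
     /// y's high nibble, i.e. (x & 0x0F) | (y & 0xF0).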
4080 static Instruction *visitMaskedMerge(BinaryOperator &I,
4081                                      InstCombiner::BuilderTy &Builder) {
4082   Value *B, *X, *D;
4083   Value *M;
4084   if (!match(&I, m_c_Xor(m_Value(B),
4085                          m_OneUse(m_c_And(
4086                              m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)),
4087                                           m_Value(D)),
4088                              m_Value(M))))))
4089     return nullptr;
4090 
4091   Value *NotM;
4092   if (match(M, m_Not(m_Value(NotM)))) {
4093     // De-invert the mask and swap the value in B part.
4094     Value *NewA = Builder.CreateAnd(D, NotM);
4095     return BinaryOperator::CreateXor(NewA, X);
4096   }
4097 
4098   Constant *C;
4099   if (D->hasOneUse() && match(M, m_Constant(C))) {
4100     // Propagating undef is unsafe. Clamp undef elements to -1.
4101     Type *EltTy = C->getType()->getScalarType();
4102     C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy));
4103     // Unfold.
4104     Value *LHS = Builder.CreateAnd(X, C);
4105     Value *NotC = Builder.CreateNot(C);
4106     Value *RHS = Builder.CreateAnd(B, NotC);
4107     return BinaryOperator::CreateOr(LHS, RHS);
4108   }
4109 
4110   return nullptr;
4111 }
4112 
4113 static Instruction *foldNotXor(BinaryOperator &I,
4114                                InstCombiner::BuilderTy &Builder) {
4115   Value *X, *Y;
4116   // FIXME: one-use check is not needed in general, but currently we are unable
4117   // to fold 'not' into 'icmp' if that 'icmp' has multiple uses. (D35182)
4118   if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y))))))
4119     return nullptr;
4120 
4121   auto hasCommonOperand = [](Value *A, Value *B, Value *C, Value *D) {
4122     return A == C || A == D || B == C || B == D;
4123   };
4124 
4125   Value *A, *B, *C, *D;
4126   // Canonicalize ~((A & B) ^ (A | ?)) -> (A & B) | ~(A | ?)
4127   // 4 commuted variants
4128   if (match(X, m_And(m_Value(A), m_Value(B))) &&
4129       match(Y, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) {
4130     Value *NotY = Builder.CreateNot(Y);
4131     return BinaryOperator::CreateOr(X, NotY);
4132   }
4133 
4134   // Canonicalize ~((A | ?) ^ (A & B)) -> (A & B) | ~(A | ?)
4135   // 4 commuted variants
4136   if (match(Y, m_And(m_Value(A), m_Value(B))) &&
4137       match(X, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) {
4138     Value *NotX = Builder.CreateNot(X);
4139     return BinaryOperator::CreateOr(Y, NotX);
4140   }
4141 
4142   return nullptr;
4143 }
4144 
4145 /// Canonicalize a shifty way to code absolute value to the more common pattern
4146 /// that uses negation and select.
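     /// e.g. (i32): (A + (A >>s 31)) ^ (A >>s 31) --> (A s< 0) ? -A : A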
4147 static Instruction *canonicalizeAbs(BinaryOperator &Xor,
4148                                     InstCombiner::BuilderTy &Builder) {
4149   assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction.");
4150 
4151   // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
4152   // We're relying on the fact that we only do this transform when the shift has
4153   // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
4154   // instructions).
4155   Value *Op0 = Xor.getOperand(0), *Op1 = Xor.getOperand(1);
4156   if (Op0->hasNUses(2))
4157     std::swap(Op0, Op1);
4158 
4159   Type *Ty = Xor.getType();
4160   Value *A;
4161   const APInt *ShAmt;
4162   if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
4163       Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
4164       match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
4165     // Op1 = ashr i32 A, 31   ; smear the sign bit
4166     // xor (add A, Op1), Op1  ; add -1 and flip bits if negative
4167     // --> (A < 0) ? -A : A
4168     Value *IsNeg = Builder.CreateIsNeg(A);
4169     // Copy the nuw/nsw flags from the add to the negate.
4170     auto *Add = cast<BinaryOperator>(Op0);
4171     Value *NegA = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(),
4172                                    Add->hasNoSignedWrap());
4173     return SelectInst::Create(IsNeg, NegA, A);
4174   }
4175   return nullptr;
4176 }
4177 
4178 static bool canFreelyInvert(InstCombiner &IC, Value *Op,
4179                             Instruction *IgnoredUser) {
4180   auto *I = dyn_cast<Instruction>(Op);
4181   return I && IC.isFreeToInvert(I, /*WillInvertAllUses=*/true) &&
4182          IC.canFreelyInvertAllUsersOf(I, IgnoredUser);
4183 }
4184 
4185 static Value *freelyInvert(InstCombinerImpl &IC, Value *Op,
4186                            Instruction *IgnoredUser) {
4187   auto *I = cast<Instruction>(Op);
4188   IC.Builder.SetInsertPoint(*I->getInsertionPointAfterDef());
4189   Value *NotOp = IC.Builder.CreateNot(Op, Op->getName() + ".not");
4190   Op->replaceUsesWithIf(NotOp,
4191                         [NotOp](Use &U) { return U.getUser() != NotOp; });
4192   IC.freelyInvertAllUsersOf(NotOp, IgnoredUser);
4193   return NotOp;
4194 }
4195 
4196 // Transform
4197 //   z = ~(x &/| y)
4198 // into:
4199 //   z = ((~x) |/& (~y))
4200 // iff both x and y are free to invert and all uses of z can be freely updated.
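     // (This is De Morgan's law: ~(x & y) == ~x | ~y, and dually for 'or'.)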
4201 bool InstCombinerImpl::sinkNotIntoLogicalOp(Instruction &I) {
4202   Value *Op0, *Op1;
4203   if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
4204     return false;
4205 
4206   // If this logic op has not been simplified yet, just bail out and let that
4207   // happen first. Otherwise, the code below may wrongly invert.
4208   if (Op0 == Op1)
4209     return false;
4210 
4211   Instruction::BinaryOps NewOpc =
4212       match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
4213   bool IsBinaryOp = isa<BinaryOperator>(I);
4214 
4215   // Can our users be adapted?
4216   if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
4217     return false;
4218 
4219   // And can the operands be adapted?
4220   if (!canFreelyInvert(*this, Op0, &I) || !canFreelyInvert(*this, Op1, &I))
4221     return false;
4222 
4223   Op0 = freelyInvert(*this, Op0, &I);
4224   Op1 = freelyInvert(*this, Op1, &I);
4225 
4226   Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
4227   Value *NewLogicOp;
4228   if (IsBinaryOp)
4229     NewLogicOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
4230   else
4231     NewLogicOp =
4232         Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");
4233 
4234   replaceInstUsesWith(I, NewLogicOp);
4235   // We cannot just create an outer `not`: it would most likely be immediately
4236   // folded back, reconstructing our initial pattern and causing an infinite
4237   // combine loop, so we immediately fold it away manually instead.
4238   freelyInvertAllUsersOf(NewLogicOp);
4239   return true;
4240 }
4241 
4242 // Transform
4243 //   z = (~x) &/| y
4244 // into:
4245 //   z = ~(x |/& (~y))
4246 // iff y is free to invert and all uses of z can be freely updated.
4247 bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) {
4248   Value *Op0, *Op1;
4249   if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
4250     return false;
4251   Instruction::BinaryOps NewOpc =
4252       match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
4253   bool IsBinaryOp = isa<BinaryOperator>(I);
4254 
4255   Value *NotOp0 = nullptr;
4256   Value *NotOp1 = nullptr;
4257   Value **OpToInvert = nullptr;
4258   if (match(Op0, m_Not(m_Value(NotOp0))) && canFreelyInvert(*this, Op1, &I)) {
4259     Op0 = NotOp0;
4260     OpToInvert = &Op1;
4261   } else if (match(Op1, m_Not(m_Value(NotOp1))) &&
4262              canFreelyInvert(*this, Op0, &I)) {
4263     Op1 = NotOp1;
4264     OpToInvert = &Op0;
4265   } else
4266     return false;
4267 
4268   // And can our users be adapted?
4269   if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
4270     return false;
4271 
4272   *OpToInvert = freelyInvert(*this, *OpToInvert, &I);
4273 
4274   Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
4275   Value *NewBinOp;
4276   if (IsBinaryOp)
4277     NewBinOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
4278   else
4279     NewBinOp = Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");
4280   replaceInstUsesWith(I, NewBinOp);
4281   // We cannot just create an outer `not`: it would most likely be immediately
4282   // folded back, reconstructing our initial pattern and causing an infinite
4283   // combine loop, so we immediately fold it away manually instead.
4284   freelyInvertAllUsersOf(NewBinOp);
4285   return true;
4286 }
4287 
4288 Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
4289   Value *NotOp;
4290   if (!match(&I, m_Not(m_Value(NotOp))))
4291     return nullptr;
4292 
4293   // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
4294   // We must eliminate the and/or (one-use) for these transforms to not increase
4295   // the instruction count.
4296   //
4297   // ~(~X & Y) --> (X | ~Y)
4298   // ~(Y & ~X) --> (X | ~Y)
4299   //
4300   // Note: The logical matches do not check for the commuted patterns because
4301   //       those are handled via SimplifySelectsFeedingBinaryOp().
4302   Type *Ty = I.getType();
4303   Value *X, *Y;
4304   if (match(NotOp, m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y))))) {
4305     Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
4306     return BinaryOperator::CreateOr(X, NotY);
4307   }
4308   if (match(NotOp, m_OneUse(m_LogicalAnd(m_Not(m_Value(X)), m_Value(Y))))) {
4309     Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
4310     return SelectInst::Create(X, ConstantInt::getTrue(Ty), NotY);
4311   }
4312 
4313   // ~(~X | Y) --> (X & ~Y)
4314   // ~(Y | ~X) --> (X & ~Y)
4315   if (match(NotOp, m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y))))) {
4316     Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
4317     return BinaryOperator::CreateAnd(X, NotY);
4318   }
4319   if (match(NotOp, m_OneUse(m_LogicalOr(m_Not(m_Value(X)), m_Value(Y))))) {
4320     Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
4321     return SelectInst::Create(X, NotY, ConstantInt::getFalse(Ty));
4322   }
4323 
4324   // Is this a 'not' (~) fed by a binary operator?
4325   BinaryOperator *NotVal;
4326   if (match(NotOp, m_BinOp(NotVal))) {
4327     // ~((-X) | Y) --> (X - 1) & (~Y)
4328     if (match(NotVal,
4329               m_OneUse(m_c_Or(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))) {
4330       Value *DecX = Builder.CreateAdd(X, ConstantInt::getAllOnesValue(Ty));
4331       Value *NotY = Builder.CreateNot(Y);
4332       return BinaryOperator::CreateAnd(DecX, NotY);
4333     }
4334 
4335     // ~(~X >>s Y) --> (X >>s Y)
4336     if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
4337       return BinaryOperator::CreateAShr(X, Y);
4338 
4339     // Treat lshr with non-negative operand as ashr.
4340     // ~(~X >>u Y) --> (X >>s Y) iff X is known negative
4341     if (match(NotVal, m_LShr(m_Not(m_Value(X)), m_Value(Y))) &&
4342         isKnownNegative(X, SQ.getWithInstruction(NotVal)))
4343       return BinaryOperator::CreateAShr(X, Y);
4344 
4345     // Bit-hack form of a signbit test for iN type:
4346     // ~(X >>s (N - 1)) --> sext i1 (X > -1) to iN
4347     unsigned FullShift = Ty->getScalarSizeInBits() - 1;
4348     if (match(NotVal, m_OneUse(m_AShr(m_Value(X), m_SpecificInt(FullShift))))) {
4349       Value *IsNotNeg = Builder.CreateIsNotNeg(X, "isnotneg");
4350       return new SExtInst(IsNotNeg, Ty);
4351     }
4352 
4353     // If we are inverting a right-shifted constant, we may be able to eliminate
4354     // the 'not' by inverting the constant and using the opposite shift type.
4355     // Canonicalization rules ensure that only a negative constant uses 'ashr',
4356     // but we must check that in case that transform has not fired yet.
4357 
4358     // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
4359     Constant *C;
4360     if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) &&
4361         match(C, m_Negative())) {
4362       // We matched a negative constant, so propagating undef is unsafe.
4363       // Clamp undef elements to -1.
4364       Type *EltTy = Ty->getScalarType();
4365       C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy));
4366       return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y);
4367     }
4368 
4369     // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
4370     if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) &&
4371         match(C, m_NonNegative())) {
4372       // We matched a non-negative constant, so propagating undef is unsafe.
4373       // Clamp undef elements to 0.
4374       Type *EltTy = Ty->getScalarType();
4375       C = Constant::replaceUndefsWith(C, ConstantInt::getNullValue(EltTy));
4376       return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y);
4377     }
4378 
4379     // ~(X + C) --> ~C - X
4380     if (match(NotVal, m_c_Add(m_Value(X), m_ImmConstant(C))))
4381       return BinaryOperator::CreateSub(ConstantExpr::getNot(C), X);
4382 
4383     // ~(X - Y) --> ~X + Y
4384     // FIXME: is it really beneficial to sink the `not` here?
4385     if (match(NotVal, m_Sub(m_Value(X), m_Value(Y))))
4386       if (isa<Constant>(X) || NotVal->hasOneUse())
4387         return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y);
4388 
4389     // ~(~X + Y) --> X - Y
4390     if (match(NotVal, m_c_Add(m_Not(m_Value(X)), m_Value(Y))))
4391       return BinaryOperator::CreateWithCopiedFlags(Instruction::Sub, X, Y,
4392                                                    NotVal);
4393   }
4394 
4395   // not (cmp A, B) = !cmp A, B
4396   CmpInst::Predicate Pred;
4397   if (match(NotOp, m_Cmp(Pred, m_Value(), m_Value())) &&
4398       (NotOp->hasOneUse() ||
4399        InstCombiner::canFreelyInvertAllUsersOf(cast<Instruction>(NotOp),
4400                                                /*IgnoredUser=*/nullptr))) {
4401     cast<CmpInst>(NotOp)->setPredicate(CmpInst::getInversePredicate(Pred));
4402     freelyInvertAllUsersOf(NotOp);
4403     return &I;
4404   }
4405 
4406   // Move a 'not' ahead of casts of a bool to enable logic reduction:
4407   // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
4408   if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) &&
           X->getType()->isIntOrIntVectorTy(1)) {
4409     Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
4410     Value *NotX = Builder.CreateNot(X);
4411     Value *Sext = Builder.CreateSExt(NotX, SextTy);
4412     return CastInst::CreateBitOrPointerCast(Sext, Ty);
4413   }
4414 
4415   if (auto *NotOpI = dyn_cast<Instruction>(NotOp))
4416     if (sinkNotIntoLogicalOp(*NotOpI))
4417       return &I;
4418 
4419   // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
4420   // ~min(~X, ~Y) --> max(X, Y)
4421   // ~max(~X, Y) --> min(X, ~Y)
4422   auto *II = dyn_cast<IntrinsicInst>(NotOp);
4423   if (II && II->hasOneUse()) {
4424     if (match(NotOp, m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y)))) {
4425       Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
4426       Value *NotY = Builder.CreateNot(Y);
4427       Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotY);
4428       return replaceInstUsesWith(I, InvMaxMin);
4429     }
4430 
4431     if (II->getIntrinsicID() == Intrinsic::is_fpclass) {
4432       ConstantInt *ClassMask = cast<ConstantInt>(II->getArgOperand(1));
4433       II->setArgOperand(
4434           1, ConstantInt::get(ClassMask->getType(),
4435                               ~ClassMask->getZExtValue() & fcAllFlags));
4436       return replaceInstUsesWith(I, II);
4437     }
4438   }
4439 
4440   if (NotOp->hasOneUse()) {
4441     // Pull 'not' into operands of select if both operands are one-use compares
4442     // or one is one-use compare and the other one is a constant.
4443     // Inverting the predicates eliminates the 'not' operation.
4444     // Example:
4445     //   not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?) -->
4446     //     select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?)
4447     //   not (select ?, (cmp TPred, ?, ?), true -->
4448     //     select ?, (cmp InvTPred, ?, ?), false
4449     if (auto *Sel = dyn_cast<SelectInst>(NotOp)) {
4450       Value *TV = Sel->getTrueValue();
4451       Value *FV = Sel->getFalseValue();
4452       auto *CmpT = dyn_cast<CmpInst>(TV);
4453       auto *CmpF = dyn_cast<CmpInst>(FV);
4454       bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(TV);
4455       bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(FV);
4456       if (InvertibleT && InvertibleF) {
4457         if (CmpT)
4458           CmpT->setPredicate(CmpT->getInversePredicate());
4459         else
4460           Sel->setTrueValue(ConstantExpr::getNot(cast<Constant>(TV)));
4461         if (CmpF)
4462           CmpF->setPredicate(CmpF->getInversePredicate());
4463         else
4464           Sel->setFalseValue(ConstantExpr::getNot(cast<Constant>(FV)));
4465         return replaceInstUsesWith(I, Sel);
4466       }
4467     }
4468   }
4469 
4470   if (Instruction *NewXor = foldNotXor(I, Builder))
4471     return NewXor;
4472 
4473   // TODO: Could handle multi-use better by checking if all uses of NotOp (other
4474   // than I) can be inverted.
4475   if (Value *R = getFreelyInverted(NotOp, NotOp->hasOneUse(), &Builder))
4476     return replaceInstUsesWith(I, R);
4477 
4478   return nullptr;
4479 }
4480 
4481 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
4482 // here. We should standardize that construct where it is needed or choose some
4483 // other way to ensure that commutated variants of patterns are not missed.
4484 Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
4485   if (Value *V = simplifyXorInst(I.getOperand(0), I.getOperand(1),
4486                                  SQ.getWithInstruction(&I)))
4487     return replaceInstUsesWith(I, V);
4488 
4489   if (SimplifyAssociativeOrCommutative(I))
4490     return &I;
4491 
4492   if (Instruction *X = foldVectorBinop(I))
4493     return X;
4494 
4495   if (Instruction *Phi = foldBinopWithPhiOperands(I))
4496     return Phi;
4497 
4498   if (Instruction *NewXor = foldXorToXor(I, Builder))
4499     return NewXor;
4500 
4501   // (A&B)^(A&C) -> A&(B^C) etc
4502   if (Value *V = foldUsingDistributiveLaws(I))
4503     return replaceInstUsesWith(I, V);
4504 
4505   // See if we can simplify any instructions used by the instruction whose sole
4506   // purpose is to compute bits we don't care about.
4507   if (SimplifyDemandedInstructionBits(I))
4508     return &I;
4509 
4510   if (Value *V = SimplifyBSwap(I, Builder))
4511     return replaceInstUsesWith(I, V);
4512 
4513   if (Instruction *R = foldNot(I))
4514     return R;
4515 
4516   if (Instruction *R = foldBinOpShiftWithShift(I))
4517     return R;
4518 
4519   // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
4520   // This is a special case in haveNoCommonBitsSet, but the computeKnownBits
4521   // calls in there are unnecessary as SimplifyDemandedInstructionBits should
4522   // have already taken care of those cases.
4523   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4524   Value *M;
4525   if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
4526                         m_c_And(m_Deferred(M), m_Value()))))
4527     return BinaryOperator::CreateDisjointOr(Op0, Op1);
4528 
4529   if (Instruction *Xor = visitMaskedMerge(I, Builder))
4530     return Xor;
4531 
4532   Value *X, *Y;
4533   Constant *C1;
4534   if (match(Op1, m_Constant(C1))) {
4535     Constant *C2;
4536 
4537     if (match(Op0, m_OneUse(m_Or(m_Value(X), m_ImmConstant(C2)))) &&
4538         match(C1, m_ImmConstant())) {
4539       // (X | C2) ^ C1 --> (X & ~C2) ^ (C1^C2)
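           // (Where C2 is 1, the or forces a 1 and the xor then yields ~C1;
           // masking those bits out of X and xor'ing in C1^C2 matches that.)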
4540       C2 = Constant::replaceUndefsWith(
4541           C2, Constant::getAllOnesValue(C2->getType()->getScalarType()));
4542       Value *And = Builder.CreateAnd(
4543           X, Constant::mergeUndefsWith(ConstantExpr::getNot(C2), C1));
4544       return BinaryOperator::CreateXor(
4545           And, Constant::mergeUndefsWith(ConstantExpr::getXor(C1, C2), C1));
4546     }
4547 
4548     // Use DeMorgan and reassociation to eliminate a 'not' op.
4549     if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) {
4550       // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
4551       Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2));
4552       return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1));
4553     }
4554     if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) {
4555       // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
4556       Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2));
4557       return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1));
4558     }
4559 
4560     // Convert xor ([trunc] (ashr X, BW-1)), C =>
4561     //   select(X >s -1, C, ~C)
4562     // The ashr creates an all-zeros or all-ones value, which then optionally
4563     // inverts the constant depending on whether the input is less than 0.
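         // e.g. (i32, C = 42): X >>s 31 is 0 or -1, so the xor yields 42 for
         // non-negative X and ~42 for negative X.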
4564     const APInt *CA;
4565     if (match(Op0, m_OneUse(m_TruncOrSelf(
4566                        m_AShr(m_Value(X), m_APIntAllowUndef(CA))))) &&
4567         *CA == X->getType()->getScalarSizeInBits() - 1 &&
4568         !match(C1, m_AllOnes())) {
4569       assert(!C1->isZeroValue() && "Unexpected xor with 0");
4570       Value *IsNotNeg = Builder.CreateIsNotNeg(X);
4571       return SelectInst::Create(IsNotNeg, Op1, Builder.CreateNot(Op1));
4572     }
4573   }
4574 
4575   Type *Ty = I.getType();
4576   {
4577     const APInt *RHSC;
4578     if (match(Op1, m_APInt(RHSC))) {
4579       Value *X;
4580       const APInt *C;
4581       // (C - X) ^ signmaskC --> (C + signmaskC) - X
4582       if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X))))
4583         return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C + *RHSC), X);
4584 
4585       // (X + C) ^ signmaskC --> X + (C + signmaskC)
4586       if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C))))
4587         return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C + *RHSC));
4588 
4589       // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0
4590       if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
4591           MaskedValueIsZero(X, *C, 0, &I))
4592         return BinaryOperator::CreateXor(X, ConstantInt::get(Ty, *C ^ *RHSC));
4593 
4594       // When X is a power-of-two or zero and zero input is poison:
4595       // ctlz(i32 X) ^ 31 --> cttz(X)
4596       // cttz(i32 X) ^ 31 --> ctlz(X)
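           // e.g. X = 8: ctlz(8) == 28 and 28 ^ 31 == 3 == cttz(8); for
           // X == 2^k, ctlz gives 31 - k, and xor'ing with 31 recovers k.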
4597       auto *II = dyn_cast<IntrinsicInst>(Op0);
4598       if (II && II->hasOneUse() && *RHSC == Ty->getScalarSizeInBits() - 1) {
4599         Intrinsic::ID IID = II->getIntrinsicID();
4600         if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
4601             match(II->getArgOperand(1), m_One()) &&
4602             isKnownToBeAPowerOfTwo(II->getArgOperand(0), /*OrZero=*/true)) {
4603           IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
4604           Function *F = Intrinsic::getDeclaration(II->getModule(), IID, Ty);
4605           return CallInst::Create(F, {II->getArgOperand(0), Builder.getTrue()});
4606         }
4607       }
4608 
4609       // If RHSC is inverting the remaining bits of shifted X,
4610       // canonicalize to a 'not' before the shift to help SCEV and codegen:
4611       // (X << C) ^ RHSC --> ~X << C
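           // e.g. (i8, C = 4): RHSC must be 0xFF << 4 == 0xF0, and then
           // (X << 4) ^ 0xF0 == (~X) << 4: the xor flips exactly the bits
           // the shift kept, while the shifted-in zeros stay zero.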
4612       if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_APInt(C)))) &&
4613           *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).shl(*C)) {
4614         Value *NotX = Builder.CreateNot(X);
4615         return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *C));
4616       }
4617       // (X >>u C) ^ RHSC --> ~X >>u C
4618       if (match(Op0, m_OneUse(m_LShr(m_Value(X), m_APInt(C)))) &&
4619           *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).lshr(*C)) {
4620         Value *NotX = Builder.CreateNot(X);
4621         return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *C));
4622       }
4623       // TODO: We could handle 'ashr' here as well. That would be matching
4624       //       a 'not' op and moving it before the shift. Doing that requires
4625       //       preventing the inverse fold in canShiftBinOpWithConstantRHS().
4626     }
4627 
4628     // If we are XORing the sign bit of a floating-point value, convert
4629     // this to fneg, then cast back to integer.
4630     //
4631     // This is a generous interpretation of noimplicitfloat; this is not a
4632     // true floating-point operation.
4633     //
4634     // Assumes any IEEE-represented type has the sign bit in the high bit.
4635     // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
4636     Value *CastOp;
4637     if (match(Op0, m_BitCast(m_Value(CastOp))) && match(Op1, m_SignMask()) &&
4638         !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
4639             Attribute::NoImplicitFloat)) {
4640       Type *EltTy = CastOp->getType()->getScalarType();
4641       if (EltTy->isFloatingPointTy() && EltTy->isIEEE() &&
4642           EltTy->getPrimitiveSizeInBits() ==
4643           I.getType()->getScalarType()->getPrimitiveSizeInBits()) {
4644         Value *FNeg = Builder.CreateFNeg(CastOp);
4645         return new BitCastInst(FNeg, I.getType());
4646       }
4647     }
4648   }
4649 
4650   // FIXME: This should not be limited to scalar (pull into APInt match above).
4651   {
4652     Value *X;
4653     ConstantInt *C1, *C2, *C3;
4654     // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
4655     if (match(Op1, m_ConstantInt(C3)) &&
4656         match(Op0, m_LShr(m_Xor(m_Value(X), m_ConstantInt(C1)),
4657                           m_ConstantInt(C2))) &&
4658         Op0->hasOneUse()) {
4659       // fold (C1 >> C2) ^ C3
4660       APInt FoldConst = C1->getValue().lshr(C2->getValue());
4661       FoldConst ^= C3->getValue();
4662       // Prepare the two operands.
4663       auto *Opnd0 = Builder.CreateLShr(X, C2);
4664       Opnd0->takeName(Op0);
4665       return BinaryOperator::CreateXor(Opnd0, ConstantInt::get(Ty, FoldConst));
4666     }
4667   }
4668 
4669   if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
4670     return FoldedLogic;
4671 
4672   // Y ^ (X | Y) --> X & ~Y
4673   // Y ^ (Y | X) --> X & ~Y
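       // (Per bit: where Y is 1, 1 ^ (X | 1) == 0; where Y is 0, the xor
       // passes X through. That is exactly X & ~Y.)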
4674   if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0)))))
4675     return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0));
4676   // (X | Y) ^ Y --> X & ~Y
4677   // (Y | X) ^ Y --> X & ~Y
4678   if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1)))))
4679     return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1));
4680 
4681   // Y ^ (X & Y) --> ~X & Y
4682   // Y ^ (Y & X) --> ~X & Y
4683   if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0)))))
4684     return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X));
4685   // (X & Y) ^ Y --> ~X & Y
4686   // (Y & X) ^ Y --> ~X & Y
4687   // Canonical form is (X & C) ^ C; don't touch that.
4688   // TODO: A 'not' op is better for analysis and codegen, but demanded bits must
4689   //       be fixed to prefer that (otherwise we get infinite looping).
4690   if (!match(Op1, m_Constant()) &&
4691       match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1)))))
4692     return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X));
4693 
4694   Value *A, *B, *C;
4695   // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
4696   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
4697                         m_OneUse(m_c_Or(m_Deferred(A), m_Value(C))))))
4698       return BinaryOperator::CreateXor(
4699           Builder.CreateAnd(Builder.CreateNot(A), C), B);
4700 
4701   // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
4702   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
4703                         m_OneUse(m_c_Or(m_Deferred(B), m_Value(C))))))
4704       return BinaryOperator::CreateXor(
4705           Builder.CreateAnd(Builder.CreateNot(B), C), A);
4706 
4707   // (A & B) ^ (A ^ B) -> (A | B)
4708   if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
4709       match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
4710     return BinaryOperator::CreateOr(A, B);
4711   // (A ^ B) ^ (A & B) -> (A | B)
4712   if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
4713       match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
4714     return BinaryOperator::CreateOr(A, B);
4715 
4716   // (A & ~B) ^ ~A -> ~(A & B)
4717   // (~B & A) ^ ~A -> ~(A & B)
4718   if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
4719       match(Op1, m_Not(m_Specific(A))))
4720     return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
4721 
4722   // (~A & B) ^ A --> A | B -- There are 4 commuted variants.
4723   if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(A)), m_Value(B)), m_Deferred(A))))
4724     return BinaryOperator::CreateOr(A, B);
4725 
4726   // (~A | B) ^ A --> ~(A & B)
4727   if (match(Op0, m_OneUse(m_c_Or(m_Not(m_Specific(Op1)), m_Value(B)))))
4728     return BinaryOperator::CreateNot(Builder.CreateAnd(Op1, B));
4729 
4730   // A ^ (~A | B) --> ~(A & B)
4731   if (match(Op1, m_OneUse(m_c_Or(m_Not(m_Specific(Op0)), m_Value(B)))))
4732     return BinaryOperator::CreateNot(Builder.CreateAnd(Op0, B));
4733 
4734   // (A | B) ^ (A | C) --> (B ^ C) & ~A -- There are 4 commuted variants.
4735   // TODO: Loosen one-use restriction if common operand is a constant.
4736   Value *D;
4737   if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B)))) &&
4738       match(Op1, m_OneUse(m_Or(m_Value(C), m_Value(D))))) {
4739     if (B == C || B == D)
4740       std::swap(A, B);
4741     if (A == C)
4742       std::swap(C, D);
4743     if (A == D) {
4744       Value *NotA = Builder.CreateNot(A);
4745       return BinaryOperator::CreateAnd(Builder.CreateXor(B, C), NotA);
4746     }
4747   }
4748 
4749   // (A & B) ^ (A | C) --> A ? ~B : C -- There are 4 commuted variants.
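       // (For i1: when A is true this is B ^ true == ~B; when A is false it
       // is false ^ C == C, i.e. select A, ~B, C.)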
4750   if (I.getType()->isIntOrIntVectorTy(1) &&
4751       match(Op0, m_OneUse(m_LogicalAnd(m_Value(A), m_Value(B)))) &&
4752       match(Op1, m_OneUse(m_LogicalOr(m_Value(C), m_Value(D))))) {
4753     bool NeedFreeze = isa<SelectInst>(Op0) && isa<SelectInst>(Op1) && B == D;
4754     if (B == C || B == D)
4755       std::swap(A, B);
4756     if (A == C)
4757       std::swap(C, D);
4758     if (A == D) {
4759       if (NeedFreeze)
4760         A = Builder.CreateFreeze(A);
4761       Value *NotB = Builder.CreateNot(B);
4762       return SelectInst::Create(A, NotB, C);
4763     }
4764   }
4765 
4766   if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
4767     if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
4768       if (Value *V = foldXorOfICmps(LHS, RHS, I))
4769         return replaceInstUsesWith(I, V);
4770 
4771   if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
4772     return CastedXor;
4773 
4774   if (Instruction *Abs = canonicalizeAbs(I, Builder))
4775     return Abs;
4776 
4777   // Otherwise, if all else failed, try to hoist the xor-by-constant:
4778   //   (X ^ C) ^ Y --> (X ^ Y) ^ C
4779   // Just like we do in other places, we completely avoid the fold
4780   // for constantexprs, at least to avoid endless combine loop.
4781   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_CombineAnd(m_Value(X),
4782                                                     m_Unless(m_ConstantExpr())),
4783                                        m_ImmConstant(C1))),
4784                         m_Value(Y))))
4785     return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);
4786 
4787   if (Instruction *R = reassociateForUses(I, Builder))
4788     return R;
4789 
4790   if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
4791     return Canonicalized;
4792 
4793   if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
4794     return Folded;
4795 
4796   if (Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(I))
4797     return Folded;
4798 
4799   if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
4800     return Res;
4801 
4802   return nullptr;
4803 }
4804