//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/FloatingPointPredicateUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// This is the complement of getICmpCode, which turns an opcode and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either an FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder, FMFSource FMF) {
  FCmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForFCmpCode(Code, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateFCmpFMF(NewPred, LHS, RHS, FMF);
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
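/// For example, with Lo = 5, Hi = 10, and Inside = true, the emitted IR is
/// expected to look like (an illustrative sketch, not taken from a test):
///   %V.off = sub i32 %V, 5
///   %r = icmp ult i32 %V.off, 5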
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V < Hi --> V < Hi
  // V < Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V < Hi --> V - Lo u< Hi - Lo
  // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A, i.e.,
/// all bits of A are set in B.
/// Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0, i.e.,
/// all bits of A are cleared in B.
/// Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C, where C may contain any combination of
/// one and zero bits.
/// Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in the above descriptions "==" should be replaced by "!=".
/// Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///   (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///   (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes = 1,
  AMask_NotAllOnes = 2,
  BMask_AllOnes = 4,
  BMask_NotAllOnes = 8,
  Mask_AllZeros = 16,
  Mask_NotAllZeros = 32,
  AMask_Mixed = 64,
  AMask_NotMixed = 128,
  BMask_Mixed = 256,
  BMask_NotMixed = 512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
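/// For example (an illustrative case, not from the original comments): with
/// A == 3, C == 3, and Pred == ICMP_EQ, the result includes AMask_AllOnes and
/// AMask_Mixed, since A == C proves (A & C) == C.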
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  const APInt *ConstA = nullptr, *ConstB = nullptr, *ConstC = nullptr;
  match(A, m_APInt(ConstA));
  match(B, m_APInt(ConstB));
  match(C, m_APInt(ConstC));
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = ConstA && ConstA->isPowerOf2();
  bool IsBPow2 = ConstB && ConstB->isPowerOf2();
  unsigned MaskVal = 0;
  if (ConstC && ConstC->isZero()) {
    // If C is zero, then both A and B qualify as masks.
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ConstA && ConstC && ConstC->isSubsetOf(*ConstA)) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
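/// For example (illustrative): conjugateICmpMask(AMask_AllOnes | Mask_AllZeros)
/// yields (AMask_NotAllOnes | Mask_NotAllZeros), and applying the function
/// twice returns the original mask.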
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}

// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *Cond, CmpInst::Predicate &Pred,
                                 Value *&X, Value *&Y, Value *&Z) {
  auto Res = llvm::decomposeBitTest(Cond, /*LookThroughTrunc=*/true,
                                    /*AllowNonZeroC=*/true);
  if (!Res)
    return false;

  Pred = Res->Pred;
  X = Res->X;
  Y = ConstantInt::get(X->getType(), Res->Mask);
  Z = ConstantInt::get(X->getType(), Res->C);
  return true;
}

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
static std::optional<std::pair<unsigned, unsigned>>
getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E,
                         Value *LHS, Value *RHS, ICmpInst::Predicate &PredL,
                         ICmpInst::Predicate &PredR) {

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // form above.

  // Check whether the icmp can be decomposed into a bit test.
  Value *L1, *L11, *L12, *L2, *L21, *L22;
  if (decomposeBitTestICmp(LHS, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    auto *LHSCMP = dyn_cast<ICmpInst>(LHS);
    if (!LHSCMP)
      return std::nullopt;

    // Don't allow pointers. Splat vectors are fine.
    if (!LHSCMP->getOperand(0)->getType()->isIntOrIntVectorTy())
      return std::nullopt;

    PredL = LHSCMP->getPredicate();
    L1 = LHSCMP->getOperand(0);
    L2 = LHSCMP->getOperand(1);
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return std::nullopt;

  Value *R11, *R12, *R2;
  if (decomposeBitTestICmp(RHS, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return std::nullopt;
    }
    E = R2;
  } else {
    auto *RHSCMP = dyn_cast<ICmpInst>(RHS);
    if (!RHSCMP)
      return std::nullopt;
    // Don't allow pointers. Splat vectors are fine.
    if (!RHSCMP->getOperand(0)->getType()->isIntOrIntVectorTy())
      return std::nullopt;

    PredR = RHSCMP->getPredicate();

    Value *R1 = RHSCMP->getOperand(0);
    R2 = RHSCMP->getOperand(1);
    bool Ok = false;
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }

    // Avoid matching against the -1 value we created for the unmasked operand.
    if (Ok && match(A, m_AllOnes()))
      Ok = false;

    // Look for ANDs on the right side of the RHS icmp.
    if (!Ok) {
      if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
        R11 = R2;
        R12 = Constant::getAllOnesValue(R2->getType());
      }

      if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
        A = R11;
        D = R12;
        E = R1;
      } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
        A = R12;
        D = R11;
        E = R1;
      } else {
        return std::nullopt;
      }
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return std::nullopt;

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return std::optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or; must be poison safe.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    Value *LHS, Value *RHS, bool IsAnd, Value *A, Value *B, Value *D, Value *E,
    ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E).
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //     !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle the case where B, C, D, and E are constants.
  //
  const APInt *BCst, *DCst, *OrigECst;
  if (!match(B, m_APInt(BCst)) || !match(D, m_APInt(DCst)) ||
      !match(E, m_APInt(OrigECst)))
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  // (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  // (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  APInt ECst = *OrigECst;
  if (PredR != NewCC)
    ECst ^= *DCst;

  // If B or D is zero, skip: LHS or RHS can then be trivially folded by other
  // folding rules, and this pattern won't apply any more.
  if (*BCst == 0 || *DCst == 0)
    return nullptr;

  // If B and D don't intersect, i.e. (B & D) == 0, try to fold the isNaN
  // idiom:
  // (icmp ne (A & FractionBits), 0) & (icmp eq (A & ExpBits), ExpBits)
  //     -> isNaN(A)
  // Otherwise, we cannot deduce anything from it.
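  // (For reference, assuming IEEE single precision: ExpBits == 0x7F800000 and
  // FractionBits == 0x007FFFFF, so the combined test checks that the exponent
  // is all ones and the fraction is non-zero, i.e. A bitcasts from a NaN.)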
  if (!BCst->intersects(*DCst)) {
    Value *Src;
    if (*DCst == ECst && match(A, m_ElementWiseBitCast(m_Value(Src))) &&
        !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
            Attribute::StrictFP)) {
      Type *Ty = Src->getType()->getScalarType();
      if (!Ty->isIEEELikeFPTy())
        return nullptr;

      APInt ExpBits = APFloat::getInf(Ty->getFltSemantics()).bitcastToAPInt();
      if (ECst != ExpBits)
        return nullptr;
      APInt FractionBits = ~ExpBits;
      FractionBits.clearSignBit();
      if (*BCst != FractionBits)
        return nullptr;

      return Builder.CreateFCmp(IsAnd ? FCmpInst::FCMP_UNO : FCmpInst::FCMP_ORD,
                                Src, ConstantFP::getZero(Src->getType()));
    }
    return nullptr;
  }

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  // (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection of
  // B and D has only one bit set) and,
  //
  // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
  // words, the intersection of B and D is zero), that is, ((B & D) & E) == 0
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  // (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((*BCst & *DCst) & ECst) == 0) &&
      (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
    APInt BorD = *BCst | *DCst;
    APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
    Value *NewMask = ConstantInt::get(A->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(A->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C1;
  };
  auto IsSuperSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C2;
  };

  // In the following, we consider only the cases where B is a superset of D, B
  // is a subset of D, or B == D because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above.)
  // For example,
  // (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D or B == D.

  // If E is zero, if B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated), otherwise, no
  // folding.
  // For example,
  // (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  // (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst.isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, E aren't zero and (B & D) == B, (B & D) == D or B ==
  // D. If B is a superset of (or equal to) D, since E is not zero, LHS is
  // subsumed by RHS (RHS implies LHS.) So the whole expression becomes
  // RHS. For example,
  // (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  // (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst)) {
    // We can't guarantee that samesign holds after this fold.
    if (auto *ICmp = dyn_cast<ICmpInst>(RHS))
      ICmp->setSameSign(false);
    return RHS;
  }
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // i.e. (B & E) != 0, then LHS is subsumed by RHS. For example,
  // (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((*BCst & ECst) != 0) {
    // We can't guarantee that samesign holds after this fold.
    if (auto *ICmp = dyn_cast<ICmpInst>(RHS))
      ICmp->setSameSign(false);
    return RHS;
  }
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated.) For example,
  // (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  // (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or; must be poison safe.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    Value *LHS, Value *RHS, bool IsAnd, Value *A, Value *B, Value *C, Value *D,
    Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  // which gets swapped to
  // (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, D, E, PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, B, C, PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
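/// For example (an illustrative case handled by the Mask_AllZeros branch
/// below):
///   (icmp eq (A & 12), 0) & (icmp eq (A & 3), 0) -> icmp eq (A & 15), 0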
static Value *foldLogOpOfMaskedICmps(Value *LHS, Value *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder,
                                     const SimplifyQuery &Q) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL, PredR;
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D having a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  const APInt *ConstB, *ConstD;
  if (match(B, m_APInt(ConstB)) && match(D, m_APInt(ConstD))) {
    if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
      // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
      // (icmp ne (A & B), B) & (icmp ne (A & D), D)
      // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
      // Only valid if one of the masks is a superset of the other (check "B&D"
      // is the same as either B or D).
      APInt NewMask = *ConstB & *ConstD;
      if (NewMask == *ConstB)
        return LHS;
      if (NewMask == *ConstD) {
        if (IsLogical) {
          if (auto *RHSI = dyn_cast<Instruction>(RHS))
            RHSI->dropPoisonGeneratingFlags();
        }
        return RHS;
      }
    }

    if (Mask & AMask_NotAllOnes) {
      // (icmp ne (A & B), B) & (icmp ne (A & D), D)
      // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
      // Only valid if one of the masks is a superset of the other (check "B|D"
      // is the same as either B or D).
      APInt NewMask = *ConstB | *ConstD;
      if (NewMask == *ConstB)
        return LHS;
      if (NewMask == *ConstD)
        return RHS;
    }

    if (Mask & (BMask_Mixed | BMask_NotMixed)) {
      // Mixed:
      // (icmp eq (A & B), C) & (icmp eq (A & D), E)
      // We already know that B & C == C && D & E == E.
      // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
      // C and E, which are shared by both the mask B and the mask D, don't
      // contradict, then we can transform to
      // -> (icmp eq (A & (B|D)), (C|E))
      // Currently, we only handle the case of B, C, D, and E being constant.
      // We can't simply use C and E because we might actually handle
      //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
      // with B and D having a single bit set.

      // NotMixed:
      // (icmp ne (A & B), C) & (icmp ne (A & D), E)
      // -> (icmp ne (A & (B & D)), (C & E))
      // Check the intersection (B & D) for inequality.
      // Assume that (B & D) == B || (B & D) == D, i.e. B/D is a subset of D/B,
      // and that (B & D) & (C ^ E) == 0, i.e. the bits of C and E that are
      // shared by both B and D don't contradict. Note that we can assume
      // (~B & C) == 0 && (~D & E) == 0; otherwise a previous fold should have
      // deleted these icmps.

      const APInt *OldConstC, *OldConstE;
      if (!match(C, m_APInt(OldConstC)) || !match(E, m_APInt(OldConstE)))
        return nullptr;

      auto FoldBMixed = [&](ICmpInst::Predicate CC, bool IsNot) -> Value * {
        CC = IsNot ? CmpInst::getInversePredicate(CC) : CC;
        const APInt ConstC = PredL != CC ? *ConstB ^ *OldConstC : *OldConstC;
        const APInt ConstE = PredR != CC ? *ConstD ^ *OldConstE : *OldConstE;

        if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
          return IsNot ? nullptr : ConstantInt::get(LHS->getType(), !IsAnd);

        if (IsNot && !ConstB->isSubsetOf(*ConstD) &&
            !ConstD->isSubsetOf(*ConstB))
          return nullptr;

        APInt BD, CE;
        if (IsNot) {
          BD = *ConstB & *ConstD;
          CE = ConstC & ConstE;
        } else {
          BD = *ConstB | *ConstD;
          CE = ConstC | ConstE;
        }
        Value *NewAnd = Builder.CreateAnd(A, BD);
        Value *CEVal = ConstantInt::get(A->getType(), CE);
        return Builder.CreateICmp(CC, NewAnd, CEVal);
      };

      if (Mask & BMask_Mixed)
        return FoldBMixed(NewCC, false);
      if (Mask & BMask_NotMixed) // could be an else as well
        return FoldBMixed(NewCC, true);
    }
  }

  // (icmp eq (A & B), 0) | (icmp eq (A & D), 0)
  // -> (icmp ne (A & (B|D)), (B|D))
  // (icmp ne (A & B), 0) & (icmp ne (A & D), 0)
  // -> (icmp eq (A & (B|D)), (B|D))
  // iff B and D are each known to be a power of two
  if (Mask & Mask_NotAllZeros &&
      isKnownToBeAPowerOfTwo(B, /*OrZero=*/false, Q) &&
      isKnownToBeAPowerOfTwo(D, /*OrZero=*/false, Q)) {
    // If this is a logical and/or, then we must prevent propagation of a
    // poison value from the RHS by inserting freeze.
    if (IsLogical)
      D = Builder.CreateFreeze(D);
    Value *Mask = Builder.CreateOr(B, D);
    Value *Masked = Builder.CreateAnd(A, Mask);
    return Builder.CreateICmp(NewCC, Masked, Mask);
  }
  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
                               Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
                               Cmp1->getPredicate());

  Value *Input = Cmp0->getOperand(0);
  Value *Cmp1Op0 = Cmp1->getOperand(0);
  Value *Cmp1Op1 = Cmp1->getOperand(1);
  Value *RangeEnd;
  if (match(Cmp1Op0, m_SExtOrSelf(m_Specific(Input)))) {
    // For the upper range compare we have: icmp x, n
    Input = Cmp1Op0;
    RangeEnd = Cmp1Op1;
  } else if (match(Cmp1Op1, m_SExtOrSelf(m_Specific(Input)))) {
    // For the upper range compare we have: icmp n, x
    Input = Cmp1Op1;
    RangeEnd = Cmp1Op0;
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
    case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
    case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
    default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}

// (or (icmp eq X, 0), (icmp eq X, Pow2OrZero))
//    -> (icmp eq (and X, Pow2OrZero), X)
// (and (icmp ne X, 0), (icmp ne X, Pow2OrZero))
//    -> (icmp ne (and X, Pow2OrZero), X)
static Value *
foldAndOrOfICmpsWithPow2AndWithZero(InstCombiner::BuilderTy &Builder,
                                    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                    const SimplifyQuery &Q) {
  CmpPredicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  // Make sure we have right compares for our op.
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  // Make it so we can match LHS against the (icmp eq/ne X, 0) just for
  // simplicity.
  if (match(RHS->getOperand(1), m_Zero()))
    std::swap(LHS, RHS);

  Value *Pow2, *Op;
  // Match the desired pattern:
  //   LHS: (icmp eq/ne X, 0)
  //   RHS: (icmp eq/ne X, Pow2OrZero)
  // Skip if Pow2OrZero is 1. Either way it gets folded to (icmp ugt X, 1) but
  // this form ends up slightly less canonical.
  // We could potentially be more sophisticated than requiring LHS/RHS
  // be one-use. We don't create additional instructions if only one
  // of them is one-use. So cases where one is one-use and the other
  // is two-use might be profitable.
  if (!match(LHS, m_OneUse(m_ICmp(Pred, m_Value(Op), m_Zero()))) ||
      !match(RHS, m_OneUse(m_c_ICmp(Pred, m_Specific(Op), m_Value(Pow2)))) ||
      match(Pow2, m_One()) ||
      !isKnownToBeAPowerOfTwo(Pow2, Q.DL, /*OrZero=*/true, Q.AC, Q.CxtI, Q.DT))
    return nullptr;

  Value *And = Builder.CreateAnd(Op, Pow2);
  return Builder.CreateICmp(Pred, And, Op);
}

/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0
/// Pattern can be one of:
///   %t = add i32 %arg, 128
///   %r = icmp ult i32 %t, 256
/// Or
///   %t0 = shl i32 %arg, 24
///   %t1 = ashr i32 %t0, 24
///   %r = icmp eq i32 %t1, %arg
/// Or
///   %t0 = trunc i32 %arg to i8
///   %t1 = sext i8 %t0 to i32
///   %r = icmp eq i32 %t1, %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32 %arg, -1
/// Or
///   %t = and i32 %arg, 2147483648
///   %r = icmp eq i32 %t, 0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  assert(CxtI.getOpcode() == Instruction::And);

  // Match icmp ult (add %arg, C01), C1 (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(ICmp, m_SpecificICmp(ICmpInst::ICMP_ULT,
                                     m_Add(m_Value(X), m_Power2(I01)),
                                     m_Power2(I1))) &&
          I1->ugt(*I01) && I01->shl(1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be the 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into: icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpPredicate Pred = ICmp->getPredicate();
    // Can it be decomposed into icmp eq (X & Mask), 0 ?
    auto Res = llvm::decomposeBitTestICmp(
        ICmp->getOperand(0), ICmp->getOperand(1), Pred,
        /*LookThroughTrunc=*/false, /*AllowNonZeroC=*/false,
        /*DecomposeAnd=*/true);
    if (Res && Res->Pred == ICmpInst::ICMP_EQ) {
      X = Res->X;
      UnsetBitsMask = Res->Mask;
      return true;
    }

    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isZero() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(X0, m_Trunc(m_Specific(X1)))) {
    UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask.
  if (!UnsetBitsMask.intersects(SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.

  // %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
                               CxtI.getName() + ".simplified");
}

/// Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and
/// fold (icmp ne ctpop(X) 1) & (icmp ne X 0) into (icmp ugt ctpop(X) 1).
/// Also used for logical and/or; must be poison safe if range attributes are
/// dropped.
static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
                                   InstCombiner::BuilderTy &Builder,
                                   InstCombinerImpl &IC) {
  CmpPredicate Pred0, Pred1;
  Value *X;
  if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
                          m_SpecificInt(1))) ||
      !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())))
    return nullptr;

  auto *CtPop = cast<Instruction>(Cmp0->getOperand(0));
  if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE) {
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ) {
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));
  }

  return nullptr;
}

/// Reduce a pair of compares that check if a value has exactly 1 bit set.
/// Also used for logical and/or; must be poison safe if range attributes are
/// dropped.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder,
                             InstCombinerImpl &IC) {
  // Handle 'and' / 'or' commutation: make the equality check the first operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(Cmp0, Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(Cmp0, Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  Value *X;
  if (JoinedByAnd &&
      match(Cmp0, m_SpecificICmp(ICmpInst::ICMP_NE, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_SpecificICmp(ICmpInst::ICMP_ULT,
                                 m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                                 m_SpecificInt(2)))) {
    auto *CtPop = cast<Instruction>(Cmp1->getOperand(0));
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd &&
      match(Cmp0, m_SpecificICmp(ICmpInst::ICMP_EQ, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_SpecificICmp(ICmpInst::ICMP_UGT,
                                 m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                                 m_SpecificInt(1)))) {
    auto *CtPop = cast<Instruction>(Cmp1->getOperand(0));
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff
/// B is a contiguous set of ones starting from the most significant bit
/// (negative power of 2), D and E are equal, and D is a contiguous set of ones
/// starting at the most significant zero bit in B. Parameter B supports masking
/// using undef/poison in either scalar or vector values.
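/// For example (an illustrative i8 case): with B = 0xF0 and D = E = 0x0C,
///   (icmp eq (A & 0xF0), 0) & (icmp ne (A & 0x0C), 0x0C) -> icmp ult A, 0x0C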
static Value *foldNegativePower2AndShiftedMask(
    Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL,
    ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  if (PredL != ICmpInst::ICMP_EQ || PredR != ICmpInst::ICMP_NE)
    return nullptr;

  if (!match(B, m_NegatedPower2()) || !match(D, m_ShiftedMask()) ||
      !match(E, m_ShiftedMask()))
    return nullptr;

  // Test scalar arguments for conversion. B has been validated earlier to be a
  // negative power of two and thus is guaranteed to have one or more contiguous
  // ones starting from the MSB followed by zero or more contiguous zeros. D has
  // been validated earlier to be a shifted set of one or more contiguous ones.
  // In order to match, B leading ones and D leading zeros should be equal. The
  // predicate that B be a negative power of 2 prevents the condition of there
  // ever being zero leading ones. Thus 0 == 0 cannot occur. The predicate that
  // D always be a shifted mask prevents the condition of D equaling 0. This
  // prevents matching the condition where B contains the maximum number of
  // leading one bits (-1) and D contains the maximum number of leading zero
  // bits (0).
  auto isReducible = [](const Value *B, const Value *D, const Value *E) {
    const APInt *BCst, *DCst, *ECst;
    return match(B, m_APIntAllowPoison(BCst)) && match(D, m_APInt(DCst)) &&
           match(E, m_APInt(ECst)) && *DCst == *ECst &&
           (isa<PoisonValue>(B) ||
            (BCst->countLeadingOnes() == DCst->countLeadingZeros()));
  };

  // Test vector type arguments for conversion.
  if (const auto *BVTy = dyn_cast<VectorType>(B->getType())) {
    const auto *BFVTy = dyn_cast<FixedVectorType>(BVTy);
    const auto *BConst = dyn_cast<Constant>(B);
    const auto *DConst = dyn_cast<Constant>(D);
    const auto *EConst = dyn_cast<Constant>(E);

    if (!BFVTy || !BConst || !DConst || !EConst)
      return nullptr;

    for (unsigned I = 0; I != BFVTy->getNumElements(); ++I) {
      const auto *BElt = BConst->getAggregateElement(I);
      const auto *DElt = DConst->getAggregateElement(I);
      const auto *EElt = EConst->getAggregateElement(I);

      if (!BElt || !DElt || !EElt)
        return nullptr;
      if (!isReducible(BElt, DElt, EElt))
        return nullptr;
    }
  } else {
    // Test scalar type arguments for conversion.
    if (!isReducible(B, D, E))
      return nullptr;
  }
  return Builder.CreateICmp(ICmpInst::ICMP_ULT, A, D);
}

/// Try to fold ((icmp X u< P) & (icmp(X & M) != M)) or ((icmp X s> -1) &
/// (icmp(X & M) != M)) into (icmp X u< M). Where P is a power of 2, M < P, and
/// M is a contiguous shifted mask starting at the most significant zero bit
/// below P's set bit. SGT is supported as when P is the largest representable
/// power of 2, an earlier optimization converts the expression into
/// (icmp X s> -1). Parameter P supports masking using undef/poison in either
/// scalar or vector values.
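/// For example (an illustrative case): with P = 16 and M = 12,
///   (icmp ult X, 16) & (icmp ne (X & 12), 12) -> icmp ult X, 12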
static Value *foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                         bool JoinedByAnd,
                                         InstCombiner::BuilderTy &Builder) {
  if (!JoinedByAnd)
    return nullptr;
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate CmpPred0, CmpPred1;
  // Assuming P is a 2^n, getMaskedTypeForICmpPair will normalize (icmp X u<
  // 2^n) into (icmp (X & ~(2^n-1)) == 0) and (icmp X s> -1) into (icmp (X &
  // SignMask) == 0).
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, Cmp0, Cmp1, CmpPred0, CmpPred1);
  if (!MaskPair)
    return nullptr;

  const auto compareBMask = BMask_NotMixed | BMask_NotAllOnes;
  unsigned CmpMask0 = MaskPair->first;
  unsigned CmpMask1 = MaskPair->second;
  if ((CmpMask0 & Mask_AllZeros) && (CmpMask1 == compareBMask)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, B, D, E, CmpPred0,
                                                    CmpPred1, Builder))
      return V;
  } else if ((CmpMask0 == compareBMask) && (CmpMask1 & Mask_AllZeros)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, D, B, C, CmpPred1,
                                                    CmpPred0, Builder))
      return V;
  }
  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
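/// For example (an illustrative case, with B known to be non-zero):
///   (icmp ult (add A, B), A) & (icmp ne (add A, B), 0)
///     -> icmp ult (sub 0, B), A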
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  Value *ZeroCmpOp;
  CmpPredicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  CmpPredicate UnsignedPred;

  Value *A, *B;
  if (match(UnsignedICmp,
            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!isKnownNonZero(NonZero, Q))
        std::swap(NonZero, Other);
      return isKnownNonZero(NonZero, Q);
    };

    // Given ZeroCmpOp = (A + B):
    //   ZeroCmpOp <  A && ZeroCmpOp != 0 --> (0-X) <  Y
    //   ZeroCmpOp >= A || ZeroCmpOp == 0 --> (0-X) >= Y
    // where X is the value (A/B) that is known to be non-zero,
    // and Y is the remaining value.
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
  }

  return nullptr;
}

struct IntPart {
  Value *From;
  unsigned StartBit;
  unsigned NumBits;
};

/// Match an extraction of bits from an integer.
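/// For example (illustrative): trunc (lshr i32 %y, 8) to i8 is matched as
/// { From = %y, StartBit = 8, NumBits = 8 }.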
static std::optional<IntPart> matchIntPart(Value *V) {
  Value *X;
  if (!match(V, m_OneUse(m_Trunc(m_Value(X)))))
    return std::nullopt;

  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
  Value *Y;
  const APInt *Shift;
  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
  // from Y, not any shifted-in zeroes.
  if (match(X, m_OneUse(m_LShr(m_Value(Y), m_APInt(Shift)))) &&
      Shift->ule(NumOriginalBits - NumExtractedBits))
    return {{Y, (unsigned)Shift->getZExtValue(), NumExtractedBits}};
  return {{X, 0, NumExtractedBits}};
}

/// Materialize an extraction of bits from an integer in IR.
static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
  Value *V = P.From;
  if (P.StartBit)
    V = Builder.CreateLShr(V, P.StartBit);
  Type *TruncTy = V->getType()->getWithNewBitWidth(P.NumBits);
  if (TruncTy != V->getType())
    V = Builder.CreateTrunc(V, TruncTy);
  return V;
}

/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
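/// For example (an illustrative case): comparing the low byte and the second
/// byte of %x and %y separately becomes a single compare of the low 16 bits:
///   icmp eq (trunc i32 %x to i16), (trunc i32 %y to i16)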
Value *InstCombinerImpl::foldEqOfParts(Value *Cmp0, Value *Cmp1, bool IsAnd) {
  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
  auto GetMatchPart = [&](Value *CmpV,
                          unsigned OpNo) -> std::optional<IntPart> {
    assert(CmpV->getType()->isIntOrIntVectorTy(1) && "Must be bool");

    Value *X, *Y;
    // icmp ne (and x, 1), (and y, 1) <=> trunc (xor x, y) to i1
    // icmp eq (and x, 1), (and y, 1) <=> not (trunc (xor x, y) to i1)
    if (Pred == CmpInst::ICMP_NE
            ? match(CmpV, m_Trunc(m_Xor(m_Value(X), m_Value(Y))))
            : match(CmpV, m_Not(m_Trunc(m_Xor(m_Value(X), m_Value(Y))))))
      return {{OpNo == 0 ? X : Y, 0, 1}};

    auto *Cmp = dyn_cast<ICmpInst>(CmpV);
    if (!Cmp)
      return std::nullopt;

    if (Pred == Cmp->getPredicate())
      return matchIntPart(Cmp->getOperand(OpNo));

    const APInt *C;
    // (icmp eq (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ult (xor x, y), 1 << C) so also look for that.
    if (Pred == CmpInst::ICMP_EQ && Cmp->getPredicate() == CmpInst::ICMP_ULT) {
      if (!match(Cmp->getOperand(1), m_Power2(C)) ||
          !match(Cmp->getOperand(0), m_Xor(m_Value(), m_Value())))
        return std::nullopt;
    }

    // (icmp ne (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ugt (xor x, y), (1 << C) - 1) so also look for that.
    else if (Pred == CmpInst::ICMP_NE &&
             Cmp->getPredicate() == CmpInst::ICMP_UGT) {
      if (!match(Cmp->getOperand(1), m_LowBitMask(C)) ||
          !match(Cmp->getOperand(0), m_Xor(m_Value(), m_Value())))
        return std::nullopt;
    } else {
      return std::nullopt;
    }

    unsigned From = Pred == CmpInst::ICMP_NE ? C->popcount() : C->countr_zero();
    Instruction *I = cast<Instruction>(Cmp->getOperand(0));
    return {{I->getOperand(OpNo), From, C->getBitWidth() - From}};
  };

  std::optional<IntPart> L0 = GetMatchPart(Cmp0, 0);
  std::optional<IntPart> R0 = GetMatchPart(Cmp0, 1);
  std::optional<IntPart> L1 = GetMatchPart(Cmp1, 0);
  std::optional<IntPart> R1 = GetMatchPart(Cmp1, 1);
  if (!L0 || !R0 || !L1 || !R1)
    return nullptr;

  // Make sure the LHS/RHS compare a part of the same value, possibly after
  // an operand swap.
  if (L0->From != L1->From || R0->From != R1->From) {
    if (L0->From != R1->From || R0->From != L1->From)
      return nullptr;
    std::swap(L1, R1);
  }

  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
  // the low part and L1/R1 being the high part.
  if (L0->StartBit + L0->NumBits != L1->StartBit ||
      R0->StartBit + R0->NumBits != R1->StartBit) {
    if (L1->StartBit + L1->NumBits != L0->StartBit ||
        R1->StartBit + R1->NumBits != R0->StartBit)
      return nullptr;
    std::swap(L0, L1);
    std::swap(R0, R1);
  }

  // We can simplify to a comparison of these larger parts of the integers.
  IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
  IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
  Value *LValue = extractIntPart(L, Builder);
  Value *RValue = extractIntPart(R, Builder);
  return Builder.CreateICmp(Pred, LValue, RValue);
}

/// Reduce logic-of-compares with equality to a constant by substituting a
/// common operand with the constant. Callers are expected to call this with
/// Cmp0/Cmp1 switched to handle logic op commutativity.
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd, bool IsLogical,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q) {
  // Match an equality compare with a non-poison constant as Cmp0.
  // Also, give up if the compare can be constant-folded to avoid looping.
  CmpPredicate Pred0;
  Value *X;
  Constant *C;
  if (!match(Cmp0, m_ICmp(Pred0, m_Value(X), m_Constant(C))) ||
      !isGuaranteedNotToBeUndefOrPoison(C) || isa<Constant>(X))
    return nullptr;
  if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) ||
      (!IsAnd && Pred0 != ICmpInst::ICMP_NE))
    return nullptr;

  // The other compare must include a common operand (X). Canonicalize the
  // common operand as operand 1 (Pred1 is swapped if the common operand was
  // operand 0).
  Value *Y;
  CmpPredicate Pred1;
  if (!match(Cmp1, m_c_ICmp(Pred1, m_Value(Y), m_Specific(X))))
    return nullptr;

  // Replace variable with constant value equivalence to remove a variable use:
  //   (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C)
  //   (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
  // The 'or' substitution can be thought of in terms of the 'and' bool
  // equivalent: A || B --> A || (!A && B).
  Value *SubstituteCmp = simplifyICmpInst(Pred1, Y, C, Q);
  if (!SubstituteCmp) {
    // If we need to create a new instruction, require that the old compare can
    // be removed.
    if (!Cmp1->hasOneUse())
      return nullptr;
    SubstituteCmp = Builder.CreateICmp(Pred1, Y, C);
  }
  if (IsLogical)
    return IsAnd ? Builder.CreateLogicalAnd(Cmp0, SubstituteCmp)
                 : Builder.CreateLogicalOr(Cmp0, SubstituteCmp);
  return Builder.CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
                             SubstituteCmp);
}

/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
/// or   (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// NOTE: This is also used for logical and/or; it must be poison-safe!
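/// For example (an illustrative case):
///   (icmp ugt X, 3) & (icmp ult X, 12) -> icmp ult (add X, -4), 8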
Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
                                                     ICmpInst *ICmp2,
                                                     bool IsAnd) {
  CmpPredicate Pred1, Pred2;
  Value *V1, *V2;
  const APInt *C1, *C2;
  if (!match(ICmp1, m_ICmp(Pred1, m_Value(V1), m_APInt(C1))) ||
      !match(ICmp2, m_ICmp(Pred2, m_Value(V2), m_APInt(C2))))
    return nullptr;

  // Look through add of a constant offset on V1, V2, or both operands. This
  // allows us to interpret the V + C' < C'' range idiom as a proper range.
  const APInt *Offset1 = nullptr, *Offset2 = nullptr;
  if (V1 != V2) {
    Value *X;
    if (match(V1, m_Add(m_Value(X), m_APInt(Offset1))))
      V1 = X;
    if (match(V2, m_Add(m_Value(X), m_APInt(Offset2))))
      V2 = X;
  }

  if (V1 != V2)
    return nullptr;

  ConstantRange CR1 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInverseCmpPredicate(Pred1) : Pred1, *C1);
  if (Offset1)
    CR1 = CR1.subtract(*Offset1);

  ConstantRange CR2 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInverseCmpPredicate(Pred2) : Pred2, *C2);
  if (Offset2)
    CR2 = CR2.subtract(*Offset2);

  Type *Ty = V1->getType();
  Value *NewV = V1;
  std::optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
  if (!CR) {
    if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
        CR2.isWrappedSet())
      return nullptr;

    // Check whether we have equal-size ranges that only differ by one bit.
    // In that case we can apply a mask to map one range onto the other.
    APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
    APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
    APInt CR1Size = CR1.getUpper() - CR1.getLower();
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        CR1Size != CR2.getUpper() - CR2.getLower())
      return nullptr;

    CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
    NewV = Builder.CreateAnd(NewV, ConstantInt::get(Ty, ~LowerDiff));
  }

  if (IsAnd)
    CR = CR->inverse();

  CmpInst::Predicate NewPred;
  APInt NewC, Offset;
  CR->getEquivalentICmp(NewPred, NewC, Offset);

  if (Offset != 0)
    NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
  return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
}

1384 /// Ignore all operations which only change the sign of a value, returning the
1385 /// underlying magnitude value.
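/// e.g. (illustrative) fneg (fabs X) --> X, and copysign X, Y --> X.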
1386 static Value *stripSignOnlyFPOps(Value *Val) {
1387 match(Val, m_FNeg(m_Value(Val)));
1388 match(Val, m_FAbs(m_Value(Val)));
1389 match(Val, m_CopySign(m_Value(Val), m_Value()));
1390 return Val;
1391 }
1392
1393 /// Matches the canonical form of a not-NaN test: fcmp ord x, 0
1394 static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS) {
1395 return P == FCmpInst::FCMP_ORD && match(RHS, m_AnyZeroFP());
1396 }
1397
1398 /// Matches fcmp u__ x, +/-inf
1399 static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS,
1400 Value *RHS) {
1401 return FCmpInst::isUnordered(P) && match(RHS, m_Inf());
1402 }
1403
1404 /// and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1405 ///
1406 /// Clang emits this pattern for doing an isfinite check in __builtin_isnormal.
1407 static Value *matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS,
1408 FCmpInst *RHS) {
1409 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1410 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1411 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1412
1413 if (!matchIsNotNaN(PredL, LHS0, LHS1) ||
1414 !matchUnorderedInfCompare(PredR, RHS0, RHS1))
1415 return nullptr;
1416
1417 return Builder.CreateFCmpFMF(FCmpInst::getOrderedPredicate(PredR), RHS0, RHS1,
1418 FMFSource::intersect(LHS, RHS));
1419 }
1420
1421 Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
1422 bool IsAnd, bool IsLogicalSelect) {
1423 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1424 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1425 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1426
1427 if (LHS0 == RHS1 && RHS0 == LHS1) {
1428 // Swap RHS operands to match LHS.
1429 PredR = FCmpInst::getSwappedPredicate(PredR);
1430 std::swap(RHS0, RHS1);
1431 }
1432
1433 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
1434 // Suppose the relation between x and y is R, where R is one of
1435 // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
1436 // testing the desired relations.
1437 //
1438 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1439 // bool(R & CC0) && bool(R & CC1)
1440 // = bool((R & CC0) & (R & CC1))
1441 // = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
1442 //
1443   // Similarly, since (R & CC0) and (R & CC1) are either R or 0:
1444   //   bool(R & CC0) || bool(R & CC1)
1445   //     = bool((R & CC0) | (R & CC1))
1446   //     = bool(R & (CC0 | CC1)) <= by reversed distribution
1447 if (LHS0 == RHS0 && LHS1 == RHS1) {
1448 unsigned FCmpCodeL = getFCmpCode(PredL);
1449 unsigned FCmpCodeR = getFCmpCode(PredR);
1450 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1451
1452 // Intersect the fast math flags.
1453 // TODO: We can union the fast math flags unless this is a logical select.
1454 return getFCmpValue(NewPred, LHS0, LHS1, Builder,
1455 FMFSource::intersect(LHS, RHS));
1456 }
1457
1458 // This transform is not valid for a logical select.
1459 if (!IsLogicalSelect &&
1460 ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1461 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO &&
1462 !IsAnd))) {
1463 if (LHS0->getType() != RHS0->getType())
1464 return nullptr;
1465
1466 // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
1467 // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
1468 if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP())) {
1469 // Ignore the constants because they are obviously not NANs:
1470 // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y)
1471 // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y)
1472 return Builder.CreateFCmpFMF(PredL, LHS0, RHS0,
1473 FMFSource::intersect(LHS, RHS));
1474 }
1475 }
1476
1477 // This transform is not valid for a logical select.
1478 if (!IsLogicalSelect && IsAnd &&
1479 stripSignOnlyFPOps(LHS0) == stripSignOnlyFPOps(RHS0)) {
1480 // and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1481 // and (fcmp ord x, 0), (fcmp u* fabs(x), inf) -> fcmp o* x, inf
1482 if (Value *Left = matchIsFiniteTest(Builder, LHS, RHS))
1483 return Left;
1484 if (Value *Right = matchIsFiniteTest(Builder, RHS, LHS))
1485 return Right;
1486 }
1487
1488 // Turn at least two fcmps with constants into llvm.is.fpclass.
1489 //
1490 // If we can represent a combined value test with one class call, we can
1491 // potentially eliminate 4-6 instructions. If we can represent a test with a
1492 // single fcmp with fneg and fabs, that's likely a better canonical form.
1493 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1494 auto [ClassValRHS, ClassMaskRHS] =
1495 fcmpToClassTest(PredR, *RHS->getFunction(), RHS0, RHS1);
1496 if (ClassValRHS) {
1497 auto [ClassValLHS, ClassMaskLHS] =
1498 fcmpToClassTest(PredL, *LHS->getFunction(), LHS0, LHS1);
1499 if (ClassValLHS == ClassValRHS) {
1500 unsigned CombinedMask = IsAnd ? (ClassMaskLHS & ClassMaskRHS)
1501 : (ClassMaskLHS | ClassMaskRHS);
1502 return Builder.CreateIntrinsic(
1503 Intrinsic::is_fpclass, {ClassValLHS->getType()},
1504 {ClassValLHS, Builder.getInt32(CombinedMask)});
1505 }
1506 }
1507 }
1508
1509 // Canonicalize the range check idiom:
1510 // and (fcmp olt/ole/ult/ule x, C), (fcmp ogt/oge/ugt/uge x, -C)
1511 // --> fabs(x) olt/ole/ult/ule C
1512 // or (fcmp ogt/oge/ugt/uge x, C), (fcmp olt/ole/ult/ule x, -C)
1513 // --> fabs(x) ogt/oge/ugt/uge C
1514 // TODO: Generalize to handle a negated variable operand?
1515 const APFloat *LHSC, *RHSC;
1516 if (LHS0 == RHS0 && LHS->hasOneUse() && RHS->hasOneUse() &&
1517 FCmpInst::getSwappedPredicate(PredL) == PredR &&
1518 match(LHS1, m_APFloatAllowPoison(LHSC)) &&
1519 match(RHS1, m_APFloatAllowPoison(RHSC)) &&
1520 LHSC->bitwiseIsEqual(neg(*RHSC))) {
1521 auto IsLessThanOrLessEqual = [](FCmpInst::Predicate Pred) {
1522 switch (Pred) {
1523 case FCmpInst::FCMP_OLT:
1524 case FCmpInst::FCMP_OLE:
1525 case FCmpInst::FCMP_ULT:
1526 case FCmpInst::FCMP_ULE:
1527 return true;
1528 default:
1529 return false;
1530 }
1531 };
1532 if (IsLessThanOrLessEqual(IsAnd ? PredR : PredL)) {
1533 std::swap(LHSC, RHSC);
1534 std::swap(PredL, PredR);
1535 }
1536 if (IsLessThanOrLessEqual(IsAnd ? PredL : PredR)) {
1537 FastMathFlags NewFlag = LHS->getFastMathFlags();
1538 if (!IsLogicalSelect)
1539 NewFlag |= RHS->getFastMathFlags();
1540
1541 Value *FAbs =
1542 Builder.CreateUnaryIntrinsic(Intrinsic::fabs, LHS0, NewFlag);
1543 return Builder.CreateFCmpFMF(
1544 PredL, FAbs, ConstantFP::get(LHS0->getType(), *LHSC), NewFlag);
1545 }
1546 }
1547
1548 return nullptr;
1549 }
1550
1551 /// Match an fcmp against a special value that performs a test possible by
1552 /// llvm.is.fpclass.
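/// e.g. (fcmp oeq x, +inf) is a test of the fcPosInf class (illustrative;
/// the exact mapping is computed by fcmpToClassTest).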
1553 static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal,
1554 uint64_t &ClassMask) {
1555 auto *FCmp = dyn_cast<FCmpInst>(Op);
1556 if (!FCmp || !FCmp->hasOneUse())
1557 return false;
1558
1559 std::tie(ClassVal, ClassMask) =
1560 fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
1561 FCmp->getOperand(0), FCmp->getOperand(1));
1562 return ClassVal != nullptr;
1563 }
1564
1565 /// or (is_fpclass x, mask0), (is_fpclass x, mask1)
1566 /// -> is_fpclass x, (mask0 | mask1)
1567 /// and (is_fpclass x, mask0), (is_fpclass x, mask1)
1568 /// -> is_fpclass x, (mask0 & mask1)
1569 /// xor (is_fpclass x, mask0), (is_fpclass x, mask1)
1570 /// -> is_fpclass x, (mask0 ^ mask1)
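/// e.g. with illustrative masks:
///   or (is_fpclass x, 3), (is_fpclass x, 4) -> is_fpclass x, 7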
1571 Instruction *InstCombinerImpl::foldLogicOfIsFPClass(BinaryOperator &BO,
1572 Value *Op0, Value *Op1) {
1573 Value *ClassVal0 = nullptr;
1574 Value *ClassVal1 = nullptr;
1575 uint64_t ClassMask0, ClassMask1;
1576
1577   // Restrict to folding one fcmp into one is.fpclass for now; don't introduce a
1578 // new class.
1579 //
1580 // TODO: Support forming is.fpclass out of 2 separate fcmps when codegen is
1581 // better.
1582
1583 bool IsLHSClass =
1584 match(Op0, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>(
1585 m_Value(ClassVal0), m_ConstantInt(ClassMask0))));
1586 bool IsRHSClass =
1587 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>(
1588 m_Value(ClassVal1), m_ConstantInt(ClassMask1))));
1589 if ((((IsLHSClass || matchIsFPClassLikeFCmp(Op0, ClassVal0, ClassMask0)) &&
1590 (IsRHSClass || matchIsFPClassLikeFCmp(Op1, ClassVal1, ClassMask1)))) &&
1591 ClassVal0 == ClassVal1) {
1592 unsigned NewClassMask;
1593 switch (BO.getOpcode()) {
1594 case Instruction::And:
1595 NewClassMask = ClassMask0 & ClassMask1;
1596 break;
1597 case Instruction::Or:
1598 NewClassMask = ClassMask0 | ClassMask1;
1599 break;
1600 case Instruction::Xor:
1601 NewClassMask = ClassMask0 ^ ClassMask1;
1602 break;
1603 default:
1604 llvm_unreachable("not a binary logic operator");
1605 }
1606
1607 if (IsLHSClass) {
1608 auto *II = cast<IntrinsicInst>(Op0);
1609 II->setArgOperand(
1610 1, ConstantInt::get(II->getArgOperand(1)->getType(), NewClassMask));
1611 return replaceInstUsesWith(BO, II);
1612 }
1613
1614 if (IsRHSClass) {
1615 auto *II = cast<IntrinsicInst>(Op1);
1616 II->setArgOperand(
1617 1, ConstantInt::get(II->getArgOperand(1)->getType(), NewClassMask));
1618 return replaceInstUsesWith(BO, II);
1619 }
1620
1621 CallInst *NewClass =
1622 Builder.CreateIntrinsic(Intrinsic::is_fpclass, {ClassVal0->getType()},
1623 {ClassVal0, Builder.getInt32(NewClassMask)});
1624 return replaceInstUsesWith(BO, NewClass);
1625 }
1626
1627 return nullptr;
1628 }
1629
1630 /// Look for the pattern that conditionally negates a value via math operations:
1631 /// cond.splat = sext i1 cond
1632 /// sub = add cond.splat, x
1633 /// xor = xor sub, cond.splat
1634 /// and rewrite it to do the same, but via logical operations:
1635 /// value.neg = sub 0, value
1636 ///   res = select i1 cond, value.neg, value
1637 Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
1638 BinaryOperator &I) {
1639 assert(I.getOpcode() == BinaryOperator::Xor && "Only for xor!");
1640 Value *Cond, *X;
1641 // As per complexity ordering, `xor` is not commutative here.
1642 if (!match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())) ||
1643 !match(I.getOperand(1), m_SExt(m_Value(Cond))) ||
1644 !Cond->getType()->isIntOrIntVectorTy(1) ||
1645 !match(I.getOperand(0), m_c_Add(m_SExt(m_Specific(Cond)), m_Value(X))))
1646 return nullptr;
1647 return SelectInst::Create(Cond, Builder.CreateNeg(X, X->getName() + ".neg"),
1648 X);
1649 }
1650
1651 /// This is a limited reassociation for a special case (see above) where we are
1652 /// checking if two values are either both NAN (unordered) or not-NAN (ordered).
1653 /// This could be handled more generally in '-reassociation', but it seems like
1654 /// an unlikely pattern for a large number of logic ops and fcmps.
1655 static Instruction *reassociateFCmps(BinaryOperator &BO,
1656 InstCombiner::BuilderTy &Builder) {
1657 Instruction::BinaryOps Opcode = BO.getOpcode();
1658 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1659 "Expecting and/or op for fcmp transform");
1660
1661 // There are 4 commuted variants of the pattern. Canonicalize operands of this
1662 // logic op so an fcmp is operand 0 and a matching logic op is operand 1.
1663 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X;
1664 if (match(Op1, m_FCmp(m_Value(), m_AnyZeroFP())))
1665 std::swap(Op0, Op1);
1666
1667 // Match inner binop and the predicate for combining 2 NAN checks into 1.
1668 Value *BO10, *BO11;
1669 FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
1670 : FCmpInst::FCMP_UNO;
1671 if (!match(Op0, m_SpecificFCmp(NanPred, m_Value(X), m_AnyZeroFP())) ||
1672 !match(Op1, m_BinOp(Opcode, m_Value(BO10), m_Value(BO11))))
1673 return nullptr;
1674
1675 // The inner logic op must have a matching fcmp operand.
1676 Value *Y;
1677 if (!match(BO10, m_SpecificFCmp(NanPred, m_Value(Y), m_AnyZeroFP())) ||
1678 X->getType() != Y->getType())
1679 std::swap(BO10, BO11);
1680
1681 if (!match(BO10, m_SpecificFCmp(NanPred, m_Value(Y), m_AnyZeroFP())) ||
1682 X->getType() != Y->getType())
1683 return nullptr;
1684
1685 // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
1686 // or (fcmp uno X, 0), (or (fcmp uno Y, 0), Z) --> or (fcmp uno X, Y), Z
1687 // Intersect FMF from the 2 source fcmps.
1688 Value *NewFCmp =
1689 Builder.CreateFCmpFMF(NanPred, X, Y, FMFSource::intersect(Op0, BO10));
1690 return BinaryOperator::Create(Opcode, NewFCmp, BO11);
1691 }
1692
1693 /// Match variations of De Morgan's Laws:
1694 /// (~A & ~B) == (~(A | B))
1695 /// (~A | ~B) == (~(A & B))
1696 static Instruction *matchDeMorgansLaws(BinaryOperator &I,
1697 InstCombiner &IC) {
1698 const Instruction::BinaryOps Opcode = I.getOpcode();
1699 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1700 "Trying to match De Morgan's Laws with something other than and/or");
1701
1702 // Flip the logic operation.
1703 const Instruction::BinaryOps FlippedOpcode =
1704 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1705
1706 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1707 Value *A, *B;
1708 if (match(Op0, m_OneUse(m_Not(m_Value(A)))) &&
1709 match(Op1, m_OneUse(m_Not(m_Value(B)))) &&
1710 !IC.isFreeToInvert(A, A->hasOneUse()) &&
1711 !IC.isFreeToInvert(B, B->hasOneUse())) {
1712 Value *AndOr =
1713 IC.Builder.CreateBinOp(FlippedOpcode, A, B, I.getName() + ".demorgan");
1714 return BinaryOperator::CreateNot(AndOr);
1715 }
1716
1717 // The 'not' ops may require reassociation.
1718 // (A & ~B) & ~C --> A & ~(B | C)
1719 // (~B & A) & ~C --> A & ~(B | C)
1720 // (A | ~B) | ~C --> A | ~(B & C)
1721 // (~B | A) | ~C --> A | ~(B & C)
1722 Value *C;
1723 if (match(Op0, m_OneUse(m_c_BinOp(Opcode, m_Value(A), m_Not(m_Value(B))))) &&
1724 match(Op1, m_Not(m_Value(C)))) {
1725 Value *FlippedBO = IC.Builder.CreateBinOp(FlippedOpcode, B, C);
1726 return BinaryOperator::Create(Opcode, A, IC.Builder.CreateNot(FlippedBO));
1727 }
1728
1729 return nullptr;
1730 }
1731
1732 bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
1733 Value *CastSrc = CI->getOperand(0);
1734
1735 // Noop casts and casts of constants should be eliminated trivially.
1736 if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
1737 return false;
1738
1739 // If this cast is paired with another cast that can be eliminated, we prefer
1740 // to have it eliminated.
1741 if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
1742 if (isEliminableCastPair(PrecedingCI, CI))
1743 return false;
1744
1745 return true;
1746 }
1747
1748 /// Fold {and,or,xor} (cast X), C.
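/// e.g. and (zext i8 %x to i32), 15 --> zext (and i8 %x, 15) to i32
/// (illustrative; requires the constant to survive truncation losslessly).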
1749 static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
1750 InstCombinerImpl &IC) {
1751 Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
1752 if (!C)
1753 return nullptr;
1754
1755 auto LogicOpc = Logic.getOpcode();
1756 Type *DestTy = Logic.getType();
1757 Type *SrcTy = Cast->getSrcTy();
1758
1759 // Move the logic operation ahead of a zext or sext if the constant is
1760 // unchanged in the smaller source type. Performing the logic in a smaller
1761 // type may provide more information to later folds, and the smaller logic
1762 // instruction may be cheaper (particularly in the case of vectors).
1763 Value *X;
1764 if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
1765 if (Constant *TruncC = IC.getLosslessUnsignedTrunc(C, SrcTy)) {
1766 // LogicOpc (zext X), C --> zext (LogicOpc X, C)
1767 Value *NewOp = IC.Builder.CreateBinOp(LogicOpc, X, TruncC);
1768 return new ZExtInst(NewOp, DestTy);
1769 }
1770 }
1771
1772 if (match(Cast, m_OneUse(m_SExtLike(m_Value(X))))) {
1773 if (Constant *TruncC = IC.getLosslessSignedTrunc(C, SrcTy)) {
1774 // LogicOpc (sext X), C --> sext (LogicOpc X, C)
1775 Value *NewOp = IC.Builder.CreateBinOp(LogicOpc, X, TruncC);
1776 return new SExtInst(NewOp, DestTy);
1777 }
1778 }
1779
1780 return nullptr;
1781 }
1782
1783 /// Fold {and,or,xor} (cast X), Y.
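/// e.g. xor (sext i1 %a to i32), (sext i1 %b to i32)
///        --> sext (xor i1 %a, %b) to i32 (illustrative)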
1784 Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
1785 auto LogicOpc = I.getOpcode();
1786 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1787
1788 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1789
1790   // Fold bitwise(A >> (BW - 1), zext(icmp)), where BW is the scalar bit width
1791   // of A's type:
1792   // -> bitwise(zext(A < 0), zext(icmp))
1793   // -> zext(bitwise(A < 0, icmp))
1794 auto FoldBitwiseICmpZeroWithICmp = [&](Value *Op0,
1795 Value *Op1) -> Instruction * {
1796 Value *A;
1797 bool IsMatched =
1798 match(Op0,
1799 m_OneUse(m_LShr(
1800 m_Value(A),
1801 m_SpecificInt(Op0->getType()->getScalarSizeInBits() - 1)))) &&
1802 match(Op1, m_OneUse(m_ZExt(m_ICmp(m_Value(), m_Value()))));
1803
1804 if (!IsMatched)
1805 return nullptr;
1806
1807 auto *ICmpL =
1808 Builder.CreateICmpSLT(A, Constant::getNullValue(A->getType()));
1809 auto *ICmpR = cast<ZExtInst>(Op1)->getOperand(0);
1810 auto *BitwiseOp = Builder.CreateBinOp(LogicOpc, ICmpL, ICmpR);
1811
1812 return new ZExtInst(BitwiseOp, Op0->getType());
1813 };
1814
1815 if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op0, Op1))
1816 return Ret;
1817
1818 if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op1, Op0))
1819 return Ret;
1820
1821 CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1822 if (!Cast0)
1823 return nullptr;
1824
1825 // This must be a cast from an integer or integer vector source type to allow
1826 // transformation of the logic operation to the source type.
1827 Type *DestTy = I.getType();
1828 Type *SrcTy = Cast0->getSrcTy();
1829 if (!SrcTy->isIntOrIntVectorTy())
1830 return nullptr;
1831
1832 if (Instruction *Ret = foldLogicCastConstant(I, Cast0, *this))
1833 return Ret;
1834
1835 CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1836 if (!Cast1)
1837 return nullptr;
1838
1839 // Both operands of the logic operation are casts. The casts must be the
1840 // same kind for reduction.
1841 Instruction::CastOps CastOpcode = Cast0->getOpcode();
1842 if (CastOpcode != Cast1->getOpcode())
1843 return nullptr;
1844
1845   // Can't fold profitably unless at least one of the casts has one use.
1846 if (!Cast0->hasOneUse() && !Cast1->hasOneUse())
1847 return nullptr;
1848
1849 Value *X, *Y;
1850 if (match(Cast0, m_ZExtOrSExt(m_Value(X))) &&
1851 match(Cast1, m_ZExtOrSExt(m_Value(Y)))) {
1852 // Cast the narrower source to the wider source type.
1853 unsigned XNumBits = X->getType()->getScalarSizeInBits();
1854 unsigned YNumBits = Y->getType()->getScalarSizeInBits();
1855 if (XNumBits != YNumBits) {
1856       // Cast the narrower source to the wider source type only if both casts
1857       // have one use to avoid creating an extra instruction.
1858 if (!Cast0->hasOneUse() || !Cast1->hasOneUse())
1859 return nullptr;
1860
1861 // If the source types do not match, but the casts are matching extends,
1862 // we can still narrow the logic op.
1863 if (XNumBits < YNumBits) {
1864 X = Builder.CreateCast(CastOpcode, X, Y->getType());
1865 } else if (YNumBits < XNumBits) {
1866 Y = Builder.CreateCast(CastOpcode, Y, X->getType());
1867 }
1868 }
1869
1870 // Do the logic op in the intermediate width, then widen more.
1871 Value *NarrowLogic = Builder.CreateBinOp(LogicOpc, X, Y, I.getName());
1872 auto *Disjoint = dyn_cast<PossiblyDisjointInst>(&I);
1873 auto *NewDisjoint = dyn_cast<PossiblyDisjointInst>(NarrowLogic);
1874 if (Disjoint && NewDisjoint)
1875 NewDisjoint->setIsDisjoint(Disjoint->isDisjoint());
1876 return CastInst::Create(CastOpcode, NarrowLogic, DestTy);
1877 }
1878
1879   // If the source types of the casts differ, give up for other cast opcodes.
1880 if (SrcTy != Cast1->getSrcTy())
1881 return nullptr;
1882
1883 Value *Cast0Src = Cast0->getOperand(0);
1884 Value *Cast1Src = Cast1->getOperand(0);
1885
1886 // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
1887 if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1888 Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1889 I.getName());
1890 return CastInst::Create(CastOpcode, NewOp, DestTy);
1891 }
1892
1893 return nullptr;
1894 }
1895
1896 static Instruction *foldAndToXor(BinaryOperator &I,
1897 InstCombiner::BuilderTy &Builder) {
1898 assert(I.getOpcode() == Instruction::And);
1899 Value *Op0 = I.getOperand(0);
1900 Value *Op1 = I.getOperand(1);
1901 Value *A, *B;
1902
1903 // Operand complexity canonicalization guarantees that the 'or' is Op0.
1904 // (A | B) & ~(A & B) --> A ^ B
1905 // (A | B) & ~(B & A) --> A ^ B
1906 if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)),
1907 m_Not(m_c_And(m_Deferred(A), m_Deferred(B))))))
1908 return BinaryOperator::CreateXor(A, B);
1909
1910 // (A | ~B) & (~A | B) --> ~(A ^ B)
1911 // (A | ~B) & (B | ~A) --> ~(A ^ B)
1912 // (~B | A) & (~A | B) --> ~(A ^ B)
1913 // (~B | A) & (B | ~A) --> ~(A ^ B)
1914 if (Op0->hasOneUse() || Op1->hasOneUse())
1915 if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))),
1916 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
1917 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1918
1919 return nullptr;
1920 }
1921
1922 static Instruction *foldOrToXor(BinaryOperator &I,
1923 InstCombiner::BuilderTy &Builder) {
1924 assert(I.getOpcode() == Instruction::Or);
1925 Value *Op0 = I.getOperand(0);
1926 Value *Op1 = I.getOperand(1);
1927 Value *A, *B;
1928
1929 // Operand complexity canonicalization guarantees that the 'and' is Op0.
1930 // (A & B) | ~(A | B) --> ~(A ^ B)
1931 // (A & B) | ~(B | A) --> ~(A ^ B)
1932 if (Op0->hasOneUse() || Op1->hasOneUse())
1933 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1934 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1935 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1936
1937 // Operand complexity canonicalization guarantees that the 'xor' is Op0.
1938 // (A ^ B) | ~(A | B) --> ~(A & B)
1939 // (A ^ B) | ~(B | A) --> ~(A & B)
1940 if (Op0->hasOneUse() || Op1->hasOneUse())
1941 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
1942 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1943 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
1944
1945 // (A & ~B) | (~A & B) --> A ^ B
1946 // (A & ~B) | (B & ~A) --> A ^ B
1947 // (~B & A) | (~A & B) --> A ^ B
1948 // (~B & A) | (B & ~A) --> A ^ B
1949 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
1950 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
1951 return BinaryOperator::CreateXor(A, B);
1952
1953 return nullptr;
1954 }
1955
1956 /// Return true if a constant shift amount is always less than the specified
1957 /// bit-width. If not, the shift could create poison in the narrower type.
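/// e.g. when narrowing to i8, a shift amount of 7 is fine, but 8 would
/// create poison (illustrative).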
1958 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
1959 APInt Threshold(C->getType()->getScalarSizeInBits(), BitWidth);
1960 return match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold));
1961 }
1962
1963 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
1964 /// a common zext operand: and (binop (zext X), C), (zext X).
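/// e.g. (illustrative):
///   and (add (zext i8 %x to i32), 12), (zext i8 %x to i32)
///     --> zext (and (add i8 %x, 12), %x) to i32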
1965 Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) {
1966 // This transform could also apply to {or, and, xor}, but there are better
1967 // folds for those cases, so we don't expect those patterns here. AShr is not
1968 // handled because it should always be transformed to LShr in this sequence.
1969 // The subtract transform is different because it has a constant on the left.
1970 // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
1971 Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
1972 Constant *C;
1973 if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) &&
1974 !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) &&
1975 !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) &&
1976 !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) &&
1977 !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1)))))
1978 return nullptr;
1979
1980 Value *X;
1981 if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3))
1982 return nullptr;
1983
1984 Type *Ty = And.getType();
1985 if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
1986 return nullptr;
1987
1988 // If we're narrowing a shift, the shift amount must be safe (less than the
1989 // width) in the narrower type. If the shift amount is greater, instsimplify
1990 // usually handles that case, but we can't guarantee/assert it.
1991 Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode();
1992 if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1993 if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits()))
1994 return nullptr;
1995
1996 // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
1997 // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
1998 Value *NewC = ConstantExpr::getTrunc(C, X->getType());
1999 Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
2000 : Builder.CreateBinOp(Opc, X, NewC);
2001 return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty);
2002 }
2003
2004 /// Try folding relatively complex patterns for both And and Or operations
2005 /// with all And and Or swapped.
2006 static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
2007 InstCombiner::BuilderTy &Builder) {
2008 const Instruction::BinaryOps Opcode = I.getOpcode();
2009 assert(Opcode == Instruction::And || Opcode == Instruction::Or);
2010
2011 // Flip the logic operation.
2012 const Instruction::BinaryOps FlippedOpcode =
2013 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
2014
2015 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2016 Value *A, *B, *C, *X, *Y, *Dummy;
2017
2018 // Match following expressions:
2019 // (~(A | B) & C)
2020 // (~(A & B) | C)
2021 // Captures X = ~(A | B) or ~(A & B)
2022 const auto matchNotOrAnd =
2023 [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C,
2024 Value *&X, bool CountUses = false) -> bool {
2025 if (CountUses && !Op->hasOneUse())
2026 return false;
2027
2028 if (match(Op, m_c_BinOp(FlippedOpcode,
2029 m_CombineAnd(m_Value(X),
2030 m_Not(m_c_BinOp(Opcode, m_A, m_B))),
2031 m_C)))
2032 return !CountUses || X->hasOneUse();
2033
2034 return false;
2035 };
2036
2037 // (~(A | B) & C) | ... --> ...
2038 // (~(A & B) | C) & ... --> ...
2039   // TODO: The one-use checks are conservative. We only need to check that the
2040   //       total number of multiply-used values does not exceed the reduction
2041   //       in operations.
2042 if (matchNotOrAnd(Op0, m_Value(A), m_Value(B), m_Value(C), X)) {
2043 // (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A
2044 // (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A)
2045 if (matchNotOrAnd(Op1, m_Specific(A), m_Specific(C), m_Specific(B), Dummy,
2046 true)) {
2047 Value *Xor = Builder.CreateXor(B, C);
2048 return (Opcode == Instruction::Or)
2049 ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(A))
2050 : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, A));
2051 }
2052
2053 // (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B
2054 // (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B)
2055 if (matchNotOrAnd(Op1, m_Specific(B), m_Specific(C), m_Specific(A), Dummy,
2056 true)) {
2057 Value *Xor = Builder.CreateXor(A, C);
2058 return (Opcode == Instruction::Or)
2059 ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(B))
2060 : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, B));
2061 }
2062
2063 // (~(A | B) & C) | ~(A | C) --> ~((B & C) | A)
2064 // (~(A & B) | C) & ~(A & C) --> ~((B | C) & A)
2065 if (match(Op1, m_OneUse(m_Not(m_OneUse(
2066 m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
2067 return BinaryOperator::CreateNot(Builder.CreateBinOp(
2068 Opcode, Builder.CreateBinOp(FlippedOpcode, B, C), A));
2069
2070 // (~(A | B) & C) | ~(B | C) --> ~((A & C) | B)
2071 // (~(A & B) | C) & ~(B & C) --> ~((A | C) & B)
2072 if (match(Op1, m_OneUse(m_Not(m_OneUse(
2073 m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)))))))
2074 return BinaryOperator::CreateNot(Builder.CreateBinOp(
2075 Opcode, Builder.CreateBinOp(FlippedOpcode, A, C), B));
2076
2077 // (~(A | B) & C) | ~(C | (A ^ B)) --> ~((A | B) & (C | (A ^ B)))
2078 // Note, the pattern with swapped and/or is not handled because the
2079     // result is more undefined than the source:
2080 // (~(A & B) | C) & ~(C & (A ^ B)) --> (A ^ B ^ C) | ~(A | C) is invalid.
2081 if (Opcode == Instruction::Or && Op0->hasOneUse() &&
2082 match(Op1, m_OneUse(m_Not(m_CombineAnd(
2083 m_Value(Y),
2084 m_c_BinOp(Opcode, m_Specific(C),
2085 m_c_Xor(m_Specific(A), m_Specific(B)))))))) {
2086 // X = ~(A | B)
2087       // Y = C | (A ^ B)
2088 Value *Or = cast<BinaryOperator>(X)->getOperand(0);
2089 return BinaryOperator::CreateNot(Builder.CreateAnd(Or, Y));
2090 }
2091 }
2092
2093 // (~A & B & C) | ... --> ...
2094 // (~A | B | C) | ... --> ...
2095   // TODO: The one-use checks are conservative. We only need to check that the
2096   //       total number of multiply-used values does not exceed the reduction
2097   //       in operations.
2098 if (match(Op0,
2099 m_OneUse(m_c_BinOp(FlippedOpcode,
2100 m_BinOp(FlippedOpcode, m_Value(B), m_Value(C)),
2101 m_CombineAnd(m_Value(X), m_Not(m_Value(A)))))) ||
2102 match(Op0, m_OneUse(m_c_BinOp(
2103 FlippedOpcode,
2104 m_c_BinOp(FlippedOpcode, m_Value(C),
2105 m_CombineAnd(m_Value(X), m_Not(m_Value(A)))),
2106 m_Value(B))))) {
2107 // X = ~A
2108 // (~A & B & C) | ~(A | B | C) --> ~(A | (B ^ C))
2109 // (~A | B | C) & ~(A & B & C) --> (~A | (B ^ C))
2110 if (match(Op1, m_OneUse(m_Not(m_c_BinOp(
2111 Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)),
2112 m_Specific(C))))) ||
2113 match(Op1, m_OneUse(m_Not(m_c_BinOp(
2114 Opcode, m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)),
2115 m_Specific(A))))) ||
2116 match(Op1, m_OneUse(m_Not(m_c_BinOp(
2117 Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)),
2118 m_Specific(B)))))) {
2119 Value *Xor = Builder.CreateXor(B, C);
2120 return (Opcode == Instruction::Or)
2121 ? BinaryOperator::CreateNot(Builder.CreateOr(Xor, A))
2122 : BinaryOperator::CreateOr(Xor, X);
2123 }
2124
2125 // (~A & B & C) | ~(A | B) --> (C | ~B) & ~A
2126 // (~A | B | C) & ~(A & B) --> (C & ~B) | ~A
2127 if (match(Op1, m_OneUse(m_Not(m_OneUse(
2128 m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)))))))
2129 return BinaryOperator::Create(
2130 FlippedOpcode, Builder.CreateBinOp(Opcode, C, Builder.CreateNot(B)),
2131 X);
2132
2133 // (~A & B & C) | ~(A | C) --> (B | ~C) & ~A
2134 // (~A | B | C) & ~(A & C) --> (B & ~C) | ~A
2135 if (match(Op1, m_OneUse(m_Not(m_OneUse(
2136 m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
2137 return BinaryOperator::Create(
2138 FlippedOpcode, Builder.CreateBinOp(Opcode, B, Builder.CreateNot(C)),
2139 X);
2140 }
2141
2142 return nullptr;
2143 }
2144
2145 /// Try to reassociate a pair of binops so that values with one use only are
2146 /// part of the same instruction. This may enable folds that are limited with
2147 /// multi-use restrictions and makes it more likely to match other patterns that
2148 /// are looking for a common operand.
2149 static Instruction *reassociateForUses(BinaryOperator &BO,
2150 InstCombinerImpl::BuilderTy &Builder) {
2151 Instruction::BinaryOps Opcode = BO.getOpcode();
2152 Value *X, *Y, *Z;
2153 if (match(&BO,
2154 m_c_BinOp(Opcode, m_OneUse(m_BinOp(Opcode, m_Value(X), m_Value(Y))),
2155 m_OneUse(m_Value(Z))))) {
2156 if (!isa<Constant>(X) && !isa<Constant>(Y) && !isa<Constant>(Z)) {
2157 // (X op Y) op Z --> (Y op Z) op X
2158 if (!X->hasOneUse()) {
2159 Value *YZ = Builder.CreateBinOp(Opcode, Y, Z);
2160 return BinaryOperator::Create(Opcode, YZ, X);
2161 }
2162 // (X op Y) op Z --> (X op Z) op Y
2163 if (!Y->hasOneUse()) {
2164 Value *XZ = Builder.CreateBinOp(Opcode, X, Z);
2165 return BinaryOperator::Create(Opcode, XZ, Y);
2166 }
2167 }
2168 }
2169
2170 return nullptr;
2171 }
2172
2173 // Match
2174 // (X + C2) | C
2175 // (X + C2) ^ C
2176 // (X + C2) & C
2177 // and convert to do the bitwise logic first:
2178 // (X | C) + C2
2179 // (X ^ C) + C2
2180 // (X & C) + C2
2181 // iff the bits affected by the logic op are below the last bit affected by the math op.
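// e.g. (X + 16) | 3 --> (X | 3) + 16 (illustrative: the 'or' only touches
// bits 0-1, which lie below bit 4, the lowest bit affected by the add).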
2182 static Instruction *canonicalizeLogicFirst(BinaryOperator &I,
2183 InstCombiner::BuilderTy &Builder) {
2184 Type *Ty = I.getType();
2185 Instruction::BinaryOps OpC = I.getOpcode();
2186 Value *Op0 = I.getOperand(0);
2187 Value *Op1 = I.getOperand(1);
2188 Value *X;
2189 const APInt *C, *C2;
2190
2191 if (!(match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C2)))) &&
2192 match(Op1, m_APInt(C))))
2193 return nullptr;
2194
2195 unsigned Width = Ty->getScalarSizeInBits();
2196 unsigned LastOneMath = Width - C2->countr_zero();
2197
2198 switch (OpC) {
2199 case Instruction::And:
2200 if (C->countl_one() < LastOneMath)
2201 return nullptr;
2202 break;
2203 case Instruction::Xor:
2204 case Instruction::Or:
2205 if (C->countl_zero() < LastOneMath)
2206 return nullptr;
2207 break;
2208 default:
2209 llvm_unreachable("Unexpected BinaryOp!");
2210 }
2211
2212 Value *NewBinOp = Builder.CreateBinOp(OpC, X, ConstantInt::get(Ty, *C));
2213 return BinaryOperator::CreateWithCopiedFlags(Instruction::Add, NewBinOp,
2214 ConstantInt::get(Ty, *C2), Op0);
2215 }
2216
2217 // binop(shift(ShiftedC1, ShAmt), shift(ShiftedC2, add(ShAmt, AddC))) ->
2218 // shift(binop(ShiftedC1, shift(ShiftedC2, AddC)), ShAmt)
2219 // where both shifts are the same and AddC is a valid shift amount.
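// e.g. or (shl 1, %sh), (shl 4, (add %sh, 1)) --> shl 9, %sh, since
// 1 | (4 << 1) == 9 (illustrative).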
2220 Instruction *InstCombinerImpl::foldBinOpOfDisplacedShifts(BinaryOperator &I) {
2221 assert((I.isBitwiseLogicOp() || I.getOpcode() == Instruction::Add) &&
2222 "Unexpected opcode");
2223
2224 Value *ShAmt;
2225 Constant *ShiftedC1, *ShiftedC2, *AddC;
2226 Type *Ty = I.getType();
2227 unsigned BitWidth = Ty->getScalarSizeInBits();
2228 if (!match(&I, m_c_BinOp(m_Shift(m_ImmConstant(ShiftedC1), m_Value(ShAmt)),
2229 m_Shift(m_ImmConstant(ShiftedC2),
2230 m_AddLike(m_Deferred(ShAmt),
2231 m_ImmConstant(AddC))))))
2232 return nullptr;
2233
2234 // Make sure the add constant is a valid shift amount.
2235 if (!match(AddC,
2236 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(BitWidth, BitWidth))))
2237 return nullptr;
2238
2239 // Avoid constant expressions.
2240 auto *Op0Inst = dyn_cast<Instruction>(I.getOperand(0));
2241 auto *Op1Inst = dyn_cast<Instruction>(I.getOperand(1));
2242 if (!Op0Inst || !Op1Inst)
2243 return nullptr;
2244
2245 // Both shifts must be the same.
2246 Instruction::BinaryOps ShiftOp =
2247 static_cast<Instruction::BinaryOps>(Op0Inst->getOpcode());
2248 if (ShiftOp != Op1Inst->getOpcode())
2249 return nullptr;
2250
2251 // For adds, only left shifts are supported.
2252 if (I.getOpcode() == Instruction::Add && ShiftOp != Instruction::Shl)
2253 return nullptr;
2254
2255 Value *NewC = Builder.CreateBinOp(
2256 I.getOpcode(), ShiftedC1, Builder.CreateBinOp(ShiftOp, ShiftedC2, AddC));
2257 return BinaryOperator::Create(ShiftOp, NewC, ShAmt);
2258 }
2259
2260 // Fold and/or/xor with two equal intrinsic IDs:
2261 // bitwise(fshl (A, B, ShAmt), fshl(C, D, ShAmt))
2262 // -> fshl(bitwise(A, C), bitwise(B, D), ShAmt)
2263 // bitwise(fshr (A, B, ShAmt), fshr(C, D, ShAmt))
2264 // -> fshr(bitwise(A, C), bitwise(B, D), ShAmt)
2265 // bitwise(bswap(A), bswap(B)) -> bswap(bitwise(A, B))
2266 // bitwise(bswap(A), C) -> bswap(bitwise(A, bswap(C)))
2267 // bitwise(bitreverse(A), bitreverse(B)) -> bitreverse(bitwise(A, B))
2268 // bitwise(bitreverse(A), C) -> bitreverse(bitwise(A, bitreverse(C)))
2269 static Instruction *
2270 foldBitwiseLogicWithIntrinsics(BinaryOperator &I,
2271 InstCombiner::BuilderTy &Builder) {
2272   assert(I.isBitwiseLogicOp() && "Should be and/or/xor");
2273 if (!I.getOperand(0)->hasOneUse())
2274 return nullptr;
2275 IntrinsicInst *X = dyn_cast<IntrinsicInst>(I.getOperand(0));
2276 if (!X)
2277 return nullptr;
2278
2279 IntrinsicInst *Y = dyn_cast<IntrinsicInst>(I.getOperand(1));
2280 if (Y && (!Y->hasOneUse() || X->getIntrinsicID() != Y->getIntrinsicID()))
2281 return nullptr;
2282
2283 Intrinsic::ID IID = X->getIntrinsicID();
2284 const APInt *RHSC;
2285 // Try to match constant RHS.
2286 if (!Y && (!(IID == Intrinsic::bswap || IID == Intrinsic::bitreverse) ||
2287 !match(I.getOperand(1), m_APInt(RHSC))))
2288 return nullptr;
2289
2290 switch (IID) {
2291 case Intrinsic::fshl:
2292 case Intrinsic::fshr: {
2293 if (X->getOperand(2) != Y->getOperand(2))
2294 return nullptr;
2295 Value *NewOp0 =
2296 Builder.CreateBinOp(I.getOpcode(), X->getOperand(0), Y->getOperand(0));
2297 Value *NewOp1 =
2298 Builder.CreateBinOp(I.getOpcode(), X->getOperand(1), Y->getOperand(1));
2299 Function *F =
2300 Intrinsic::getOrInsertDeclaration(I.getModule(), IID, I.getType());
2301 return CallInst::Create(F, {NewOp0, NewOp1, X->getOperand(2)});
2302 }
2303 case Intrinsic::bswap:
2304 case Intrinsic::bitreverse: {
2305 Value *NewOp0 = Builder.CreateBinOp(
2306 I.getOpcode(), X->getOperand(0),
2307 Y ? Y->getOperand(0)
2308 : ConstantInt::get(I.getType(), IID == Intrinsic::bswap
2309 ? RHSC->byteSwap()
2310 : RHSC->reverseBits()));
2311 Function *F =
2312 Intrinsic::getOrInsertDeclaration(I.getModule(), IID, I.getType());
2313 return CallInst::Create(F, {NewOp0});
2314 }
2315 default:
2316 return nullptr;
2317 }
2318 }
2319
2320 // Try to simplify V by replacing occurrences of Op with RepOp, but only look
2321 // through bitwise operations. In particular, for X | Y we try to replace Y with
2322 // 0 inside X and for X & Y we try to replace Y with -1 inside X.
2323 // Return the simplified result of X if successful, and nullptr otherwise.
2324 // If SimplifyOnly is true, no new instructions will be created.
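// e.g. and (xor %y, %a), %y --> and (not %a), %y, by replacing %y with -1
// inside the other 'and' operand (illustrative).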
2325 static Value *simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp,
2326 bool SimplifyOnly,
2327 InstCombinerImpl &IC,
2328 unsigned Depth = 0) {
2329 if (Op == RepOp)
2330 return nullptr;
2331
2332 if (V == Op)
2333 return RepOp;
2334
2335 auto *I = dyn_cast<BinaryOperator>(V);
2336 if (!I || !I->isBitwiseLogicOp() || Depth >= 3)
2337 return nullptr;
2338
2339 if (!I->hasOneUse())
2340 SimplifyOnly = true;
2341
2342 Value *NewOp0 = simplifyAndOrWithOpReplaced(I->getOperand(0), Op, RepOp,
2343 SimplifyOnly, IC, Depth + 1);
2344 Value *NewOp1 = simplifyAndOrWithOpReplaced(I->getOperand(1), Op, RepOp,
2345 SimplifyOnly, IC, Depth + 1);
2346 if (!NewOp0 && !NewOp1)
2347 return nullptr;
2348
2349 if (!NewOp0)
2350 NewOp0 = I->getOperand(0);
2351 if (!NewOp1)
2352 NewOp1 = I->getOperand(1);
2353
2354 if (Value *Res = simplifyBinOp(I->getOpcode(), NewOp0, NewOp1,
2355 IC.getSimplifyQuery().getWithInstruction(I)))
2356 return Res;
2357
2358 if (SimplifyOnly)
2359 return nullptr;
2360 return IC.Builder.CreateBinOp(I->getOpcode(), NewOp0, NewOp1);
2361 }
2362
2363 /// Reassociate and/or expressions to see if we can fold the inner and/or ops.
2364 /// TODO: Make this recursive; it's a little tricky because an arbitrary
2365 /// number of and/or instructions might have to be created.
2366 Value *InstCombinerImpl::reassociateBooleanAndOr(Value *LHS, Value *X, Value *Y,
2367 Instruction &I, bool IsAnd,
2368 bool RHSIsLogical) {
2369 Instruction::BinaryOps Opcode = IsAnd ? Instruction::And : Instruction::Or;
2370 // LHS bop (X lop Y) --> (LHS bop X) lop Y
2371 // LHS bop (X bop Y) --> (LHS bop X) bop Y
2372 if (Value *Res = foldBooleanAndOr(LHS, X, I, IsAnd, /*IsLogical=*/false))
2373 return RHSIsLogical ? Builder.CreateLogicalOp(Opcode, Res, Y)
2374 : Builder.CreateBinOp(Opcode, Res, Y);
2375 // LHS bop (X bop Y) --> X bop (LHS bop Y)
2376 // LHS bop (X lop Y) --> X lop (LHS bop Y)
2377 if (Value *Res = foldBooleanAndOr(LHS, Y, I, IsAnd, /*IsLogical=*/false))
2378 return RHSIsLogical ? Builder.CreateLogicalOp(Opcode, X, Res)
2379 : Builder.CreateBinOp(Opcode, X, Res);
2380 return nullptr;
2381 }
2382
2383 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2384 // here. We should standardize that construct where it is needed or choose some
2385 // other way to ensure that commutated variants of patterns are not missed.
2386 Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
2387 Type *Ty = I.getType();
2388
2389 if (Value *V = simplifyAndInst(I.getOperand(0), I.getOperand(1),
2390 SQ.getWithInstruction(&I)))
2391 return replaceInstUsesWith(I, V);
2392
2393 if (SimplifyAssociativeOrCommutative(I))
2394 return &I;
2395
2396 if (Instruction *X = foldVectorBinop(I))
2397 return X;
2398
2399 if (Instruction *Phi = foldBinopWithPhiOperands(I))
2400 return Phi;
2401
2402 // See if we can simplify any instructions used by the instruction whose sole
2403 // purpose is to compute bits we don't care about.
2404 if (SimplifyDemandedInstructionBits(I))
2405 return &I;
2406
2407 // Do this before using distributive laws to catch simple and/or/not patterns.
2408 if (Instruction *Xor = foldAndToXor(I, Builder))
2409 return Xor;
2410
2411 if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
2412 return X;
2413
2414 // (A|B)&(A|C) -> A|(B&C) etc
2415 if (Value *V = foldUsingDistributiveLaws(I))
2416 return replaceInstUsesWith(I, V);
2417
2418 if (Instruction *R = foldBinOpShiftWithShift(I))
2419 return R;
2420
2421 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2422
2423 Value *X, *Y;
2424 const APInt *C;
2425 if ((match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) ||
2426 (match(Op0, m_OneUse(m_Shl(m_APInt(C), m_Value(X)))) && (*C)[0])) &&
2427 match(Op1, m_One())) {
2428 // (1 >> X) & 1 --> zext(X == 0)
2429 // (C << X) & 1 --> zext(X == 0), when C is odd
2430 Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, 0));
2431 return new ZExtInst(IsZero, Ty);
2432 }
2433
2434 // (-(X & 1)) & Y --> (X & 1) == 0 ? 0 : Y
2435 Value *Neg;
2436 if (match(&I,
2437 m_c_And(m_CombineAnd(m_Value(Neg),
2438 m_OneUse(m_Neg(m_And(m_Value(), m_One())))),
2439 m_Value(Y)))) {
2440 Value *Cmp = Builder.CreateIsNull(Neg);
2441 return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Y);
2442 }
2443
2444 // Canonicalize:
2445 // (X +/- Y) & Y --> ~X & Y when Y is a power of 2.
2446 if (match(&I, m_c_And(m_Value(Y), m_OneUse(m_CombineOr(
2447 m_c_Add(m_Value(X), m_Deferred(Y)),
2448 m_Sub(m_Value(X), m_Deferred(Y)))))) &&
2449 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, &I))
2450 return BinaryOperator::CreateAnd(Builder.CreateNot(X), Y);
2451
2452 if (match(Op1, m_APInt(C))) {
2453 const APInt *XorC;
2454 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
2455 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
2456 Constant *NewC = ConstantInt::get(Ty, *C & *XorC);
2457 Value *And = Builder.CreateAnd(X, Op1);
2458 And->takeName(Op0);
2459 return BinaryOperator::CreateXor(And, NewC);
2460 }
2461
2462 const APInt *OrC;
2463 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
2464 // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
2465 // NOTE: This reduces the number of bits set in the & mask, which
2466 // can expose opportunities for store narrowing for scalars.
2467 // NOTE: SimplifyDemandedBits should have already removed bits from C1
2468 // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in
2469 // above, but this feels safer.
2470 APInt Together = *C & *OrC;
2471 Value *And = Builder.CreateAnd(X, ConstantInt::get(Ty, Together ^ *C));
2472 And->takeName(Op0);
2473 return BinaryOperator::CreateOr(And, ConstantInt::get(Ty, Together));
2474 }
2475
2476 unsigned Width = Ty->getScalarSizeInBits();
2477 const APInt *ShiftC;
2478 if (match(Op0, m_OneUse(m_SExt(m_AShr(m_Value(X), m_APInt(ShiftC))))) &&
2479 ShiftC->ult(Width)) {
2480 if (*C == APInt::getLowBitsSet(Width, Width - ShiftC->getZExtValue())) {
2481 // We are clearing high bits that were potentially set by sext+ashr:
2482 // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC
2483 Value *Sext = Builder.CreateSExt(X, Ty);
2484 Constant *ShAmtC = ConstantInt::get(Ty, ShiftC->zext(Width));
2485 return BinaryOperator::CreateLShr(Sext, ShAmtC);
2486 }
2487 }
2488
2489 // If this 'and' clears the sign-bits added by ashr, replace with lshr:
2490 // and (ashr X, ShiftC), C --> lshr X, ShiftC
2491 if (match(Op0, m_AShr(m_Value(X), m_APInt(ShiftC))) && ShiftC->ult(Width) &&
2492 C->isMask(Width - ShiftC->getZExtValue()))
2493 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, *ShiftC));
2494
2495 const APInt *AddC;
2496 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC)))) {
2497 // If we are masking the result of the add down to exactly one bit and
2498 // the constant we are adding has no bits set below that bit, then the
2499 // add is flipping a single bit. Example:
2500 // (X + 4) & 4 --> (X & 4) ^ 4
2501 if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) {
2502 assert((*C & *AddC) != 0 && "Expected common bit");
2503 Value *NewAnd = Builder.CreateAnd(X, Op1);
2504 return BinaryOperator::CreateXor(NewAnd, Op1);
2505 }
2506 }
2507
2508 // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the
2509 // bitwidth of X and OP behaves well when given trunc(C1) and X.
2510 auto isNarrowableBinOpcode = [](BinaryOperator *B) {
2511 switch (B->getOpcode()) {
2512 case Instruction::Xor:
2513 case Instruction::Or:
2514 case Instruction::Mul:
2515 case Instruction::Add:
2516 case Instruction::Sub:
2517 return true;
2518 default:
2519 return false;
2520 }
2521 };
2522 BinaryOperator *BO;
2523 if (match(Op0, m_OneUse(m_BinOp(BO))) && isNarrowableBinOpcode(BO)) {
2524 Instruction::BinaryOps BOpcode = BO->getOpcode();
2525 Value *X;
2526 const APInt *C1;
2527 // TODO: The one-use restrictions could be relaxed a little if the AND
2528 // is going to be removed.
2529 // Try to narrow the 'and' and a binop with constant operand:
2530 // and (bo (zext X), C1), C --> zext (and (bo X, TruncC1), TruncC)
2531 if (match(BO, m_c_BinOp(m_OneUse(m_ZExt(m_Value(X))), m_APInt(C1))) &&
2532 C->isIntN(X->getType()->getScalarSizeInBits())) {
2533 unsigned XWidth = X->getType()->getScalarSizeInBits();
2534 Constant *TruncC1 = ConstantInt::get(X->getType(), C1->trunc(XWidth));
2535 Value *BinOp = isa<ZExtInst>(BO->getOperand(0))
2536 ? Builder.CreateBinOp(BOpcode, X, TruncC1)
2537 : Builder.CreateBinOp(BOpcode, TruncC1, X);
2538 Constant *TruncC = ConstantInt::get(X->getType(), C->trunc(XWidth));
2539 Value *And = Builder.CreateAnd(BinOp, TruncC);
2540 return new ZExtInst(And, Ty);
2541 }
2542
2543 // Similar to above: if the mask matches the zext input width, then the
2544 // 'and' can be eliminated, so we can truncate the other variable op:
2545 // and (bo (zext X), Y), C --> zext (bo X, (trunc Y))
2546 if (isa<Instruction>(BO->getOperand(0)) &&
2547 match(BO->getOperand(0), m_OneUse(m_ZExt(m_Value(X)))) &&
2548 C->isMask(X->getType()->getScalarSizeInBits())) {
2549 Y = BO->getOperand(1);
2550 Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr");
2551 Value *NewBO =
2552 Builder.CreateBinOp(BOpcode, X, TrY, BO->getName() + ".narrow");
2553 return new ZExtInst(NewBO, Ty);
2554 }
2555 // and (bo Y, (zext X)), C --> zext (bo (trunc Y), X)
2556 if (isa<Instruction>(BO->getOperand(1)) &&
2557 match(BO->getOperand(1), m_OneUse(m_ZExt(m_Value(X)))) &&
2558 C->isMask(X->getType()->getScalarSizeInBits())) {
2559 Y = BO->getOperand(0);
2560 Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr");
2561 Value *NewBO =
2562 Builder.CreateBinOp(BOpcode, TrY, X, BO->getName() + ".narrow");
2563 return new ZExtInst(NewBO, Ty);
2564 }
2565 }
2566
2567 // This is intentionally placed after the narrowing transforms for
2568 // efficiency (transform directly to the narrow logic op if possible).
2569 // If the mask is only needed on one incoming arm, push the 'and' op up.
2570 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
2571 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2572 APInt NotAndMask(~(*C));
2573 BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
2574 if (MaskedValueIsZero(X, NotAndMask, &I)) {
2575 // Not masking anything out for the LHS, move mask to RHS.
2576 // and ({x}or X, Y), C --> {x}or X, (and Y, C)
2577 Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
2578 return BinaryOperator::Create(BinOp, X, NewRHS);
2579 }
2580 if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, &I)) {
2581 // Not masking anything out for the RHS, move mask to LHS.
2582 // and ({x}or X, Y), C --> {x}or (and X, C), Y
2583 Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
2584 return BinaryOperator::Create(BinOp, NewLHS, Y);
2585 }
2586 }
2587
2588 // When the mask is a power-of-2 constant and op0 is a shifted-power-of-2
2589 // constant, test if the shift amount equals the offset bit index:
2590 // (ShiftC << X) & C --> X == (log2(C) - log2(ShiftC)) ? C : 0
2591 // (ShiftC >> X) & C --> X == (log2(ShiftC) - log2(C)) ? C : 0
2592 if (C->isPowerOf2() &&
2593 match(Op0, m_OneUse(m_LogicalShift(m_Power2(ShiftC), m_Value(X))))) {
2594 int Log2ShiftC = ShiftC->exactLogBase2();
2595 int Log2C = C->exactLogBase2();
2596 bool IsShiftLeft =
2597 cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl;
2598 int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
2599 assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask");
2600 Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, BitNum));
2601 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C),
2602 ConstantInt::getNullValue(Ty));
2603 }
2604
2605 Constant *C1, *C2;
2606 const APInt *C3 = C;
2607 Value *X;
2608 if (C3->isPowerOf2()) {
2609 Constant *Log2C3 = ConstantInt::get(Ty, C3->countr_zero());
2610 if (match(Op0, m_OneUse(m_LShr(m_Shl(m_ImmConstant(C1), m_Value(X)),
2611 m_ImmConstant(C2)))) &&
2612 match(C1, m_Power2())) {
2613 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
2614 Constant *LshrC = ConstantExpr::getAdd(C2, Log2C3);
2615 KnownBits KnownLShrc = computeKnownBits(LshrC, nullptr);
2616 if (KnownLShrc.getMaxValue().ult(Width)) {
2617 // iff C1,C3 is pow2 and C2 + cttz(C3) < BitWidth:
2618 // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0
2619 Constant *CmpC = ConstantExpr::getSub(LshrC, Log2C1);
2620 Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
2621 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
2622 ConstantInt::getNullValue(Ty));
2623 }
2624 }
2625
2626 if (match(Op0, m_OneUse(m_Shl(m_LShr(m_ImmConstant(C1), m_Value(X)),
2627 m_ImmConstant(C2)))) &&
2628 match(C1, m_Power2())) {
2629 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
2630 Constant *Cmp =
2631 ConstantFoldCompareInstOperands(ICmpInst::ICMP_ULT, Log2C3, C2, DL);
2632 if (Cmp && Cmp->isZeroValue()) {
2633 // iff C1,C3 is pow2 and Log2(C3) >= C2:
2634 // ((C1 >> X) << C2) & C3 -> X == (cttz(C1)+C2-cttz(C3)) ? C3 : 0
2635 Constant *ShlC = ConstantExpr::getAdd(C2, Log2C1);
2636 Constant *CmpC = ConstantExpr::getSub(ShlC, Log2C3);
2637 Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
2638 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
2639 ConstantInt::getNullValue(Ty));
2640 }
2641 }
2642 }
2643 }
2644
2645 // If we are clearing the sign bit of a floating-point value, convert this to
2646 // fabs, then cast back to integer.
2647 //
2648   // This is a generous interpretation for noimplicitfloat, as this is not a
2649   // true floating-point operation.
2650 //
2651 // Assumes any IEEE-represented type has the sign bit in the high bit.
2652 // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
2653 Value *CastOp;
2654 if (match(Op0, m_ElementWiseBitCast(m_Value(CastOp))) &&
2655 match(Op1, m_MaxSignedValue()) &&
2656 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
2657 Attribute::NoImplicitFloat)) {
2658 Type *EltTy = CastOp->getType()->getScalarType();
2659 if (EltTy->isFloatingPointTy() &&
2660 APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
2661 Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
2662 return new BitCastInst(FAbs, I.getType());
2663 }
2664 }
2665
2666 // and(shl(zext(X), Y), SignMask) -> and(sext(X), SignMask)
2667 // where Y is a valid shift amount.
2668 if (match(&I, m_And(m_OneUse(m_Shl(m_ZExt(m_Value(X)), m_Value(Y))),
2669 m_SignMask())) &&
2670 match(Y, m_SpecificInt_ICMP(
2671 ICmpInst::Predicate::ICMP_EQ,
2672 APInt(Ty->getScalarSizeInBits(),
2673 Ty->getScalarSizeInBits() -
2674 X->getType()->getScalarSizeInBits())))) {
2675 auto *SExt = Builder.CreateSExt(X, Ty, X->getName() + ".signext");
2676 return BinaryOperator::CreateAnd(SExt, Op1);
2677 }

  if (Instruction *Z = narrowMaskedBinOp(I))
    return Z;

  if (I.getType()->isIntOrIntVectorTy(1)) {
    if (auto *SI0 = dyn_cast<SelectInst>(Op0)) {
      if (auto *R =
              foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ true))
        return R;
    }
    if (auto *SI1 = dyn_cast<SelectInst>(Op1)) {
      if (auto *R =
              foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ true))
        return R;
    }
  }

  if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
    return FoldedLogic;

  if (Instruction *DeMorgan = matchDeMorgansLaws(I, *this))
    return DeMorgan;

  {
    Value *A, *B, *C;
    // A & ~(A ^ B) --> A & B
    if (match(Op1, m_Not(m_c_Xor(m_Specific(Op0), m_Value(B)))))
      return BinaryOperator::CreateAnd(Op0, B);
    // ~(A ^ B) & A --> A & B
    if (match(Op0, m_Not(m_c_Xor(m_Specific(Op1), m_Value(B)))))
      return BinaryOperator::CreateAnd(Op1, B);

    // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
    if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
        match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) {
      Value *NotC = Op1->hasOneUse()
                        ? Builder.CreateNot(C)
                        : getFreelyInverted(C, C->hasOneUse(), &Builder);
      if (NotC != nullptr)
        return BinaryOperator::CreateAnd(Op0, NotC);
    }

    // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
    if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))) &&
        match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) {
      Value *NotC = Op0->hasOneUse()
                        ? Builder.CreateNot(C)
                        : getFreelyInverted(C, C->hasOneUse(), &Builder);
      if (NotC != nullptr)
        return BinaryOperator::CreateAnd(Op1, NotC);
    }

    // (A | B) & (~A ^ B) -> A & B
    // (A | B) & (B ^ ~A) -> A & B
    // (B | A) & (~A ^ B) -> A & B
    // (B | A) & (B ^ ~A) -> A & B
    if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
        match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);

    // (~A ^ B) & (A | B) -> A & B
    // (~A ^ B) & (B | A) -> A & B
    // (B ^ ~A) & (A | B) -> A & B
    // (B ^ ~A) & (B | A) -> A & B
    if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
        match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(A, B);

    // (~A | B) & (A ^ B) -> ~A & B
    // (~A | B) & (B ^ A) -> ~A & B
    // (B | ~A) & (A ^ B) -> ~A & B
    // (B | ~A) & (B ^ A) -> ~A & B
    if (match(Op0, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
        match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);

    // (A ^ B) & (~A | B) -> ~A & B
    // (B ^ A) & (~A | B) -> ~A & B
    // (A ^ B) & (B | ~A) -> ~A & B
    // (B ^ A) & (B | ~A) -> ~A & B
    if (match(Op1, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
        match(Op0, m_c_Xor(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
  }

  if (Value *Res =
          foldBooleanAndOr(Op0, Op1, I, /*IsAnd=*/true, /*IsLogical=*/false))
    return replaceInstUsesWith(I, Res);

  if (match(Op1, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) {
    bool IsLogical = isa<SelectInst>(Op1);
    if (auto *V = reassociateBooleanAndOr(Op0, X, Y, I, /*IsAnd=*/true,
                                          /*RHSIsLogical=*/IsLogical))
      return replaceInstUsesWith(I, V);
  }
  if (match(Op0, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) {
    bool IsLogical = isa<SelectInst>(Op0);
    if (auto *V = reassociateBooleanAndOr(Op1, X, Y, I, /*IsAnd=*/true,
                                          /*RHSIsLogical=*/IsLogical))
      return replaceInstUsesWith(I, V);
  }

  if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
    return FoldedFCmps;

  if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
    return CastedAnd;

  if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
    return Sel;

  // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
  // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
  //       with binop identity constant. But creating a select with non-constant
  //       arm may not be reversible due to poison semantics. Is that a good
  //       canonicalization?
  Value *A, *B;
  if (match(&I, m_c_And(m_SExt(m_Value(A)), m_Value(B))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, B, Constant::getNullValue(Ty));

  // Similarly, a 'not' of the bool translates to a swap of the select arms:
  // ~sext(A) & B / B & ~sext(A) --> A ? 0 : B
  if (match(&I, m_c_And(m_Not(m_SExt(m_Value(A))), m_Value(B))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, Constant::getNullValue(Ty), B);

  // and(zext(A), B) -> A ? (B & 1) : 0
  if (match(&I, m_c_And(m_OneUse(m_ZExt(m_Value(A))), m_Value(B))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, Builder.CreateAnd(B, ConstantInt::get(Ty, 1)),
                              Constant::getNullValue(Ty));

  // (-1 + A) & B --> A ? 0 : B where A is 0/1.
  if (match(&I, m_c_And(m_OneUse(m_Add(m_ZExtOrSelf(m_Value(A)), m_AllOnes())),
                        m_Value(B)))) {
    if (A->getType()->isIntOrIntVectorTy(1))
      return SelectInst::Create(A, Constant::getNullValue(Ty), B);
    if (computeKnownBits(A, &I).countMaxActiveBits() <= 1) {
      return SelectInst::Create(
          Builder.CreateICmpEQ(A, Constant::getNullValue(A->getType())), B,
          Constant::getNullValue(Ty));
    }
  }

  // (iN X s>> (N-1)) & Y --> (X s< 0) ? Y : 0 -- with optional sext
  if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
                            m_AShr(m_Value(X), m_APIntAllowPoison(C)))),
                        m_Value(Y))) &&
      *C == X->getType()->getScalarSizeInBits() - 1) {
    Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
    return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
  }
  // If there's a 'not' of the shifted value, swap the select operands:
  // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
  if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
                            m_Not(m_AShr(m_Value(X), m_APIntAllowPoison(C))))),
                        m_Value(Y))) &&
      *C == X->getType()->getScalarSizeInBits() - 1) {
    Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
    return SelectInst::Create(IsNeg, ConstantInt::getNullValue(Ty), Y);
  }

  // (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
  if (sinkNotIntoOtherHandOfLogicalOp(I))
    return &I;

  // An and recurrence w/loop invariant step is equivalent to (and start, step)
  PHINode *PN = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
    return replaceInstUsesWith(I, Builder.CreateAnd(Start, Step));
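  // Reasoning sketch for the fold above: with PN = phi(Start, I) and
  // I = and(PN, Step), the first iteration yields Start & Step, and re-and'ing
  // with Step is idempotent, so I is Start & Step on every iteration.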

  if (Instruction *R = reassociateForUses(I, Builder))
    return R;

  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
    return Canonicalized;

  if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
    return Folded;

  if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
    return Res;

  if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
    return Res;

  if (Value *V =
          simplifyAndOrWithOpReplaced(Op0, Op1, Constant::getAllOnesValue(Ty),
                                      /*SimplifyOnly*/ false, *this))
    return BinaryOperator::CreateAnd(V, Op1);
  if (Value *V =
          simplifyAndOrWithOpReplaced(Op1, Op0, Constant::getAllOnesValue(Ty),
                                      /*SimplifyOnly*/ false, *this))
    return BinaryOperator::CreateAnd(Op0, V);

  return nullptr;
}

Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I,
                                                      bool MatchBSwaps,
                                                      bool MatchBitReversals) {
  SmallVector<Instruction *, 4> Insts;
  if (!recognizeBSwapOrBitReverseIdiom(&I, MatchBSwaps, MatchBitReversals,
                                       Insts))
    return nullptr;
  Instruction *LastInst = Insts.pop_back_val();
  LastInst->removeFromParent();
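  // The replacement sequence was materialized by the idiom recognizer; the
  // last instruction is detached so the caller can insert it in place of I,
  // while the remaining instructions only need debug locations and worklist
  // updates below.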

  for (auto *Inst : Insts) {
    Inst->setDebugLoc(I.getDebugLoc());
    Worklist.push(Inst);
  }
  return LastInst;
}

std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
  // TODO: Can we reduce the code duplication between this and the related
  // rotate matching code under visitSelect and visitTrunc?
  assert(Or.getOpcode() == BinaryOperator::Or && "Expecting or instruction");

  unsigned Width = Or.getType()->getScalarSizeInBits();

  Instruction *Or0, *Or1;
  if (!match(Or.getOperand(0), m_Instruction(Or0)) ||
      !match(Or.getOperand(1), m_Instruction(Or1)))
    return std::nullopt;

  bool IsFshl = true; // Sub on LSHR.
  SmallVector<Value *, 3> FShiftArgs;

  // First, find an or'd pair of opposite shifts:
  // or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1)
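  // e.g. a rotate-left by 8 (an illustrative i32 sketch):
  //   or (shl i32 %x, 8), (lshr i32 %x, 24)
  // becomes: call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 8)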
  if (isa<BinaryOperator>(Or0) && isa<BinaryOperator>(Or1)) {
    Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
    if (!match(Or0,
               m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
        !match(Or1,
               m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
        Or0->getOpcode() == Or1->getOpcode())
      return std::nullopt;

    // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
    if (Or0->getOpcode() == BinaryOperator::LShr) {
      std::swap(Or0, Or1);
      std::swap(ShVal0, ShVal1);
      std::swap(ShAmt0, ShAmt1);
    }
    assert(Or0->getOpcode() == BinaryOperator::Shl &&
           Or1->getOpcode() == BinaryOperator::LShr &&
           "Illegal or(shift,shift) pair");

    // Match the shift amount operands for a funnel shift pattern. This always
    // matches a subtraction on the R operand.
    auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
      // Check for constant shift amounts that sum to the bitwidth.
      const APInt *LI, *RI;
      if (match(L, m_APIntAllowPoison(LI)) && match(R, m_APIntAllowPoison(RI)))
        if (LI->ult(Width) && RI->ult(Width) && (*LI + *RI) == Width)
          return ConstantInt::get(L->getType(), *LI);

      Constant *LC, *RC;
      if (match(L, m_Constant(LC)) && match(R, m_Constant(RC)) &&
          match(L,
                m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
          match(R,
                m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
          match(ConstantExpr::getAdd(LC, RC), m_SpecificIntAllowPoison(Width)))
        return ConstantExpr::mergeUndefsWith(LC, RC);

      // (shl ShVal, X) | (lshr ShVal, (Width - X)) iff X < Width.
      // We limit this to X < Width in case the backend re-expands the
      // intrinsic, and has to reintroduce a shift modulo operation (InstCombine
      // might remove it after this fold). This still doesn't guarantee that the
      // final codegen will match this original pattern.
      if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) {
        KnownBits KnownL = computeKnownBits(L, &Or);
        return KnownL.getMaxValue().ult(Width) ? L : nullptr;
      }

      // For non-constant cases, the following patterns currently only work for
      // rotation patterns.
      // TODO: Add general funnel-shift compatible patterns.
      if (ShVal0 != ShVal1)
        return nullptr;

      // For non-constant cases we don't support non-pow2 shift masks.
      // TODO: Is it worth matching urem as well?
      if (!isPowerOf2_32(Width))
        return nullptr;

      // The shift amount may be masked with negation:
      // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
      Value *X;
      unsigned Mask = Width - 1;
      if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
          match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
        return X;

      // (shl ShVal, X) | (lshr ShVal, ((-X) & (Width - 1)))
      if (match(R, m_And(m_Neg(m_Specific(L)), m_SpecificInt(Mask))))
        return L;

      // Similar to above, but the shift amount may be extended after masking,
      // so return the extended value as the parameter for the intrinsic.
      if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
          match(R,
                m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))),
                      m_SpecificInt(Mask))))
        return L;

      if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
          match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
        return L;

      return nullptr;
    };

    Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
    if (!ShAmt) {
      ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
      IsFshl = false; // Sub on SHL.
    }
    if (!ShAmt)
      return std::nullopt;

    FShiftArgs = {ShVal0, ShVal1, ShAmt};
  } else if (isa<ZExtInst>(Or0) || isa<ZExtInst>(Or1)) {
    // If there are two 'or' instructions that concat variables in opposite
    // order:
    //
    // Slot1 and Slot2 are all zero bits.
    // | Slot1 | Low | Slot2 | High |
    // LowHigh = or (shl (zext Low), ZextLowShlAmt), (zext High)
    // | Slot2 | High | Slot1 | Low |
    // HighLow = or (shl (zext High), ZextHighShlAmt), (zext Low)
    //
    // then the latter 'or' can be safely converted to
    // -> HighLow = fshl LowHigh, LowHigh, ZextHighShlAmt
    // if ZextLowShlAmt + ZextHighShlAmt == Width.
    if (!isa<ZExtInst>(Or1))
      std::swap(Or0, Or1);

    Value *High, *ZextHigh, *Low;
    const APInt *ZextHighShlAmt;
    if (!match(Or0,
               m_OneUse(m_Shl(m_Value(ZextHigh), m_APInt(ZextHighShlAmt)))))
      return std::nullopt;

    if (!match(Or1, m_ZExt(m_Value(Low))) ||
        !match(ZextHigh, m_ZExt(m_Value(High))))
      return std::nullopt;

    unsigned HighSize = High->getType()->getScalarSizeInBits();
    unsigned LowSize = Low->getType()->getScalarSizeInBits();
    // Make sure High does not overlap with Low and most significant bits of
    // High aren't shifted out.
    if (ZextHighShlAmt->ult(LowSize) || ZextHighShlAmt->ugt(Width - HighSize))
      return std::nullopt;

    for (User *U : ZextHigh->users()) {
      Value *X, *Y;
      if (!match(U, m_Or(m_Value(X), m_Value(Y))))
        continue;

      if (!isa<ZExtInst>(Y))
        std::swap(X, Y);

      const APInt *ZextLowShlAmt;
      if (!match(X, m_Shl(m_Specific(Or1), m_APInt(ZextLowShlAmt))) ||
          !match(Y, m_Specific(ZextHigh)) || !DT.dominates(U, &Or))
        continue;

      // HighLow is a good concat. If the sum of the two shift amounts equals
      // Width, LowHigh must also be a good concat.
      if (*ZextLowShlAmt + *ZextHighShlAmt != Width)
        continue;

      // Low must not overlap with High and most significant bits of Low must
      // not be shifted out.
      assert(ZextLowShlAmt->uge(HighSize) &&
             ZextLowShlAmt->ule(Width - LowSize) && "Invalid concat");

      FShiftArgs = {U, U, ConstantInt::get(Or0->getType(), *ZextHighShlAmt)};
      break;
    }
  }

  if (FShiftArgs.empty())
    return std::nullopt;

  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  return std::make_pair(IID, FShiftArgs);
}

/// Match UB-safe variants of the funnel shift intrinsic.
static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
  if (auto Opt = IC.convertOrOfShiftsToFunnelShift(Or)) {
    auto [IID, FShiftArgs] = *Opt;
    Function *F =
        Intrinsic::getOrInsertDeclaration(Or.getModule(), IID, Or.getType());
    return CallInst::Create(F, FShiftArgs);
  }

  return nullptr;
}

/// Attempt to combine or(zext(x),shl(zext(y),bw/2)) concat packing patterns.
static Value *matchOrConcat(Instruction &Or, InstCombiner::BuilderTy &Builder) {
  assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
  Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1);
  Type *Ty = Or.getType();

  unsigned Width = Ty->getScalarSizeInBits();
  if ((Width & 1) != 0)
    return nullptr;
  unsigned HalfWidth = Width / 2;

  // Canonicalize zext (lower half) to LHS.
  if (!isa<ZExtInst>(Op0))
    std::swap(Op0, Op1);

  // Find lower/upper half.
  Value *LowerSrc, *ShlVal, *UpperSrc;
  const APInt *C;
  if (!match(Op0, m_OneUse(m_ZExt(m_Value(LowerSrc)))) ||
      !match(Op1, m_OneUse(m_Shl(m_Value(ShlVal), m_APInt(C)))) ||
      !match(ShlVal, m_OneUse(m_ZExt(m_Value(UpperSrc)))))
    return nullptr;
  if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() ||
      LowerSrc->getType()->getScalarSizeInBits() != HalfWidth)
    return nullptr;

  auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) {
    Value *NewLower = Builder.CreateZExt(Lo, Ty);
    Value *NewUpper = Builder.CreateZExt(Hi, Ty);
    NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
    Value *BinOp = Builder.CreateOr(NewLower, NewUpper);
    return Builder.CreateIntrinsic(id, Ty, BinOp);
  };

  // BSWAP: Push the concat down, swapping the lower/upper sources.
  // concat(bswap(x),bswap(y)) -> bswap(concat(x,y))
  Value *LowerBSwap, *UpperBSwap;
  if (match(LowerSrc, m_BSwap(m_Value(LowerBSwap))) &&
      match(UpperSrc, m_BSwap(m_Value(UpperBSwap))))
    return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);

  // BITREVERSE: Push the concat down, swapping the lower/upper sources.
  // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y))
  Value *LowerBRev, *UpperBRev;
  if (match(LowerSrc, m_BitReverse(m_Value(LowerBRev))) &&
      match(UpperSrc, m_BitReverse(m_Value(UpperBRev))))
    return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);

  // iX ext split: extending the or(zext(x),shl(zext(y),bw/2)) pattern
  // to consume sext/ashr:
  // or(zext(sext(x)),shl(zext(sext(ashr(x,xbw-1))),bw/2))
  // or(zext(x),shl(zext(ashr(x,xbw-1)),bw/2))
  Value *X;
  if (match(LowerSrc, m_SExtOrSelf(m_Value(X))) &&
      match(UpperSrc,
            m_SExtOrSelf(m_AShr(
                m_Specific(X),
                m_SpecificInt(X->getType()->getScalarSizeInBits() - 1)))))
    return Builder.CreateSExt(X, Ty);

  return nullptr;
}

/// If all elements of two constant vectors are 0/-1 and inverses, return true.
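/// For example (illustrative): C1 = <-1, 0, -1, 0> and C2 = <0, -1, 0, -1>
/// are inverse bitmasks; C1 = <-1, -1, 0, 0> and C2 = <0, -1, -1, 0> are not,
/// since their second elements are both all-ones.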
static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
  unsigned NumElts = cast<FixedVectorType>(C1->getType())->getNumElements();
  for (unsigned i = 0; i != NumElts; ++i) {
    Constant *EltC1 = C1->getAggregateElement(i);
    Constant *EltC2 = C2->getAggregateElement(i);
    if (!EltC1 || !EltC2)
      return false;

    // One element must be all ones, and the other must be all zeros.
    if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
          (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
      return false;
  }
  return true;
}

/// We have an expression of the form (A & C) | (B & D). If A is a scalar or
/// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
/// B, it can be used as the condition operand of a select instruction.
/// We will detect (A & C) | ~(B | D) when the flag ABIsTheSame is enabled.
Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B,
                                            bool ABIsTheSame) {
  // We may have peeked through bitcasts in the caller.
  // Exit immediately if we don't have (vector) integer types.
  Type *Ty = A->getType();
  if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
    return nullptr;

  // If A is the 'not' operand of B and has enough sign bits, we have our
  // answer.
  if (ABIsTheSame ? (A == B) : match(B, m_Not(m_Specific(A)))) {
    // If these are scalars or vectors of i1, A can be used directly.
    if (Ty->isIntOrIntVectorTy(1))
      return A;

    // If we look through a vector bitcast, the caller will bitcast the operands
    // to match the condition's number of bits (N x i1).
    // To make this poison-safe, disallow bitcast from wide element to narrow
    // element. That could allow poison in lanes where it was not present in the
    // original code.
    A = peekThroughBitcast(A);
    if (A->getType()->isIntOrIntVectorTy()) {
      unsigned NumSignBits = ComputeNumSignBits(A);
      if (NumSignBits == A->getType()->getScalarSizeInBits() &&
          NumSignBits <= Ty->getScalarSizeInBits())
        return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(A->getType()));
    }
    return nullptr;
  }

  // TODO: add support for sext and constant case
  if (ABIsTheSame)
    return nullptr;

  // If both operands are constants, see if the constants are inverse bitmasks.
  Constant *AConst, *BConst;
  if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst)))
    if (AConst == ConstantExpr::getNot(BConst) &&
        ComputeNumSignBits(A) == Ty->getScalarSizeInBits())
      return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty));

  // Look for more complex patterns. The 'not' op may be hidden behind various
  // casts. Look through sexts and bitcasts to find the booleans.
  Value *Cond;
  Value *NotB;
  if (match(A, m_SExt(m_Value(Cond))) &&
      Cond->getType()->isIntOrIntVectorTy(1)) {
    // A = sext i1 Cond; B = sext (not (i1 Cond))
    if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
      return Cond;

    // A = sext i1 Cond; B = not ({bitcast} (sext (i1 Cond)))
    // TODO: The one-use checks are unnecessary or misplaced. If the caller
    //       checked for uses on logic ops/casts, that should be enough to
    //       make this transform worthwhile.
    if (match(B, m_OneUse(m_Not(m_Value(NotB))))) {
      NotB = peekThroughBitcast(NotB, true);
      if (match(NotB, m_SExt(m_Specific(Cond))))
        return Cond;
    }
  }

  // All scalar (and most vector) possibilities should be handled now.
  // Try more matches that only apply to non-splat constant vectors.
  if (!Ty->isVectorTy())
    return nullptr;

  // If both operands are xor'd with constants using the same sexted boolean
  // operand, see if the constants are inverse bitmasks.
  // TODO: Use ConstantExpr::getNot()?
  if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) &&
      match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) &&
      Cond->getType()->isIntOrIntVectorTy(1) &&
      areInverseVectorBitmasks(AConst, BConst)) {
    AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty));
    return Builder.CreateXor(Cond, AConst);
  }
  return nullptr;
}

/// We have an expression of the form (A & B) | (C & D). Try to simplify this
/// to "A' ? B : D", where A' is a boolean or vector of booleans.
/// When InvertFalseVal is set to true, we try to match the pattern
/// where we have peeked through a 'not' op and A and C are the same:
/// (A & B) | ~(A | D) --> (A & B) | (~A & ~D) --> A' ? B : ~D
Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *B, Value *C,
                                              Value *D, bool InvertFalseVal) {
  // The potential condition of the select may be bitcasted. In that case, look
  // through its bitcast and the corresponding bitcast of the 'not' condition.
  Type *OrigType = A->getType();
  A = peekThroughBitcast(A, true);
  C = peekThroughBitcast(C, true);
  if (Value *Cond = getSelectCondition(A, C, InvertFalseVal)) {
    // ((bc Cond) & B) | ((bc ~Cond) & D) --> bc (select Cond, (bc B), (bc D))
    // If this is a vector, we may need to cast to match the condition's length.
    // The bitcasts will either all exist or all not exist. The builder will
    // not create unnecessary casts if the types already match.
    Type *SelTy = A->getType();
    if (auto *VecTy = dyn_cast<VectorType>(Cond->getType())) {
      // For a fixed or scalable vector get N from <{vscale x} N x iM>
      unsigned Elts = VecTy->getElementCount().getKnownMinValue();
      // For a fixed or scalable vector, get the size in bits of N x iM; for a
      // scalar this is just M.
      unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue();
      Type *EltTy = Builder.getIntNTy(SelEltSize / Elts);
      SelTy = VectorType::get(EltTy, VecTy->getElementCount());
    }
    Value *BitcastB = Builder.CreateBitCast(B, SelTy);
    if (InvertFalseVal)
      D = Builder.CreateNot(D);
    Value *BitcastD = Builder.CreateBitCast(D, SelTy);
    Value *Select = Builder.CreateSelect(Cond, BitcastB, BitcastD);
    return Builder.CreateBitCast(Select, OrigType);
  }

  return nullptr;
}

// (icmp eq X, C) | (icmp ult Other, (X - C)) -> (icmp ule Other, (X - (C + 1)))
// (icmp ne X, C) & (icmp uge Other, (X - C)) -> (icmp ugt Other, (X - (C + 1)))
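// Sanity-check sketch for the 'or' form: if X == C, the LHS is true and so is
// the result, since X - (C + 1) wraps to all-ones and any Other is u<= -1.
// If X != C, then X - C != 0, so Other u< (X - C) is equivalent to
// Other u<= (X - C) - 1 == X - (C + 1).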
static Value *foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS,
                                               bool IsAnd, bool IsLogical,
                                               IRBuilderBase &Builder) {
  Value *LHS0 = LHS->getOperand(0);
  Value *RHS0 = RHS->getOperand(0);
  Value *RHS1 = RHS->getOperand(1);

  ICmpInst::Predicate LPred =
      IsAnd ? LHS->getInversePredicate() : LHS->getPredicate();
  ICmpInst::Predicate RPred =
      IsAnd ? RHS->getInversePredicate() : RHS->getPredicate();

  const APInt *CInt;
  if (LPred != ICmpInst::ICMP_EQ ||
      !match(LHS->getOperand(1), m_APIntAllowPoison(CInt)) ||
      !LHS0->getType()->isIntOrIntVectorTy() ||
      !(LHS->hasOneUse() || RHS->hasOneUse()))
    return nullptr;

  auto MatchRHSOp = [LHS0, CInt](const Value *RHSOp) {
    return match(RHSOp,
                 m_Add(m_Specific(LHS0), m_SpecificIntAllowPoison(-*CInt))) ||
           (CInt->isZero() && RHSOp == LHS0);
  };

  Value *Other;
  if (RPred == ICmpInst::ICMP_ULT && MatchRHSOp(RHS1))
    Other = RHS0;
  else if (RPred == ICmpInst::ICMP_UGT && MatchRHSOp(RHS0))
    Other = RHS1;
  else
    return nullptr;

  if (IsLogical)
    Other = Builder.CreateFreeze(Other);

  return Builder.CreateICmp(
      IsAnd ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE,
      Builder.CreateSub(LHS0, ConstantInt::get(LHS0->getType(), *CInt + 1)),
      Other);
}

/// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible.
/// If IsLogical is true, then the and/or is in select form and the transform
/// must be poison-safe.
Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                          Instruction &I, bool IsAnd,
                                          bool IsLogical) {
  const SimplifyQuery Q = SQ.getWithInstruction(&I);

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
  Value *LHS1 = LHS->getOperand(1), *RHS1 = RHS->getOperand(1);

  const APInt *LHSC = nullptr, *RHSC = nullptr;
  match(LHS1, m_APInt(LHSC));
  match(RHS1, m_APInt(RHSC));

  // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
  // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
  if (predicatesFoldable(PredL, PredR)) {
    if (LHS0 == RHS1 && LHS1 == RHS0) {
      PredL = ICmpInst::getSwappedPredicate(PredL);
      std::swap(LHS0, LHS1);
    }
    if (LHS0 == RHS0 && LHS1 == RHS1) {
      unsigned Code = IsAnd ? getICmpCode(PredL) & getICmpCode(PredR)
                            : getICmpCode(PredL) | getICmpCode(PredR);
      bool IsSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
    }
  }

  if (Value *V =
          foldAndOrOfICmpEqConstantAndICmp(LHS, RHS, IsAnd, IsLogical, Builder))
    return V;
  // We can treat logical like bitwise here, because both operands are used on
  // the LHS, and as such poison from both will propagate.
  if (Value *V = foldAndOrOfICmpEqConstantAndICmp(RHS, LHS, IsAnd,
                                                  /*IsLogical*/ false, Builder))
    return V;

  if (Value *V =
          foldAndOrOfICmpsWithConstEq(LHS, RHS, IsAnd, IsLogical, Builder, Q))
    return V;
  // We can convert this case to bitwise and, because both operands are used
  // on the LHS, and as such poison from both will propagate.
  if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, IsAnd,
                                             /*IsLogical=*/false, Builder, Q)) {
    // If RHS is still used, we should drop the samesign flag.
    if (IsLogical && RHS->hasSameSign() && !RHS->use_empty()) {
      RHS->setSameSign(false);
      addToWorklist(RHS);
    }
    return V;
  }

  if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, IsAnd, Builder, *this))
    return V;
  if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, IsAnd, Builder, *this))
    return V;

  // TODO: One of these directions is fine with logical and/or, the other could
  // be supported by inserting freeze.
  if (!IsLogical) {
    // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
    // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
    if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/!IsAnd))
      return V;

    // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
    // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
    if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/!IsAnd))
      return V;
  }

  // TODO: Add conjugated or fold, check whether it is safe for logical and/or.
  if (IsAnd && !IsLogical)
    if (Value *V = foldSignedTruncationCheck(LHS, RHS, I, Builder))
      return V;

  if (Value *V = foldIsPowerOf2(LHS, RHS, IsAnd, Builder, *this))
    return V;

  if (Value *V = foldPowerOf2AndShiftedMask(LHS, RHS, IsAnd, Builder))
    return V;

  // TODO: Verify whether this is safe for logical and/or.
  if (!IsLogical) {
    if (Value *X = foldUnsignedUnderflowCheck(LHS, RHS, IsAnd, Q, Builder))
      return X;
    if (Value *X = foldUnsignedUnderflowCheck(RHS, LHS, IsAnd, Q, Builder))
      return X;
  }

  // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
  // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
  // TODO: Remove this and below when foldLogOpOfMaskedICmps can handle undefs.
  if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && match(LHS1, m_ZeroInt()) && match(RHS1, m_ZeroInt()) &&
      LHS0->getType() == RHS0->getType() &&
      (!IsLogical || isGuaranteedNotToBePoison(RHS0))) {
    Value *NewOr = Builder.CreateOr(LHS0, RHS0);
    return Builder.CreateICmp(PredL, NewOr,
                              Constant::getNullValue(NewOr->getType()));
  }

  // (icmp ne A, -1) | (icmp ne B, -1) --> (icmp ne (A&B), -1)
  // (icmp eq A, -1) & (icmp eq B, -1) --> (icmp eq (A&B), -1)
  if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && match(LHS1, m_AllOnes()) && match(RHS1, m_AllOnes()) &&
      LHS0->getType() == RHS0->getType() &&
      (!IsLogical || isGuaranteedNotToBePoison(RHS0))) {
    Value *NewAnd = Builder.CreateAnd(LHS0, RHS0);
    return Builder.CreateICmp(PredL, NewAnd,
                              Constant::getAllOnesValue(LHS0->getType()));
  }

  if (!IsLogical)
    if (Value *V =
            foldAndOrOfICmpsWithPow2AndWithZero(Builder, LHS, RHS, IsAnd, Q))
      return V;

  // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
  if (!LHSC || !RHSC)
    return nullptr;

  // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
  // (trunc x) != C1 | (and x, CA) != C2 -> (and x, CA|CMAX) != C1|C2
  // where CMAX is the all ones value for the truncated type,
  // iff the lower bits of C2 and CA are zero.
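  // Worked example (a sketch, trunc i32 -> i8, so CMAX == 0xFF): for
  //   (trunc i32 %x to i8) == 0x12 & (%x & 0xFF00) == 0x3400
  // the low 8 bits of CA and C2 are zero, so this becomes
  //   (%x & 0xFFFF) == 0x3412.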
  if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
      PredL == PredR && LHS->hasOneUse() && RHS->hasOneUse()) {
    Value *V;
    const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;

    // (trunc x) == C1 & (and x, CA) == C2
    // (and x, CA) == C2 & (trunc x) == C1
    if (match(RHS0, m_Trunc(m_Value(V))) &&
        match(LHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
      SmallC = RHSC;
      BigC = LHSC;
    } else if (match(LHS0, m_Trunc(m_Value(V))) &&
               match(RHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
      SmallC = LHSC;
      BigC = RHSC;
    }

    if (SmallC && BigC) {
      unsigned BigBitSize = BigC->getBitWidth();
      unsigned SmallBitSize = SmallC->getBitWidth();

      // Check that the low bits are zero.
      APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
      if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
        Value *NewAnd = Builder.CreateAnd(V, Low | *AndC);
        APInt N = SmallC->zext(BigBitSize) | *BigC;
        Value *NewVal = ConstantInt::get(NewAnd->getType(), N);
        return Builder.CreateICmp(PredL, NewAnd, NewVal);
      }
    }
  }

  // Match the naive pattern (and its inverted form) for checking if two values
  // share the same sign. An example of the pattern:
  // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1)
  // Inverted form (example):
  // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0)
  bool TrueIfSignedL, TrueIfSignedR;
  if (isSignBitCheck(PredL, *LHSC, TrueIfSignedL) &&
      isSignBitCheck(PredR, *RHSC, TrueIfSignedR) &&
      (RHS->hasOneUse() || LHS->hasOneUse())) {
    Value *X, *Y;
    if (IsAnd) {
      if ((TrueIfSignedL && !TrueIfSignedR &&
           match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
           match(RHS0, m_c_And(m_Specific(X), m_Specific(Y)))) ||
          (!TrueIfSignedL && TrueIfSignedR &&
           match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
           match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y))))) {
        Value *NewXor = Builder.CreateXor(X, Y);
        return Builder.CreateIsNeg(NewXor);
      }
    } else {
      if ((TrueIfSignedL && !TrueIfSignedR &&
           match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
           match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))) ||
          (!TrueIfSignedL && TrueIfSignedR &&
           match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
           match(RHS0, m_c_And(m_Specific(X), m_Specific(Y))))) {
        Value *NewXor = Builder.CreateXor(X, Y);
        return Builder.CreateIsNotNeg(NewXor);
      }
    }
  }

  // (X & ExpMask) != 0 && (X & ExpMask) != ExpMask -> isnormal(X)
  // (X & ExpMask) == 0 || (X & ExpMask) == ExpMask -> !isnormal(X)
  Value *X;
  const APInt *MaskC;
  if (LHS0 == RHS0 && PredL == PredR &&
      PredL == (IsAnd ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ) &&
      !I.getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
      LHS->hasOneUse() && RHS->hasOneUse() &&
      match(LHS0, m_And(m_ElementWiseBitCast(m_Value(X)), m_APInt(MaskC))) &&
      X->getType()->getScalarType()->isIEEELikeFPTy() &&
      APFloat(X->getType()->getScalarType()->getFltSemantics(), *MaskC)
          .isPosInfinity() &&
      ((LHSC->isZero() && *RHSC == *MaskC) ||
       (RHSC->isZero() && *LHSC == *MaskC)))
    return Builder.createIsFPClass(X, IsAnd ? FPClassTest::fcNormal
                                            : ~FPClassTest::fcNormal);

  return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
}

/// If IsLogical is true, then the and/or is in select form and the transform
/// must be poison-safe.
Value *InstCombinerImpl::foldBooleanAndOr(Value *LHS, Value *RHS,
                                          Instruction &I, bool IsAnd,
                                          bool IsLogical) {
  if (!LHS->getType()->isIntOrIntVectorTy(1))
    return nullptr;

  // Handle (roughly):
  // (icmp ne (A & B), C) | (icmp ne (A & D), E)
  // (icmp eq (A & B), C) & (icmp eq (A & D), E)
  if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, IsLogical, Builder,
                                        SQ.getWithInstruction(&I)))
    return V;

  if (auto *LHSCmp = dyn_cast<ICmpInst>(LHS))
    if (auto *RHSCmp = dyn_cast<ICmpInst>(RHS))
      if (Value *Res = foldAndOrOfICmps(LHSCmp, RHSCmp, I, IsAnd, IsLogical))
        return Res;

  if (auto *LHSCmp = dyn_cast<FCmpInst>(LHS))
    if (auto *RHSCmp = dyn_cast<FCmpInst>(RHS))
      if (Value *Res = foldLogicOfFCmps(LHSCmp, RHSCmp, IsAnd, IsLogical))
        return Res;

  if (Value *Res = foldEqOfParts(LHS, RHS, IsAnd))
    return Res;

  return nullptr;
}

static Value *foldOrOfInversions(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Or &&
         "Simplification only supports or at the moment.");

  Value *Cmp1, *Cmp2, *Cmp3, *Cmp4;
  if (!match(I.getOperand(0), m_And(m_Value(Cmp1), m_Value(Cmp2))) ||
      !match(I.getOperand(1), m_And(m_Value(Cmp3), m_Value(Cmp4))))
    return nullptr;

  // Check if any two pairs of the 'and' operations are inversions of each
  // other.
  if (isKnownInversion(Cmp1, Cmp3) && isKnownInversion(Cmp2, Cmp4))
    return Builder.CreateXor(Cmp1, Cmp4);
  if (isKnownInversion(Cmp1, Cmp4) && isKnownInversion(Cmp2, Cmp3))
    return Builder.CreateXor(Cmp1, Cmp3);

  return nullptr;
}

// A decomposition of ((X & Mask) * Factor). The NUW / NSW bools
// track these properties for preservation. Note that we can decompose
// the equivalent select form of this expression
// (e.g. (!(X & Mask) ? 0 : Mask * Factor)).
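// For example (illustrative): ((X & 0b0101) * 3) decomposes to
// {X, Factor = 3, Mask = 0b0101}, as does the select form
// ((X & 0b0100) == 0 ? 0 : 12) with Factor = 3, Mask = 0b0100.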
struct DecomposedBitMaskMul {
  Value *X;
  APInt Factor;
  APInt Mask;
  bool NUW;
  bool NSW;

  bool isCombineableWith(const DecomposedBitMaskMul Other) {
    return X == Other.X && !Mask.intersects(Other.Mask) &&
           Factor == Other.Factor;
  }
};

static std::optional<DecomposedBitMaskMul> matchBitmaskMul(Value *V) {
  Instruction *Op = dyn_cast<Instruction>(V);
  if (!Op)
    return std::nullopt;

  // Decompose ((A & N) * C) into BitMaskMul
  Value *Original = nullptr;
  const APInt *Mask = nullptr;
  const APInt *MulConst = nullptr;
  if (match(Op, m_Mul(m_And(m_Value(Original), m_APInt(Mask)),
                      m_APInt(MulConst)))) {
    if (MulConst->isZero() || Mask->isZero())
      return std::nullopt;

    return std::optional<DecomposedBitMaskMul>(
        {Original, *MulConst, *Mask,
         cast<BinaryOperator>(Op)->hasNoUnsignedWrap(),
         cast<BinaryOperator>(Op)->hasNoSignedWrap()});
  }

  Value *Cond = nullptr;
  const APInt *EqZero = nullptr, *NeZero = nullptr;

  // Decompose ((A & N) ? 0 : N * C) into BitMaskMul
  if (match(Op, m_Select(m_Value(Cond), m_APInt(EqZero), m_APInt(NeZero)))) {
    auto ICmpDecompose =
        decomposeBitTest(Cond, /*LookThruTrunc=*/true,
                         /*AllowNonZeroC=*/false, /*DecomposeBitMask=*/true);
    if (!ICmpDecompose.has_value())
      return std::nullopt;

    assert(ICmpInst::isEquality(ICmpDecompose->Pred) &&
           ICmpDecompose->C.isZero());

    if (ICmpDecompose->Pred == ICmpInst::ICMP_NE)
      std::swap(EqZero, NeZero);

    if (!EqZero->isZero() || NeZero->isZero())
      return std::nullopt;

    if (!ICmpDecompose->Mask.isPowerOf2() || ICmpDecompose->Mask.isZero() ||
        NeZero->getBitWidth() != ICmpDecompose->Mask.getBitWidth())
      return std::nullopt;

    if (!NeZero->urem(ICmpDecompose->Mask).isZero())
      return std::nullopt;

    return std::optional<DecomposedBitMaskMul>(
        {ICmpDecompose->X, NeZero->udiv(ICmpDecompose->Mask),
         ICmpDecompose->Mask, /*NUW=*/false, /*NSW=*/false});
  }

  return std::nullopt;
}

/// (A & N) * C + (A & M) * C -> (A & (N + M)) * C
/// This also accepts the equivalent select form of (A & N) * C
/// expressions, i.e. (!(A & N) ? 0 : N * C).
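/// Worked example (illustrative, disjoint masks N = 1 and M = 2, C = 5):
/// (A & 1) * 5 and (A & 2) * 5 never share set bits (5 = 0b0101 vs.
/// 10 = 0b1010), so their disjoint 'or' is their sum, and
/// (A & 1) * 5 + (A & 2) * 5 == (A & 3) * 5.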
static Value *foldBitmaskMul(Value *Op0, Value *Op1,
                             InstCombiner::BuilderTy &Builder) {
  auto Decomp1 = matchBitmaskMul(Op1);
  if (!Decomp1)
    return nullptr;

  auto Decomp0 = matchBitmaskMul(Op0);
  if (!Decomp0)
    return nullptr;

  if (Decomp0->isCombineableWith(*Decomp1)) {
    Value *NewAnd = Builder.CreateAnd(
        Decomp0->X,
        ConstantInt::get(Decomp0->X->getType(), Decomp0->Mask + Decomp1->Mask));

    return Builder.CreateMul(
        NewAnd, ConstantInt::get(NewAnd->getType(), Decomp1->Factor), "",
        Decomp0->NUW && Decomp1->NUW, Decomp0->NSW && Decomp1->NSW);
  }

  return nullptr;
}

Value *InstCombinerImpl::foldDisjointOr(Value *LHS, Value *RHS) {
  if (Value *Res = foldBitmaskMul(LHS, RHS, Builder))
    return Res;

  return nullptr;
}

Value *InstCombinerImpl::reassociateDisjointOr(Value *LHS, Value *RHS) {

  Value *X, *Y;
  if (match(RHS, m_OneUse(m_DisjointOr(m_Value(X), m_Value(Y))))) {
    if (Value *Res = foldDisjointOr(LHS, X))
      return Builder.CreateOr(Res, Y, "", /*IsDisjoint=*/true);
    if (Value *Res = foldDisjointOr(LHS, Y))
      return Builder.CreateOr(Res, X, "", /*IsDisjoint=*/true);
  }

  if (match(LHS, m_OneUse(m_DisjointOr(m_Value(X), m_Value(Y))))) {
    if (Value *Res = foldDisjointOr(X, RHS))
      return Builder.CreateOr(Res, Y, "", /*IsDisjoint=*/true);
    if (Value *Res = foldDisjointOr(Y, RHS))
      return Builder.CreateOr(Res, X, "", /*IsDisjoint=*/true);
  }

  return nullptr;
}

/// Fold Res, Overflow = (umul.with.overflow x c1); (or Overflow (ugt Res c2))
/// --> (ugt x (c2/c1)). This checks whether the product of an unsigned value
/// and an unsigned constant is mathematically (i.e. without wrapping) greater
/// than a second constant.
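/// Worked example (illustrative, c1 = 10, c2 = 99): x * 10 exceeds 99
/// mathematically iff x > 9, and 9 == 99 udiv 10, so both the overflow bit
/// and the compare collapse to (ugt x, 9).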
static Value *foldOrUnsignedUMulOverflowICmp(BinaryOperator &I,
                                             InstCombiner::BuilderTy &Builder,
                                             const DataLayout &DL) {
  Value *WOV, *X;
  const APInt *C1, *C2;
  if (match(&I,
            m_c_Or(m_ExtractValue<1>(
                       m_CombineAnd(m_Intrinsic<Intrinsic::umul_with_overflow>(
                                        m_Value(X), m_APInt(C1)),
                                    m_Value(WOV))),
                   m_OneUse(m_SpecificCmp(ICmpInst::ICMP_UGT,
                                          m_ExtractValue<0>(m_Deferred(WOV)),
                                          m_APInt(C2))))) &&
      !C1->isZero()) {
    Constant *NewC = ConstantInt::get(X->getType(), C2->udiv(*C1));
    return Builder.CreateICmp(ICmpInst::ICMP_UGT, X, NewC);
  }
  return nullptr;
}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
  if (Value *V = simplifyOrInst(I.getOperand(0), I.getOperand(1),
                                SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // Do this before using distributive laws to catch simple and/or/not patterns.
  if (Instruction *Xor = foldOrToXor(I, Builder))
    return Xor;

  if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
    return X;

  // (A & B) | (C & D) -> A ^ D where A == ~C && B == ~D
  // (A & B) | (C & D) -> A ^ C where A == ~D && B == ~C
  if (Value *V = foldOrOfInversions(I, Builder))
    return replaceInstUsesWith(I, V);

  // (A&B)|(A&C) -> A&(B|C) etc
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (Ty->isIntOrIntVectorTy(1)) {
    if (auto *SI0 = dyn_cast<SelectInst>(Op0)) {
      if (auto *R =
              foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ false))
        return R;
    }
    if (auto *SI1 = dyn_cast<SelectInst>(Op1)) {
      if (auto *R =
              foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ false))
        return R;
    }
  }

  if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
    return FoldedLogic;

  if (Instruction *BitOp = matchBSwapOrBitReverse(I, /*MatchBSwaps*/ true,
                                                  /*MatchBitReversals*/ true))
    return BitOp;

  if (Instruction *Funnel = matchFunnelShift(I, *this))
    return Funnel;

  if (Value *Concat = matchOrConcat(I, Builder))
    return replaceInstUsesWith(I, Concat);

  if (Instruction *R = foldBinOpShiftWithShift(I))
    return R;

  if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
    return R;

  if (cast<PossiblyDisjointInst>(I).isDisjoint()) {
    if (Instruction *R =
            foldAddLikeCommutative(I.getOperand(0), I.getOperand(1),
                                   /*NSW=*/true, /*NUW=*/true))
      return R;
    if (Instruction *R =
            foldAddLikeCommutative(I.getOperand(1), I.getOperand(0),
                                   /*NSW=*/true, /*NUW=*/true))
      return R;

    if (Value *Res = foldBitmaskMul(I.getOperand(0), I.getOperand(1), Builder))
      return replaceInstUsesWith(I, Res);

    if (Value *Res = reassociateDisjointOr(I.getOperand(0), I.getOperand(1)))
      return replaceInstUsesWith(I, Res);
  }

  Value *X, *Y;
  const APInt *CV;
  if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
      !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, &I)) {
    // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
    // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
    Value *Or = Builder.CreateOr(X, Y);
    return BinaryOperator::CreateXor(Or, ConstantInt::get(Ty, *CV));
  }

  // If the operands have no common bits set:
  // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1)
  if (match(&I, m_c_DisjointOr(m_OneUse(m_Mul(m_Value(X), m_Value(Y))),
                               m_Deferred(X)))) {
    Value *IncrementY = Builder.CreateAdd(Y, ConstantInt::get(Ty, 1));
    return BinaryOperator::CreateMul(X, IncrementY);
  }

  // (A & C) | (B & D)
  Value *A, *B, *C, *D;
  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
      match(Op1, m_And(m_Value(B), m_Value(D)))) {

    // (A & C0) | (B & C1)
    const APInt *C0, *C1;
    if (match(C, m_APInt(C0)) && match(D, m_APInt(C1))) {
      Value *X;
      if (*C0 == ~*C1) {
        // ((X | B) & MaskC) | (B & ~MaskC) -> (X & MaskC) | B
        if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
          return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C0), B);
        // (A & MaskC) | ((X | A) & ~MaskC) -> (X & ~MaskC) | A
        if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
          return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C1), A);

        // ((X ^ B) & MaskC) | (B & ~MaskC) -> (X & MaskC) ^ B
        if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
          return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C0), B);
        // (A & MaskC) | ((X ^ A) & ~MaskC) -> (X & ~MaskC) ^ A
        if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
          return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C1), A);
      }

      if ((*C0 & *C1).isZero()) {
        // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1)
        // iff (C0 & C1) == 0 and (X & ~C0) == 0
        if (match(A, m_c_Or(m_Value(X), m_Specific(B))) &&
            MaskedValueIsZero(X, ~*C0, &I)) {
          Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
          return BinaryOperator::CreateAnd(A, C01);
        }
        // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1)
        // iff (C0 & C1) == 0 and (X & ~C1) == 0
        if (match(B, m_c_Or(m_Value(X), m_Specific(A))) &&
            MaskedValueIsZero(X, ~*C1, &I)) {
          Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
          return BinaryOperator::CreateAnd(B, C01);
        }
        // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1)
        // iff (C0 & C1) == 0 and (C2 & ~C0) == 0 and (C3 & ~C1) == 0.
        const APInt *C2, *C3;
        if (match(A, m_Or(m_Value(X), m_APInt(C2))) &&
            match(B, m_Or(m_Specific(X), m_APInt(C3))) &&
            (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) {
          Value *Or = Builder.CreateOr(X, *C2 | *C3, "bitfield");
          Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
          return BinaryOperator::CreateAnd(Or, C01);
        }
      }
    }

    // Don't try to form a select if it's unlikely that we'll get rid of at
    // least one of the operands. A select is generally more expensive than the
    // 'or' that it is replacing.
    if (Op0->hasOneUse() || Op1->hasOneUse()) {
      // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
      if (Value *V = matchSelectFromAndOr(A, C, B, D))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(A, C, D, B))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(C, A, B, D))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(C, A, D, B))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(B, D, A, C))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(B, D, C, A))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(D, B, A, C))
        return replaceInstUsesWith(I, V);
      if (Value *V = matchSelectFromAndOr(D, B, C, A))
        return replaceInstUsesWith(I, V);
    }
  }

  if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
      match(Op1, m_Not(m_Or(m_Value(B), m_Value(D)))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    // (Cond & C) | ~(Cond | D) -> Cond ? C : ~D
    if (Value *V = matchSelectFromAndOr(A, C, B, D, true))
      return replaceInstUsesWith(I, V);
    if (Value *V = matchSelectFromAndOr(A, C, D, B, true))
      return replaceInstUsesWith(I, V);
    if (Value *V = matchSelectFromAndOr(C, A, B, D, true))
      return replaceInstUsesWith(I, V);
    if (Value *V = matchSelectFromAndOr(C, A, D, B, true))
      return replaceInstUsesWith(I, V);
  }

  // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
    if (match(Op1,
              m_c_Xor(m_c_Xor(m_Specific(B), m_Value(C)), m_Specific(A))) ||
        match(Op1, m_c_Xor(m_c_Xor(m_Specific(A), m_Value(C)), m_Specific(B))))
      return BinaryOperator::CreateOr(Op0, C);

  // ((B ^ C) ^ A) | (A ^ B) -> (A ^ B) | C
  if (match(Op1, m_Xor(m_Value(A), m_Value(B))))
    if (match(Op0,
              m_c_Xor(m_c_Xor(m_Specific(B), m_Value(C)), m_Specific(A))) ||
        match(Op0, m_c_Xor(m_c_Xor(m_Specific(A), m_Value(C)), m_Specific(B))))
      return BinaryOperator::CreateOr(Op1, C);

  if (Instruction *DeMorgan = matchDeMorgansLaws(I, *this))
    return DeMorgan;

  // Canonicalize xor to the RHS.
  bool SwappedForXor = false;
  if (match(Op0, m_Xor(m_Value(), m_Value()))) {
    std::swap(Op0, Op1);
    SwappedForXor = true;
  }

  if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
    // (A | ?) | (A ^ B) --> (A | ?) | B
    // (B | ?) | (A ^ B) --> (B | ?) | A
    if (match(Op0, m_c_Or(m_Specific(A), m_Value())))
      return BinaryOperator::CreateOr(Op0, B);
    if (match(Op0, m_c_Or(m_Specific(B), m_Value())))
      return BinaryOperator::CreateOr(Op0, A);

    // (A & B) | (A ^ B) --> A | B
    // (B & A) | (A ^ B) --> A | B
    if (match(Op0, m_c_And(m_Specific(A), m_Specific(B))))
      return BinaryOperator::CreateOr(A, B);

    // ~A | (A ^ B) --> ~(A & B)
    // ~B | (A ^ B) --> ~(A & B)
    // The swap above should always make Op0 the 'not'.
    if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
        (match(Op0, m_Not(m_Specific(A))) || match(Op0, m_Not(m_Specific(B)))))
      return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));

    // Same as above, but peek through an 'and' to the common operand:
    // ~(A & ?) | (A ^ B) --> ~((A & ?) & B)
    // ~(B & ?) | (A ^ B) --> ~((B & ?) & A)
    Instruction *And;
    if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
        match(Op0, m_Not(m_CombineAnd(m_Instruction(And),
                                      m_c_And(m_Specific(A), m_Value())))))
      return BinaryOperator::CreateNot(Builder.CreateAnd(And, B));
    if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
        match(Op0, m_Not(m_CombineAnd(m_Instruction(And),
                                      m_c_And(m_Specific(B), m_Value())))))
      return BinaryOperator::CreateNot(Builder.CreateAnd(And, A));

    // (~A | C) | (A ^ B) --> ~(A & B) | C
    // (~B | C) | (A ^ B) --> ~(A & B) | C
    if (Op0->hasOneUse() && Op1->hasOneUse() &&
        (match(Op0, m_c_Or(m_Not(m_Specific(A)), m_Value(C))) ||
         match(Op0, m_c_Or(m_Not(m_Specific(B)), m_Value(C))))) {
      Value *Nand = Builder.CreateNot(Builder.CreateAnd(A, B), "nand");
      return BinaryOperator::CreateOr(Nand, C);
    }
  }

  if (SwappedForXor)
    std::swap(Op0, Op1);

  if (Value *Res =
          foldBooleanAndOr(Op0, Op1, I, /*IsAnd=*/false, /*IsLogical=*/false))
    return replaceInstUsesWith(I, Res);

  if (match(Op1, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
    bool IsLogical = isa<SelectInst>(Op1);
    if (auto *V = reassociateBooleanAndOr(Op0, X, Y, I, /*IsAnd=*/false,
                                          /*RHSIsLogical=*/IsLogical))
      return replaceInstUsesWith(I, V);
  }
  if (match(Op0, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
    bool IsLogical = isa<SelectInst>(Op0);
    if (auto *V = reassociateBooleanAndOr(Op1, X, Y, I, /*IsAnd=*/false,
                                          /*RHSIsLogical=*/IsLogical))
      return replaceInstUsesWith(I, V);
  }

  if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
    return FoldedFCmps;

  if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
    return CastedOr;

  if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
    return Sel;

  // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
  // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
  //       with binop identity constant. But creating a select with non-constant
  //       arm may not be reversible due to poison semantics. Is that a good
  //       canonicalization?
  if (match(&I, m_c_Or(m_OneUse(m_SExt(m_Value(A))), m_Value(B))) &&
      A->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), B);

  // Note: If we've gotten to the point of visiting the outer OR, then the
  // inner one couldn't be simplified. If it was a constant, then it won't
  // be simplified by a later pass either, so we try swapping the inner/outer
  // ORs in the hopes that we'll be able to simplify it this way.
  // (X|C) | V --> (X|V) | C
  // Pass the disjoint flag in the following two patterns:
  // 1. or-disjoint (or-disjoint X, C), V -->
  //    or-disjoint (or-disjoint X, V), C
  //
  // 2. or-disjoint (or X, C), V -->
  //    or (or-disjoint X, V), C
  ConstantInt *CI;
  if (Op0->hasOneUse() && !match(Op1, m_ConstantInt()) &&
      match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) {
    bool IsDisjointOuter = cast<PossiblyDisjointInst>(I).isDisjoint();
    bool IsDisjointInner = cast<PossiblyDisjointInst>(Op0)->isDisjoint();
    Value *Inner = Builder.CreateOr(A, Op1);
    cast<PossiblyDisjointInst>(Inner)->setIsDisjoint(IsDisjointOuter);
    Inner->takeName(Op0);
    return IsDisjointOuter && IsDisjointInner
               ? BinaryOperator::CreateDisjointOr(Inner, CI)
               : BinaryOperator::CreateOr(Inner, CI);
  }

  // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
  // Since this OR instruction hasn't been optimized further yet, we hope
  // that this transformation will allow the new ORs to be optimized.
  {
    Value *X = nullptr, *Y = nullptr;
    if (Op0->hasOneUse() && Op1->hasOneUse() &&
        match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
        match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
      Value *orTrue = Builder.CreateOr(A, C);
      Value *orFalse = Builder.CreateOr(B, D);
      return SelectInst::Create(X, orTrue, orFalse);
    }
  }

  // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X) --> X s> Y ? -1 : X.
  {
    Value *X, *Y;
    if (match(&I, m_c_Or(m_OneUse(m_AShr(
                             m_NSWSub(m_Value(Y), m_Value(X)),
                             m_SpecificInt(Ty->getScalarSizeInBits() - 1))),
                         m_Deferred(X)))) {
      Value *NewICmpInst = Builder.CreateICmpSGT(X, Y);
      Value *AllOnes = ConstantInt::getAllOnesValue(Ty);
      return SelectInst::Create(NewICmpInst, AllOnes, X);
    }
  }

  {
    // ((A & B) ^ A) | ((A & B) ^ B) -> A ^ B
    // (A ^ (A & B)) | (B ^ (A & B)) -> A ^ B
    // ((A & B) ^ B) | ((A & B) ^ A) -> A ^ B
    // (B ^ (A & B)) | (A ^ (A & B)) -> A ^ B
    const auto TryXorOpt = [&](Value *Lhs, Value *Rhs) -> Instruction * {
      if (match(Lhs, m_c_Xor(m_And(m_Value(A), m_Value(B)), m_Deferred(A))) &&
          match(Rhs,
                m_c_Xor(m_And(m_Specific(A), m_Specific(B)), m_Specific(B)))) {
        return BinaryOperator::CreateXor(A, B);
      }
      return nullptr;
    };

    if (Instruction *Result = TryXorOpt(Op0, Op1))
      return Result;
    if (Instruction *Result = TryXorOpt(Op1, Op0))
      return Result;
  }

  if (Instruction *V =
          canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
    return V;

  CmpPredicate Pred;
  Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv;
  // Check if the OR weakens the overflow condition for umul.with.overflow by
  // treating any non-zero result as overflow. In that case, we overflow if
  // both umul.with.overflow operands are != 0, as in that case the result can
  // only be 0 if the multiplication overflows.
  if (match(&I,
            m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_Value(UMulWithOv)),
                                m_Value(Ov)),
                   m_CombineAnd(
                       m_SpecificICmp(ICmpInst::ICMP_NE,
                                      m_CombineAnd(m_ExtractValue<0>(
                                                       m_Deferred(UMulWithOv)),
                                                   m_Value(Mul)),
                                      m_ZeroInt()),
                       m_Value(MulIsNotZero)))) &&
      (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse()))) {
    Value *A, *B;
    if (match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>(
                              m_Value(A), m_Value(B)))) {
      Value *NotNullA = Builder.CreateIsNotNull(A);
      Value *NotNullB = Builder.CreateIsNotNull(B);
      return BinaryOperator::CreateAnd(NotNullA, NotNullB);
    }
  }
4147
4148 /// Res, Overflow = xxx_with_overflow X, C1
4149 /// Try to canonicalize the pattern "Overflow | icmp pred Res, C2" into
4150 /// "Overflow | icmp pred X, C2 +/- C1".
  const WithOverflowInst *WO;
  const Value *WOV;
  const APInt *C1, *C2;
  if (match(&I, m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_CombineAnd(
                                        m_WithOverflowInst(WO), m_Value(WOV))),
                                    m_Value(Ov)),
                       m_OneUse(m_ICmp(Pred, m_ExtractValue<0>(m_Deferred(WOV)),
                                       m_APInt(C2))))) &&
      (WO->getBinaryOp() == Instruction::Add ||
       WO->getBinaryOp() == Instruction::Sub) &&
      (ICmpInst::isEquality(Pred) ||
       WO->isSigned() == ICmpInst::isSigned(Pred)) &&
      match(WO->getRHS(), m_APInt(C1))) {
    bool Overflow;
    APInt NewC = WO->getBinaryOp() == Instruction::Add
                     ? (ICmpInst::isSigned(Pred) ? C2->ssub_ov(*C1, Overflow)
                                                 : C2->usub_ov(*C1, Overflow))
                     : (ICmpInst::isSigned(Pred) ? C2->sadd_ov(*C1, Overflow)
                                                 : C2->uadd_ov(*C1, Overflow));
    if (!Overflow || ICmpInst::isEquality(Pred)) {
      Value *NewCmp = Builder.CreateICmp(
          Pred, WO->getLHS(), ConstantInt::get(WO->getLHS()->getType(), NewC));
      return BinaryOperator::CreateOr(Ov, NewCmp);
    }
  }

  // Try to fold the pattern "Overflow | icmp pred Res, C2" into a single
  // comparison instruction for umul.with.overflow.
  if (Value *R = foldOrUnsignedUMulOverflowICmp(I, Builder, DL))
    return replaceInstUsesWith(I, R);

  // (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
  if (sinkNotIntoOtherHandOfLogicalOp(I))
    return &I;

  // Improve "get low bit mask up to and including bit X" pattern:
  // (1 << X) | ((1 << X) + -1) --> -1 l>> (bitwidth(x) - 1 - X)
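  // A quick check of the identity (assuming i8 and X = 2):
  //   (1 << 2) | ((1 << 2) - 1) = 4 | 3      = 0b00000111
  //   -1 l>> (8 - 1 - 2)        = 0xFF l>> 5 = 0b00000111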
  if (match(&I, m_c_Or(m_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()),
                       m_Shl(m_One(), m_Deferred(X)))) &&
      match(&I, m_c_Or(m_OneUse(m_Value()), m_Value()))) {
    Value *Sub = Builder.CreateSub(
        ConstantInt::get(Ty, Ty->getScalarSizeInBits() - 1), X);
    return BinaryOperator::CreateLShr(Constant::getAllOnesValue(Ty), Sub);
  }

  // An 'or' recurrence with a loop-invariant step is equivalent to
  // (or start, step).
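  // Justification: 'or' only ever sets bits, so after the first iteration the
  // recurrence value is start | step, and or'ing in step again is a no-op.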
  PHINode *PN = nullptr;
  Value *Start = nullptr, *Step = nullptr;
  if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
    return replaceInstUsesWith(I, Builder.CreateOr(Start, Step));

  // (A & B) | (C | D) or (C | D) | (A & B)
  // Can be combined if C or D is of type (A/B & X)
  if (match(&I, m_c_Or(m_OneUse(m_And(m_Value(A), m_Value(B))),
                       m_OneUse(m_Or(m_Value(C), m_Value(D)))))) {
    // (A & B) | (C | ?) -> C | (? | (A & B))
    // (C | ?) | (A & B) -> C | (? | (A & B))
    // (plus the variants where '?' contains B instead of A)
    if (match(D, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
        match(D, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
      return BinaryOperator::CreateOr(
          C, Builder.CreateOr(D, Builder.CreateAnd(A, B)));
    // (A & B) | (? | D) -> (? | (A & B)) | D
    // (? | D) | (A & B) -> (? | (A & B)) | D
    // (plus the variants where '?' contains B instead of A)
    if (match(C, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
        match(C, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
      return BinaryOperator::CreateOr(
          Builder.CreateOr(C, Builder.CreateAnd(A, B)), D);
  }

  if (Instruction *R = reassociateForUses(I, Builder))
    return R;

  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
    return Canonicalized;

  if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
    return Folded;

  if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
    return Res;

  // If we are setting the sign bit of a floating-point value, convert
  // this to fneg(fabs), then cast back to integer.
  //
  // If the result isn't immediately cast back to a float, this will increase
  // the number of instructions. This is still probably a better canonical form
  // as it enables FP value tracking.
  //
  // Assumes any IEEE-represented type has the sign bit in the high bit.
  //
  // This is a generous interpretation of noimplicitfloat; this is not a true
  // floating-point operation.
  Value *CastOp;
  if (match(Op0, m_ElementWiseBitCast(m_Value(CastOp))) &&
      match(Op1, m_SignMask()) &&
      !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
          Attribute::NoImplicitFloat)) {
    Type *EltTy = CastOp->getType()->getScalarType();
    if (EltTy->isFloatingPointTy() &&
        APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
      Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
      Value *FNegFAbs = Builder.CreateFNeg(FAbs);
      return new BitCastInst(FNegFAbs, I.getType());
    }
  }

  // (X & C1) | C2 -> X & (C1 | C2) iff (X & C2) == C2
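  // For instance (an illustrative case with i8 masks): if bit 0 of X is known
  // one, then (X & 0b1100) | 0b0001 == X & 0b1101, since the bits of C2 are
  // already guaranteed present in X.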
  if (match(Op0, m_OneUse(m_And(m_Value(X), m_APInt(C1)))) &&
      match(Op1, m_APInt(C2))) {
    KnownBits KnownX = computeKnownBits(X, &I);
    if ((KnownX.One & *C2) == *C2)
      return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *C1 | *C2));
  }

  if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
    return Res;

  if (Value *V =
          simplifyAndOrWithOpReplaced(Op0, Op1, Constant::getNullValue(Ty),
                                      /*SimplifyOnly*/ false, *this))
    return BinaryOperator::CreateOr(V, Op1);
  if (Value *V =
          simplifyAndOrWithOpReplaced(Op1, Op0, Constant::getNullValue(Ty),
                                      /*SimplifyOnly*/ false, *this))
    return BinaryOperator::CreateOr(Op0, V);

  if (cast<PossiblyDisjointInst>(I).isDisjoint())
    if (Value *V = SimplifyAddWithRemainder(I))
      return replaceInstUsesWith(I, V);

  return nullptr;
}

/// A ^ B can be specified using other logic ops in a variety of patterns. We
/// can fold these early and efficiently by morphing an existing instruction.
static Instruction *foldXorToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Xor);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // There are 4 commuted variants for each of the basic patterns.

  // (A & B) ^ (A | B) -> A ^ B
  // (A & B) ^ (B | A) -> A ^ B
  // (A | B) ^ (A & B) -> A ^ B
  // (A | B) ^ (B & A) -> A ^ B
  if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)),
                        m_c_Or(m_Deferred(A), m_Deferred(B)))))
    return BinaryOperator::CreateXor(A, B);

  // (A | ~B) ^ (~A | B) -> A ^ B
  // (~B | A) ^ (~A | B) -> A ^ B
  // (~A | B) ^ (A | ~B) -> A ^ B
  // (B | ~A) ^ (A | ~B) -> A ^ B
  if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))),
                      m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
    return BinaryOperator::CreateXor(A, B);

  // (A & ~B) ^ (~A & B) -> A ^ B
  // (~B & A) ^ (~A & B) -> A ^ B
  // (~A & B) ^ (A & ~B) -> A ^ B
  // (B & ~A) ^ (A & ~B) -> A ^ B
  if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))),
                      m_c_And(m_Not(m_Deferred(A)), m_Deferred(B)))))
    return BinaryOperator::CreateXor(A, B);

  // For the remaining cases we need to get rid of one of the operands.
  if (!Op0->hasOneUse() && !Op1->hasOneUse())
    return nullptr;

  // (A | B) ^ ~(A & B) -> ~(A ^ B)
  // (A | B) ^ ~(B & A) -> ~(A ^ B)
  // (A & B) ^ ~(A | B) -> ~(A ^ B)
  // (A & B) ^ ~(B | A) -> ~(A ^ B)
  // Complexity sorting ensures the not will be on the right side.
  if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
       match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
      (match(Op0, m_And(m_Value(A), m_Value(B))) &&
       match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
    return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  return nullptr;
}

Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
                                        BinaryOperator &I) {
  assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
         I.getOperand(1) == RHS && "Should be 'xor' with these operands");

  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);

  if (predicatesFoldable(PredL, PredR)) {
    if (LHS0 == RHS1 && LHS1 == RHS0) {
      std::swap(LHS0, LHS1);
      PredL = ICmpInst::getSwappedPredicate(PredL);
    }
    if (LHS0 == RHS0 && LHS1 == RHS1) {
      // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
      unsigned Code = getICmpCode(PredL) ^ getICmpCode(PredR);
      bool IsSigned = LHS->isSigned() || RHS->isSigned();
      return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
    }
  }

  const APInt *LC, *RC;
  if (match(LHS1, m_APInt(LC)) && match(RHS1, m_APInt(RC)) &&
      LHS0->getType() == RHS0->getType() &&
      LHS0->getType()->isIntOrIntVectorTy()) {
    // Convert xor of signbit tests to signbit test of xor'd values:
    // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
    // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0
    // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1
    // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1
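    // This works because each compare is just a copy of (or a 'not' of) its
    // operand's sign bit, and the xor of the sign bits is the sign bit of the
    // xor'd values.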
    bool TrueIfSignedL, TrueIfSignedR;
    if ((LHS->hasOneUse() || RHS->hasOneUse()) &&
        isSignBitCheck(PredL, *LC, TrueIfSignedL) &&
        isSignBitCheck(PredR, *RC, TrueIfSignedR)) {
      Value *XorLR = Builder.CreateXor(LHS0, RHS0);
      return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(XorLR)
                                            : Builder.CreateIsNotNeg(XorLR);
    }

    // Fold (icmp pred1 X, C1) ^ (icmp pred2 X, C2)
    // into a single comparison using range-based reasoning.
    if (LHS0 == RHS0) {
      ConstantRange CR1 = ConstantRange::makeExactICmpRegion(PredL, *LC);
      ConstantRange CR2 = ConstantRange::makeExactICmpRegion(PredR, *RC);
      auto CRUnion = CR1.exactUnionWith(CR2);
      auto CRIntersect = CR1.exactIntersectWith(CR2);
      if (CRUnion && CRIntersect)
        if (auto CR = CRUnion->exactIntersectWith(CRIntersect->inverse())) {
          if (CR->isFullSet())
            return ConstantInt::getTrue(I.getType());
          if (CR->isEmptySet())
            return ConstantInt::getFalse(I.getType());

          CmpInst::Predicate NewPred;
          APInt NewC, Offset;
          CR->getEquivalentICmp(NewPred, NewC, Offset);

          if ((Offset.isZero() && (LHS->hasOneUse() || RHS->hasOneUse())) ||
              (LHS->hasOneUse() && RHS->hasOneUse())) {
            Value *NewV = LHS0;
            Type *Ty = LHS0->getType();
            if (!Offset.isZero())
              NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
            return Builder.CreateICmp(NewPred, NewV,
                                      ConstantInt::get(Ty, NewC));
          }
        }
    }

    // Fold (icmp eq/ne (X & Pow2), 0) ^ (icmp eq/ne (Y & Pow2), 0) into
    // (icmp eq/ne ((X ^ Y) & Pow2), 0)
    Value *X, *Y, *Pow2;
    if (ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
        LC->isZero() && RC->isZero() && LHS->hasOneUse() && RHS->hasOneUse() &&
        match(LHS0, m_And(m_Value(X), m_Value(Pow2))) &&
        match(RHS0, m_And(m_Value(Y), m_Specific(Pow2))) &&
        isKnownToBeAPowerOfTwo(Pow2, /*OrZero=*/true, &I)) {
      Value *Xor = Builder.CreateXor(X, Y);
      Value *And = Builder.CreateAnd(Xor, Pow2);
      return Builder.CreateICmp(PredL == PredR ? ICmpInst::ICMP_NE
                                               : ICmpInst::ICMP_EQ,
                                And, ConstantInt::getNullValue(Xor->getType()));
    }
  }

  // Instead of trying to imitate the folds for and/or, decompose this 'xor'
  // into those logic ops. That is, try to turn this into an and-of-icmps
  // because we have many folds for that pattern.
  //
  // This is based on a truth table definition of xor:
  // X ^ Y --> (X | Y) & !(X & Y)
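  // (Both sides are false for 0/0, true for 0/1 and 1/0, and for 1/1 the
  // negated conjunction zeroes the result, matching xor.)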
  if (Value *OrICmp = simplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
    // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
    // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
    if (Value *AndICmp = simplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
      // TODO: Independently handle cases where the 'and' side is a constant.
      ICmpInst *X = nullptr, *Y = nullptr;
      if (OrICmp == LHS && AndICmp == RHS) {
        // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS --> X & !Y
        X = LHS;
        Y = RHS;
      }
      if (OrICmp == RHS && AndICmp == LHS) {
        // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS --> !Y & X
        X = RHS;
        Y = LHS;
      }
      if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) {
        // Invert the predicate of 'Y', thus inverting its output.
        Y->setPredicate(Y->getInversePredicate());
        // So, are there other uses of Y?
        if (!Y->hasOneUse()) {
          // We need to adapt other uses of Y though. Get a value that matches
          // the original value of Y before inversion. While this increases
          // immediate instruction count, we have just ensured that all the
          // users are freely-invertible, so that 'not' *will* get folded away.
          BuilderTy::InsertPointGuard Guard(Builder);
          // Set insertion point to right after the Y.
          Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
          Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
          // Replace all uses of Y (excluding the one in NotY!) with NotY.
          Worklist.pushUsersToWorkList(*Y);
          Y->replaceUsesWithIf(NotY,
                               [NotY](Use &U) { return U.getUser() != NotY; });
        }
        // All done.
        return Builder.CreateAnd(LHS, RHS);
      }
    }
  }

  return nullptr;
}

/// If we have a masked merge, in the canonical form of:
/// (assuming that A only has one use.)
///    |        A        |  |B|
///   ((x ^ y) & M) ^ y
///    |  D  |
/// * If M is inverted:
///    |  D  |
///   ((x ^ y) & ~M) ^ y
/// We can canonicalize by swapping the final xor operand
/// to eliminate the 'not' of the mask.
///   ((x ^ y) & M) ^ x
/// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
/// because that shortens the dependency chain and improves analysis:
///   (x & M) | (y & ~M)
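/// A bit-level sanity check (assuming i4, x = 0b1010, y = 0b0101, M = 0b1100):
///   ((x ^ y) & M) ^ y = (0b1111 & 0b1100) ^ 0b0101 = 0b1001,
/// which equals (x & M) | (y & ~M) = 0b1000 | 0b0001 = 0b1001: the merge picks
/// bits from x where M is set and from y where it is clear.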
static Instruction *visitMaskedMerge(BinaryOperator &I,
                                     InstCombiner::BuilderTy &Builder) {
  Value *B, *X, *D;
  Value *M;
  if (!match(&I, m_c_Xor(m_Value(B),
                         m_OneUse(m_c_And(
                             m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)),
                                          m_Value(D)),
                             m_Value(M))))))
    return nullptr;

  Value *NotM;
  if (match(M, m_Not(m_Value(NotM)))) {
    // De-invert the mask and swap the value in B part.
    Value *NewA = Builder.CreateAnd(D, NotM);
    return BinaryOperator::CreateXor(NewA, X);
  }

  Constant *C;
  if (D->hasOneUse() && match(M, m_Constant(C))) {
    // Propagating undef is unsafe. Clamp undef elements to -1.
    Type *EltTy = C->getType()->getScalarType();
    C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy));
    // Unfold.
    Value *LHS = Builder.CreateAnd(X, C);
    Value *NotC = Builder.CreateNot(C);
    Value *RHS = Builder.CreateAnd(B, NotC);
    return BinaryOperator::CreateOr(LHS, RHS);
  }

  return nullptr;
}

static Instruction *foldNotXor(BinaryOperator &I,
                               InstCombiner::BuilderTy &Builder) {
  Value *X, *Y;
  // FIXME: one-use check is not needed in general, but currently we are unable
  // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182)
  if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y))))))
    return nullptr;

  auto hasCommonOperand = [](Value *A, Value *B, Value *C, Value *D) {
    return A == C || A == D || B == C || B == D;
  };

  Value *A, *B, *C, *D;
  // Canonicalize ~((A & B) ^ (A | ?)) -> (A & B) | ~(A | ?)
  // 4 commuted variants
  if (match(X, m_And(m_Value(A), m_Value(B))) &&
      match(Y, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) {
    Value *NotY = Builder.CreateNot(Y);
    return BinaryOperator::CreateOr(X, NotY);
  }

  // Canonicalize ~((A | ?) ^ (A & B)) -> (A & B) | ~(A | ?)
  // 4 commuted variants
  if (match(Y, m_And(m_Value(A), m_Value(B))) &&
      match(X, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) {
    Value *NotX = Builder.CreateNot(X);
    return BinaryOperator::CreateOr(Y, NotX);
  }

  return nullptr;
}

/// Canonicalize a shifty way to code absolute value to the more common pattern
/// that uses negation and select.
static Instruction *canonicalizeAbs(BinaryOperator &Xor,
                                    InstCombiner::BuilderTy &Builder) {
  assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction.");

  // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
  // We're relying on the fact that we only do this transform when the shift has
  // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
  // instructions).
  Value *Op0 = Xor.getOperand(0), *Op1 = Xor.getOperand(1);
  if (Op0->hasNUses(2))
    std::swap(Op0, Op1);

  Type *Ty = Xor.getType();
  Value *A;
  const APInt *ShAmt;
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
      match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
    // Op1 = ashr i32 A, 31   ; smear the sign bit
    // xor (add A, Op1), Op1  ; add -1 and flip bits if negative
    // --> (A < 0) ? -A : A
    Value *IsNeg = Builder.CreateIsNeg(A);
    // Copy the nsw flags from the add to the negate.
    auto *Add = cast<BinaryOperator>(Op0);
    Value *NegA = Add->hasNoUnsignedWrap()
                      ? Constant::getNullValue(A->getType())
                      : Builder.CreateNeg(A, "", Add->hasNoSignedWrap());
    return SelectInst::Create(IsNeg, NegA, A);
  }
  return nullptr;
}

static bool canFreelyInvert(InstCombiner &IC, Value *Op,
                            Instruction *IgnoredUser) {
  auto *I = dyn_cast<Instruction>(Op);
  return I && IC.isFreeToInvert(I, /*WillInvertAllUses=*/true) &&
         IC.canFreelyInvertAllUsersOf(I, IgnoredUser);
}

static Value *freelyInvert(InstCombinerImpl &IC, Value *Op,
                           Instruction *IgnoredUser) {
  auto *I = cast<Instruction>(Op);
  IC.Builder.SetInsertPoint(*I->getInsertionPointAfterDef());
  Value *NotOp = IC.Builder.CreateNot(Op, Op->getName() + ".not");
  Op->replaceUsesWithIf(NotOp,
                        [NotOp](Use &U) { return U.getUser() != NotOp; });
  IC.freelyInvertAllUsersOf(NotOp, IgnoredUser);
  return NotOp;
}

// Transform
//   z = ~(x &/| y)
// into:
//   z = ((~x) |/& (~y))
// iff both x and y are free to invert and all uses of z can be freely updated.
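// This is just De Morgan with the 'not' sunk into the operands:
//   ~(x & y) == ~x | ~y  and  ~(x | y) == ~x & ~y,
// applied only when the inversions are free on both sides.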
bool InstCombinerImpl::sinkNotIntoLogicalOp(Instruction &I) {
  Value *Op0, *Op1;
  if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
    return false;

  // If this logic op has not been simplified yet, just bail out and let that
  // happen first. Otherwise, the code below may wrongly invert.
  if (Op0 == Op1)
    return false;

  // If one of the operands is a user of the other,
  // freelyInvert->freelyInvertAllUsersOf will change the operands of I, which
  // may cause miscompilation.
  if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
    return false;

  Instruction::BinaryOps NewOpc =
      match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
  bool IsBinaryOp = isa<BinaryOperator>(I);

  // Can our users be adapted?
  if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
    return false;

  // And can the operands be adapted?
  if (!canFreelyInvert(*this, Op0, &I) || !canFreelyInvert(*this, Op1, &I))
    return false;

  Op0 = freelyInvert(*this, Op0, &I);
  Op1 = freelyInvert(*this, Op1, &I);

  Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
  Value *NewLogicOp;
  if (IsBinaryOp)
    NewLogicOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
  else
    NewLogicOp =
        Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");

  replaceInstUsesWith(I, NewLogicOp);
  // We can not just create an outer `not`, it will most likely be immediately
  // folded back, reconstructing our initial pattern, and causing an
  // infinite combine loop, so immediately manually fold it away.
  freelyInvertAllUsersOf(NewLogicOp);
  return true;
}

// Transform
//   z = (~x) &/| y
// into:
//   z = ~(x |/& (~y))
// iff y is free to invert and all uses of z can be freely updated.
bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) {
  Value *Op0, *Op1;
  if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
    return false;
  Instruction::BinaryOps NewOpc =
      match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
  bool IsBinaryOp = isa<BinaryOperator>(I);

  Value *NotOp0 = nullptr;
  Value *NotOp1 = nullptr;
  Value **OpToInvert = nullptr;
  if (match(Op0, m_Not(m_Value(NotOp0))) && canFreelyInvert(*this, Op1, &I)) {
    Op0 = NotOp0;
    OpToInvert = &Op1;
  } else if (match(Op1, m_Not(m_Value(NotOp1))) &&
             canFreelyInvert(*this, Op0, &I)) {
    Op1 = NotOp1;
    OpToInvert = &Op0;
  } else
    return false;

  // And can our users be adapted?
  if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
    return false;

  *OpToInvert = freelyInvert(*this, *OpToInvert, &I);

  Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
  Value *NewBinOp;
  if (IsBinaryOp)
    NewBinOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
  else
    NewBinOp = Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");
  replaceInstUsesWith(I, NewBinOp);
  // We can not just create an outer `not`, it will most likely be immediately
  // folded back, reconstructing our initial pattern, and causing an
  // infinite combine loop, so immediately manually fold it away.
  freelyInvertAllUsersOf(NewBinOp);
  return true;
}

Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
  Value *NotOp;
  if (!match(&I, m_Not(m_Value(NotOp))))
    return nullptr;

  // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
  // We must eliminate the and/or (one-use) for these transforms to not increase
  // the instruction count.
  //
  // ~(~X & Y) --> (X | ~Y)
  // ~(Y & ~X) --> (X | ~Y)
  //
  // Note: The logical matches do not check for the commuted patterns because
  // those are handled via SimplifySelectsFeedingBinaryOp().
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(NotOp, m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return BinaryOperator::CreateOr(X, NotY);
  }
  if (match(NotOp, m_OneUse(m_LogicalAnd(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return SelectInst::Create(X, ConstantInt::getTrue(Ty), NotY);
  }

  // ~(~X | Y) --> (X & ~Y)
  // ~(Y | ~X) --> (X & ~Y)
  if (match(NotOp, m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return BinaryOperator::CreateAnd(X, NotY);
  }
  if (match(NotOp, m_OneUse(m_LogicalOr(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return SelectInst::Create(X, NotY, ConstantInt::getFalse(Ty));
  }

  // Is this a 'not' (~) fed by a binary operator?
  BinaryOperator *NotVal;
  if (match(NotOp, m_BinOp(NotVal))) {
    // ~((-X) | Y) --> (X - 1) & (~Y)
    if (match(NotVal,
              m_OneUse(m_c_Or(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))) {
      Value *DecX = Builder.CreateAdd(X, ConstantInt::getAllOnesValue(Ty));
      Value *NotY = Builder.CreateNot(Y);
      return BinaryOperator::CreateAnd(DecX, NotY);
    }

    // ~(~X >>s Y) --> (X >>s Y)
    if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
      return BinaryOperator::CreateAShr(X, Y);

    // Treat lshr with non-negative operand as ashr.
    // ~(~X >>u Y) --> (X >>s Y) iff X is known negative
    if (match(NotVal, m_LShr(m_Not(m_Value(X)), m_Value(Y))) &&
        isKnownNegative(X, SQ.getWithInstruction(NotVal)))
      return BinaryOperator::CreateAShr(X, Y);

    // Bit-hack form of a signbit test for iN type:
    // ~(X >>s (N - 1)) --> sext i1 (X > -1) to iN
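    // A quick check (assuming i8): for X = 5, ashr 5, 7 == 0 and ~0 == -1,
    // matching sext(5 > -1) == -1; for X = -5, ashr -5, 7 == -1 and ~-1 == 0,
    // matching sext(false) == 0.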
    unsigned FullShift = Ty->getScalarSizeInBits() - 1;
    if (match(NotVal, m_OneUse(m_AShr(m_Value(X), m_SpecificInt(FullShift))))) {
      Value *IsNotNeg = Builder.CreateIsNotNeg(X, "isnotneg");
      return new SExtInst(IsNotNeg, Ty);
    }

    // If we are inverting a right-shifted constant, we may be able to eliminate
    // the 'not' by inverting the constant and using the opposite shift type.
    // Canonicalization rules ensure that only a negative constant uses 'ashr',
    // but we must check that in case that transform has not fired yet.

    // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
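    // For instance (assuming i8): ~(0xF0 >>s 2) = ~0xFC = 0x03, and
    // ~0xF0 >>u 2 = 0x0F >>u 2 = 0x03, so the inverted lshr form matches.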
    Constant *C;
    if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) &&
        match(C, m_Negative()))
      return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y);

    // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
    if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) &&
        match(C, m_NonNegative()))
      return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y);

    // ~(X + C) --> ~C - X
    if (match(NotVal, m_Add(m_Value(X), m_ImmConstant(C))))
      return BinaryOperator::CreateSub(ConstantExpr::getNot(C), X);

    // ~(X - Y) --> ~X + Y
    // FIXME: is it really beneficial to sink the `not` here?
    if (match(NotVal, m_Sub(m_Value(X), m_Value(Y))))
      if (isa<Constant>(X) || NotVal->hasOneUse())
        return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y);

    // ~(~X + Y) --> X - Y
    if (match(NotVal, m_c_Add(m_Not(m_Value(X)), m_Value(Y))))
      return BinaryOperator::CreateWithCopiedFlags(Instruction::Sub, X, Y,
                                                   NotVal);
  }

  // not (cmp A, B) = !cmp A, B
  CmpPredicate Pred;
  if (match(NotOp, m_Cmp(Pred, m_Value(), m_Value())) &&
      (NotOp->hasOneUse() ||
       InstCombiner::canFreelyInvertAllUsersOf(cast<Instruction>(NotOp),
                                               /*IgnoredUser=*/nullptr))) {
    cast<CmpInst>(NotOp)->setPredicate(CmpInst::getInversePredicate(Pred));
    freelyInvertAllUsersOf(NotOp);
    return &I;
  }

  // Move a 'not' ahead of casts of a bool to enable logic reduction:
  // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
  if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) &&
      X->getType()->isIntOrIntVectorTy(1)) {
    Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
    Value *NotX = Builder.CreateNot(X);
    Value *Sext = Builder.CreateSExt(NotX, SextTy);
    return new BitCastInst(Sext, Ty);
  }

  if (auto *NotOpI = dyn_cast<Instruction>(NotOp))
    if (sinkNotIntoLogicalOp(*NotOpI))
      return &I;

  // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
  // ~min(~X, ~Y) --> max(X, Y)
  // ~max(~X, Y) --> min(X, ~Y)
  auto *II = dyn_cast<IntrinsicInst>(NotOp);
  if (II && II->hasOneUse()) {
    if (match(NotOp, m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y)))) {
      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
      Value *NotY = Builder.CreateNot(Y);
      Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotY);
      return replaceInstUsesWith(I, InvMaxMin);
    }

    if (II->getIntrinsicID() == Intrinsic::is_fpclass) {
      ConstantInt *ClassMask = cast<ConstantInt>(II->getArgOperand(1));
      II->setArgOperand(
          1, ConstantInt::get(ClassMask->getType(),
                              ~ClassMask->getZExtValue() & fcAllFlags));
      return replaceInstUsesWith(I, II);
    }
  }

  if (NotOp->hasOneUse()) {
    // Pull 'not' into operands of select if both operands are one-use compares
    // or one is a one-use compare and the other one is a constant.
    // Inverting the predicates eliminates the 'not' operation.
    // Example:
    //   not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?)) -->
    //   select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?)
    //   not (select ?, (cmp TPred, ?, ?), true) -->
    //   select ?, (cmp InvTPred, ?, ?), false
    if (auto *Sel = dyn_cast<SelectInst>(NotOp)) {
      Value *TV = Sel->getTrueValue();
      Value *FV = Sel->getFalseValue();
      auto *CmpT = dyn_cast<CmpInst>(TV);
      auto *CmpF = dyn_cast<CmpInst>(FV);
      bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(TV);
      bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(FV);
      if (InvertibleT && InvertibleF) {
        if (CmpT)
          CmpT->setPredicate(CmpT->getInversePredicate());
        else
          Sel->setTrueValue(ConstantExpr::getNot(cast<Constant>(TV)));
        if (CmpF)
          CmpF->setPredicate(CmpF->getInversePredicate());
        else
          Sel->setFalseValue(ConstantExpr::getNot(cast<Constant>(FV)));
        return replaceInstUsesWith(I, Sel);
      }
    }
  }

  if (Instruction *NewXor = foldNotXor(I, Builder))
    return NewXor;

  // TODO: Could handle multi-use better by checking if all uses of NotOp (other
  // than I) can be inverted.
  if (Value *R = getFreelyInverted(NotOp, NotOp->hasOneUse(), &Builder))
    return replaceInstUsesWith(I, R);

  return nullptr;
}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
  if (Value *V = simplifyXorInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *NewXor = foldXorToXor(I, Builder))
    return NewXor;

  // (A&B)^(A&C) -> A&(B^C) etc
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (Instruction *R = foldNot(I))
    return R;

  if (Instruction *R = foldBinOpShiftWithShift(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y, *M;

  // (X | Y) ^ M -> (X ^ M) ^ Y
  // (X | Y) ^ M -> (Y ^ M) ^ X
  if (match(&I, m_c_Xor(m_OneUse(m_DisjointOr(m_Value(X), m_Value(Y))),
                        m_Value(M)))) {
    if (Value *XorAC = simplifyXorInst(X, M, SQ.getWithInstruction(&I)))
      return BinaryOperator::CreateXor(XorAC, Y);

    if (Value *XorBC = simplifyXorInst(Y, M, SQ.getWithInstruction(&I)))
      return BinaryOperator::CreateXor(XorBC, X);
  }

  // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
  // This is a special case in haveNoCommonBitsSet, but the computeKnownBits
  // calls in there are unnecessary as SimplifyDemandedInstructionBits should
  // have already taken care of those cases.
  if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
                        m_c_And(m_Deferred(M), m_Value())))) {
    if (isGuaranteedNotToBeUndef(M))
      return BinaryOperator::CreateDisjointOr(Op0, Op1);
    else
      return BinaryOperator::CreateOr(Op0, Op1);
  }

  if (Instruction *Xor = visitMaskedMerge(I, Builder))
    return Xor;

  Constant *C1;
  if (match(Op1, m_Constant(C1))) {
    Constant *C2;

    if (match(Op0, m_OneUse(m_Or(m_Value(X), m_ImmConstant(C2)))) &&
        match(C1, m_ImmConstant())) {
      // (X | C2) ^ C1 --> (X & ~C2) ^ (C1^C2)
      C2 = Constant::replaceUndefsWith(
          C2, Constant::getAllOnesValue(C2->getType()->getScalarType()));
      Value *And = Builder.CreateAnd(
          X, Constant::mergeUndefsWith(ConstantExpr::getNot(C2), C1));
      return BinaryOperator::CreateXor(
          And, Constant::mergeUndefsWith(ConstantExpr::getXor(C1, C2), C1));
    }

    // Use DeMorgan and reassociation to eliminate a 'not' op.
    if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) {
      // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
      Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2));
      return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1));
    }
    if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) {
      // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
      Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2));
      return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1));
    }

    // Convert xor ([trunc] (ashr X, BW-1)), C =>
    //   select(X >s -1, C, ~C)
    // The ashr produces an all-zeros or all-ones value, so the xor either
    // leaves the constant alone or inverts it, depending on whether the input
    // is less than 0.
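    // E.g. (an illustration, assuming i8 and C = 0x0F): for X = 7 the ashr
    // gives 0 and the xor leaves 0x0F; for X = -7 it gives -1 and the xor
    // yields ~0x0F = 0xF0.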
    const APInt *CA;
    if (match(Op0, m_OneUse(m_TruncOrSelf(
                       m_AShr(m_Value(X), m_APIntAllowPoison(CA))))) &&
        *CA == X->getType()->getScalarSizeInBits() - 1 &&
        !match(C1, m_AllOnes())) {
      assert(!C1->isZeroValue() && "Unexpected xor with 0");
      Value *IsNotNeg = Builder.CreateIsNotNeg(X);
      return SelectInst::Create(IsNotNeg, Op1, Builder.CreateNot(Op1));
    }
  }

  Type *Ty = I.getType();
  {
    const APInt *RHSC;
    if (match(Op1, m_APInt(RHSC))) {
      Value *X;
      const APInt *C;
      // (C - X) ^ signmaskC --> (C + signmaskC) - X
      if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X))))
        return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C + *RHSC), X);

      // (X + C) ^ signmaskC --> X + (C + signmaskC)
      if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C))))
        return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C + *RHSC));

      // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0
      if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
          MaskedValueIsZero(X, *C, &I))
        return BinaryOperator::CreateXor(X, ConstantInt::get(Ty, *C ^ *RHSC));

      // When X is a power-of-two or zero and zero input is poison:
      // ctlz(i32 X) ^ 31 --> cttz(X)
      // cttz(i32 X) ^ 31 --> ctlz(X)
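      // E.g. for X = 16: ctlz(16) = 27 and 27 ^ 31 = 4 = cttz(16). For a
      // power of two 2^k, ctlz is 31 - k and cttz is k, and xor'ing either
      // with 31 recovers the other.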
      auto *II = dyn_cast<IntrinsicInst>(Op0);
      if (II && II->hasOneUse() && *RHSC == Ty->getScalarSizeInBits() - 1) {
        Intrinsic::ID IID = II->getIntrinsicID();
        if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
            match(II->getArgOperand(1), m_One()) &&
            isKnownToBeAPowerOfTwo(II->getArgOperand(0), /*OrZero=*/true)) {
          IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
          Function *F =
              Intrinsic::getOrInsertDeclaration(II->getModule(), IID, Ty);
          return CallInst::Create(F, {II->getArgOperand(0), Builder.getTrue()});
        }
      }

      // If RHSC is inverting the remaining bits of shifted X,
      // canonicalize to a 'not' before the shift to help SCEV and codegen:
      // (X << C) ^ RHSC --> ~X << C
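      // E.g. (assuming i8, C = 4): (X << 4) ^ 0xF0 flips exactly the bits the
      // shift kept, so it equals (~X) << 4; the low bits are zero either way.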
      if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_APInt(C)))) &&
          *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).shl(*C)) {
        Value *NotX = Builder.CreateNot(X);
        return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *C));
      }
      // (X >>u C) ^ RHSC --> ~X >>u C
      if (match(Op0, m_OneUse(m_LShr(m_Value(X), m_APInt(C)))) &&
          *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).lshr(*C)) {
        Value *NotX = Builder.CreateNot(X);
        return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *C));
      }
      // TODO: We could handle 'ashr' here as well. That would be matching
      //       a 'not' op and moving it before the shift. Doing that requires
      //       preventing the inverse fold in canShiftBinOpWithConstantRHS().
    }

    // If we are XORing the sign bit of a floating-point value, convert
    // this to fneg, then cast back to integer.
    //
    // This is a generous interpretation of noimplicitfloat; this is not a true
    // floating-point operation.
    //
    // Assumes any IEEE-represented type has the sign bit in the high bit.
    // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
    Value *CastOp;
    if (match(Op0, m_ElementWiseBitCast(m_Value(CastOp))) &&
        match(Op1, m_SignMask()) &&
        !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
            Attribute::NoImplicitFloat)) {
      Type *EltTy = CastOp->getType()->getScalarType();
      if (EltTy->isFloatingPointTy() &&
          APFloat::hasSignBitInMSB(EltTy->getFltSemantics())) {
        Value *FNeg = Builder.CreateFNeg(CastOp);
        return new BitCastInst(FNeg, I.getType());
      }
    }
  }

  // FIXME: This should not be limited to scalar (pull into APInt match above).
  {
    Value *X;
    ConstantInt *C1, *C2, *C3;
    // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
    if (match(Op1, m_ConstantInt(C3)) &&
        match(Op0, m_LShr(m_Xor(m_Value(X), m_ConstantInt(C1)),
                          m_ConstantInt(C2))) &&
        Op0->hasOneUse()) {
      // fold (C1 >> C2) ^ C3
      APInt FoldConst = C1->getValue().lshr(C2->getValue());
      FoldConst ^= C3->getValue();
      // Prepare the two operands.
      auto *Opnd0 = Builder.CreateLShr(X, C2);
      Opnd0->takeName(Op0);
      return BinaryOperator::CreateXor(Opnd0, ConstantInt::get(Ty, FoldConst));
    }
  }

  if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
    return FoldedLogic;

  // Y ^ (X | Y) --> X & ~Y
  // Y ^ (Y | X) --> X & ~Y
  if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0)))))
    return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0));
  // (X | Y) ^ Y --> X & ~Y
  // (Y | X) ^ Y --> X & ~Y
  if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1)))))
    return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1));

  // Y ^ (X & Y) --> ~X & Y
  // Y ^ (Y & X) --> ~X & Y
  if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0)))))
    return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X));
  // (X & Y) ^ Y --> ~X & Y
  // (Y & X) ^ Y --> ~X & Y
  // Canonical form is (X & C) ^ C; don't touch that.
  // TODO: A 'not' op is better for analysis and codegen, but demanded bits must
  // be fixed to prefer that (otherwise we get infinite looping).
  if (!match(Op1, m_Constant()) &&
      match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1)))))
    return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X));

  Value *A, *B, *C;
  // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
                        m_OneUse(m_c_Or(m_Deferred(A), m_Value(C))))))
    return BinaryOperator::CreateXor(
        Builder.CreateAnd(Builder.CreateNot(A), C), B);

  // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
                        m_OneUse(m_c_Or(m_Deferred(B), m_Value(C))))))
    return BinaryOperator::CreateXor(
        Builder.CreateAnd(Builder.CreateNot(B), C), A);

  // (A & B) ^ (A ^ B) -> (A | B)
  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
      match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateOr(A, B);
  // (A ^ B) ^ (A & B) -> (A | B)
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
      match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateOr(A, B);

  // (A & ~B) ^ ~A -> ~(A & B)
  // (~B & A) ^ ~A -> ~(A & B)
  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_Not(m_Specific(A))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));

  // (~A & B) ^ A --> A | B -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(A)), m_Value(B)), m_Deferred(A))))
    return BinaryOperator::CreateOr(A, B);

  // (~A | B) ^ A --> ~(A & B)
  if (match(Op0, m_OneUse(m_c_Or(m_Not(m_Specific(Op1)), m_Value(B)))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(Op1, B));

  // A ^ (~A | B) --> ~(A & B)
  if (match(Op1, m_OneUse(m_c_Or(m_Not(m_Specific(Op0)), m_Value(B)))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(Op0, B));

  // (A | B) ^ (A | C) --> (B ^ C) & ~A -- There are 4 commuted variants.
  // TODO: Loosen one-use restriction if common operand is a constant.
  Value *D;
  if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B)))) &&
      match(Op1, m_OneUse(m_Or(m_Value(C), m_Value(D))))) {
    if (B == C || B == D)
      std::swap(A, B);
    if (A == C)
      std::swap(C, D);
    if (A == D) {
      Value *NotA = Builder.CreateNot(A);
      return BinaryOperator::CreateAnd(Builder.CreateXor(B, C), NotA);
    }
  }

  // (A & B) ^ (A | C) --> A ? ~B : C -- There are 4 commuted variants.
  if (I.getType()->isIntOrIntVectorTy(1) &&
      match(&I, m_c_Xor(m_OneUse(m_LogicalAnd(m_Value(A), m_Value(B))),
                        m_OneUse(m_LogicalOr(m_Value(C), m_Value(D)))))) {
    bool NeedFreeze = isa<SelectInst>(Op0) && isa<SelectInst>(Op1) && B == D;
    if (B == C || B == D)
      std::swap(A, B);
    if (A == C)
      std::swap(C, D);
    if (A == D) {
      if (NeedFreeze)
        A = Builder.CreateFreeze(A);
      Value *NotB = Builder.CreateNot(B);
      return SelectInst::Create(A, NotB, C);
    }
  }

  if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
    if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
      if (Value *V = foldXorOfICmps(LHS, RHS, I))
        return replaceInstUsesWith(I, V);

  if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
    return CastedXor;

  if (Instruction *Abs = canonicalizeAbs(I, Builder))
    return Abs;

  // Otherwise, if all else failed, try to hoist the xor-by-constant:
  // (X ^ C) ^ Y --> (X ^ Y) ^ C
  // Just like we do in other places, we completely avoid the fold
  // for constantexprs, at least to avoid an endless combine loop.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_CombineAnd(m_Value(X),
                                                    m_Unless(m_ConstantExpr())),
                                       m_ImmConstant(C1))),
                        m_Value(Y))))
    return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);

  if (Instruction *R = reassociateForUses(I, Builder))
    return R;

  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
    return Canonicalized;

  if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
    return Folded;

  if (Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(I))
    return Folded;

  if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
    return Res;

  if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
    return Res;

  return nullptr;
}