//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// This is the complement of getICmpCode, which turns an opcode and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either an FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {
  FCmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForFCmpCode(Code, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateFCmp(NewPred, LHS, RHS);
}

/// Transform BITWISE_OP(BSWAP(A), BSWAP(B)) or
/// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B))
/// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
static Value *SimplifyBSwap(BinaryOperator &I,
                            InstCombiner::BuilderTy &Builder) {
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");

  Value *OldLHS = I.getOperand(0);
  Value *OldRHS = I.getOperand(1);

  Value *NewLHS;
  if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
    return nullptr;

  Value *NewRHS;
  const APInt *C;

  if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
    // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
    if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
      return nullptr;
    // NewRHS initialized by the matcher.
  } else if (match(OldRHS, m_APInt(C))) {
    // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
    if (!OldLHS->hasOneUse())
      return nullptr;
    NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
  } else
    return nullptr;

  Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
                                          I.getType());
  return Builder.CreateCall(F, BinOp);
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi.
/// isSigned indicates whether to treat V, Lo, and Hi as signed or not.
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V <  Hi --> V <  Hi
  // V <  Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V <  Hi --> V - Lo u<  Hi - Lo
  // V <  Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in the above descriptions "==" should be replaced by "!=".
///   Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///   (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///   (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes = 1,
  AMask_NotAllOnes = 2,
  BMask_AllOnes = 4,
  BMask_NotAllOnes = 8,
  Mask_AllZeros = 16,
  Mask_NotAllZeros = 32,
  AMask_Mixed = 64,
  AMask_NotMixed = 128,
  BMask_Mixed = 256,
  BMask_NotMixed = 512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  const APInt *ConstA = nullptr, *ConstB = nullptr, *ConstC = nullptr;
  match(A, m_APInt(ConstA));
  match(B, m_APInt(ConstB));
  match(C, m_APInt(ConstC));
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = ConstA && ConstA->isPowerOf2();
  bool IsBPow2 = ConstB && ConstB->isPowerOf2();
  unsigned MaskVal = 0;
  if (ConstC && ConstC->isZero()) {
    // If C is zero, then both A and B qualify as masks.
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ConstA && ConstC && ConstC->isSubsetOf(*ConstA)) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}

// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *LHS, Value *RHS,
                                 CmpInst::Predicate &Pred, Value *&X,
                                 Value *&Y, Value *&Z) {
  APInt Mask;
  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
    return false;

  Y = ConstantInt::get(X->getType(), Mask);
  Z = ConstantInt::get(X->getType(), 0);
  return true;
}

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
static std::optional<std::pair<unsigned, unsigned>> getMaskedTypeForICmpPair(
    Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, ICmpInst *LHS,
    ICmpInst *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR) {
  // Don't allow pointers. Splat vectors are fine.
  if (!LHS->getOperand(0)->getType()->isIntOrIntVectorTy() ||
      !RHS->getOperand(0)->getType()->isIntOrIntVectorTy())
    return std::nullopt;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // form above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return std::nullopt;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool Ok = false;
  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return std::nullopt;
    }
    E = R2;
    R1 = nullptr;
    Ok = true;
  } else {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return std::nullopt;

  // Look for ANDs on the right side of the RHS icmp.
  if (!Ok) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R1;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R1;
      Ok = true;
    } else {
      return std::nullopt;
    }

    assert(Ok && "Failed to find AND on the right side of the RHS icmp.");
  }

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return std::optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right-hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or; must be poison-safe.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E),
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //     !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle only the case where B, C, D, and E are constants.
  //
  const APInt *BCst, *CCst, *DCst, *OrigECst;
  if (!match(B, m_APInt(BCst)) || !match(C, m_APInt(CCst)) ||
      !match(D, m_APInt(DCst)) || !match(E, m_APInt(OrigECst)))
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  //   (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  //   (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  APInt ECst = *OrigECst;
  if (PredR != NewCC)
    ECst ^= *DCst;

  // If B or D is zero, skip: LHS or RHS can then be trivially folded by other
  // rules, and this pattern no longer applies.
  if (*BCst == 0 || *DCst == 0)
    return nullptr;

  // If B and D don't intersect, i.e. (B & D) == 0, don't fold because we can't
  // deduce anything from it.
  // For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding.
  if ((*BCst & *DCst) == 0)
    return nullptr;

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  //    (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection
  //    of B and D has only one bit set), and
  //
  // 2. RHS (and E) indicates that the bits B shares with D are zero (in other
  //    words, the value has no bits set in the intersection of B and D), that
  //    is, ((B & D) & E) == 0,
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((*BCst & *DCst) & ECst) == 0) &&
      (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
    APInt BorD = *BCst | *DCst;
    APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
    Value *NewMask = ConstantInt::get(A->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(A->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C1;
  };
  auto IsSuperSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C2;
  };

  // In the following, we consider only the cases where B is a superset of D,
  // B is a subset of D, or B == D, because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one-bit case right above).
  // For example,
  //   (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D, or B == D.

  // If E is zero and B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated); otherwise, no
  // folding.
  // For example,
  //   (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst.isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, and E aren't zero, and either (B & D) == B,
  // (B & D) == D, or B == D. If B is a superset of (or equal to) D, since E
  // is not zero, LHS is subsumed by RHS (RHS implies LHS), so the whole
  // expression becomes RHS. For example,
  //   (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst))
    return RHS;
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // i.e. (B & E) != 0, then LHS is subsumed by RHS. For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((*BCst & ECst) != 0)
    return RHS;
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated). For example,
  //   (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  //   (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right-hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or; must be poison-safe.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  //   (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  //   (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  // which gets swapped to
  //   (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, E, B, C, PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
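/// For example, in the Mask_AllZeros case handled below:
///   (icmp eq (A & 12), 0) & (icmp eq (A & 3), 0) -> icmp eq (A & 15), 0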
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //   (icmp (A & B) Op C) | (icmp (A & D) Op E)
  //     == ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    //     -> (icmp eq (A & (B|D)), 0)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D having a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    //     -> (icmp eq (A & (B|D)), (B|D))
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    //     -> (icmp eq (A & (B&D)), A)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  // The remaining cases assume at least that B and D are constant, and depend
  // on their actual values. This isn't strictly necessary, just a "handle the
  // easy cases for now" decision.
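  // For example, the first constant fold below gives:
  //   (icmp ne (A & 15), 0) & (icmp ne (A & 3), 0) --> icmp ne (A & 3), 0
  // since 15 & 3 == 3, i.e. mask 15 is a superset of mask 3.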
  const APInt *ConstB, *ConstD;
  if (!match(B, m_APInt(ConstB)) || !match(D, m_APInt(ConstD)))
    return nullptr;

  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    //     -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D"
    // is the same as either B or D).
    APInt NewMask = *ConstB & *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & AMask_NotAllOnes) {
    // (icmp ne (A & B), A) & (icmp ne (A & D), A)
    //     -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D"
    // is the same as either B or D).
    APInt NewMask = *ConstB | *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & BMask_Mixed) {
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    //     -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // We can't simply use C and E because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D having a single bit set.
    const APInt *OldConstC, *OldConstE;
    if (!match(C, m_APInt(OldConstC)) || !match(E, m_APInt(OldConstE)))
      return nullptr;

    const APInt ConstC = PredL != NewCC ? *ConstB ^ *OldConstC : *OldConstC;
    const APInt ConstE = PredR != NewCC ? *ConstD ^ *OldConstE : *OldConstE;

    // If there is a conflict, we should actually return a false for the
    // whole construct.
    if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
      return ConstantInt::get(LHS->getType(), !IsAnd);

    Value *NewOr1 = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr1);
    Constant *NewOr2 = ConstantInt::get(A->getType(), ConstC | ConstE);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
  }

  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate()
                                        : Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate()
                                        : Cmp1->getPredicate());

  Value *Input = Cmp0->getOperand(0);
  Value *RangeEnd;
  if (Cmp1->getOperand(0) == Input) {
    // For the upper range compare we have: icmp x, n
    RangeEnd = Cmp1->getOperand(1);
  } else if (Cmp1->getOperand(1) == Input) {
    // For the upper range compare we have: icmp n, x
    RangeEnd = Cmp1->getOperand(0);
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
  case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
  case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
  default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}

// Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
Value *InstCombinerImpl::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS,
                                                       ICmpInst *RHS,
                                                       Instruction *CxtI,
                                                       bool IsAnd,
                                                       bool IsLogical) {
  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  if (!match(LHS->getOperand(1), m_Zero()) ||
      !match(RHS->getOperand(1), m_Zero()))
    return nullptr;

  Value *L1, *L2, *R1, *R2;
  if (match(LHS->getOperand(0), m_And(m_Value(L1), m_Value(L2))) &&
      match(RHS->getOperand(0), m_And(m_Value(R1), m_Value(R2)))) {
    if (L1 == R2 || L2 == R2)
      std::swap(R1, R2);
    if (L2 == R1)
      std::swap(L1, L2);

    if (L1 == R1 &&
        isKnownToBeAPowerOfTwo(L2, false, 0, CxtI) &&
        isKnownToBeAPowerOfTwo(R2, false, 0, CxtI)) {
      // If this is a logical and/or, then we must prevent propagation of a
      // poison value from the RHS by inserting freeze.
      if (IsLogical)
        R2 = Builder.CreateFreeze(R2);
      Value *Mask = Builder.CreateOr(L2, R2);
      Value *Masked = Builder.CreateAnd(L1, Mask);
      auto NewPred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
      return Builder.CreateICmp(NewPred, Masked, Mask);
    }
  }

  return nullptr;
}

/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0
/// Pattern can be one of:
///   %t = add i32 %arg, 128
///   %r = icmp ult i32 %t, 256
/// Or
///   %t0 = shl i32 %arg, 24
///   %t1 = ashr i32 %t0, 24
///   %r = icmp eq i32 %t1, %arg
/// Or
///   %t0 = trunc i32 %arg to i8
///   %t1 = sext i8 %t0 to i32
///   %r = icmp eq i32 %t1, %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32 %arg, -1
/// Or
///   %t = and i32 %arg, 2147483648
///   %r = icmp eq i32 %t, 0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  assert(CxtI.getOpcode() == Instruction::And);

  // Match  icmp ult (add %arg, C01), C1  (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    CmpInst::Predicate Pred;
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(ICmp,
                m_ICmp(Pred, m_Add(m_Value(X), m_Power2(I01)), m_Power2(I1))) &&
          Pred == ICmpInst::ICMP_ULT && I1->ugt(*I01) && I01->shl(1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be the 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into:  icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpInst::Predicate Pred = ICmp->getPredicate();
    // Can it be decomposed into  icmp eq (X & Mask), 0  ?
    if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1),
                                   Pred, X, UnsetBitsMask,
                                   /*LookThroughTrunc=*/false) &&
        Pred == ICmpInst::ICMP_EQ)
      return true;
    // Is it  icmp eq (X & Mask), 0  already?
    const APInt *Mask;
    if (match(ICmp, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) &&
        Pred == ICmpInst::ICMP_EQ) {
      UnsetBitsMask = *Mask;
      return true;
    }
    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isZero() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(X0, m_Trunc(m_Specific(X1)))) {
    UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask.
  if (!UnsetBitsMask.intersects(SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.
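  // For example, with %arg of type i32:
  //   (icmp sgt %arg, -1) & (icmp ult (add %arg, 128), 256)
  // simplifies to
  //   icmp ult %arg, 128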

  // %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
                               CxtI.getName() + ".simplified");
}

/// Fold (icmp eq ctpop(X), 1) | (icmp eq X, 0) into (icmp ult ctpop(X), 2) and
/// fold (icmp ne ctpop(X), 1) & (icmp ne X, 0) into (icmp ugt ctpop(X), 1).
/// Also used for logical and/or; must be poison-safe.
static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
                                   InstCombiner::BuilderTy &Builder) {
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
                          m_SpecificInt(1))) ||
      !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())))
    return nullptr;

  Value *CtPop = Cmp0->getOperand(0);
  if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE)
    return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
  if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ)
    return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));

  return nullptr;
}

/// Reduce a pair of compares that check if a value has exactly 1 bit set.
/// Also used for logical and/or; must be poison-safe.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder) {
  // Handle 'and' / 'or' commutation: make the equality check the first
  // operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(Cmp0, Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(Cmp0, Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(2))) &&
      Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(1))) &&
      Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_UGT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
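/// For example, when IsAnd is set:
///   (icmp uge Base, Offset) & (icmp ne (sub Base, Offset), 0)
///     --> icmp ugt Base, Offset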
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  Value *ZeroCmpOp;
  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  auto IsKnownNonZero = [&](Value *V) {
    return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
  };

  ICmpInst::Predicate UnsignedPred;

  Value *A, *B;
  if (match(UnsignedICmp,
            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!IsKnownNonZero(NonZero))
        std::swap(NonZero, Other);
      return IsKnownNonZero(NonZero);
    };

    // Given  ZeroCmpOp = (A + B):
    //   ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-X) <  Y
    //   ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-X) >= Y
    // with X being the value (A/B) that is known to be non-zero,
    // and Y being the remaining value.
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
  }

  Value *Base, *Offset;
  if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset))))
    return nullptr;

  if (!match(UnsignedICmp,
             m_c_ICmp(UnsignedPred, m_Specific(Base), m_Specific(Offset))) ||
      !ICmpInst::isUnsigned(UnsignedPred))
    return nullptr;

  // Base >=/> Offset && (Base - Offset) != 0  <-->  Base > Offset
  // (no overflow and not null)
  if ((UnsignedPred == ICmpInst::ICMP_UGE ||
       UnsignedPred == ICmpInst::ICMP_UGT) &&
      EqPred == ICmpInst::ICMP_NE && IsAnd)
    return Builder.CreateICmpUGT(Base, Offset);

  // Base <=/< Offset || (Base - Offset) == 0  <-->  Base <= Offset
  // (overflow or null)
  if ((UnsignedPred == ICmpInst::ICMP_ULE ||
       UnsignedPred == ICmpInst::ICMP_ULT) &&
      EqPred == ICmpInst::ICMP_EQ && !IsAnd)
    return Builder.CreateICmpULE(Base, Offset);

  // Base <= Offset && (Base - Offset) != 0  -->  Base < Offset
  if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
      IsAnd)
    return Builder.CreateICmpULT(Base, Offset);

  // Base > Offset || (Base - Offset) == 0  -->  Base >= Offset
  if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
      !IsAnd)
    return Builder.CreateICmpUGE(Base, Offset);

  return nullptr;
}

struct IntPart {
  Value *From;
  unsigned StartBit;
  unsigned NumBits;
};

/// Match an extraction of bits from an integer.
static std::optional<IntPart> matchIntPart(Value *V) {
  Value *X;
  if (!match(V, m_OneUse(m_Trunc(m_Value(X)))))
    return std::nullopt;

  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
  Value *Y;
  const APInt *Shift;
  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
  // from Y, not any shifted-in zeroes.
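  // For example, (trunc (lshr i32 %y, 8) to i8) extracts bits 8..15 of %y
  // and is modeled as {%y, /*StartBit=*/8, /*NumBits=*/8}.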
  if (match(X, m_OneUse(m_LShr(m_Value(Y), m_APInt(Shift)))) &&
      Shift->ule(NumOriginalBits - NumExtractedBits))
    return {{Y, (unsigned)Shift->getZExtValue(), NumExtractedBits}};
  return {{X, 0, NumExtractedBits}};
}

/// Materialize an extraction of bits from an integer in IR.
static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
  Value *V = P.From;
  if (P.StartBit)
    V = Builder.CreateLShr(V, P.StartBit);
  Type *TruncTy = V->getType()->getWithNewBitWidth(P.NumBits);
  if (TruncTy != V->getType())
    V = Builder.CreateTrunc(V, TruncTy);
  return V;
}

/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
Value *InstCombinerImpl::foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                       bool IsAnd) {
  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
  if (Cmp0->getPredicate() != Pred || Cmp1->getPredicate() != Pred)
    return nullptr;

  std::optional<IntPart> L0 = matchIntPart(Cmp0->getOperand(0));
  std::optional<IntPart> R0 = matchIntPart(Cmp0->getOperand(1));
  std::optional<IntPart> L1 = matchIntPart(Cmp1->getOperand(0));
  std::optional<IntPart> R1 = matchIntPart(Cmp1->getOperand(1));
  if (!L0 || !R0 || !L1 || !R1)
    return nullptr;

  // Make sure the LHS/RHS compare a part of the same value, possibly after
  // an operand swap.
  if (L0->From != L1->From || R0->From != R1->From) {
    if (L0->From != R1->From || R0->From != L1->From)
      return nullptr;
    std::swap(L1, R1);
  }

  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
  // the low part and L1/R1 being the high part.
  if (L0->StartBit + L0->NumBits != L1->StartBit ||
      R0->StartBit + R0->NumBits != R1->StartBit) {
    if (L1->StartBit + L1->NumBits != L0->StartBit ||
        R1->StartBit + R1->NumBits != R0->StartBit)
      return nullptr;
    std::swap(L0, L1);
    std::swap(R0, R1);
  }

  // We can simplify to a comparison of these larger parts of the integers.
  IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
  IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
  Value *LValue = extractIntPart(L, Builder);
  Value *RValue = extractIntPart(R, Builder);
  return Builder.CreateICmp(Pred, LValue, RValue);
}

/// Reduce logic-of-compares with equality to a constant by substituting a
/// common operand with the constant. Callers are expected to call this with
/// Cmp0/Cmp1 switched to handle logic op commutativity.
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd, bool IsLogical,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q) {
  // Match an equality compare with a non-poison constant as Cmp0.
  // Also, give up if the compare can be constant-folded to avoid looping.
  ICmpInst::Predicate Pred0;
  Value *X;
  Constant *C;
  if (!match(Cmp0, m_ICmp(Pred0, m_Value(X), m_Constant(C))) ||
      !isGuaranteedNotToBeUndefOrPoison(C) || isa<Constant>(X))
    return nullptr;
  if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) ||
      (!IsAnd && Pred0 != ICmpInst::ICMP_NE))
    return nullptr;

  // The other compare must include a common operand (X). Canonicalize the
  // common operand as operand 1 (Pred1 is swapped if the common operand was
  // operand 0).
  Value *Y;
  ICmpInst::Predicate Pred1;
  if (!match(Cmp1, m_c_ICmp(Pred1, m_Value(Y), m_Deferred(X))))
    return nullptr;

  // Replace variable with constant value equivalence to remove a variable use:
  // (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C)
  // (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
  // Can think of the 'or' substitution with the 'and' bool equivalent:
  // A || B --> A || (!A && B)
  Value *SubstituteCmp = simplifyICmpInst(Pred1, Y, C, Q);
  if (!SubstituteCmp) {
    // If we need to create a new instruction, require that the old compare can
    // be removed.
    if (!Cmp1->hasOneUse())
      return nullptr;
    SubstituteCmp = Builder.CreateICmp(Pred1, Y, C);
  }
  if (IsLogical)
    return IsAnd ? Builder.CreateLogicalAnd(Cmp0, SubstituteCmp)
                 : Builder.CreateLogicalOr(Cmp0, SubstituteCmp);
  return Builder.CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
                             SubstituteCmp);
}

/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
/// or   (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// NOTE: This is also used for logical and/or; must be poison-safe!
Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
                                                     ICmpInst *ICmp2,
                                                     bool IsAnd) {
  ICmpInst::Predicate Pred1, Pred2;
  Value *V1, *V2;
  const APInt *C1, *C2;
  if (!match(ICmp1, m_ICmp(Pred1, m_Value(V1), m_APInt(C1))) ||
      !match(ICmp2, m_ICmp(Pred2, m_Value(V2), m_APInt(C2))))
    return nullptr;

  // Look through an add of a constant offset on V1, V2, or both operands. This
  // allows us to translate the V + C' < C'' range idiom into a proper range.
  const APInt *Offset1 = nullptr, *Offset2 = nullptr;
  if (V1 != V2) {
    Value *X;
    if (match(V1, m_Add(m_Value(X), m_APInt(Offset1))))
      V1 = X;
    if (match(V2, m_Add(m_Value(X), m_APInt(Offset2))))
      V2 = X;
  }

  if (V1 != V2)
    return nullptr;

  ConstantRange CR1 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred1) : Pred1, *C1);
  if (Offset1)
    CR1 = CR1.subtract(*Offset1);

  ConstantRange CR2 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred2) : Pred2, *C2);
  if (Offset2)
    CR2 = CR2.subtract(*Offset2);

  Type *Ty = V1->getType();
  Value *NewV = V1;
  std::optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
  if (!CR) {
    if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
        CR2.isWrappedSet())
      return nullptr;

    // Check whether we have equal-size ranges that only differ by one bit.
    // In that case we can apply a mask to map one range onto the other.
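    // For example, [0, 8) and [16, 24) differ only in bit 4; masking that bit
    // off maps both ranges onto [0, 8).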
    APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
    APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
    APInt CR1Size = CR1.getUpper() - CR1.getLower();
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        CR1Size != CR2.getUpper() - CR2.getLower())
      return nullptr;

    CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
    NewV = Builder.CreateAnd(NewV, ConstantInt::get(Ty, ~LowerDiff));
  }

  if (IsAnd)
    CR = CR->inverse();

  CmpInst::Predicate NewPred;
  APInt NewC, Offset;
  CR->getEquivalentICmp(NewPred, NewC, Offset);

  if (Offset != 0)
    NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
  return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
}

/// Ignore all operations which only change the sign of a value, returning the
/// underlying magnitude value.
static Value *stripSignOnlyFPOps(Value *Val) {
  match(Val, m_FNeg(m_Value(Val)));
  match(Val, m_FAbs(m_Value(Val)));
  match(Val, m_CopySign(m_Value(Val), m_Value()));
  return Val;
}

/// Matches the canonical form of a not-NaN check: fcmp ord x, 0
static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS) {
  return P == FCmpInst::FCMP_ORD && match(RHS, m_AnyZeroFP());
}

/// Matches fcmp u__ x, +/-inf
static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS,
                                     Value *RHS) {
  return FCmpInst::isUnordered(P) && match(RHS, m_Inf());
}

/// and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
///
/// Clang emits this pattern for doing an isfinite check in __builtin_isnormal.
static Value *matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS,
                                FCmpInst *RHS) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  if (!matchIsNotNaN(PredL, LHS0, LHS1) ||
      !matchUnorderedInfCompare(PredR, RHS0, RHS1))
    return nullptr;

  IRBuilder<>::FastMathFlagGuard FMFG(Builder);
  FastMathFlags FMF = LHS->getFastMathFlags();
  FMF &= RHS->getFastMathFlags();
  Builder.setFastMathFlags(FMF);

  return Builder.CreateFCmp(FCmpInst::getOrderedPredicate(PredR), RHS0, RHS1);
}

Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
                                          bool IsAnd, bool IsLogicalSelect) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  if (LHS0 == RHS1 && RHS0 == LHS1) {
    // Swap RHS operands to match LHS.
    PredR = FCmpInst::getSwappedPredicate(PredR);
    std::swap(RHS0, RHS1);
  }

  // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
  // Suppose the relation between x and y is R, where R is one of
  // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
  // testing the desired relations.
1265 // 1266 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this: 1267 // bool(R & CC0) && bool(R & CC1) 1268 // = bool((R & CC0) & (R & CC1)) 1269 // = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency 1270 // 1271 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this: 1272 // bool(R & CC0) || bool(R & CC1) 1273 // = bool((R & CC0) | (R & CC1)) 1274 // = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;) 1275 if (LHS0 == RHS0 && LHS1 == RHS1) { 1276 unsigned FCmpCodeL = getFCmpCode(PredL); 1277 unsigned FCmpCodeR = getFCmpCode(PredR); 1278 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR; 1279 1280 // Intersect the fast math flags. 1281 // TODO: We can union the fast math flags unless this is a logical select. 1282 IRBuilder<>::FastMathFlagGuard FMFG(Builder); 1283 FastMathFlags FMF = LHS->getFastMathFlags(); 1284 FMF &= RHS->getFastMathFlags(); 1285 Builder.setFastMathFlags(FMF); 1286 1287 return getFCmpValue(NewPred, LHS0, LHS1, Builder); 1288 } 1289 1290 // This transform is not valid for a logical select. 1291 if (!IsLogicalSelect && 1292 ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) || 1293 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && 1294 !IsAnd))) { 1295 if (LHS0->getType() != RHS0->getType()) 1296 return nullptr; 1297 1298 // FCmp canonicalization ensures that (fcmp ord/uno X, X) and 1299 // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0). 1300 if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP())) 1301 // Ignore the constants because they are obviously not NANs: 1302 // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y) 1303 // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y) 1304 return Builder.CreateFCmp(PredL, LHS0, RHS0); 1305 } 1306 1307 if (IsAnd && stripSignOnlyFPOps(LHS0) == stripSignOnlyFPOps(RHS0)) { 1308 // and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf 1309 // and (fcmp ord x, 0), (fcmp u* fabs(x), inf) -> fcmp o* x, inf 1310 if (Value *Left = matchIsFiniteTest(Builder, LHS, RHS)) 1311 return Left; 1312 if (Value *Right = matchIsFiniteTest(Builder, RHS, LHS)) 1313 return Right; 1314 } 1315 1316 return nullptr; 1317 } 1318 1319 /// or (is_fpclass x, mask0), (is_fpclass x, mask1) 1320 /// -> is_fpclass x, (mask0 | mask1) 1321 /// and (is_fpclass x, mask0), (is_fpclass x, mask1) 1322 /// -> is_fpclass x, (mask0 & mask1) 1323 /// xor (is_fpclass x, mask0), (is_fpclass x, mask1) 1324 /// -> is_fpclass x, (mask0 ^ mask1) 1325 Instruction *InstCombinerImpl::foldLogicOfIsFPClass(BinaryOperator &BO, 1326 Value *Op0, Value *Op1) { 1327 Value *ClassVal; 1328 uint64_t ClassMask0, ClassMask1; 1329 1330 if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>( 1331 m_Value(ClassVal), m_ConstantInt(ClassMask0)))) && 1332 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>( 1333 m_Specific(ClassVal), m_ConstantInt(ClassMask1))))) { 1334 unsigned NewClassMask; 1335 switch (BO.getOpcode()) { 1336 case Instruction::And: 1337 NewClassMask = ClassMask0 & ClassMask1; 1338 break; 1339 case Instruction::Or: 1340 NewClassMask = ClassMask0 | ClassMask1; 1341 break; 1342 case Instruction::Xor: 1343 NewClassMask = ClassMask0 ^ ClassMask1; 1344 break; 1345 default: 1346 llvm_unreachable("not a binary logic operator"); 1347 } 1348 1349 // TODO: Also check for special fcmps 1350 auto *II = cast<IntrinsicInst>(Op0); 1351 II->setArgOperand( 1352 1, ConstantInt::get(II->getArgOperand(1)->getType(), 
    return replaceInstUsesWith(BO, II);
  }

  return nullptr;
}

/// Look for the pattern that conditionally negates a value via math
/// operations:
///   cond.splat = sext i1 cond
///   sub = add cond.splat, x
///   xor = xor sub, cond.splat
/// and rewrite it to do the same, but via logical operations:
///   x.neg = sub 0, x
///   res = select i1 cond, x.neg, x
Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
    BinaryOperator &I) {
  assert(I.getOpcode() == BinaryOperator::Xor && "Only for xor!");
  Value *Cond, *X;
  // As per complexity ordering, `xor` is not commutative here.
  if (!match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())) ||
      !match(I.getOperand(1), m_SExt(m_Value(Cond))) ||
      !Cond->getType()->isIntOrIntVectorTy(1) ||
      !match(I.getOperand(0), m_c_Add(m_SExt(m_Deferred(Cond)), m_Value(X))))
    return nullptr;
  return SelectInst::Create(Cond, Builder.CreateNeg(X, X->getName() + ".neg"),
                            X);
}

/// This is a limited reassociation for a special case (see above) where we are
/// checking if two values are either both NAN (unordered) or not-NAN (ordered).
/// This could be handled more generally in '-reassociation', but it seems like
/// an unlikely pattern for a large number of logic ops and fcmps.
static Instruction *reassociateFCmps(BinaryOperator &BO,
                                     InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = BO.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Expecting and/or op for fcmp transform");

  // There are 4 commuted variants of the pattern. Canonicalize operands of
  // this logic op so an fcmp is operand 0 and a matching logic op is
  // operand 1.
  Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X;
  FCmpInst::Predicate Pred;
  if (match(Op1, m_FCmp(Pred, m_Value(), m_AnyZeroFP())))
    std::swap(Op0, Op1);

  // Match inner binop and the predicate for combining 2 NAN checks into 1.
  Value *BO10, *BO11;
  FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
                                                           : FCmpInst::FCMP_UNO;
  if (!match(Op0, m_FCmp(Pred, m_Value(X), m_AnyZeroFP())) ||
      Pred != NanPred ||
      !match(Op1, m_BinOp(Opcode, m_Value(BO10), m_Value(BO11))))
    return nullptr;

  // The inner logic op must have a matching fcmp operand.
  Value *Y;
  if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
      Pred != NanPred || X->getType() != Y->getType())
    std::swap(BO10, BO11);

  if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
      Pred != NanPred || X->getType() != Y->getType())
    return nullptr;

  // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
  // or  (fcmp uno X, 0), (or  (fcmp uno Y, 0), Z) --> or  (fcmp uno X, Y), Z
  Value *NewFCmp = Builder.CreateFCmp(Pred, X, Y);
  if (auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) {
    // Intersect FMF from the 2 source fcmps.
    NewFCmpInst->copyIRFlags(Op0);
    NewFCmpInst->andIRFlags(BO10);
  }
  return BinaryOperator::Create(Opcode, NewFCmp, BO11);
}

/// Match variations of De Morgan's Laws:
///   (~A & ~B) == (~(A | B))
///   (~A | ~B) == (~(A & B))
static Instruction *matchDeMorgansLaws(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  const Instruction::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Trying to match De Morgan's Laws with something other than and/or");

  // Flip the logic operation.
  const Instruction::BinaryOps FlippedOpcode =
      (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *A, *B;
  if (match(Op0, m_OneUse(m_Not(m_Value(A)))) &&
      match(Op1, m_OneUse(m_Not(m_Value(B)))) &&
      !InstCombiner::isFreeToInvert(A, A->hasOneUse()) &&
      !InstCombiner::isFreeToInvert(B, B->hasOneUse())) {
    Value *AndOr =
        Builder.CreateBinOp(FlippedOpcode, A, B, I.getName() + ".demorgan");
    return BinaryOperator::CreateNot(AndOr);
  }

  // The 'not' ops may require reassociation.
  // (A & ~B) & ~C --> A & ~(B | C)
  // (~B & A) & ~C --> A & ~(B | C)
  // (A | ~B) | ~C --> A | ~(B & C)
  // (~B | A) | ~C --> A | ~(B & C)
  Value *C;
  if (match(Op0, m_OneUse(m_c_BinOp(Opcode, m_Value(A), m_Not(m_Value(B))))) &&
      match(Op1, m_Not(m_Value(C)))) {
    Value *FlippedBO = Builder.CreateBinOp(FlippedOpcode, B, C);
    return BinaryOperator::Create(Opcode, A, Builder.CreateNot(FlippedBO));
  }

  return nullptr;
}

bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
  Value *CastSrc = CI->getOperand(0);

  // Noop casts and casts of constants should be eliminated trivially.
  if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
    return false;

  // If this cast is paired with another cast that can be eliminated, we prefer
  // to have it eliminated.
  if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
    if (isEliminableCastPair(PrecedingCI, CI))
      return false;

  return true;
}

/// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic,
                                          CastInst *Cast,
                                          InstCombiner::BuilderTy &Builder) {
  Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
  if (!C)
    return nullptr;

  auto LogicOpc = Logic.getOpcode();
  Type *DestTy = Logic.getType();
  Type *SrcTy = Cast->getSrcTy();

  // Move the logic operation ahead of a zext or sext if the constant is
  // unchanged in the smaller source type. Performing the logic in a smaller
  // type may provide more information to later folds, and the smaller logic
  // instruction may be cheaper (particularly in the case of vectors).
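  // For example:
  //   %z = zext i8 %x to i32
  //   %r = and i32 %z, 15
  // is turned into:
  //   %n = and i8 %x, 15
  //   %r = zext i8 %n to i32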
1496 Value *X; 1497 if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) { 1498 Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy); 1499 Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy); 1500 if (ZextTruncC == C) { 1501 // LogicOpc (zext X), C --> zext (LogicOpc X, C) 1502 Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC); 1503 return new ZExtInst(NewOp, DestTy); 1504 } 1505 } 1506 1507 if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) { 1508 Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy); 1509 Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy); 1510 if (SextTruncC == C) { 1511 // LogicOpc (sext X), C --> sext (LogicOpc X, C) 1512 Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC); 1513 return new SExtInst(NewOp, DestTy); 1514 } 1515 } 1516 1517 return nullptr; 1518 } 1519 1520 /// Fold {and,or,xor} (cast X), Y. 1521 Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) { 1522 auto LogicOpc = I.getOpcode(); 1523 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding"); 1524 1525 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1526 CastInst *Cast0 = dyn_cast<CastInst>(Op0); 1527 if (!Cast0) 1528 return nullptr; 1529 1530 // This must be a cast from an integer or integer vector source type to allow 1531 // transformation of the logic operation to the source type. 1532 Type *DestTy = I.getType(); 1533 Type *SrcTy = Cast0->getSrcTy(); 1534 if (!SrcTy->isIntOrIntVectorTy()) 1535 return nullptr; 1536 1537 if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder)) 1538 return Ret; 1539 1540 CastInst *Cast1 = dyn_cast<CastInst>(Op1); 1541 if (!Cast1) 1542 return nullptr; 1543 1544 // Both operands of the logic operation are casts. The casts must be the 1545 // same kind for reduction. 1546 Instruction::CastOps CastOpcode = Cast0->getOpcode(); 1547 if (CastOpcode != Cast1->getOpcode()) 1548 return nullptr; 1549 1550 // If the source types do not match, but the casts are matching extends, we 1551 // can still narrow the logic op. 1552 if (SrcTy != Cast1->getSrcTy()) { 1553 Value *X, *Y; 1554 if (match(Cast0, m_OneUse(m_ZExtOrSExt(m_Value(X)))) && 1555 match(Cast1, m_OneUse(m_ZExtOrSExt(m_Value(Y))))) { 1556 // Cast the narrower source to the wider source type. 1557 unsigned XNumBits = X->getType()->getScalarSizeInBits(); 1558 unsigned YNumBits = Y->getType()->getScalarSizeInBits(); 1559 if (XNumBits < YNumBits) 1560 X = Builder.CreateCast(CastOpcode, X, Y->getType()); 1561 else 1562 Y = Builder.CreateCast(CastOpcode, Y, X->getType()); 1563 // Do the logic op in the intermediate width, then widen more. 1564 Value *NarrowLogic = Builder.CreateBinOp(LogicOpc, X, Y); 1565 return CastInst::Create(CastOpcode, NarrowLogic, DestTy); 1566 } 1567 1568 // Give up for other cast opcodes. 1569 return nullptr; 1570 } 1571 1572 Value *Cast0Src = Cast0->getOperand(0); 1573 Value *Cast1Src = Cast1->getOperand(0); 1574 1575 // fold logic(cast(A), cast(B)) -> cast(logic(A, B)) 1576 if ((Cast0->hasOneUse() || Cast1->hasOneUse()) && 1577 shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) { 1578 Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src, 1579 I.getName()); 1580 return CastInst::Create(CastOpcode, NewOp, DestTy); 1581 } 1582 1583 // For now, only 'and'/'or' have optimizations after this. 1584 if (LogicOpc == Instruction::Xor) 1585 return nullptr; 1586 1587 // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the 1588 // cast is otherwise not optimizable. This happens for vector sexts. 
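  // Example: for <4 x i32> masks produced from vector compares,
  //   and (sext (icmp eq %a, 0)), (sext (icmp eq %b, 0))
  //     --> sext (icmp eq (or %a, %b), 0)
  // via the (icmp eq A, 0) & (icmp eq B, 0) fold in foldAndOrOfICmps.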
1589 ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src); 1590 ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src); 1591 if (ICmp0 && ICmp1) { 1592 if (Value *Res = 1593 foldAndOrOfICmps(ICmp0, ICmp1, I, LogicOpc == Instruction::And)) 1594 return CastInst::Create(CastOpcode, Res, DestTy); 1595 return nullptr; 1596 } 1597 1598 // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the 1599 // cast is otherwise not optimizable. This happens for vector sexts. 1600 FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src); 1601 FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src); 1602 if (FCmp0 && FCmp1) 1603 if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And)) 1604 return CastInst::Create(CastOpcode, R, DestTy); 1605 1606 return nullptr; 1607 } 1608 1609 static Instruction *foldAndToXor(BinaryOperator &I, 1610 InstCombiner::BuilderTy &Builder) { 1611 assert(I.getOpcode() == Instruction::And); 1612 Value *Op0 = I.getOperand(0); 1613 Value *Op1 = I.getOperand(1); 1614 Value *A, *B; 1615 1616 // Operand complexity canonicalization guarantees that the 'or' is Op0. 1617 // (A | B) & ~(A & B) --> A ^ B 1618 // (A | B) & ~(B & A) --> A ^ B 1619 if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)), 1620 m_Not(m_c_And(m_Deferred(A), m_Deferred(B)))))) 1621 return BinaryOperator::CreateXor(A, B); 1622 1623 // (A | ~B) & (~A | B) --> ~(A ^ B) 1624 // (A | ~B) & (B | ~A) --> ~(A ^ B) 1625 // (~B | A) & (~A | B) --> ~(A ^ B) 1626 // (~B | A) & (B | ~A) --> ~(A ^ B) 1627 if (Op0->hasOneUse() || Op1->hasOneUse()) 1628 if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))), 1629 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B))))) 1630 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 1631 1632 return nullptr; 1633 } 1634 1635 static Instruction *foldOrToXor(BinaryOperator &I, 1636 InstCombiner::BuilderTy &Builder) { 1637 assert(I.getOpcode() == Instruction::Or); 1638 Value *Op0 = I.getOperand(0); 1639 Value *Op1 = I.getOperand(1); 1640 Value *A, *B; 1641 1642 // Operand complexity canonicalization guarantees that the 'and' is Op0. 1643 // (A & B) | ~(A | B) --> ~(A ^ B) 1644 // (A & B) | ~(B | A) --> ~(A ^ B) 1645 if (Op0->hasOneUse() || Op1->hasOneUse()) 1646 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 1647 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))) 1648 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 1649 1650 // Operand complexity canonicalization guarantees that the 'xor' is Op0. 1651 // (A ^ B) | ~(A | B) --> ~(A & B) 1652 // (A ^ B) | ~(B | A) --> ~(A & B) 1653 if (Op0->hasOneUse() || Op1->hasOneUse()) 1654 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && 1655 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))) 1656 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B)); 1657 1658 // (A & ~B) | (~A & B) --> A ^ B 1659 // (A & ~B) | (B & ~A) --> A ^ B 1660 // (~B & A) | (~A & B) --> A ^ B 1661 // (~B & A) | (B & ~A) --> A ^ B 1662 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) && 1663 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))) 1664 return BinaryOperator::CreateXor(A, B); 1665 1666 return nullptr; 1667 } 1668 1669 /// Return true if a constant shift amount is always less than the specified 1670 /// bit-width. If not, the shift could create poison in the narrower type. 
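/// Example: a shift amount of 3 can be narrowed to i8 (3 u< 8), but a shift
/// amount of 9 cannot, since 'shl i8 %x, 9' would be poison.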
1671 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) { 1672 APInt Threshold(C->getType()->getScalarSizeInBits(), BitWidth); 1673 return match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold)); 1674 } 1675 1676 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and 1677 /// a common zext operand: and (binop (zext X), C), (zext X). 1678 Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) { 1679 // This transform could also apply to {or, and, xor}, but there are better 1680 // folds for those cases, so we don't expect those patterns here. AShr is not 1681 // handled because it should always be transformed to LShr in this sequence. 1682 // The subtract transform is different because it has a constant on the left. 1683 // Add/mul commute the constant to RHS; sub with constant RHS becomes add. 1684 Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1); 1685 Constant *C; 1686 if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) && 1687 !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) && 1688 !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) && 1689 !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) && 1690 !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1))))) 1691 return nullptr; 1692 1693 Value *X; 1694 if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3)) 1695 return nullptr; 1696 1697 Type *Ty = And.getType(); 1698 if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType())) 1699 return nullptr; 1700 1701 // If we're narrowing a shift, the shift amount must be safe (less than the 1702 // width) in the narrower type. If the shift amount is greater, instsimplify 1703 // usually handles that case, but we can't guarantee/assert it. 1704 Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode(); 1705 if (Opc == Instruction::LShr || Opc == Instruction::Shl) 1706 if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits())) 1707 return nullptr; 1708 1709 // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X) 1710 // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X) 1711 Value *NewC = ConstantExpr::getTrunc(C, X->getType()); 1712 Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X) 1713 : Builder.CreateBinOp(Opc, X, NewC); 1714 return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty); 1715 } 1716 1717 /// Try folding relatively complex patterns for both And and Or operations 1718 /// with all And and Or swapped. 1719 static Instruction *foldComplexAndOrPatterns(BinaryOperator &I, 1720 InstCombiner::BuilderTy &Builder) { 1721 const Instruction::BinaryOps Opcode = I.getOpcode(); 1722 assert(Opcode == Instruction::And || Opcode == Instruction::Or); 1723 1724 // Flip the logic operation. 1725 const Instruction::BinaryOps FlippedOpcode = 1726 (Opcode == Instruction::And) ? 
Instruction::Or : Instruction::And;
1727
1728   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1729   Value *A, *B, *C, *X, *Y, *Dummy;
1730
1731   // Match the following expressions:
1732   // (~(A | B) & C)
1733   // (~(A & B) | C)
1734   // Captures X = ~(A | B) or ~(A & B)
1735   const auto matchNotOrAnd =
1736       [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C,
1737                               Value *&X, bool CountUses = false) -> bool {
1738     if (CountUses && !Op->hasOneUse())
1739       return false;
1740
1741     if (match(Op, m_c_BinOp(FlippedOpcode,
1742                             m_CombineAnd(m_Value(X),
1743                                          m_Not(m_c_BinOp(Opcode, m_A, m_B))),
1744                             m_C)))
1745       return !CountUses || X->hasOneUse();
1746
1747     return false;
1748   };
1749
1750   // (~(A | B) & C) | ... --> ...
1751   // (~(A & B) | C) & ... --> ...
1752   // TODO: One use checks are conservative. We just need to check that the
1753   //       total number of multi-use values does not exceed the reduction
1754   //       in operations.
1755   if (matchNotOrAnd(Op0, m_Value(A), m_Value(B), m_Value(C), X)) {
1756     // (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A
1757     // (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A)
1758     if (matchNotOrAnd(Op1, m_Specific(A), m_Specific(C), m_Specific(B), Dummy,
1759                       true)) {
1760       Value *Xor = Builder.CreateXor(B, C);
1761       return (Opcode == Instruction::Or)
1762                  ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(A))
1763                  : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, A));
1764     }
1765
1766     // (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B
1767     // (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B)
1768     if (matchNotOrAnd(Op1, m_Specific(B), m_Specific(C), m_Specific(A), Dummy,
1769                       true)) {
1770       Value *Xor = Builder.CreateXor(A, C);
1771       return (Opcode == Instruction::Or)
1772                  ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(B))
1773                  : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, B));
1774     }
1775
1776     // (~(A | B) & C) | ~(A | C) --> ~((B & C) | A)
1777     // (~(A & B) | C) & ~(A & C) --> ~((B | C) & A)
1778     if (match(Op1, m_OneUse(m_Not(m_OneUse(
1779                        m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
1780       return BinaryOperator::CreateNot(Builder.CreateBinOp(
1781           Opcode, Builder.CreateBinOp(FlippedOpcode, B, C), A));
1782
1783     // (~(A | B) & C) | ~(B | C) --> ~((A & C) | B)
1784     // (~(A & B) | C) & ~(B & C) --> ~((A | C) & B)
1785     if (match(Op1, m_OneUse(m_Not(m_OneUse(
1786                        m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)))))))
1787       return BinaryOperator::CreateNot(Builder.CreateBinOp(
1788           Opcode, Builder.CreateBinOp(FlippedOpcode, A, C), B));
1789
1790     // (~(A | B) & C) | ~(C | (A ^ B)) --> ~((A | B) & (C | (A ^ B)))
1791     // Note, the pattern with swapped and/or is not handled because the
1792     // result is more undefined than a source:
1793     // (~(A & B) | C) & ~(C & (A ^ B)) --> (A ^ B ^ C) | ~(A | C) is invalid.
1794     if (Opcode == Instruction::Or && Op0->hasOneUse() &&
1795         match(Op1, m_OneUse(m_Not(m_CombineAnd(
1796                        m_Value(Y),
1797                        m_c_BinOp(Opcode, m_Specific(C),
1798                                  m_c_Xor(m_Specific(A), m_Specific(B)))))))) {
1799       // X = ~(A | B)
1800       // Y = C | (A ^ B)
1801       Value *Or = cast<BinaryOperator>(X)->getOperand(0);
1802       return BinaryOperator::CreateNot(Builder.CreateAnd(Or, Y));
1803     }
1804   }
1805
1806   // (~A & B & C) | ... --> ...
1807   // (~A | B | C) & ... --> ...
1808   // TODO: One use checks are conservative. We just need to check that the
1809   //       total number of multi-use values does not exceed the reduction
1810   //       in operations.
1811 if (match(Op0, 1812 m_OneUse(m_c_BinOp(FlippedOpcode, 1813 m_BinOp(FlippedOpcode, m_Value(B), m_Value(C)), 1814 m_CombineAnd(m_Value(X), m_Not(m_Value(A)))))) || 1815 match(Op0, m_OneUse(m_c_BinOp( 1816 FlippedOpcode, 1817 m_c_BinOp(FlippedOpcode, m_Value(C), 1818 m_CombineAnd(m_Value(X), m_Not(m_Value(A)))), 1819 m_Value(B))))) { 1820 // X = ~A 1821 // (~A & B & C) | ~(A | B | C) --> ~(A | (B ^ C)) 1822 // (~A | B | C) & ~(A & B & C) --> (~A | (B ^ C)) 1823 if (match(Op1, m_OneUse(m_Not(m_c_BinOp( 1824 Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)), 1825 m_Specific(C))))) || 1826 match(Op1, m_OneUse(m_Not(m_c_BinOp( 1827 Opcode, m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)), 1828 m_Specific(A))))) || 1829 match(Op1, m_OneUse(m_Not(m_c_BinOp( 1830 Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)), 1831 m_Specific(B)))))) { 1832 Value *Xor = Builder.CreateXor(B, C); 1833 return (Opcode == Instruction::Or) 1834 ? BinaryOperator::CreateNot(Builder.CreateOr(Xor, A)) 1835 : BinaryOperator::CreateOr(Xor, X); 1836 } 1837 1838 // (~A & B & C) | ~(A | B) --> (C | ~B) & ~A 1839 // (~A | B | C) & ~(A & B) --> (C & ~B) | ~A 1840 if (match(Op1, m_OneUse(m_Not(m_OneUse( 1841 m_c_BinOp(Opcode, m_Specific(A), m_Specific(B))))))) 1842 return BinaryOperator::Create( 1843 FlippedOpcode, Builder.CreateBinOp(Opcode, C, Builder.CreateNot(B)), 1844 X); 1845 1846 // (~A & B & C) | ~(A | C) --> (B | ~C) & ~A 1847 // (~A | B | C) & ~(A & C) --> (B & ~C) | ~A 1848 if (match(Op1, m_OneUse(m_Not(m_OneUse( 1849 m_c_BinOp(Opcode, m_Specific(A), m_Specific(C))))))) 1850 return BinaryOperator::Create( 1851 FlippedOpcode, Builder.CreateBinOp(Opcode, B, Builder.CreateNot(C)), 1852 X); 1853 } 1854 1855 return nullptr; 1856 } 1857 1858 /// Try to reassociate a pair of binops so that values with one use only are 1859 /// part of the same instruction. This may enable folds that are limited with 1860 /// multi-use restrictions and makes it more likely to match other patterns that 1861 /// are looking for a common operand. 
1862 static Instruction *reassociateForUses(BinaryOperator &BO, 1863 InstCombinerImpl::BuilderTy &Builder) { 1864 Instruction::BinaryOps Opcode = BO.getOpcode(); 1865 Value *X, *Y, *Z; 1866 if (match(&BO, 1867 m_c_BinOp(Opcode, m_OneUse(m_BinOp(Opcode, m_Value(X), m_Value(Y))), 1868 m_OneUse(m_Value(Z))))) { 1869 if (!isa<Constant>(X) && !isa<Constant>(Y) && !isa<Constant>(Z)) { 1870 // (X op Y) op Z --> (Y op Z) op X 1871 if (!X->hasOneUse()) { 1872 Value *YZ = Builder.CreateBinOp(Opcode, Y, Z); 1873 return BinaryOperator::Create(Opcode, YZ, X); 1874 } 1875 // (X op Y) op Z --> (X op Z) op Y 1876 if (!Y->hasOneUse()) { 1877 Value *XZ = Builder.CreateBinOp(Opcode, X, Z); 1878 return BinaryOperator::Create(Opcode, XZ, Y); 1879 } 1880 } 1881 } 1882 1883 return nullptr; 1884 } 1885 1886 // Match 1887 // (X + C2) | C 1888 // (X + C2) ^ C 1889 // (X + C2) & C 1890 // and convert to do the bitwise logic first: 1891 // (X | C) + C2 1892 // (X ^ C) + C2 1893 // (X & C) + C2 1894 // iff bits affected by logic op are lower than last bit affected by math op 1895 static Instruction *canonicalizeLogicFirst(BinaryOperator &I, 1896 InstCombiner::BuilderTy &Builder) { 1897 Type *Ty = I.getType(); 1898 Instruction::BinaryOps OpC = I.getOpcode(); 1899 Value *Op0 = I.getOperand(0); 1900 Value *Op1 = I.getOperand(1); 1901 Value *X; 1902 const APInt *C, *C2; 1903 1904 if (!(match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C2)))) && 1905 match(Op1, m_APInt(C)))) 1906 return nullptr; 1907 1908 unsigned Width = Ty->getScalarSizeInBits(); 1909 unsigned LastOneMath = Width - C2->countTrailingZeros(); 1910 1911 switch (OpC) { 1912 case Instruction::And: 1913 if (C->countLeadingOnes() < LastOneMath) 1914 return nullptr; 1915 break; 1916 case Instruction::Xor: 1917 case Instruction::Or: 1918 if (C->countLeadingZeros() < LastOneMath) 1919 return nullptr; 1920 break; 1921 default: 1922 llvm_unreachable("Unexpected BinaryOp!"); 1923 } 1924 1925 Value *NewBinOp = Builder.CreateBinOp(OpC, X, ConstantInt::get(Ty, *C)); 1926 return BinaryOperator::CreateAdd(NewBinOp, ConstantInt::get(Ty, *C2)); 1927 } 1928 1929 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 1930 // here. We should standardize that construct where it is needed or choose some 1931 // other way to ensure that commutated variants of patterns are not missed. 1932 Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) { 1933 Type *Ty = I.getType(); 1934 1935 if (Value *V = simplifyAndInst(I.getOperand(0), I.getOperand(1), 1936 SQ.getWithInstruction(&I))) 1937 return replaceInstUsesWith(I, V); 1938 1939 if (SimplifyAssociativeOrCommutative(I)) 1940 return &I; 1941 1942 if (Instruction *X = foldVectorBinop(I)) 1943 return X; 1944 1945 if (Instruction *Phi = foldBinopWithPhiOperands(I)) 1946 return Phi; 1947 1948 // See if we can simplify any instructions used by the instruction whose sole 1949 // purpose is to compute bits we don't care about. 1950 if (SimplifyDemandedInstructionBits(I)) 1951 return &I; 1952 1953 // Do this before using distributive laws to catch simple and/or/not patterns. 
1954 if (Instruction *Xor = foldAndToXor(I, Builder)) 1955 return Xor; 1956 1957 if (Instruction *X = foldComplexAndOrPatterns(I, Builder)) 1958 return X; 1959 1960 // (A|B)&(A|C) -> A|(B&C) etc 1961 if (Value *V = foldUsingDistributiveLaws(I)) 1962 return replaceInstUsesWith(I, V); 1963 1964 if (Value *V = SimplifyBSwap(I, Builder)) 1965 return replaceInstUsesWith(I, V); 1966 1967 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1968 1969 Value *X, *Y; 1970 if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) && 1971 match(Op1, m_One())) { 1972 // (1 << X) & 1 --> zext(X == 0) 1973 // (1 >> X) & 1 --> zext(X == 0) 1974 Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, 0)); 1975 return new ZExtInst(IsZero, Ty); 1976 } 1977 1978 // (-(X & 1)) & Y --> (X & 1) == 0 ? 0 : Y 1979 Value *Neg; 1980 if (match(&I, 1981 m_c_And(m_CombineAnd(m_Value(Neg), 1982 m_OneUse(m_Neg(m_And(m_Value(), m_One())))), 1983 m_Value(Y)))) { 1984 Value *Cmp = Builder.CreateIsNull(Neg); 1985 return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Y); 1986 } 1987 1988 const APInt *C; 1989 if (match(Op1, m_APInt(C))) { 1990 const APInt *XorC; 1991 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) { 1992 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2) 1993 Constant *NewC = ConstantInt::get(Ty, *C & *XorC); 1994 Value *And = Builder.CreateAnd(X, Op1); 1995 And->takeName(Op0); 1996 return BinaryOperator::CreateXor(And, NewC); 1997 } 1998 1999 const APInt *OrC; 2000 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) { 2001 // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2) 2002 // NOTE: This reduces the number of bits set in the & mask, which 2003 // can expose opportunities for store narrowing for scalars. 2004 // NOTE: SimplifyDemandedBits should have already removed bits from C1 2005 // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in 2006 // above, but this feels safer. 
2007 APInt Together = *C & *OrC; 2008 Value *And = Builder.CreateAnd(X, ConstantInt::get(Ty, Together ^ *C)); 2009 And->takeName(Op0); 2010 return BinaryOperator::CreateOr(And, ConstantInt::get(Ty, Together)); 2011 } 2012 2013 unsigned Width = Ty->getScalarSizeInBits(); 2014 const APInt *ShiftC; 2015 if (match(Op0, m_OneUse(m_SExt(m_AShr(m_Value(X), m_APInt(ShiftC))))) && 2016 ShiftC->ult(Width)) { 2017 if (*C == APInt::getLowBitsSet(Width, Width - ShiftC->getZExtValue())) { 2018 // We are clearing high bits that were potentially set by sext+ashr: 2019 // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC 2020 Value *Sext = Builder.CreateSExt(X, Ty); 2021 Constant *ShAmtC = ConstantInt::get(Ty, ShiftC->zext(Width)); 2022 return BinaryOperator::CreateLShr(Sext, ShAmtC); 2023 } 2024 } 2025 2026 // If this 'and' clears the sign-bits added by ashr, replace with lshr: 2027 // and (ashr X, ShiftC), C --> lshr X, ShiftC 2028 if (match(Op0, m_AShr(m_Value(X), m_APInt(ShiftC))) && ShiftC->ult(Width) && 2029 C->isMask(Width - ShiftC->getZExtValue())) 2030 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, *ShiftC)); 2031 2032 const APInt *AddC; 2033 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC)))) { 2034 // If we add zeros to every bit below a mask, the add has no effect: 2035 // (X + AddC) & LowMaskC --> X & LowMaskC 2036 unsigned Ctlz = C->countLeadingZeros(); 2037 APInt LowMask(APInt::getLowBitsSet(Width, Width - Ctlz)); 2038 if ((*AddC & LowMask).isZero()) 2039 return BinaryOperator::CreateAnd(X, Op1); 2040 2041 // If we are masking the result of the add down to exactly one bit and 2042 // the constant we are adding has no bits set below that bit, then the 2043 // add is flipping a single bit. Example: 2044 // (X + 4) & 4 --> (X & 4) ^ 4 2045 if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) { 2046 assert((*C & *AddC) != 0 && "Expected common bit"); 2047 Value *NewAnd = Builder.CreateAnd(X, Op1); 2048 return BinaryOperator::CreateXor(NewAnd, Op1); 2049 } 2050 } 2051 2052 // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the 2053 // bitwidth of X and OP behaves well when given trunc(C1) and X. 2054 auto isNarrowableBinOpcode = [](BinaryOperator *B) { 2055 switch (B->getOpcode()) { 2056 case Instruction::Xor: 2057 case Instruction::Or: 2058 case Instruction::Mul: 2059 case Instruction::Add: 2060 case Instruction::Sub: 2061 return true; 2062 default: 2063 return false; 2064 } 2065 }; 2066 BinaryOperator *BO; 2067 if (match(Op0, m_OneUse(m_BinOp(BO))) && isNarrowableBinOpcode(BO)) { 2068 Instruction::BinaryOps BOpcode = BO->getOpcode(); 2069 Value *X; 2070 const APInt *C1; 2071 // TODO: The one-use restrictions could be relaxed a little if the AND 2072 // is going to be removed. 2073 // Try to narrow the 'and' and a binop with constant operand: 2074 // and (bo (zext X), C1), C --> zext (and (bo X, TruncC1), TruncC) 2075 if (match(BO, m_c_BinOp(m_OneUse(m_ZExt(m_Value(X))), m_APInt(C1))) && 2076 C->isIntN(X->getType()->getScalarSizeInBits())) { 2077 unsigned XWidth = X->getType()->getScalarSizeInBits(); 2078 Constant *TruncC1 = ConstantInt::get(X->getType(), C1->trunc(XWidth)); 2079 Value *BinOp = isa<ZExtInst>(BO->getOperand(0)) 2080 ? 
Builder.CreateBinOp(BOpcode, X, TruncC1) 2081 : Builder.CreateBinOp(BOpcode, TruncC1, X); 2082 Constant *TruncC = ConstantInt::get(X->getType(), C->trunc(XWidth)); 2083 Value *And = Builder.CreateAnd(BinOp, TruncC); 2084 return new ZExtInst(And, Ty); 2085 } 2086 2087 // Similar to above: if the mask matches the zext input width, then the 2088 // 'and' can be eliminated, so we can truncate the other variable op: 2089 // and (bo (zext X), Y), C --> zext (bo X, (trunc Y)) 2090 if (isa<Instruction>(BO->getOperand(0)) && 2091 match(BO->getOperand(0), m_OneUse(m_ZExt(m_Value(X)))) && 2092 C->isMask(X->getType()->getScalarSizeInBits())) { 2093 Y = BO->getOperand(1); 2094 Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr"); 2095 Value *NewBO = 2096 Builder.CreateBinOp(BOpcode, X, TrY, BO->getName() + ".narrow"); 2097 return new ZExtInst(NewBO, Ty); 2098 } 2099 // and (bo Y, (zext X)), C --> zext (bo (trunc Y), X) 2100 if (isa<Instruction>(BO->getOperand(1)) && 2101 match(BO->getOperand(1), m_OneUse(m_ZExt(m_Value(X)))) && 2102 C->isMask(X->getType()->getScalarSizeInBits())) { 2103 Y = BO->getOperand(0); 2104 Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr"); 2105 Value *NewBO = 2106 Builder.CreateBinOp(BOpcode, TrY, X, BO->getName() + ".narrow"); 2107 return new ZExtInst(NewBO, Ty); 2108 } 2109 } 2110 2111 // This is intentionally placed after the narrowing transforms for 2112 // efficiency (transform directly to the narrow logic op if possible). 2113 // If the mask is only needed on one incoming arm, push the 'and' op up. 2114 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) || 2115 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { 2116 APInt NotAndMask(~(*C)); 2117 BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode(); 2118 if (MaskedValueIsZero(X, NotAndMask, 0, &I)) { 2119 // Not masking anything out for the LHS, move mask to RHS. 2120 // and ({x}or X, Y), C --> {x}or X, (and Y, C) 2121 Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked"); 2122 return BinaryOperator::Create(BinOp, X, NewRHS); 2123 } 2124 if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) { 2125 // Not masking anything out for the RHS, move mask to LHS. 2126 // and ({x}or X, Y), C --> {x}or (and X, C), Y 2127 Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked"); 2128 return BinaryOperator::Create(BinOp, NewLHS, Y); 2129 } 2130 } 2131 2132 // When the mask is a power-of-2 constant and op0 is a shifted-power-of-2 2133 // constant, test if the shift amount equals the offset bit index: 2134 // (ShiftC << X) & C --> X == (log2(C) - log2(ShiftC)) ? C : 0 2135 // (ShiftC >> X) & C --> X == (log2(ShiftC) - log2(C)) ? C : 0 2136 if (C->isPowerOf2() && 2137 match(Op0, m_OneUse(m_LogicalShift(m_Power2(ShiftC), m_Value(X))))) { 2138 int Log2ShiftC = ShiftC->exactLogBase2(); 2139 int Log2C = C->exactLogBase2(); 2140 bool IsShiftLeft = 2141 cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl; 2142 int BitNum = IsShiftLeft ? 
Log2C - Log2ShiftC : Log2ShiftC - Log2C; 2143 assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask"); 2144 Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, BitNum)); 2145 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C), 2146 ConstantInt::getNullValue(Ty)); 2147 } 2148 2149 Constant *C1, *C2; 2150 const APInt *C3 = C; 2151 Value *X; 2152 if (C3->isPowerOf2()) { 2153 Constant *Log2C3 = ConstantInt::get(Ty, C3->countTrailingZeros()); 2154 if (match(Op0, m_OneUse(m_LShr(m_Shl(m_ImmConstant(C1), m_Value(X)), 2155 m_ImmConstant(C2)))) && 2156 match(C1, m_Power2())) { 2157 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1); 2158 Constant *LshrC = ConstantExpr::getAdd(C2, Log2C3); 2159 KnownBits KnownLShrc = computeKnownBits(LshrC, 0, nullptr); 2160 if (KnownLShrc.getMaxValue().ult(Width)) { 2161 // iff C1,C3 is pow2 and C2 + cttz(C3) < BitWidth: 2162 // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0 2163 Constant *CmpC = ConstantExpr::getSub(LshrC, Log2C1); 2164 Value *Cmp = Builder.CreateICmpEQ(X, CmpC); 2165 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3), 2166 ConstantInt::getNullValue(Ty)); 2167 } 2168 } 2169 2170 if (match(Op0, m_OneUse(m_Shl(m_LShr(m_ImmConstant(C1), m_Value(X)), 2171 m_ImmConstant(C2)))) && 2172 match(C1, m_Power2())) { 2173 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1); 2174 Constant *Cmp = 2175 ConstantExpr::getCompare(ICmpInst::ICMP_ULT, Log2C3, C2); 2176 if (Cmp->isZeroValue()) { 2177 // iff C1,C3 is pow2 and Log2(C3) >= C2: 2178 // ((C1 >> X) << C2) & C3 -> X == (cttz(C1)+C2-cttz(C3)) ? C3 : 0 2179 Constant *ShlC = ConstantExpr::getAdd(C2, Log2C1); 2180 Constant *CmpC = ConstantExpr::getSub(ShlC, Log2C3); 2181 Value *Cmp = Builder.CreateICmpEQ(X, CmpC); 2182 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3), 2183 ConstantInt::getNullValue(Ty)); 2184 } 2185 } 2186 } 2187 } 2188 2189 if (match(&I, m_And(m_OneUse(m_Shl(m_ZExt(m_Value(X)), m_Value(Y))), 2190 m_SignMask())) && 2191 match(Y, m_SpecificInt_ICMP( 2192 ICmpInst::Predicate::ICMP_EQ, 2193 APInt(Ty->getScalarSizeInBits(), 2194 Ty->getScalarSizeInBits() - 2195 X->getType()->getScalarSizeInBits())))) { 2196 auto *SExt = Builder.CreateSExt(X, Ty, X->getName() + ".signext"); 2197 auto *SanitizedSignMask = cast<Constant>(Op1); 2198 // We must be careful with the undef elements of the sign bit mask, however: 2199 // the mask elt can be undef iff the shift amount for that lane was undef, 2200 // otherwise we need to sanitize undef masks to zero. 
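  // Example: with <2 x i16> from <2 x i8>, shift amounts <8, undef>, and
  // mask <0x8000, undef>, the undef mask lane pairs with an undef shift
  // amount and may remain undef; an undef mask lane paired with a defined
  // amount is replaced by zero below.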
2201 SanitizedSignMask = Constant::replaceUndefsWith( 2202 SanitizedSignMask, ConstantInt::getNullValue(Ty->getScalarType())); 2203 SanitizedSignMask = 2204 Constant::mergeUndefsWith(SanitizedSignMask, cast<Constant>(Y)); 2205 return BinaryOperator::CreateAnd(SExt, SanitizedSignMask); 2206 } 2207 2208 if (Instruction *Z = narrowMaskedBinOp(I)) 2209 return Z; 2210 2211 if (I.getType()->isIntOrIntVectorTy(1)) { 2212 if (auto *SI0 = dyn_cast<SelectInst>(Op0)) { 2213 if (auto *I = 2214 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ true)) 2215 return I; 2216 } 2217 if (auto *SI1 = dyn_cast<SelectInst>(Op1)) { 2218 if (auto *I = 2219 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ true)) 2220 return I; 2221 } 2222 } 2223 2224 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 2225 return FoldedLogic; 2226 2227 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder)) 2228 return DeMorgan; 2229 2230 { 2231 Value *A, *B, *C; 2232 // A & (A ^ B) --> A & ~B 2233 if (match(Op1, m_OneUse(m_c_Xor(m_Specific(Op0), m_Value(B))))) 2234 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(B)); 2235 // (A ^ B) & A --> A & ~B 2236 if (match(Op0, m_OneUse(m_c_Xor(m_Specific(Op1), m_Value(B))))) 2237 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(B)); 2238 2239 // A & ~(A ^ B) --> A & B 2240 if (match(Op1, m_Not(m_c_Xor(m_Specific(Op0), m_Value(B))))) 2241 return BinaryOperator::CreateAnd(Op0, B); 2242 // ~(A ^ B) & A --> A & B 2243 if (match(Op0, m_Not(m_c_Xor(m_Specific(Op1), m_Value(B))))) 2244 return BinaryOperator::CreateAnd(Op1, B); 2245 2246 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C 2247 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) 2248 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) 2249 if (Op1->hasOneUse() || isFreeToInvert(C, C->hasOneUse())) 2250 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C)); 2251 2252 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C 2253 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) 2254 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) 2255 if (Op0->hasOneUse() || isFreeToInvert(C, C->hasOneUse())) 2256 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C)); 2257 2258 // (A | B) & (~A ^ B) -> A & B 2259 // (A | B) & (B ^ ~A) -> A & B 2260 // (B | A) & (~A ^ B) -> A & B 2261 // (B | A) & (B ^ ~A) -> A & B 2262 if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) && 2263 match(Op0, m_c_Or(m_Specific(A), m_Specific(B)))) 2264 return BinaryOperator::CreateAnd(A, B); 2265 2266 // (~A ^ B) & (A | B) -> A & B 2267 // (~A ^ B) & (B | A) -> A & B 2268 // (B ^ ~A) & (A | B) -> A & B 2269 // (B ^ ~A) & (B | A) -> A & B 2270 if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) && 2271 match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) 2272 return BinaryOperator::CreateAnd(A, B); 2273 2274 // (~A | B) & (A ^ B) -> ~A & B 2275 // (~A | B) & (B ^ A) -> ~A & B 2276 // (B | ~A) & (A ^ B) -> ~A & B 2277 // (B | ~A) & (B ^ A) -> ~A & B 2278 if (match(Op0, m_c_Or(m_Not(m_Value(A)), m_Value(B))) && 2279 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B)))) 2280 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B); 2281 2282 // (A ^ B) & (~A | B) -> ~A & B 2283 // (B ^ A) & (~A | B) -> ~A & B 2284 // (A ^ B) & (B | ~A) -> ~A & B 2285 // (B ^ A) & (B | ~A) -> ~A & B 2286 if (match(Op1, m_c_Or(m_Not(m_Value(A)), m_Value(B))) && 2287 match(Op0, m_c_Xor(m_Specific(A), m_Specific(B)))) 2288 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B); 2289 } 2290 2291 { 2292 ICmpInst *LHS = 
dyn_cast<ICmpInst>(Op0); 2293 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1); 2294 if (LHS && RHS) 2295 if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ true)) 2296 return replaceInstUsesWith(I, Res); 2297 2298 // TODO: Make this recursive; it's a little tricky because an arbitrary 2299 // number of 'and' instructions might have to be created. 2300 if (LHS && match(Op1, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) { 2301 bool IsLogical = isa<SelectInst>(Op1); 2302 // LHS & (X && Y) --> (LHS && X) && Y 2303 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2304 if (Value *Res = 2305 foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true, IsLogical)) 2306 return replaceInstUsesWith(I, IsLogical 2307 ? Builder.CreateLogicalAnd(Res, Y) 2308 : Builder.CreateAnd(Res, Y)); 2309 // LHS & (X && Y) --> X && (LHS & Y) 2310 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2311 if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true, 2312 /* IsLogical */ false)) 2313 return replaceInstUsesWith(I, IsLogical 2314 ? Builder.CreateLogicalAnd(X, Res) 2315 : Builder.CreateAnd(X, Res)); 2316 } 2317 if (RHS && match(Op0, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) { 2318 bool IsLogical = isa<SelectInst>(Op0); 2319 // (X && Y) & RHS --> (X && RHS) && Y 2320 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2321 if (Value *Res = 2322 foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true, IsLogical)) 2323 return replaceInstUsesWith(I, IsLogical 2324 ? Builder.CreateLogicalAnd(Res, Y) 2325 : Builder.CreateAnd(Res, Y)); 2326 // (X && Y) & RHS --> X && (Y & RHS) 2327 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2328 if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true, 2329 /* IsLogical */ false)) 2330 return replaceInstUsesWith(I, IsLogical 2331 ? Builder.CreateLogicalAnd(X, Res) 2332 : Builder.CreateAnd(X, Res)); 2333 } 2334 } 2335 2336 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) 2337 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) 2338 if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ true)) 2339 return replaceInstUsesWith(I, Res); 2340 2341 if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder)) 2342 return FoldedFCmps; 2343 2344 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I)) 2345 return CastedAnd; 2346 2347 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I)) 2348 return Sel; 2349 2350 // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>. 2351 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold 2352 // with binop identity constant. But creating a select with non-constant 2353 // arm may not be reversible due to poison semantics. Is that a good 2354 // canonicalization? 2355 Value *A; 2356 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) && 2357 A->getType()->isIntOrIntVectorTy(1)) 2358 return SelectInst::Create(A, Op1, Constant::getNullValue(Ty)); 2359 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) && 2360 A->getType()->isIntOrIntVectorTy(1)) 2361 return SelectInst::Create(A, Op0, Constant::getNullValue(Ty)); 2362 2363 // Similarly, a 'not' of the bool translates to a swap of the select arms: 2364 // ~sext(A) & Op1 --> A ? 0 : Op1 2365 // Op0 & ~sext(A) --> A ? 0 : Op0 2366 if (match(Op0, m_Not(m_SExt(m_Value(A)))) && 2367 A->getType()->isIntOrIntVectorTy(1)) 2368 return SelectInst::Create(A, Constant::getNullValue(Ty), Op1); 2369 if (match(Op1, m_Not(m_SExt(m_Value(A)))) && 2370 A->getType()->isIntOrIntVectorTy(1)) 2371 return SelectInst::Create(A, Constant::getNullValue(Ty), Op0); 2372 2373 // (iN X s>> (N-1)) & Y --> (X s< 0) ? 
Y : 0 -- with optional sext
2374   if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
2375                             m_AShr(m_Value(X), m_APIntAllowUndef(C)))),
2376                         m_Value(Y))) &&
2377       *C == X->getType()->getScalarSizeInBits() - 1) {
2378     Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
2379     return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
2380   }
2381   // If there's a 'not' of the shifted value, swap the select operands:
2382   // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
2383   if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
2384                             m_Not(m_AShr(m_Value(X), m_APIntAllowUndef(C))))),
2385                         m_Value(Y))) &&
2386       *C == X->getType()->getScalarSizeInBits() - 1) {
2387     Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
2388     return SelectInst::Create(IsNeg, ConstantInt::getNullValue(Ty), Y);
2389   }
2390
2391   // (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
2392   if (sinkNotIntoOtherHandOfLogicalOp(I))
2393     return &I;
2394
2395   // An 'and' recurrence w/loop invariant step is equivalent to (and start, step)
2396   PHINode *PN = nullptr;
2397   Value *Start = nullptr, *Step = nullptr;
2398   if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
2399     return replaceInstUsesWith(I, Builder.CreateAnd(Start, Step));
2400
2401   if (Instruction *R = reassociateForUses(I, Builder))
2402     return R;
2403
2404   if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
2405     return Canonicalized;
2406
2407   if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
2408     return Folded;
2409
2410   return nullptr;
2411 }
2412
2413 Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I,
2414                                                       bool MatchBSwaps,
2415                                                       bool MatchBitReversals) {
2416   SmallVector<Instruction *, 4> Insts;
2417   if (!recognizeBSwapOrBitReverseIdiom(&I, MatchBSwaps, MatchBitReversals,
2418                                        Insts))
2419     return nullptr;
2420   Instruction *LastInst = Insts.pop_back_val();
2421   LastInst->removeFromParent();
2422
2423   for (auto *Inst : Insts)
2424     Worklist.push(Inst);
2425   return LastInst;
2426 }
2427
2428 /// Match UB-safe variants of the funnel shift intrinsic.
2429 static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
2430   // TODO: Can we reduce the code duplication between this and the related
2431   // rotate matching code under visitSelect and visitTrunc?
2432   unsigned Width = Or.getType()->getScalarSizeInBits();
2433
2434   // First, find an or'd pair of opposite shifts:
2435   // or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1)
2436   BinaryOperator *Or0, *Or1;
2437   if (!match(Or.getOperand(0), m_BinOp(Or0)) ||
2438       !match(Or.getOperand(1), m_BinOp(Or1)))
2439     return nullptr;
2440
2441   Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
2442   if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
2443       !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
2444       Or0->getOpcode() == Or1->getOpcode())
2445     return nullptr;
2446
2447   // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
2448   if (Or0->getOpcode() == BinaryOperator::LShr) {
2449     std::swap(Or0, Or1);
2450     std::swap(ShVal0, ShVal1);
2451     std::swap(ShAmt0, ShAmt1);
2452   }
2453   assert(Or0->getOpcode() == BinaryOperator::Shl &&
2454          Or1->getOpcode() == BinaryOperator::LShr &&
2455          "Illegal or(shift,shift) pair");
2456
2457   // Match the shift amount operands for a funnel shift pattern. This always
2458   // matches a subtraction on the R operand.
2459   auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
2460     // Check for constant shift amounts that sum to the bitwidth.
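    // Example, for i32:
    //   or (shl %x, 7), (lshr %y, 25) --> fshl(%x, %y, 7)
    // since both amounts are in range and 7 + 25 == 32.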
2461     const APInt *LI, *RI;
2462     if (match(L, m_APIntAllowUndef(LI)) && match(R, m_APIntAllowUndef(RI)))
2463       if (LI->ult(Width) && RI->ult(Width) && (*LI + *RI) == Width)
2464         return ConstantInt::get(L->getType(), *LI);
2465
2466     Constant *LC, *RC;
2467     if (match(L, m_Constant(LC)) && match(R, m_Constant(RC)) &&
2468         match(L, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
2469         match(R, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
2470         match(ConstantExpr::getAdd(LC, RC), m_SpecificIntAllowUndef(Width)))
2471       return ConstantExpr::mergeUndefsWith(LC, RC);
2472
2473     // (shl ShVal, X) | (lshr ShVal, (Width - X)) iff X < Width.
2474     // We limit this to X < Width in case the backend re-expands the intrinsic,
2475     // and has to reintroduce a shift modulo operation (InstCombine might remove
2476     // it after this fold). This still doesn't guarantee that the final codegen
2477     // will match this original pattern.
2478     if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) {
2479       KnownBits KnownL = IC.computeKnownBits(L, /*Depth*/ 0, &Or);
2480       return KnownL.getMaxValue().ult(Width) ? L : nullptr;
2481     }
2482
2483     // For non-constant cases, the following patterns currently only work for
2484     // rotation patterns.
2485     // TODO: Add general funnel-shift compatible patterns.
2486     if (ShVal0 != ShVal1)
2487       return nullptr;
2488
2489     // For non-constant cases we don't support non-pow2 shift masks.
2490     // TODO: Is it worth matching urem as well?
2491     if (!isPowerOf2_32(Width))
2492       return nullptr;
2493
2494     // The shift amount may be masked with negation:
2495     // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
2496     Value *X;
2497     unsigned Mask = Width - 1;
2498     if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
2499         match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
2500       return X;
2501
2502     // Similar to above, but the shift amount may be extended after masking,
2503     // so return the extended value as the parameter for the intrinsic.
2504     if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
2505         match(R, m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))),
2506                        m_SpecificInt(Mask))))
2507       return L;
2508
2509     if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
2510         match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
2511       return L;
2512
2513     return nullptr;
2514   };
2515
2516   Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
2517   bool IsFshl = true; // Sub on LSHR.
2518   if (!ShAmt) {
2519     ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
2520     IsFshl = false; // Sub on SHL.
2521   }
2522   if (!ShAmt)
2523     return nullptr;
2524
2525   Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
2526   Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
2527   return CallInst::Create(F, {ShVal0, ShVal1, ShAmt});
2528 }
2529
2530 /// Attempt to combine or(zext(x),shl(zext(y),bw/2)) concat packing patterns.
2531 static Instruction *matchOrConcat(Instruction &Or,
2532                                   InstCombiner::BuilderTy &Builder) {
2533   assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
2534   Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1);
2535   Type *Ty = Or.getType();
2536
2537   unsigned Width = Ty->getScalarSizeInBits();
2538   if ((Width & 1) != 0)
2539     return nullptr;
2540   unsigned HalfWidth = Width / 2;
2541
2542   // Canonicalize zext (lower half) to LHS.
2543   if (!isa<ZExtInst>(Op0))
2544     std::swap(Op0, Op1);
2545
2546   // Find lower/upper half.
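  // Example, packing two i16 halves into an i32:
  //   or (zext i16 %lo to i32), (shl (zext i16 %hi to i32), 16)
  // is the concatenation %hi:%lo.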
2547   Value *LowerSrc, *ShlVal, *UpperSrc;
2548   const APInt *C;
2549   if (!match(Op0, m_OneUse(m_ZExt(m_Value(LowerSrc)))) ||
2550       !match(Op1, m_OneUse(m_Shl(m_Value(ShlVal), m_APInt(C)))) ||
2551       !match(ShlVal, m_OneUse(m_ZExt(m_Value(UpperSrc)))))
2552     return nullptr;
2553   if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() ||
2554       LowerSrc->getType()->getScalarSizeInBits() != HalfWidth)
2555     return nullptr;
2556
2557   auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) {
2558     Value *NewLower = Builder.CreateZExt(Lo, Ty);
2559     Value *NewUpper = Builder.CreateZExt(Hi, Ty);
2560     NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
2561     Value *BinOp = Builder.CreateOr(NewLower, NewUpper);
2562     Function *F = Intrinsic::getDeclaration(Or.getModule(), id, Ty);
2563     return Builder.CreateCall(F, BinOp);
2564   };
2565
2566   // BSWAP: Push the concat down, swapping the lower/upper sources.
2567   // concat(bswap(x),bswap(y)) -> bswap(concat(x,y))
2568   Value *LowerBSwap, *UpperBSwap;
2569   if (match(LowerSrc, m_BSwap(m_Value(LowerBSwap))) &&
2570       match(UpperSrc, m_BSwap(m_Value(UpperBSwap))))
2571     return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);
2572
2573   // BITREVERSE: Push the concat down, swapping the lower/upper sources.
2574   // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y))
2575   Value *LowerBRev, *UpperBRev;
2576   if (match(LowerSrc, m_BitReverse(m_Value(LowerBRev))) &&
2577       match(UpperSrc, m_BitReverse(m_Value(UpperBRev))))
2578     return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);
2579
2580   return nullptr;
2581 }
2582
2583 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
2584 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
2585   unsigned NumElts = cast<FixedVectorType>(C1->getType())->getNumElements();
2586   for (unsigned i = 0; i != NumElts; ++i) {
2587     Constant *EltC1 = C1->getAggregateElement(i);
2588     Constant *EltC2 = C2->getAggregateElement(i);
2589     if (!EltC1 || !EltC2)
2590       return false;
2591
2592     // One element must be all ones, and the other must be all zeros.
2593     if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
2594           (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
2595       return false;
2596   }
2597   return true;
2598 }
2599
2600 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or
2601 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
2602 /// B, it can be used as the condition operand of a select instruction.
2603 /// We will detect (A & C) | ~(B | D) when the flag ABIsTheSame is enabled.
2604 Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B,
2605                                             bool ABIsTheSame) {
2606   // We may have peeked through bitcasts in the caller.
2607   // Exit immediately if we don't have (vector) integer types.
2608   Type *Ty = A->getType();
2609   if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
2610     return nullptr;
2611
2612   // If A is the 'not' operand of B and has enough signbits, we have our answer.
2613   if (ABIsTheSame ? (A == B) : match(B, m_Not(m_Specific(A)))) {
2614     // If these are scalars or vectors of i1, A can be used directly.
2615     if (Ty->isIntOrIntVectorTy(1))
2616       return A;
2617
2618     // If we look through a vector bitcast, the caller will bitcast the operands
2619     // to match the condition's number of bits (N x i1).
2620     // To make this poison-safe, disallow bitcast from wide element to narrow
2621     // element.
That could allow poison in lanes where it was not present in the 2622 // original code. 2623 A = peekThroughBitcast(A); 2624 if (A->getType()->isIntOrIntVectorTy()) { 2625 unsigned NumSignBits = ComputeNumSignBits(A); 2626 if (NumSignBits == A->getType()->getScalarSizeInBits() && 2627 NumSignBits <= Ty->getScalarSizeInBits()) 2628 return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(A->getType())); 2629 } 2630 return nullptr; 2631 } 2632 2633 // TODO: add support for sext and constant case 2634 if (ABIsTheSame) 2635 return nullptr; 2636 2637 // If both operands are constants, see if the constants are inverse bitmasks. 2638 Constant *AConst, *BConst; 2639 if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst))) 2640 if (AConst == ConstantExpr::getNot(BConst) && 2641 ComputeNumSignBits(A) == Ty->getScalarSizeInBits()) 2642 return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty)); 2643 2644 // Look for more complex patterns. The 'not' op may be hidden behind various 2645 // casts. Look through sexts and bitcasts to find the booleans. 2646 Value *Cond; 2647 Value *NotB; 2648 if (match(A, m_SExt(m_Value(Cond))) && 2649 Cond->getType()->isIntOrIntVectorTy(1)) { 2650 // A = sext i1 Cond; B = sext (not (i1 Cond)) 2651 if (match(B, m_SExt(m_Not(m_Specific(Cond))))) 2652 return Cond; 2653 2654 // A = sext i1 Cond; B = not ({bitcast} (sext (i1 Cond))) 2655 // TODO: The one-use checks are unnecessary or misplaced. If the caller 2656 // checked for uses on logic ops/casts, that should be enough to 2657 // make this transform worthwhile. 2658 if (match(B, m_OneUse(m_Not(m_Value(NotB))))) { 2659 NotB = peekThroughBitcast(NotB, true); 2660 if (match(NotB, m_SExt(m_Specific(Cond)))) 2661 return Cond; 2662 } 2663 } 2664 2665 // All scalar (and most vector) possibilities should be handled now. 2666 // Try more matches that only apply to non-splat constant vectors. 2667 if (!Ty->isVectorTy()) 2668 return nullptr; 2669 2670 // If both operands are xor'd with constants using the same sexted boolean 2671 // operand, see if the constants are inverse bitmasks. 2672 // TODO: Use ConstantExpr::getNot()? 2673 if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) && 2674 match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) && 2675 Cond->getType()->isIntOrIntVectorTy(1) && 2676 areInverseVectorBitmasks(AConst, BConst)) { 2677 AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty)); 2678 return Builder.CreateXor(Cond, AConst); 2679 } 2680 return nullptr; 2681 } 2682 2683 /// We have an expression of the form (A & C) | (B & D). Try to simplify this 2684 /// to "A' ? C : D", where A' is a boolean or vector of booleans. 2685 /// When InvertFalseVal is set to true, we try to match the pattern 2686 /// where we have peeked through a 'not' op and A and B are the same: 2687 /// (A & C) | ~(A | D) --> (A & C) | (~A & ~D) --> A' ? C : ~D 2688 Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *C, Value *B, 2689 Value *D, bool InvertFalseVal) { 2690 // The potential condition of the select may be bitcasted. In that case, look 2691 // through its bitcast and the corresponding bitcast of the 'not' condition. 2692 Type *OrigType = A->getType(); 2693 A = peekThroughBitcast(A, true); 2694 B = peekThroughBitcast(B, true); 2695 if (Value *Cond = getSelectCondition(A, B, InvertFalseVal)) { 2696 // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D)) 2697 // If this is a vector, we may need to cast to match the condition's length. 
2698 // The bitcasts will either all exist or all not exist. The builder will 2699 // not create unnecessary casts if the types already match. 2700 Type *SelTy = A->getType(); 2701 if (auto *VecTy = dyn_cast<VectorType>(Cond->getType())) { 2702 // For a fixed or scalable vector get N from <{vscale x} N x iM> 2703 unsigned Elts = VecTy->getElementCount().getKnownMinValue(); 2704 // For a fixed or scalable vector, get the size in bits of N x iM; for a 2705 // scalar this is just M. 2706 unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue(); 2707 Type *EltTy = Builder.getIntNTy(SelEltSize / Elts); 2708 SelTy = VectorType::get(EltTy, VecTy->getElementCount()); 2709 } 2710 Value *BitcastC = Builder.CreateBitCast(C, SelTy); 2711 if (InvertFalseVal) 2712 D = Builder.CreateNot(D); 2713 Value *BitcastD = Builder.CreateBitCast(D, SelTy); 2714 Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD); 2715 return Builder.CreateBitCast(Select, OrigType); 2716 } 2717 2718 return nullptr; 2719 } 2720 2721 // (icmp eq X, 0) | (icmp ult Other, X) -> (icmp ule Other, X-1) 2722 // (icmp ne X, 0) & (icmp uge Other, X) -> (icmp ugt Other, X-1) 2723 static Value *foldAndOrOfICmpEqZeroAndICmp(ICmpInst *LHS, ICmpInst *RHS, 2724 bool IsAnd, bool IsLogical, 2725 IRBuilderBase &Builder) { 2726 ICmpInst::Predicate LPred = 2727 IsAnd ? LHS->getInversePredicate() : LHS->getPredicate(); 2728 ICmpInst::Predicate RPred = 2729 IsAnd ? RHS->getInversePredicate() : RHS->getPredicate(); 2730 Value *LHS0 = LHS->getOperand(0); 2731 if (LPred != ICmpInst::ICMP_EQ || !match(LHS->getOperand(1), m_Zero()) || 2732 !LHS0->getType()->isIntOrIntVectorTy() || 2733 !(LHS->hasOneUse() || RHS->hasOneUse())) 2734 return nullptr; 2735 2736 Value *Other; 2737 if (RPred == ICmpInst::ICMP_ULT && RHS->getOperand(1) == LHS0) 2738 Other = RHS->getOperand(0); 2739 else if (RPred == ICmpInst::ICMP_UGT && RHS->getOperand(0) == LHS0) 2740 Other = RHS->getOperand(1); 2741 else 2742 return nullptr; 2743 2744 if (IsLogical) 2745 Other = Builder.CreateFreeze(Other); 2746 return Builder.CreateICmp( 2747 IsAnd ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE, 2748 Builder.CreateAdd(LHS0, Constant::getAllOnesValue(LHS0->getType())), 2749 Other); 2750 } 2751 2752 /// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible. 2753 /// If IsLogical is true, then the and/or is in select form and the transform 2754 /// must be poison-safe. 2755 Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, 2756 Instruction &I, bool IsAnd, 2757 bool IsLogical) { 2758 const SimplifyQuery Q = SQ.getWithInstruction(&I); 2759 2760 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2) 2761 // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2) 2762 // if K1 and K2 are a one-bit mask. 
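  // Example, with K1 = 1 and K2 = 8:
  //   ((A & 1) == 0) | ((A & 8) == 0) --> (A & 9) != 9
  // which is false only when both bits are set.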
2763 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, &I, IsAnd, IsLogical)) 2764 return V; 2765 2766 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 2767 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0); 2768 Value *LHS1 = LHS->getOperand(1), *RHS1 = RHS->getOperand(1); 2769 const APInt *LHSC = nullptr, *RHSC = nullptr; 2770 match(LHS1, m_APInt(LHSC)); 2771 match(RHS1, m_APInt(RHSC)); 2772 2773 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B) 2774 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B) 2775 if (predicatesFoldable(PredL, PredR)) { 2776 if (LHS0 == RHS1 && LHS1 == RHS0) { 2777 PredL = ICmpInst::getSwappedPredicate(PredL); 2778 std::swap(LHS0, LHS1); 2779 } 2780 if (LHS0 == RHS0 && LHS1 == RHS1) { 2781 unsigned Code = IsAnd ? getICmpCode(PredL) & getICmpCode(PredR) 2782 : getICmpCode(PredL) | getICmpCode(PredR); 2783 bool IsSigned = LHS->isSigned() || RHS->isSigned(); 2784 return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder); 2785 } 2786 } 2787 2788 // handle (roughly): 2789 // (icmp ne (A & B), C) | (icmp ne (A & D), E) 2790 // (icmp eq (A & B), C) & (icmp eq (A & D), E) 2791 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, IsLogical, Builder)) 2792 return V; 2793 2794 if (Value *V = 2795 foldAndOrOfICmpEqZeroAndICmp(LHS, RHS, IsAnd, IsLogical, Builder)) 2796 return V; 2797 // We can treat logical like bitwise here, because both operands are used on 2798 // the LHS, and as such poison from both will propagate. 2799 if (Value *V = foldAndOrOfICmpEqZeroAndICmp(RHS, LHS, IsAnd, 2800 /*IsLogical*/ false, Builder)) 2801 return V; 2802 2803 if (Value *V = 2804 foldAndOrOfICmpsWithConstEq(LHS, RHS, IsAnd, IsLogical, Builder, Q)) 2805 return V; 2806 // We can convert this case to bitwise and, because both operands are used 2807 // on the LHS, and as such poison from both will propagate. 2808 if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, IsAnd, 2809 /*IsLogical*/ false, Builder, Q)) 2810 return V; 2811 2812 if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, IsAnd, Builder)) 2813 return V; 2814 if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, IsAnd, Builder)) 2815 return V; 2816 2817 // TODO: One of these directions is fine with logical and/or, the other could 2818 // be supported by inserting freeze. 2819 if (!IsLogical) { 2820 // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n 2821 // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n 2822 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/!IsAnd)) 2823 return V; 2824 2825 // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n 2826 // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n 2827 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/!IsAnd)) 2828 return V; 2829 } 2830 2831 // TODO: Add conjugated or fold, check whether it is safe for logical and/or. 2832 if (IsAnd && !IsLogical) 2833 if (Value *V = foldSignedTruncationCheck(LHS, RHS, I, Builder)) 2834 return V; 2835 2836 if (Value *V = foldIsPowerOf2(LHS, RHS, IsAnd, Builder)) 2837 return V; 2838 2839 // TODO: Verify whether this is safe for logical and/or. 
2840   if (!IsLogical) {
2841     if (Value *X = foldUnsignedUnderflowCheck(LHS, RHS, IsAnd, Q, Builder))
2842       return X;
2843     if (Value *X = foldUnsignedUnderflowCheck(RHS, LHS, IsAnd, Q, Builder))
2844       return X;
2845   }
2846
2847   if (Value *X = foldEqOfParts(LHS, RHS, IsAnd))
2848     return X;
2849
2850   // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
2851   // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
2852   // TODO: Remove this when foldLogOpOfMaskedICmps can handle undefs.
2853   if (!IsLogical && PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
2854       PredL == PredR && match(LHS1, m_ZeroInt()) && match(RHS1, m_ZeroInt()) &&
2855       LHS0->getType() == RHS0->getType()) {
2856     Value *NewOr = Builder.CreateOr(LHS0, RHS0);
2857     return Builder.CreateICmp(PredL, NewOr,
2858                               Constant::getNullValue(NewOr->getType()));
2859   }
2860
2861   // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
2862   if (!LHSC || !RHSC)
2863     return nullptr;
2864
2865   // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
2866   // (trunc x) != C1 | (and x, CA) != C2 -> (and x, CA|CMAX) != C1|C2
2867   // where CMAX is the all ones value for the truncated type,
2868   // iff the lower bits of C2 and CA are zero.
2869   if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
2870       PredL == PredR && LHS->hasOneUse() && RHS->hasOneUse()) {
2871     Value *V;
2872     const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;
2873
2874     // (trunc x) == C1 & (and x, CA) == C2
2875     // (and x, CA) == C2 & (trunc x) == C1
2876     if (match(RHS0, m_Trunc(m_Value(V))) &&
2877         match(LHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
2878       SmallC = RHSC;
2879       BigC = LHSC;
2880     } else if (match(LHS0, m_Trunc(m_Value(V))) &&
2881                match(RHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
2882       SmallC = LHSC;
2883       BigC = RHSC;
2884     }
2885
2886     if (SmallC && BigC) {
2887       unsigned BigBitSize = BigC->getBitWidth();
2888       unsigned SmallBitSize = SmallC->getBitWidth();
2889
2890       // Check that the low bits are zero.
2891       APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
2892       if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
2893         Value *NewAnd = Builder.CreateAnd(V, Low | *AndC);
2894         APInt N = SmallC->zext(BigBitSize) | *BigC;
2895         Value *NewVal = ConstantInt::get(NewAnd->getType(), N);
2896         return Builder.CreateICmp(PredL, NewAnd, NewVal);
2897       }
2898     }
2899   }
2900
2901   // Match the naive pattern (and its inverted form) for checking if two values
2902   // share the same sign.
An example of the pattern: 2903 // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1) 2904 // Inverted form (example): 2905 // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0) 2906 bool TrueIfSignedL, TrueIfSignedR; 2907 if (isSignBitCheck(PredL, *LHSC, TrueIfSignedL) && 2908 isSignBitCheck(PredR, *RHSC, TrueIfSignedR) && 2909 (RHS->hasOneUse() || LHS->hasOneUse())) { 2910 Value *X, *Y; 2911 if (IsAnd) { 2912 if ((TrueIfSignedL && !TrueIfSignedR && 2913 match(LHS0, m_Or(m_Value(X), m_Value(Y))) && 2914 match(RHS0, m_c_And(m_Specific(X), m_Specific(Y)))) || 2915 (!TrueIfSignedL && TrueIfSignedR && 2916 match(LHS0, m_And(m_Value(X), m_Value(Y))) && 2917 match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y))))) { 2918 Value *NewXor = Builder.CreateXor(X, Y); 2919 return Builder.CreateIsNeg(NewXor); 2920 } 2921 } else { 2922 if ((TrueIfSignedL && !TrueIfSignedR && 2923 match(LHS0, m_And(m_Value(X), m_Value(Y))) && 2924 match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))) || 2925 (!TrueIfSignedL && TrueIfSignedR && 2926 match(LHS0, m_Or(m_Value(X), m_Value(Y))) && 2927 match(RHS0, m_c_And(m_Specific(X), m_Specific(Y))))) { 2928 Value *NewXor = Builder.CreateXor(X, Y); 2929 return Builder.CreateIsNotNeg(NewXor); 2930 } 2931 } 2932 } 2933 2934 return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd); 2935 } 2936 2937 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 2938 // here. We should standardize that construct where it is needed or choose some 2939 // other way to ensure that commutated variants of patterns are not missed. 2940 Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) { 2941 if (Value *V = simplifyOrInst(I.getOperand(0), I.getOperand(1), 2942 SQ.getWithInstruction(&I))) 2943 return replaceInstUsesWith(I, V); 2944 2945 if (SimplifyAssociativeOrCommutative(I)) 2946 return &I; 2947 2948 if (Instruction *X = foldVectorBinop(I)) 2949 return X; 2950 2951 if (Instruction *Phi = foldBinopWithPhiOperands(I)) 2952 return Phi; 2953 2954 // See if we can simplify any instructions used by the instruction whose sole 2955 // purpose is to compute bits we don't care about. 2956 if (SimplifyDemandedInstructionBits(I)) 2957 return &I; 2958 2959 // Do this before using distributive laws to catch simple and/or/not patterns. 
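  // e.g. (A & ~B) | (~A & B) --> A ^ B, and (A & B) | ~(A | B) --> ~(A ^ B).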
2960 if (Instruction *Xor = foldOrToXor(I, Builder)) 2961 return Xor; 2962 2963 if (Instruction *X = foldComplexAndOrPatterns(I, Builder)) 2964 return X; 2965 2966 // (A&B)|(A&C) -> A&(B|C) etc 2967 if (Value *V = foldUsingDistributiveLaws(I)) 2968 return replaceInstUsesWith(I, V); 2969 2970 if (Value *V = SimplifyBSwap(I, Builder)) 2971 return replaceInstUsesWith(I, V); 2972 2973 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2974 Type *Ty = I.getType(); 2975 if (Ty->isIntOrIntVectorTy(1)) { 2976 if (auto *SI0 = dyn_cast<SelectInst>(Op0)) { 2977 if (auto *I = 2978 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ false)) 2979 return I; 2980 } 2981 if (auto *SI1 = dyn_cast<SelectInst>(Op1)) { 2982 if (auto *I = 2983 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ false)) 2984 return I; 2985 } 2986 } 2987 2988 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 2989 return FoldedLogic; 2990 2991 if (Instruction *BitOp = matchBSwapOrBitReverse(I, /*MatchBSwaps*/ true, 2992 /*MatchBitReversals*/ true)) 2993 return BitOp; 2994 2995 if (Instruction *Funnel = matchFunnelShift(I, *this)) 2996 return Funnel; 2997 2998 if (Instruction *Concat = matchOrConcat(I, Builder)) 2999 return replaceInstUsesWith(I, Concat); 3000 3001 Value *X, *Y; 3002 const APInt *CV; 3003 if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) && 3004 !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, 0, &I)) { 3005 // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0 3006 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X). 3007 Value *Or = Builder.CreateOr(X, Y); 3008 return BinaryOperator::CreateXor(Or, ConstantInt::get(Ty, *CV)); 3009 } 3010 3011 // If the operands have no common bits set: 3012 // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1) 3013 if (match(&I, 3014 m_c_Or(m_OneUse(m_Mul(m_Value(X), m_Value(Y))), m_Deferred(X))) && 3015 haveNoCommonBitsSet(Op0, Op1, DL)) { 3016 Value *IncrementY = Builder.CreateAdd(Y, ConstantInt::get(Ty, 1)); 3017 return BinaryOperator::CreateMul(X, IncrementY); 3018 } 3019 3020 // X | (X ^ Y) --> X | Y (4 commuted patterns) 3021 if (match(&I, m_c_Or(m_Value(X), m_c_Xor(m_Deferred(X), m_Value(Y))))) 3022 return BinaryOperator::CreateOr(X, Y); 3023 3024 // (A & C) | (B & D) 3025 Value *A, *B, *C, *D; 3026 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 3027 match(Op1, m_And(m_Value(B), m_Value(D)))) { 3028 3029 // (A & C0) | (B & C1) 3030 const APInt *C0, *C1; 3031 if (match(C, m_APInt(C0)) && match(D, m_APInt(C1))) { 3032 Value *X; 3033 if (*C0 == ~*C1) { 3034 // ((X | B) & MaskC) | (B & ~MaskC) -> (X & MaskC) | B 3035 if (match(A, m_c_Or(m_Value(X), m_Specific(B)))) 3036 return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C0), B); 3037 // (A & MaskC) | ((X | A) & ~MaskC) -> (X & ~MaskC) | A 3038 if (match(B, m_c_Or(m_Specific(A), m_Value(X)))) 3039 return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C1), A); 3040 3041 // ((X ^ B) & MaskC) | (B & ~MaskC) -> (X & MaskC) ^ B 3042 if (match(A, m_c_Xor(m_Value(X), m_Specific(B)))) 3043 return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C0), B); 3044 // (A & MaskC) | ((X ^ A) & ~MaskC) -> (X & ~MaskC) ^ A 3045 if (match(B, m_c_Xor(m_Specific(A), m_Value(X)))) 3046 return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C1), A); 3047 } 3048 3049 if ((*C0 & *C1).isZero()) { 3050 // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1) 3051 // iff (C0 & C1) == 0 and (X & ~C0) == 0 3052 if (match(A, m_c_Or(m_Value(X), m_Specific(B))) && 3053 MaskedValueIsZero(X, ~*C0, 
0, &I)) { 3054 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); 3055 return BinaryOperator::CreateAnd(A, C01); 3056 } 3057 // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1) 3058 // iff (C0 & C1) == 0 and (X & ~C1) == 0 3059 if (match(B, m_c_Or(m_Value(X), m_Specific(A))) && 3060 MaskedValueIsZero(X, ~*C1, 0, &I)) { 3061 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); 3062 return BinaryOperator::CreateAnd(B, C01); 3063 } 3064 // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1) 3065 // iff (C0 & C1) == 0 and (C2 & ~C0) == 0 and (C3 & ~C1) == 0. 3066 const APInt *C2, *C3; 3067 if (match(A, m_Or(m_Value(X), m_APInt(C2))) && 3068 match(B, m_Or(m_Specific(X), m_APInt(C3))) && 3069 (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) { 3070 Value *Or = Builder.CreateOr(X, *C2 | *C3, "bitfield"); 3071 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); 3072 return BinaryOperator::CreateAnd(Or, C01); 3073 } 3074 } 3075 } 3076 3077 // Don't try to form a select if it's unlikely that we'll get rid of at 3078 // least one of the operands. A select is generally more expensive than the 3079 // 'or' that it is replacing. 3080 if (Op0->hasOneUse() || Op1->hasOneUse()) { 3081 // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants. 3082 if (Value *V = matchSelectFromAndOr(A, C, B, D)) 3083 return replaceInstUsesWith(I, V); 3084 if (Value *V = matchSelectFromAndOr(A, C, D, B)) 3085 return replaceInstUsesWith(I, V); 3086 if (Value *V = matchSelectFromAndOr(C, A, B, D)) 3087 return replaceInstUsesWith(I, V); 3088 if (Value *V = matchSelectFromAndOr(C, A, D, B)) 3089 return replaceInstUsesWith(I, V); 3090 if (Value *V = matchSelectFromAndOr(B, D, A, C)) 3091 return replaceInstUsesWith(I, V); 3092 if (Value *V = matchSelectFromAndOr(B, D, C, A)) 3093 return replaceInstUsesWith(I, V); 3094 if (Value *V = matchSelectFromAndOr(D, B, A, C)) 3095 return replaceInstUsesWith(I, V); 3096 if (Value *V = matchSelectFromAndOr(D, B, C, A)) 3097 return replaceInstUsesWith(I, V); 3098 } 3099 } 3100 3101 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 3102 match(Op1, m_Not(m_Or(m_Value(B), m_Value(D)))) && 3103 (Op0->hasOneUse() || Op1->hasOneUse())) { 3104 // (Cond & C) | ~(Cond | D) -> Cond ? 
C : ~D 3105 if (Value *V = matchSelectFromAndOr(A, C, B, D, true)) 3106 return replaceInstUsesWith(I, V); 3107 if (Value *V = matchSelectFromAndOr(A, C, D, B, true)) 3108 return replaceInstUsesWith(I, V); 3109 if (Value *V = matchSelectFromAndOr(C, A, B, D, true)) 3110 return replaceInstUsesWith(I, V); 3111 if (Value *V = matchSelectFromAndOr(C, A, D, B, true)) 3112 return replaceInstUsesWith(I, V); 3113 } 3114 3115 // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C 3116 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) 3117 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) 3118 return BinaryOperator::CreateOr(Op0, C); 3119 3120 // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C 3121 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) 3122 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) 3123 return BinaryOperator::CreateOr(Op1, C); 3124 3125 // ((A & B) ^ C) | B -> C | B 3126 if (match(Op0, m_c_Xor(m_c_And(m_Value(A), m_Specific(Op1)), m_Value(C)))) 3127 return BinaryOperator::CreateOr(C, Op1); 3128 3129 // B | ((A & B) ^ C) -> B | C 3130 if (match(Op1, m_c_Xor(m_c_And(m_Value(A), m_Specific(Op0)), m_Value(C)))) 3131 return BinaryOperator::CreateOr(Op0, C); 3132 3133 // ((B | C) & A) | B -> B | (A & C) 3134 if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A)))) 3135 return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C)); 3136 3137 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder)) 3138 return DeMorgan; 3139 3140 // Canonicalize xor to the RHS. 3141 bool SwappedForXor = false; 3142 if (match(Op0, m_Xor(m_Value(), m_Value()))) { 3143 std::swap(Op0, Op1); 3144 SwappedForXor = true; 3145 } 3146 3147 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) { 3148 // (A | ?) | (A ^ B) --> (A | ?) | B 3149 // (B | ?) | (A ^ B) --> (B | ?) | A 3150 if (match(Op0, m_c_Or(m_Specific(A), m_Value()))) 3151 return BinaryOperator::CreateOr(Op0, B); 3152 if (match(Op0, m_c_Or(m_Specific(B), m_Value()))) 3153 return BinaryOperator::CreateOr(Op0, A); 3154 3155 // (A & B) | (A ^ B) --> A | B 3156 // (B & A) | (A ^ B) --> A | B 3157 if (match(Op0, m_And(m_Specific(A), m_Specific(B))) || 3158 match(Op0, m_And(m_Specific(B), m_Specific(A)))) 3159 return BinaryOperator::CreateOr(A, B); 3160 3161 // ~A | (A ^ B) --> ~(A & B) 3162 // ~B | (A ^ B) --> ~(A & B) 3163 // The swap above should always make Op0 the 'not'. 3164 if ((Op0->hasOneUse() || Op1->hasOneUse()) && 3165 (match(Op0, m_Not(m_Specific(A))) || match(Op0, m_Not(m_Specific(B))))) 3166 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B)); 3167 3168 // Same as above, but peek through an 'and' to the common operand: 3169 // ~(A & ?) | (A ^ B) --> ~((A & ?) & B) 3170 // ~(B & ?) | (A ^ B) --> ~((B & ?) 
& A) 3171 Instruction *And; 3172 if ((Op0->hasOneUse() || Op1->hasOneUse()) && 3173 match(Op0, m_Not(m_CombineAnd(m_Instruction(And), 3174 m_c_And(m_Specific(A), m_Value()))))) 3175 return BinaryOperator::CreateNot(Builder.CreateAnd(And, B)); 3176 if ((Op0->hasOneUse() || Op1->hasOneUse()) && 3177 match(Op0, m_Not(m_CombineAnd(m_Instruction(And), 3178 m_c_And(m_Specific(B), m_Value()))))) 3179 return BinaryOperator::CreateNot(Builder.CreateAnd(And, A)); 3180 3181 // (~A | C) | (A ^ B) --> ~(A & B) | C 3182 // (~B | C) | (A ^ B) --> ~(A & B) | C 3183 if (Op0->hasOneUse() && Op1->hasOneUse() && 3184 (match(Op0, m_c_Or(m_Not(m_Specific(A)), m_Value(C))) || 3185 match(Op0, m_c_Or(m_Not(m_Specific(B)), m_Value(C))))) { 3186 Value *Nand = Builder.CreateNot(Builder.CreateAnd(A, B), "nand"); 3187 return BinaryOperator::CreateOr(Nand, C); 3188 } 3189 3190 // A | (~A ^ B) --> ~B | A 3191 // B | (A ^ ~B) --> ~A | B 3192 if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) { 3193 Value *NotB = Builder.CreateNot(B, B->getName() + ".not"); 3194 return BinaryOperator::CreateOr(NotB, Op0); 3195 } 3196 if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) { 3197 Value *NotA = Builder.CreateNot(A, A->getName() + ".not"); 3198 return BinaryOperator::CreateOr(NotA, Op0); 3199 } 3200 } 3201 3202 // A | ~(A | B) -> A | ~B 3203 // A | ~(A ^ B) -> A | ~B 3204 if (match(Op1, m_Not(m_Value(A)))) 3205 if (BinaryOperator *B = dyn_cast<BinaryOperator>(A)) 3206 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) && 3207 Op1->hasOneUse() && (B->getOpcode() == Instruction::Or || 3208 B->getOpcode() == Instruction::Xor)) { 3209 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) : 3210 B->getOperand(0); 3211 Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not"); 3212 return BinaryOperator::CreateOr(Not, Op0); 3213 } 3214 3215 if (SwappedForXor) 3216 std::swap(Op0, Op1); 3217 3218 { 3219 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0); 3220 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1); 3221 if (LHS && RHS) 3222 if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ false)) 3223 return replaceInstUsesWith(I, Res); 3224 3225 // TODO: Make this recursive; it's a little tricky because an arbitrary 3226 // number of 'or' instructions might have to be created. 3227 Value *X, *Y; 3228 if (LHS && match(Op1, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) { 3229 bool IsLogical = isa<SelectInst>(Op1); 3230 // LHS | (X || Y) --> (LHS || X) || Y 3231 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 3232 if (Value *Res = 3233 foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false, IsLogical)) 3234 return replaceInstUsesWith(I, IsLogical 3235 ? Builder.CreateLogicalOr(Res, Y) 3236 : Builder.CreateOr(Res, Y)); 3237 // LHS | (X || Y) --> X || (LHS | Y) 3238 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 3239 if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false, 3240 /* IsLogical */ false)) 3241 return replaceInstUsesWith(I, IsLogical 3242 ? Builder.CreateLogicalOr(X, Res) 3243 : Builder.CreateOr(X, Res)); 3244 } 3245 if (RHS && match(Op0, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) { 3246 bool IsLogical = isa<SelectInst>(Op0); 3247 // (X || Y) | RHS --> (X || RHS) || Y 3248 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 3249 if (Value *Res = 3250 foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false, IsLogical)) 3251 return replaceInstUsesWith(I, IsLogical 3252 ? 
Builder.CreateLogicalOr(Res, Y) 3253 : Builder.CreateOr(Res, Y)); 3254 // (X || Y) | RHS --> X || (Y | RHS) 3255 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 3256 if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false, 3257 /* IsLogical */ false)) 3258 return replaceInstUsesWith(I, IsLogical 3259 ? Builder.CreateLogicalOr(X, Res) 3260 : Builder.CreateOr(X, Res)); 3261 } 3262 } 3263 3264 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) 3265 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) 3266 if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ false)) 3267 return replaceInstUsesWith(I, Res); 3268 3269 if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder)) 3270 return FoldedFCmps; 3271 3272 if (Instruction *CastedOr = foldCastedBitwiseLogic(I)) 3273 return CastedOr; 3274 3275 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I)) 3276 return Sel; 3277 3278 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>. 3279 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold 3280 // with binop identity constant. But creating a select with non-constant 3281 // arm may not be reversible due to poison semantics. Is that a good 3282 // canonicalization? 3283 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) && 3284 A->getType()->isIntOrIntVectorTy(1)) 3285 return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), Op1); 3286 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) && 3287 A->getType()->isIntOrIntVectorTy(1)) 3288 return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), Op0); 3289 3290 // Note: If we've gotten to the point of visiting the outer OR, then the 3291 // inner one couldn't be simplified. If it was a constant, then it won't 3292 // be simplified by a later pass either, so we try swapping the inner/outer 3293 // ORs in the hopes that we'll be able to simplify it this way. 3294 // (X|C) | V --> (X|V) | C 3295 ConstantInt *CI; 3296 if (Op0->hasOneUse() && !match(Op1, m_ConstantInt()) && 3297 match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) { 3298 Value *Inner = Builder.CreateOr(A, Op1); 3299 Inner->takeName(Op0); 3300 return BinaryOperator::CreateOr(Inner, CI); 3301 } 3302 3303 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D)) 3304 // Since this OR statement hasn't been optimized further yet, we hope 3305 // that this transformation will allow the new ORs to be optimized. 3306 { 3307 Value *X = nullptr, *Y = nullptr; 3308 if (Op0->hasOneUse() && Op1->hasOneUse() && 3309 match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) && 3310 match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) { 3311 Value *orTrue = Builder.CreateOr(A, C); 3312 Value *orFalse = Builder.CreateOr(B, D); 3313 return SelectInst::Create(X, orTrue, orFalse); 3314 } 3315 } 3316 3317 // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X) --> X s> Y ? -1 : X. 
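  // The 'ashr' splats the sign bit of the no-wrap difference Y - X: it is
  // all-ones exactly when X s> Y and zero otherwise, so or'ing it with X
  // yields -1 when X s> Y and X itself when X s<= Y.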
3318   {
3319     Value *X, *Y;
3320     if (match(&I, m_c_Or(m_OneUse(m_AShr(
3321                              m_NSWSub(m_Value(Y), m_Value(X)),
3322                              m_SpecificInt(Ty->getScalarSizeInBits() - 1))),
3323                          m_Deferred(X)))) {
3324       Value *NewICmpInst = Builder.CreateICmpSGT(X, Y);
3325       Value *AllOnes = ConstantInt::getAllOnesValue(Ty);
3326       return SelectInst::Create(NewICmpInst, AllOnes, X);
3327     }
3328   }
3329
3330   if (Instruction *V =
3331           canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
3332     return V;
3333
3334   CmpInst::Predicate Pred;
3335   Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv;
3336   // Check if the OR weakens the overflow condition for umul.with.overflow by
3337   // treating any non-zero result as overflow. In that case, the condition is
3338   // equivalent to both operands being != 0: when both are non-zero, the
3339   // result can only be 0 if the multiplication overflows.
3340   if (match(&I,
3341             m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_Value(UMulWithOv)),
3342                                 m_Value(Ov)),
3343                    m_CombineAnd(m_ICmp(Pred,
3344                                        m_CombineAnd(m_ExtractValue<0>(
3345                                                         m_Deferred(UMulWithOv)),
3346                                                     m_Value(Mul)),
3347                                        m_ZeroInt()),
3348                                 m_Value(MulIsNotZero)))) &&
3349       (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse())) &&
3350       Pred == CmpInst::ICMP_NE) {
3351     Value *A, *B;
3352     if (match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>(
3353                               m_Value(A), m_Value(B)))) {
3354       Value *NotNullA = Builder.CreateIsNotNull(A);
3355       Value *NotNullB = Builder.CreateIsNotNull(B);
3356       return BinaryOperator::CreateAnd(NotNullA, NotNullB);
3357     }
3358   }
3359
3360   // (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
3361   if (sinkNotIntoOtherHandOfLogicalOp(I))
3362     return &I;
3363
3364   // Improve "get low bit mask up to and including bit X" pattern:
3365   // (1 << X) | ((1 << X) + -1) --> -1 l>> (bitwidth(x) - 1 - X)
3366   if (match(&I, m_c_Or(m_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()),
3367                        m_Shl(m_One(), m_Deferred(X)))) &&
3368       match(&I, m_c_Or(m_OneUse(m_Value()), m_Value()))) {
3369     Value *Sub = Builder.CreateSub(
3370         ConstantInt::get(Ty, Ty->getScalarSizeInBits() - 1), X);
3371     return BinaryOperator::CreateLShr(Constant::getAllOnesValue(Ty), Sub);
3372   }
3373
3374   // An 'or' recurrence w/loop-invariant step is equivalent to (or start, step)
3375   PHINode *PN = nullptr;
3376   Value *Start = nullptr, *Step = nullptr;
3377   if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
3378     return replaceInstUsesWith(I, Builder.CreateOr(Start, Step));
3379
3380   // (A & B) | (C | D) or (C | D) | (A & B)
3381   // Can be combined if C or D is of type (A/B & X)
3382   if (match(&I, m_c_Or(m_OneUse(m_And(m_Value(A), m_Value(B))),
3383                        m_OneUse(m_Or(m_Value(C), m_Value(D)))))) {
3384     // (A & B) | (C | ?) -> C | (? | (A & B))
3385     // (C | ?) | (A & B) -> C | (? | (A & B))
3386     // ... where '?' (matched as D) is an 'and' containing A or B; the
3387     // commutative matchers cover all commuted forms of these patterns.
3392     if (match(D, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
3393         match(D, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
3394       return BinaryOperator::CreateOr(
3395           C, Builder.CreateOr(D, Builder.CreateAnd(A, B)));
3396     // (A & B) | (? | D) -> (? | (A & B)) | D
3397     // (? | D) | (A & B) -> (? | (A & B)) | D
3398     // ... likewise, with the A/B-containing 'and' matched in C instead of D.
3404     if (match(C, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
3405         match(C, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
3406       return BinaryOperator::CreateOr(
3407           Builder.CreateOr(C, Builder.CreateAnd(A, B)), D);
3408   }
3409
3410   if (Instruction *R = reassociateForUses(I, Builder))
3411     return R;
3412
3413   if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
3414     return Canonicalized;
3415
3416   if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
3417     return Folded;
3418
3419   return nullptr;
3420 }
3421
3422 /// A ^ B can be specified using other logic ops in a variety of patterns. We
3423 /// can fold these early and efficiently by morphing an existing instruction.
3424 static Instruction *foldXorToXor(BinaryOperator &I,
3425                                  InstCombiner::BuilderTy &Builder) {
3426   assert(I.getOpcode() == Instruction::Xor);
3427   Value *Op0 = I.getOperand(0);
3428   Value *Op1 = I.getOperand(1);
3429   Value *A, *B;
3430
3431   // There are 4 commuted variants for each of the basic patterns.
3432
3433   // (A & B) ^ (A | B) -> A ^ B
3434   // (A & B) ^ (B | A) -> A ^ B
3435   // (A | B) ^ (A & B) -> A ^ B
3436   // (A | B) ^ (B & A) -> A ^ B
3437   if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)),
3438                         m_c_Or(m_Deferred(A), m_Deferred(B)))))
3439     return BinaryOperator::CreateXor(A, B);
3440
3441   // (A | ~B) ^ (~A | B) -> A ^ B
3442   // (~B | A) ^ (~A | B) -> A ^ B
3443   // (~A | B) ^ (A | ~B) -> A ^ B
3444   // (B | ~A) ^ (A | ~B) -> A ^ B
3445   if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))),
3446                       m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
3447     return BinaryOperator::CreateXor(A, B);
3448
3449   // (A & ~B) ^ (~A & B) -> A ^ B
3450   // (~B & A) ^ (~A & B) -> A ^ B
3451   // (~A & B) ^ (A & ~B) -> A ^ B
3452   // (B & ~A) ^ (A & ~B) -> A ^ B
3453   if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))),
3454                       m_c_And(m_Not(m_Deferred(A)), m_Deferred(B)))))
3455     return BinaryOperator::CreateXor(A, B);
3456
3457   // For the remaining cases we need to get rid of one of the operands.
3458   if (!Op0->hasOneUse() && !Op1->hasOneUse())
3459     return nullptr;
3460
3461   // (A | B) ^ ~(A & B) -> ~(A ^ B)
3462   // (A | B) ^ ~(B & A) -> ~(A ^ B)
3463   // (A & B) ^ ~(A | B) -> ~(A ^ B)
3464   // (A & B) ^ ~(B | A) -> ~(A ^ B)
3465   // Complexity sorting ensures the not will be on the right side.
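  // Sanity check for (A | B) ^ ~(A & B) --> ~(A ^ B): when A == B the two
  // xor operands differ (0 ^ 1 or 1 ^ 0), giving 1; when A != B both operands
  // are 1, giving 0. That is exactly ~(A ^ B).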
3466   if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
3467        match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
3468       (match(Op0, m_And(m_Value(A), m_Value(B))) &&
3469        match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
3470     return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
3471
3472   return nullptr;
3473 }
3474
3475 Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
3476                                         BinaryOperator &I) {
3477   assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
3478          I.getOperand(1) == RHS && "Should be 'xor' with these operands");
3479
3480   ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
3481   Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
3482   Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
3483
3484   if (predicatesFoldable(PredL, PredR)) {
3485     if (LHS0 == RHS1 && LHS1 == RHS0) {
3486       std::swap(LHS0, LHS1);
3487       PredL = ICmpInst::getSwappedPredicate(PredL);
3488     }
3489     if (LHS0 == RHS0 && LHS1 == RHS1) {
3490       // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
3491       unsigned Code = getICmpCode(PredL) ^ getICmpCode(PredR);
3492       bool IsSigned = LHS->isSigned() || RHS->isSigned();
3493       return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
3494     }
3495   }
3496
3497   // TODO: This can be generalized to compares of non-signbits using
3498   // decomposeBitTestICmp(). It could be enhanced more by using (something like)
3499   // foldLogOpOfMaskedICmps().
3500   const APInt *LC, *RC;
3501   if (match(LHS1, m_APInt(LC)) && match(RHS1, m_APInt(RC)) &&
3502       LHS0->getType() == RHS0->getType() &&
3503       LHS0->getType()->isIntOrIntVectorTy() &&
3504       (LHS->hasOneUse() || RHS->hasOneUse())) {
3505     // Convert xor of signbit tests to signbit test of xor'd values:
3506     // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
3507     // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0
3508     // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1
3509     // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1
3510     bool TrueIfSignedL, TrueIfSignedR;
3511     if (isSignBitCheck(PredL, *LC, TrueIfSignedL) &&
3512         isSignBitCheck(PredR, *RC, TrueIfSignedR)) {
3513       Value *XorLR = Builder.CreateXor(LHS0, RHS0);
3514       return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(XorLR) :
3515                                               Builder.CreateIsNotNeg(XorLR);
3516     }
3517
3518     // (X > C) ^ (X < C + 2) --> X != C + 1
3519     // (X < C + 2) ^ (X > C) --> X != C + 1
3520     // For correctness, we must not fold when C is non-negative and C + 2 is
3521     // negative (C + 2 wraps around the sign bit); that case is simplified by
3522     // other patterns anyway.
3523     const APInt *C1, *C2;
3524     if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_APInt(C1)) &&
3525          PredR == CmpInst::ICMP_SLT && match(RHS1, m_APInt(C2))) ||
3526         (PredL == CmpInst::ICMP_SLT && match(LHS1, m_APInt(C2)) &&
3527          PredR == CmpInst::ICMP_SGT && match(RHS1, m_APInt(C1))))
3528       if (LHS0 == RHS0 && *C1 + 2 == *C2 &&
3529           (C1->isNegative() || C2->isNonNegative()))
3530         return Builder.CreateICmpNE(LHS0,
3531                                     ConstantInt::get(LHS0->getType(), *C1 + 1));
3532   }
3533
3534   // Instead of trying to imitate the folds for and/or, decompose this 'xor'
3535   // into those logic ops. That is, try to turn this into an and-of-icmps
3536   // because we have many folds for that pattern.
3537   //
3538   // This is based on a truth table definition of xor:
3539   // X ^ Y --> (X | Y) & !(X & Y)
3540   if (Value *OrICmp = simplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
3541     // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
3542     // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
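    // For example, with unsigned compares:
    //   (icmp ult X, 4) ^ (icmp ult X, 10)
    // has OrICmp == (icmp ult X, 10) and AndICmp == (icmp ult X, 4), so after
    // inverting the 'and' side it becomes (icmp uge X, 4) & (icmp ult X, 10).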
3543     if (Value *AndICmp = simplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
3544       // TODO: Independently handle cases where the 'and' side is a constant.
3545       ICmpInst *X = nullptr, *Y = nullptr;
3546       if (OrICmp == LHS && AndICmp == RHS) {
3547         // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS --> X & !Y
3548         X = LHS;
3549         Y = RHS;
3550       }
3551       if (OrICmp == RHS && AndICmp == LHS) {
3552         // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS --> !Y & X
3553         X = RHS;
3554         Y = LHS;
3555       }
3556       if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) {
3557         // Invert the predicate of 'Y', thus inverting its output.
3558         Y->setPredicate(Y->getInversePredicate());
3559         // So, are there other uses of Y?
3560         if (!Y->hasOneUse()) {
3561           // We need to adapt other uses of Y though. Get a value that matches
3562           // the original value of Y before inversion. While this increases
3563           // immediate instruction count, we have just ensured that all the
3564           // users are freely-invertible, so that 'not' *will* get folded away.
3565           BuilderTy::InsertPointGuard Guard(Builder);
3566           // Set insertion point to right after the Y.
3567           Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
3568           Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
3569           // Replace all uses of Y (excluding the one in NotY!) with NotY.
3570           Worklist.pushUsersToWorkList(*Y);
3571           Y->replaceUsesWithIf(NotY,
3572                                [NotY](Use &U) { return U.getUser() != NotY; });
3573         }
3574         // All done.
3575         return Builder.CreateAnd(LHS, RHS);
3576       }
3577     }
3578   }
3579
3580   return nullptr;
3581 }
3582
3583 /// If we have a masked merge, in the canonical form of:
3584 /// (assuming that A only has one use.)
3585 ///   |     A     |  |B|
3586 ///   ((x ^ y) & M) ^ y
3587 ///    |  D  |
3588 /// * If M is inverted:
3589 ///    |  D  |
3590 ///   ((x ^ y) & ~M) ^ y
3591 /// We can canonicalize by swapping the final xor operand
3592 /// to eliminate the 'not' of the mask.
3593 ///   ((x ^ y) & M) ^ x
3594 /// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
3595 /// because that shortens the dependency chain and improves analysis:
3596 ///   (x & M) | (y & ~M)
3597 static Instruction *visitMaskedMerge(BinaryOperator &I,
3598                                      InstCombiner::BuilderTy &Builder) {
3599   Value *B, *X, *D;
3600   Value *M;
3601   if (!match(&I, m_c_Xor(m_Value(B),
3602                          m_OneUse(m_c_And(
3603                              m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)),
3604                                           m_Value(D)),
3605                              m_Value(M))))))
3606     return nullptr;
3607
3608   Value *NotM;
3609   if (match(M, m_Not(m_Value(NotM)))) {
3610     // De-invert the mask and swap the value in the B part.
3611     Value *NewA = Builder.CreateAnd(D, NotM);
3612     return BinaryOperator::CreateXor(NewA, X);
3613   }
3614
3615   Constant *C;
3616   if (D->hasOneUse() && match(M, m_Constant(C))) {
3617     // Propagating undef is unsafe. Clamp undef elements to -1.
3618     Type *EltTy = C->getType()->getScalarType();
3619     C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy));
3620     // Unfold.
3621     Value *LHS = Builder.CreateAnd(X, C);
3622     Value *NotC = Builder.CreateNot(C);
3623     Value *RHS = Builder.CreateAnd(B, NotC);
3624     return BinaryOperator::CreateOr(LHS, RHS);
3625   }
3626
3627   return nullptr;
3628 }
3629
3630 // Transform
3631 //   ~(x ^ y)
3632 // into:
3633 //   (~x) ^ y
3634 // or into
3635 //   x ^ (~y)
3636 static Instruction *sinkNotIntoXor(BinaryOperator &I, Value *X, Value *Y,
3637                                    InstCombiner::BuilderTy &Builder) {
3638   // We only want to do the transform if it is free to do so.
3639   if (InstCombiner::isFreeToInvert(X, X->hasOneUse())) {
3640     // Ok, good.
3641 } else if (InstCombiner::isFreeToInvert(Y, Y->hasOneUse())) { 3642 std::swap(X, Y); 3643 } else 3644 return nullptr; 3645 3646 Value *NotX = Builder.CreateNot(X, X->getName() + ".not"); 3647 return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan"); 3648 } 3649 3650 static Instruction *foldNotXor(BinaryOperator &I, 3651 InstCombiner::BuilderTy &Builder) { 3652 Value *X, *Y; 3653 // FIXME: one-use check is not needed in general, but currently we are unable 3654 // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182) 3655 if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y)))))) 3656 return nullptr; 3657 3658 if (Instruction *NewXor = sinkNotIntoXor(I, X, Y, Builder)) 3659 return NewXor; 3660 3661 auto hasCommonOperand = [](Value *A, Value *B, Value *C, Value *D) { 3662 return A == C || A == D || B == C || B == D; 3663 }; 3664 3665 Value *A, *B, *C, *D; 3666 // Canonicalize ~((A & B) ^ (A | ?)) -> (A & B) | ~(A | ?) 3667 // 4 commuted variants 3668 if (match(X, m_And(m_Value(A), m_Value(B))) && 3669 match(Y, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) { 3670 Value *NotY = Builder.CreateNot(Y); 3671 return BinaryOperator::CreateOr(X, NotY); 3672 }; 3673 3674 // Canonicalize ~((A | ?) ^ (A & B)) -> (A & B) | ~(A | ?) 3675 // 4 commuted variants 3676 if (match(Y, m_And(m_Value(A), m_Value(B))) && 3677 match(X, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) { 3678 Value *NotX = Builder.CreateNot(X); 3679 return BinaryOperator::CreateOr(Y, NotX); 3680 }; 3681 3682 return nullptr; 3683 } 3684 3685 /// Canonicalize a shifty way to code absolute value to the more common pattern 3686 /// that uses negation and select. 3687 static Instruction *canonicalizeAbs(BinaryOperator &Xor, 3688 InstCombiner::BuilderTy &Builder) { 3689 assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction."); 3690 3691 // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1. 3692 // We're relying on the fact that we only do this transform when the shift has 3693 // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase 3694 // instructions). 3695 Value *Op0 = Xor.getOperand(0), *Op1 = Xor.getOperand(1); 3696 if (Op0->hasNUses(2)) 3697 std::swap(Op0, Op1); 3698 3699 Type *Ty = Xor.getType(); 3700 Value *A; 3701 const APInt *ShAmt; 3702 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) && 3703 Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 && 3704 match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) { 3705 // Op1 = ashr i32 A, 31 ; smear the sign bit 3706 // xor (add A, Op1), Op1 ; add -1 and flip bits if negative 3707 // --> (A < 0) ? -A : A 3708 Value *IsNeg = Builder.CreateIsNeg(A); 3709 // Copy the nuw/nsw flags from the add to the negate. 3710 auto *Add = cast<BinaryOperator>(Op0); 3711 Value *NegA = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(), 3712 Add->hasNoSignedWrap()); 3713 return SelectInst::Create(IsNeg, NegA, A); 3714 } 3715 return nullptr; 3716 } 3717 3718 // Transform 3719 // z = ~(x &/| y) 3720 // into: 3721 // z = ((~x) |/& (~y)) 3722 // iff both x and y are free to invert and all uses of z can be freely updated. 3723 bool InstCombinerImpl::sinkNotIntoLogicalOp(Instruction &I) { 3724 Value *Op0, *Op1; 3725 if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1)))) 3726 return false; 3727 3728 // If this logic op has not been simplified yet, just bail out and let that 3729 // happen first. Otherwise, the code below may wrongly invert. 
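  // (E.g. 'and i1 %x, %x' should first be simplified to '%x' rather than
  // processed here.)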
3730 if (Op0 == Op1) 3731 return false; 3732 3733 Instruction::BinaryOps NewOpc = 3734 match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And; 3735 bool IsBinaryOp = isa<BinaryOperator>(I); 3736 3737 // Can our users be adapted? 3738 if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr)) 3739 return false; 3740 3741 // And can the operands be adapted? 3742 for (Value *Op : {Op0, Op1}) 3743 if (!(InstCombiner::isFreeToInvert(Op, /*WillInvertAllUses=*/true) && 3744 (match(Op, m_ImmConstant()) || 3745 (isa<Instruction>(Op) && 3746 InstCombiner::canFreelyInvertAllUsersOf(cast<Instruction>(Op), 3747 /*IgnoredUser=*/&I))))) 3748 return false; 3749 3750 for (Value **Op : {&Op0, &Op1}) { 3751 Value *NotOp; 3752 if (auto *C = dyn_cast<Constant>(*Op)) { 3753 NotOp = ConstantExpr::getNot(C); 3754 } else { 3755 Builder.SetInsertPoint( 3756 &*cast<Instruction>(*Op)->getInsertionPointAfterDef()); 3757 NotOp = Builder.CreateNot(*Op, (*Op)->getName() + ".not"); 3758 (*Op)->replaceUsesWithIf( 3759 NotOp, [NotOp](Use &U) { return U.getUser() != NotOp; }); 3760 freelyInvertAllUsersOf(NotOp, /*IgnoredUser=*/&I); 3761 } 3762 *Op = NotOp; 3763 } 3764 3765 Builder.SetInsertPoint(I.getInsertionPointAfterDef()); 3766 Value *NewLogicOp; 3767 if (IsBinaryOp) 3768 NewLogicOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not"); 3769 else 3770 NewLogicOp = 3771 Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not"); 3772 3773 replaceInstUsesWith(I, NewLogicOp); 3774 // We can not just create an outer `not`, it will most likely be immediately 3775 // folded back, reconstructing our initial pattern, and causing an 3776 // infinite combine loop, so immediately manually fold it away. 3777 freelyInvertAllUsersOf(NewLogicOp); 3778 return true; 3779 } 3780 3781 // Transform 3782 // z = (~x) &/| y 3783 // into: 3784 // z = ~(x |/& (~y)) 3785 // iff y is free to invert and all uses of z can be freely updated. 3786 bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) { 3787 Value *Op0, *Op1; 3788 if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1)))) 3789 return false; 3790 Instruction::BinaryOps NewOpc = 3791 match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And; 3792 bool IsBinaryOp = isa<BinaryOperator>(I); 3793 3794 Value *NotOp0 = nullptr; 3795 Value *NotOp1 = nullptr; 3796 Value **OpToInvert = nullptr; 3797 if (match(Op0, m_Not(m_Value(NotOp0))) && 3798 InstCombiner::isFreeToInvert(Op1, /*WillInvertAllUses=*/true) && 3799 (match(Op1, m_ImmConstant()) || 3800 (isa<Instruction>(Op1) && 3801 InstCombiner::canFreelyInvertAllUsersOf(cast<Instruction>(Op1), 3802 /*IgnoredUser=*/&I)))) { 3803 Op0 = NotOp0; 3804 OpToInvert = &Op1; 3805 } else if (match(Op1, m_Not(m_Value(NotOp1))) && 3806 InstCombiner::isFreeToInvert(Op0, /*WillInvertAllUses=*/true) && 3807 (match(Op0, m_ImmConstant()) || 3808 (isa<Instruction>(Op0) && 3809 InstCombiner::canFreelyInvertAllUsersOf(cast<Instruction>(Op0), 3810 /*IgnoredUser=*/&I)))) { 3811 Op1 = NotOp1; 3812 OpToInvert = &Op0; 3813 } else 3814 return false; 3815 3816 // And can our users be adapted? 
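  // Every user of I must be invertible for free, so that the inversion of the
  // new op (applied via freelyInvertAllUsersOf below) does not add any
  // instructions.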
3817 if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr)) 3818 return false; 3819 3820 if (auto *C = dyn_cast<Constant>(*OpToInvert)) { 3821 *OpToInvert = ConstantExpr::getNot(C); 3822 } else { 3823 Builder.SetInsertPoint( 3824 &*cast<Instruction>(*OpToInvert)->getInsertionPointAfterDef()); 3825 Value *NotOpToInvert = 3826 Builder.CreateNot(*OpToInvert, (*OpToInvert)->getName() + ".not"); 3827 (*OpToInvert)->replaceUsesWithIf(NotOpToInvert, [NotOpToInvert](Use &U) { 3828 return U.getUser() != NotOpToInvert; 3829 }); 3830 freelyInvertAllUsersOf(NotOpToInvert, /*IgnoredUser=*/&I); 3831 *OpToInvert = NotOpToInvert; 3832 } 3833 3834 Builder.SetInsertPoint(&*I.getInsertionPointAfterDef()); 3835 Value *NewBinOp; 3836 if (IsBinaryOp) 3837 NewBinOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not"); 3838 else 3839 NewBinOp = Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not"); 3840 replaceInstUsesWith(I, NewBinOp); 3841 // We can not just create an outer `not`, it will most likely be immediately 3842 // folded back, reconstructing our initial pattern, and causing an 3843 // infinite combine loop, so immediately manually fold it away. 3844 freelyInvertAllUsersOf(NewBinOp); 3845 return true; 3846 } 3847 3848 Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) { 3849 Value *NotOp; 3850 if (!match(&I, m_Not(m_Value(NotOp)))) 3851 return nullptr; 3852 3853 // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand. 3854 // We must eliminate the and/or (one-use) for these transforms to not increase 3855 // the instruction count. 3856 // 3857 // ~(~X & Y) --> (X | ~Y) 3858 // ~(Y & ~X) --> (X | ~Y) 3859 // 3860 // Note: The logical matches do not check for the commuted patterns because 3861 // those are handled via SimplifySelectsFeedingBinaryOp(). 3862 Type *Ty = I.getType(); 3863 Value *X, *Y; 3864 if (match(NotOp, m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y))))) { 3865 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3866 return BinaryOperator::CreateOr(X, NotY); 3867 } 3868 if (match(NotOp, m_OneUse(m_LogicalAnd(m_Not(m_Value(X)), m_Value(Y))))) { 3869 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3870 return SelectInst::Create(X, ConstantInt::getTrue(Ty), NotY); 3871 } 3872 3873 // ~(~X | Y) --> (X & ~Y) 3874 // ~(Y | ~X) --> (X & ~Y) 3875 if (match(NotOp, m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y))))) { 3876 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3877 return BinaryOperator::CreateAnd(X, NotY); 3878 } 3879 if (match(NotOp, m_OneUse(m_LogicalOr(m_Not(m_Value(X)), m_Value(Y))))) { 3880 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3881 return SelectInst::Create(X, NotY, ConstantInt::getFalse(Ty)); 3882 } 3883 3884 // Is this a 'not' (~) fed by a binary operator? 
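  // Several of the folds below use the identity ~Z == -1 - Z; e.g.
  // ~(X + C) == -1 - (X + C) == (-1 - C) - X == ~C - X.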
3885 BinaryOperator *NotVal; 3886 if (match(NotOp, m_BinOp(NotVal))) { 3887 // ~((-X) | Y) --> (X - 1) & (~Y) 3888 if (match(NotVal, 3889 m_OneUse(m_c_Or(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))) { 3890 Value *DecX = Builder.CreateAdd(X, ConstantInt::getAllOnesValue(Ty)); 3891 Value *NotY = Builder.CreateNot(Y); 3892 return BinaryOperator::CreateAnd(DecX, NotY); 3893 } 3894 3895 // ~(~X >>s Y) --> (X >>s Y) 3896 if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y)))) 3897 return BinaryOperator::CreateAShr(X, Y); 3898 3899 // Bit-hack form of a signbit test: 3900 // iN ~X >>s (N-1) --> sext i1 (X > -1) to iN 3901 unsigned FullShift = Ty->getScalarSizeInBits() - 1; 3902 if (match(NotVal, m_OneUse(m_AShr(m_Value(X), m_SpecificInt(FullShift))))) { 3903 Value *IsNotNeg = Builder.CreateIsNotNeg(X, "isnotneg"); 3904 return new SExtInst(IsNotNeg, Ty); 3905 } 3906 3907 // If we are inverting a right-shifted constant, we may be able to eliminate 3908 // the 'not' by inverting the constant and using the opposite shift type. 3909 // Canonicalization rules ensure that only a negative constant uses 'ashr', 3910 // but we must check that in case that transform has not fired yet. 3911 3912 // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits) 3913 Constant *C; 3914 if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) && 3915 match(C, m_Negative())) { 3916 // We matched a negative constant, so propagating undef is unsafe. 3917 // Clamp undef elements to -1. 3918 Type *EltTy = Ty->getScalarType(); 3919 C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy)); 3920 return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y); 3921 } 3922 3923 // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits) 3924 if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) && 3925 match(C, m_NonNegative())) { 3926 // We matched a non-negative constant, so propagating undef is unsafe. 3927 // Clamp undef elements to 0. 3928 Type *EltTy = Ty->getScalarType(); 3929 C = Constant::replaceUndefsWith(C, ConstantInt::getNullValue(EltTy)); 3930 return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y); 3931 } 3932 3933 // ~(X + C) --> ~C - X 3934 if (match(NotVal, m_c_Add(m_Value(X), m_ImmConstant(C)))) 3935 return BinaryOperator::CreateSub(ConstantExpr::getNot(C), X); 3936 3937 // ~(X - Y) --> ~X + Y 3938 // FIXME: is it really beneficial to sink the `not` here? 
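    // (~(X - Y) == -1 - (X - Y) == (-1 - X) + Y == ~X + Y.)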
3939     if (match(NotVal, m_Sub(m_Value(X), m_Value(Y))))
3940       if (isa<Constant>(X) || NotVal->hasOneUse())
3941         return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y);
3942
3943     // ~(~X + Y) --> X - Y
3944     if (match(NotVal, m_c_Add(m_Not(m_Value(X)), m_Value(Y))))
3945       return BinaryOperator::CreateWithCopiedFlags(Instruction::Sub, X, Y,
3946                                                    NotVal);
3947   }
3948
3949   // not (cmp A, B) = !cmp A, B
3950   CmpInst::Predicate Pred;
3951   if (match(NotOp, m_Cmp(Pred, m_Value(), m_Value())) &&
3952       (NotOp->hasOneUse() ||
3953        InstCombiner::canFreelyInvertAllUsersOf(cast<Instruction>(NotOp),
3954                                                /*IgnoredUser=*/nullptr))) {
3955     cast<CmpInst>(NotOp)->setPredicate(CmpInst::getInversePredicate(Pred));
3956     freelyInvertAllUsersOf(NotOp);
3957     return &I;
3958   }
3959
3960   // Move a 'not' ahead of casts of a bool to enable logic reduction:
3961   // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
3962   if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) && X->getType()->isIntOrIntVectorTy(1)) {
3963     Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
3964     Value *NotX = Builder.CreateNot(X);
3965     Value *Sext = Builder.CreateSExt(NotX, SextTy);
3966     return CastInst::CreateBitOrPointerCast(Sext, Ty);
3967   }
3968
3969   if (auto *NotOpI = dyn_cast<Instruction>(NotOp))
3970     if (sinkNotIntoLogicalOp(*NotOpI))
3971       return &I;
3972
3973   // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
3974   // ~min(~X, ~Y) --> max(X, Y)
3975   // ~max(~X, Y) --> min(X, ~Y)
3976   auto *II = dyn_cast<IntrinsicInst>(NotOp);
3977   if (II && II->hasOneUse()) {
3978     if (match(NotOp, m_MaxOrMin(m_Value(X), m_Value(Y))) &&
3979         isFreeToInvert(X, X->hasOneUse()) &&
3980         isFreeToInvert(Y, Y->hasOneUse())) {
3981       Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3982       Value *NotX = Builder.CreateNot(X);
3983       Value *NotY = Builder.CreateNot(Y);
3984       Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, NotX, NotY);
3985       return replaceInstUsesWith(I, InvMaxMin);
3986     }
3987     if (match(NotOp, m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y)))) {
3988       Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
3989       Value *NotY = Builder.CreateNot(Y);
3990       Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotY);
3991       return replaceInstUsesWith(I, InvMaxMin);
3992     }
3993
3994     if (II->getIntrinsicID() == Intrinsic::is_fpclass) {
3995       ConstantInt *ClassMask = cast<ConstantInt>(II->getArgOperand(1));
3996       II->setArgOperand(
3997           1, ConstantInt::get(ClassMask->getType(),
3998                               ~ClassMask->getZExtValue() & fcAllFlags));
3999       return replaceInstUsesWith(I, II);
4000     }
4001   }
4002
4003   if (NotOp->hasOneUse()) {
4004     // Pull 'not' into the operands of a select if both operands are one-use
4005     // compares, or one is a one-use compare and the other is a constant.
4006     // Inverting the predicates eliminates the 'not' operation.
4007     // Example:
4008     //   not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?)) -->
4009     //       select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?)
4010     //   not (select ?, (cmp TPred, ?, ?), true) -->
4011     //       select ?, (cmp InvTPred, ?, ?), false
4012     if (auto *Sel = dyn_cast<SelectInst>(NotOp)) {
4013       Value *TV = Sel->getTrueValue();
4014       Value *FV = Sel->getFalseValue();
4015       auto *CmpT = dyn_cast<CmpInst>(TV);
4016       auto *CmpF = dyn_cast<CmpInst>(FV);
4017       bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(TV);
4018       bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(FV);
4019       if (InvertibleT && InvertibleF) {
4020         if (CmpT)
4021           CmpT->setPredicate(CmpT->getInversePredicate());
4022         else
4023           Sel->setTrueValue(ConstantExpr::getNot(cast<Constant>(TV)));
4024         if (CmpF)
4025           CmpF->setPredicate(CmpF->getInversePredicate());
4026         else
4027           Sel->setFalseValue(ConstantExpr::getNot(cast<Constant>(FV)));
4028         return replaceInstUsesWith(I, Sel);
4029       }
4030     }
4031   }
4032
4033   if (Instruction *NewXor = foldNotXor(I, Builder))
4034     return NewXor;
4035
4036   return nullptr;
4037 }
4038
4039 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
4040 // here. We should standardize that construct where it is needed or choose some
4041 // other way to ensure that commutated variants of patterns are not missed.
4042 Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
4043   if (Value *V = simplifyXorInst(I.getOperand(0), I.getOperand(1),
4044                                  SQ.getWithInstruction(&I)))
4045     return replaceInstUsesWith(I, V);
4046
4047   if (SimplifyAssociativeOrCommutative(I))
4048     return &I;
4049
4050   if (Instruction *X = foldVectorBinop(I))
4051     return X;
4052
4053   if (Instruction *Phi = foldBinopWithPhiOperands(I))
4054     return Phi;
4055
4056   if (Instruction *NewXor = foldXorToXor(I, Builder))
4057     return NewXor;
4058
4059   // (A&B)^(A&C) -> A&(B^C) etc
4060   if (Value *V = foldUsingDistributiveLaws(I))
4061     return replaceInstUsesWith(I, V);
4062
4063   // See if we can simplify any instructions used by the instruction whose sole
4064   // purpose is to compute bits we don't care about.
4065   if (SimplifyDemandedInstructionBits(I))
4066     return &I;
4067
4068   if (Value *V = SimplifyBSwap(I, Builder))
4069     return replaceInstUsesWith(I, V);
4070
4071   if (Instruction *R = foldNot(I))
4072     return R;
4073
4074   // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
4075   // This is a special case in haveNoCommonBitsSet, but the computeKnownBits
4076   // calls in there are unnecessary as SimplifyDemandedInstructionBits should
4077   // have already taken care of those cases.
4078   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
4079   Value *M;
4080   if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
4081                         m_c_And(m_Deferred(M), m_Value()))))
4082     return BinaryOperator::CreateOr(Op0, Op1);
4083
4084   if (Instruction *Xor = visitMaskedMerge(I, Builder))
4085     return Xor;
4086
4087   Value *X, *Y;
4088   Constant *C1;
4089   if (match(Op1, m_Constant(C1))) {
4090     Constant *C2;
4091
4092     if (match(Op0, m_OneUse(m_Or(m_Value(X), m_ImmConstant(C2)))) &&
4093         match(C1, m_ImmConstant())) {
4094       // (X | C2) ^ C1 --> (X & ~C2) ^ (C1^C2)
4095       C2 = Constant::replaceUndefsWith(
4096           C2, Constant::getAllOnesValue(C2->getType()->getScalarType()));
4097       Value *And = Builder.CreateAnd(
4098           X, Constant::mergeUndefsWith(ConstantExpr::getNot(C2), C1));
4099       return BinaryOperator::CreateXor(
4100           And, Constant::mergeUndefsWith(ConstantExpr::getXor(C1, C2), C1));
4101     }
4102
4103     // Use DeMorgan and reassociation to eliminate a 'not' op.
4104     if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) {
4105       // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
4106       Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2));
4107       return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1));
4108     }
4109     if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) {
4110       // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
4111       Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2));
4112       return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1));
4113     }
4114
4115     // Convert xor ([trunc] (ashr X, BW-1)), C =>
4116     //   select(X >s -1, C, ~C)
4117     // The ashr creates an all-zeros or all-ones value, which then optionally
4118     // inverts the constant, depending on whether X is negative.
4119     const APInt *CA;
4120     if (match(Op0, m_OneUse(m_TruncOrSelf(
4121                        m_AShr(m_Value(X), m_APIntAllowUndef(CA))))) &&
4122         *CA == X->getType()->getScalarSizeInBits() - 1 &&
4123         !match(C1, m_AllOnes())) {
4124       assert(!C1->isZeroValue() && "Unexpected xor with 0");
4125       Value *IsNotNeg = Builder.CreateIsNotNeg(X);
4126       return SelectInst::Create(IsNotNeg, Op1, Builder.CreateNot(Op1));
4127     }
4128   }
4129
4130   Type *Ty = I.getType();
4131   {
4132     const APInt *RHSC;
4133     if (match(Op1, m_APInt(RHSC))) {
4134       Value *X;
4135       const APInt *C;
4136       // (C - X) ^ signmaskC --> (C + signmaskC) - X
4137       if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X))))
4138         return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C + *RHSC), X);
4139
4140       // (X + C) ^ signmaskC --> X + (C + signmaskC)
4141       if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C))))
4142         return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C + *RHSC));
4143
4144       // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0
4145       if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
4146           MaskedValueIsZero(X, *C, 0, &I))
4147         return BinaryOperator::CreateXor(X, ConstantInt::get(Ty, *C ^ *RHSC));
4148
4149       // When X is a power-of-two or zero and zero input is poison:
4150       // ctlz(i32 X) ^ 31 --> cttz(X)
4151       // cttz(i32 X) ^ 31 --> ctlz(X)
4152       auto *II = dyn_cast<IntrinsicInst>(Op0);
4153       if (II && II->hasOneUse() && *RHSC == Ty->getScalarSizeInBits() - 1) {
4154         Intrinsic::ID IID = II->getIntrinsicID();
4155         if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
4156             match(II->getArgOperand(1), m_One()) &&
4157             isKnownToBeAPowerOfTwo(II->getArgOperand(0), /*OrZero*/ true)) {
4158           IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
4159           Function *F = Intrinsic::getDeclaration(II->getModule(), IID, Ty);
4160           return CallInst::Create(F, {II->getArgOperand(0), Builder.getTrue()});
4161         }
4162       }
4163
4164       // If RHSC is inverting the remaining bits of shifted X,
4165       // canonicalize to a 'not' before the shift to help SCEV and codegen:
4166       // (X << C) ^ RHSC --> ~X << C
4167       if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_APInt(C)))) &&
4168           *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).shl(*C)) {
4169         Value *NotX = Builder.CreateNot(X);
4170         return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *C));
4171       }
4172       // (X >>u C) ^ RHSC --> ~X >>u C
4173       if (match(Op0, m_OneUse(m_LShr(m_Value(X), m_APInt(C)))) &&
4174           *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).lshr(*C)) {
4175         Value *NotX = Builder.CreateNot(X);
4176         return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *C));
4177       }
4178       // TODO: We could handle 'ashr' here as well.
That would be matching 4179 // a 'not' op and moving it before the shift. Doing that requires 4180 // preventing the inverse fold in canShiftBinOpWithConstantRHS(). 4181 } 4182 } 4183 4184 // FIXME: This should not be limited to scalar (pull into APInt match above). 4185 { 4186 Value *X; 4187 ConstantInt *C1, *C2, *C3; 4188 // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3) 4189 if (match(Op1, m_ConstantInt(C3)) && 4190 match(Op0, m_LShr(m_Xor(m_Value(X), m_ConstantInt(C1)), 4191 m_ConstantInt(C2))) && 4192 Op0->hasOneUse()) { 4193 // fold (C1 >> C2) ^ C3 4194 APInt FoldConst = C1->getValue().lshr(C2->getValue()); 4195 FoldConst ^= C3->getValue(); 4196 // Prepare the two operands. 4197 auto *Opnd0 = Builder.CreateLShr(X, C2); 4198 Opnd0->takeName(Op0); 4199 return BinaryOperator::CreateXor(Opnd0, ConstantInt::get(Ty, FoldConst)); 4200 } 4201 } 4202 4203 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 4204 return FoldedLogic; 4205 4206 // Y ^ (X | Y) --> X & ~Y 4207 // Y ^ (Y | X) --> X & ~Y 4208 if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0))))) 4209 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0)); 4210 // (X | Y) ^ Y --> X & ~Y 4211 // (Y | X) ^ Y --> X & ~Y 4212 if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1))))) 4213 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1)); 4214 4215 // Y ^ (X & Y) --> ~X & Y 4216 // Y ^ (Y & X) --> ~X & Y 4217 if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0))))) 4218 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X)); 4219 // (X & Y) ^ Y --> ~X & Y 4220 // (Y & X) ^ Y --> ~X & Y 4221 // Canonical form is (X & C) ^ C; don't touch that. 4222 // TODO: A 'not' op is better for analysis and codegen, but demanded bits must 4223 // be fixed to prefer that (otherwise we get infinite looping). 4224 if (!match(Op1, m_Constant()) && 4225 match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1))))) 4226 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X)); 4227 4228 Value *A, *B, *C; 4229 // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants. 4230 if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))), 4231 m_OneUse(m_c_Or(m_Deferred(A), m_Value(C)))))) 4232 return BinaryOperator::CreateXor( 4233 Builder.CreateAnd(Builder.CreateNot(A), C), B); 4234 4235 // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants. 4236 if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))), 4237 m_OneUse(m_c_Or(m_Deferred(B), m_Value(C)))))) 4238 return BinaryOperator::CreateXor( 4239 Builder.CreateAnd(Builder.CreateNot(B), C), A); 4240 4241 // (A & B) ^ (A ^ B) -> (A | B) 4242 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 4243 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B)))) 4244 return BinaryOperator::CreateOr(A, B); 4245 // (A ^ B) ^ (A & B) -> (A | B) 4246 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && 4247 match(Op1, m_c_And(m_Specific(A), m_Specific(B)))) 4248 return BinaryOperator::CreateOr(A, B); 4249 4250 // (A & ~B) ^ ~A -> ~(A & B) 4251 // (~B & A) ^ ~A -> ~(A & B) 4252 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) && 4253 match(Op1, m_Not(m_Specific(A)))) 4254 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B)); 4255 4256 // (~A & B) ^ A --> A | B -- There are 4 commuted variants. 
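  // (For each bit: if A is set this is 0 ^ 1 == 1; if A is clear it is
  //  B ^ 0 == B. That is A | B.)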
4257   if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(A)), m_Value(B)), m_Deferred(A))))
4258     return BinaryOperator::CreateOr(A, B);
4259
4260   // (~A | B) ^ A --> ~(A & B)
4261   if (match(Op0, m_OneUse(m_c_Or(m_Not(m_Specific(Op1)), m_Value(B)))))
4262     return BinaryOperator::CreateNot(Builder.CreateAnd(Op1, B));
4263
4264   // A ^ (~A | B) --> ~(A & B)
4265   if (match(Op1, m_OneUse(m_c_Or(m_Not(m_Specific(Op0)), m_Value(B)))))
4266     return BinaryOperator::CreateNot(Builder.CreateAnd(Op0, B));
4267
4268   // (A | B) ^ (A | C) --> (B ^ C) & ~A -- There are 4 commuted variants.
4269   // TODO: Loosen one-use restriction if common operand is a constant.
4270   Value *D;
4271   if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B)))) &&
4272       match(Op1, m_OneUse(m_Or(m_Value(C), m_Value(D))))) {
4273     if (B == C || B == D)
4274       std::swap(A, B);
4275     if (A == C)
4276       std::swap(C, D);
4277     if (A == D) {
4278       Value *NotA = Builder.CreateNot(A);
4279       return BinaryOperator::CreateAnd(Builder.CreateXor(B, C), NotA);
4280     }
4281   }
4282
4283   if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
4284     if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
4285       if (Value *V = foldXorOfICmps(LHS, RHS, I))
4286         return replaceInstUsesWith(I, V);
4287
4288   if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
4289     return CastedXor;
4290
4291   if (Instruction *Abs = canonicalizeAbs(I, Builder))
4292     return Abs;
4293
4294   // Otherwise, if all else fails, try to hoist the xor-by-constant:
4295   //   (X ^ C) ^ Y --> (X ^ Y) ^ C
4296   // Just like we do in other places, we completely avoid the fold
4297   // for constantexprs, at least to avoid an endless combine loop.
4298   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_CombineAnd(m_Value(X),
4299                                                     m_Unless(m_ConstantExpr())),
4300                                        m_ImmConstant(C1))),
4301                         m_Value(Y))))
4302     return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);
4303
4304   if (Instruction *R = reassociateForUses(I, Builder))
4305     return R;
4306
4307   if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
4308     return Canonicalized;
4309
4310   if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
4311     return Folded;
4312
4313   if (Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(I))
4314     return Folded;
4315
4316   return nullptr;
4317 }
4318