//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// This is the complement of getICmpCode, which turns an opcode and two
/// operands into either a constant true or false, or a brand new ICmp
/// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction.
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either an FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {
  FCmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForFCmpCode(Code, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateFCmp(NewPred, LHS, RHS);
}

/// Transform BITWISE_OP(BSWAP(A), BSWAP(B)) or
/// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B)).
/// \param I Binary operator to transform.
/// \return Pointer to node that must replace the original binary operator, or
///         null pointer if no transformation was made.
static Value *SimplifyBSwap(BinaryOperator &I,
                            InstCombiner::BuilderTy &Builder) {
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");

  Value *OldLHS = I.getOperand(0);
  Value *OldRHS = I.getOperand(1);

  Value *NewLHS;
  if (!match(OldLHS, m_BSwap(m_Value(NewLHS))))
    return nullptr;

  Value *NewRHS;
  const APInt *C;

  if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) {
    // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
    if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse())
      return nullptr;
    // NewRHS initialized by the matcher.
  } else if (match(OldRHS, m_APInt(C))) {
    // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
    if (!OldLHS->hasOneUse())
      return nullptr;
    NewRHS = ConstantInt::get(I.getType(), C->byteSwap());
  } else
    return nullptr;

  Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
  Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
                                          I.getType());
  return Builder.CreateCall(F, BinOp);
}

/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi.
/// isSigned indicates whether to treat V, Lo, and Hi as signed or not.
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V <  Hi --> V <  Hi
  // V <  Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V <  Hi --> V - Lo u<  Hi - Lo
  // V <  Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
///   Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
///   Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C and C might or might not contain any
/// number of one bits and zero bits.
///   Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in the above descriptions "==" should be replaced by "!=".
///   Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
///   (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
///   (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes = 1,
  AMask_NotAllOnes = 2,
  BMask_AllOnes = 4,
  BMask_NotAllOnes = 8,
  Mask_AllZeros = 16,
  Mask_NotAllZeros = 32,
  AMask_Mixed = 64,
  AMask_NotMixed = 128,
  BMask_Mixed = 256,
  BMask_NotMixed = 512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  const APInt *ConstA = nullptr, *ConstB = nullptr, *ConstC = nullptr;
  match(A, m_APInt(ConstA));
  match(B, m_APInt(ConstB));
  match(C, m_APInt(ConstC));
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = ConstA && ConstA->isPowerOf2();
  bool IsBPow2 = ConstB && ConstB->isPowerOf2();
  unsigned MaskVal = 0;
  if (ConstC && ConstC->isZero()) {
    // if C is zero, then both A and B qualify as mask
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ConstA && ConstC && ConstC->isSubsetOf(*ConstA)) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}

// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *LHS, Value *RHS,
                                 CmpInst::Predicate &Pred, Value *&X,
                                 Value *&Y, Value *&Z) {
  APInt Mask;
  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
    return false;

  Y = ConstantInt::get(X->getType(), Mask);
  Z = ConstantInt::get(X->getType(), 0);
  return true;
}

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
static Optional<std::pair<unsigned, unsigned>>
getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, Value *&D, Value *&E,
                         ICmpInst *LHS, ICmpInst *RHS,
                         ICmpInst::Predicate &PredL,
                         ICmpInst::Predicate &PredR) {
  // Don't allow pointers. Splat vectors are fine.
  if (!LHS->getOperand(0)->getType()->isIntOrIntVectorTy() ||
      !RHS->getOperand(0)->getType()->isIntOrIntVectorTy())
    return None;

  // Here comes the tricky part:
  //  LHS might be of the form L11 & L12 == X, X == L21 & L22,
  //  and L11 & L12 == L21 & L22. The same goes for RHS.
  //  Now we must find those components L** and R**, that are equal, so
  //  that we can extract the parameters A, B, C, D, and E for the canonical
  //  above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
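  // For example (illustrative): (icmp ult %x, 8) decomposes into the bit test
  // (icmp eq (and %x, -8), 0), i.e. X = %x, Y = -8, Z = 0.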
  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return None;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool Ok = false;
  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return None;
    }
    E = R2;
    R1 = nullptr;
    Ok = true;
  } else {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return None;

  // Look for ANDs on the right side of the RHS icmp.
  if (!Ok) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R1;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R1;
      Ok = true;
    } else {
      return None;
    }

    assert(Ok && "Failed to find AND on the right side of the RHS icmp.");
  }

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return Optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E).
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //   !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle the case where B, C, D, and E are constant.
  //
  const APInt *BCst, *CCst, *DCst, *OrigECst;
  if (!match(B, m_APInt(BCst)) || !match(C, m_APInt(CCst)) ||
      !match(D, m_APInt(DCst)) || !match(E, m_APInt(OrigECst)))
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  //   (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  //   (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  APInt ECst = *OrigECst;
  if (PredR != NewCC)
    ECst ^= *DCst;

  // If B or D is zero, skip because LHS or RHS can be trivially folded by
  // other folding rules and this pattern won't apply any more.
  if (*BCst == 0 || *DCst == 0)
    return nullptr;

  // If B and D don't intersect, ie. (B & D) == 0, no folding because we can't
  // deduce anything from it.
  // For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding.
  if ((*BCst & *DCst) == 0)
    return nullptr;

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  //    (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection
  //    of B and D has only one bit set) and,
  //
  // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
  //    words, the bits of E within the intersection of B and D are zero), that
  //    is, ((B & D) & E) == 0
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((*BCst & *DCst) & ECst) == 0) &&
      (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
    APInt BorD = *BCst | *DCst;
    APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
    Value *NewMask = ConstantInt::get(A->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(A->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C1;
  };
  auto IsSuperSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C2;
  };

  // In the following, we consider only the cases where B is a superset of D, B
  // is a subset of D, or B == D because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above.)
  // For example,
  //   (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D or B == D.

  // If E is zero and B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated); otherwise, no
  // folding.
  // For example,
  //   (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst.isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, E aren't zero and (B & D) == B, (B & D) == D or B ==
  // D. If B is a superset of (or equal to) D, since E is not zero, LHS is
  // subsumed by RHS (RHS implies LHS.) So the whole expression becomes
  // RHS. For example,
  //   (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst))
    return RHS;
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // ie. (B & E) != 0, then LHS is subsumed by RHS. For example.
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((*BCst & ECst) != 0)
    return RHS;
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated.) For example,
  //   (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  //   (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  // which gets swapped to
  //   (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, E, B, C, PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
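/// For example (illustrative of the Mask_AllZeros case handled below):
///   (icmp eq (A & 4), 0) & (icmp eq (A & 8), 0) -> (icmp eq (A & 12), 0).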
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  Optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //     (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // ==  ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    //     -> (icmp eq (A & (B|D)), 0)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D having a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    //     -> (icmp eq (A & (B|D)), (B|D))
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    //     -> (icmp eq (A & (B&D)), A)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  // Remaining cases assume at least that B and D are constant, and depend on
  // their actual values. This isn't strictly necessary, just a "handle the
  // easy cases for now" decision.
  const APInt *ConstB, *ConstD;
  if (!match(B, m_APInt(ConstB)) || !match(D, m_APInt(ConstD)))
    return nullptr;

  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    //     -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D"
    // is the same as either B or D).
    APInt NewMask = *ConstB & *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & AMask_NotAllOnes) {
    // (icmp ne (A & B), A) & (icmp ne (A & D), A)
    //     -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D"
    // is the same as either B or D).
    APInt NewMask = *ConstB | *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & BMask_Mixed) {
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    //     -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // We can't simply use C and E because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D having a single bit set.
    const APInt *OldConstC, *OldConstE;
    if (!match(C, m_APInt(OldConstC)) || !match(E, m_APInt(OldConstE)))
      return nullptr;

    const APInt ConstC = PredL != NewCC ? *ConstB ^ *OldConstC : *OldConstC;
    const APInt ConstE = PredR != NewCC ? *ConstD ^ *OldConstE : *OldConstE;

    // If there is a conflict, we should actually return a false for the
    // whole construct.
    if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
      return ConstantInt::get(LHS->getType(), !IsAnd);

    Value *NewOr1 = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr1);
    Constant *NewOr2 = ConstantInt::get(A->getType(), ConstC | ConstE);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
  }

  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() :
                               Cmp0->getPredicate());

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;
  ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() :
                               Cmp1->getPredicate());

  Value *Input = Cmp0->getOperand(0);
  Value *RangeEnd;
  if (Cmp1->getOperand(0) == Input) {
    // For the upper range compare we have: icmp x, n
    RangeEnd = Cmp1->getOperand(1);
  } else if (Cmp1->getOperand(1) == Input) {
    // For the upper range compare we have: icmp n, x
    RangeEnd = Cmp1->getOperand(0);
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
    case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
    case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
    default: return nullptr;
  }

  // This simplification is only valid if the upper range is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}

// Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
Value *InstCombinerImpl::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS,
                                                       ICmpInst *RHS,
                                                       Instruction *CxtI,
                                                       bool IsAnd,
                                                       bool IsLogical) {
  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  if (!match(LHS->getOperand(1), m_Zero()) ||
      !match(RHS->getOperand(1), m_Zero()))
    return nullptr;

  Value *L1, *L2, *R1, *R2;
  if (match(LHS->getOperand(0), m_And(m_Value(L1), m_Value(L2))) &&
      match(RHS->getOperand(0), m_And(m_Value(R1), m_Value(R2)))) {
    if (L1 == R2 || L2 == R2)
      std::swap(R1, R2);
    if (L2 == R1)
      std::swap(L1, L2);

    if (L1 == R1 &&
        isKnownToBeAPowerOfTwo(L2, false, 0, CxtI) &&
        isKnownToBeAPowerOfTwo(R2, false, 0, CxtI)) {
      // If this is a logical and/or, then we must prevent propagation of a
      // poison value from the RHS by inserting freeze.
      if (IsLogical)
        R2 = Builder.CreateFreeze(R2);
      Value *Mask = Builder.CreateOr(L2, R2);
      Value *Masked = Builder.CreateAnd(L1, Mask);
      auto NewPred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
      return Builder.CreateICmp(NewPred, Masked, Mask);
    }
  }

  return nullptr;
}

/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0
/// Pattern can be one of:
///   %t = add        i32 %arg,    128
///   %r = icmp   ult i32 %t,      256
/// Or
///   %t0 = shl       i32 %arg,    24
///   %t1 = ashr      i32 %t0,     24
///   %r  = icmp  eq  i32 %t1,     %arg
/// Or
///   %t0 = trunc     i32 %arg  to i8
///   %t1 = sext      i8  %t0   to i32
///   %r  = icmp  eq  i32 %t1,     %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32 %arg,    -1
/// Or
///   %t = and      i32 %arg,    2147483648
///   %r = icmp eq  i32 %t,      0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  assert(CxtI.getOpcode() == Instruction::And);

  // Match  icmp ult (add %arg, C01), C1   (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    CmpInst::Predicate Pred;
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(ICmp,
                m_ICmp(Pred, m_Add(m_Value(X), m_Power2(I01)), m_Power2(I1))) &&
          Pred == ICmpInst::ICMP_ULT && I1->ugt(*I01) && I01->shl(1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into:  icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpInst::Predicate Pred = ICmp->getPredicate();
    // Can it be decomposed into  icmp eq (X & Mask), 0  ?
    if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1),
                                   Pred, X, UnsetBitsMask,
                                   /*LookThroughTrunc=*/false) &&
        Pred == ICmpInst::ICMP_EQ)
      return true;
    // Is it  icmp eq (X & Mask), 0  already?
    const APInt *Mask;
    if (match(ICmp, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) &&
        Pred == ICmpInst::ICMP_EQ) {
      UnsetBitsMask = *Mask;
      return true;
    }
    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isZero() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(X0, m_Trunc(m_Specific(X1)))) {
    UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask,
  if (!UnsetBitsMask.intersects(SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.
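
  // For example (illustrative): for an i32 %arg with HighestBit == 128 and the
  // other icmp being (icmp eq (and %arg, 2147483648), 0), the combined check
  // simplifies to (icmp ult %arg, 128).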

  // %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
                               CxtI.getName() + ".simplified");
}

/// Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and
/// fold (icmp ne ctpop(X) 1) & (icmp ne X 0) into (icmp ugt ctpop(X) 1).
/// Also used for logical and/or, must be poison safe.
static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
                                   InstCombiner::BuilderTy &Builder) {
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
                          m_SpecificInt(1))) ||
      !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())))
    return nullptr;

  Value *CtPop = Cmp0->getOperand(0);
  if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE)
    return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
  if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ)
    return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));

  return nullptr;
}

/// Reduce a pair of compares that check if a value has exactly 1 bit set.
/// Also used for logical and/or, must be poison safe.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder) {
  // Handle 'and' / 'or' commutation: make the equality check the first
  // operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(Cmp0, Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(Cmp0, Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(2))) &&
      Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(1))) &&
      Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_UGT) {
    Value *CtPop = Cmp1->getOperand(0);
    return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
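/// For example (one of the folds implemented below):
///   (Base u>= Offset) && ((Base - Offset) != 0) --> (Base u> Offset)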
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  Value *ZeroCmpOp;
  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  auto IsKnownNonZero = [&](Value *V) {
    return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
  };

  ICmpInst::Predicate UnsignedPred;

  Value *A, *B;
  if (match(UnsignedICmp,
            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!IsKnownNonZero(NonZero))
        std::swap(NonZero, Other);
      return IsKnownNonZero(NonZero);
    };

    // Given  ZeroCmpOp = (A + B)
    //   ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-X) <  Y  iff
    //   ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-X) >= Y  iff
    //     with X being the value (A/B) that is known to be non-zero,
    //     and Y being remaining value.
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
  }

  Value *Base, *Offset;
  if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset))))
    return nullptr;

  if (!match(UnsignedICmp,
             m_c_ICmp(UnsignedPred, m_Specific(Base), m_Specific(Offset))) ||
      !ICmpInst::isUnsigned(UnsignedPred))
    return nullptr;

  // Base >=/> Offset && (Base - Offset) != 0  <-->  Base > Offset
  // (no overflow and not null)
  if ((UnsignedPred == ICmpInst::ICMP_UGE ||
       UnsignedPred == ICmpInst::ICMP_UGT) &&
      EqPred == ICmpInst::ICMP_NE && IsAnd)
    return Builder.CreateICmpUGT(Base, Offset);

  // Base <=/< Offset || (Base - Offset) == 0  <-->  Base <= Offset
  // (overflow or null)
  if ((UnsignedPred == ICmpInst::ICMP_ULE ||
       UnsignedPred == ICmpInst::ICMP_ULT) &&
      EqPred == ICmpInst::ICMP_EQ && !IsAnd)
    return Builder.CreateICmpULE(Base, Offset);

  // Base <= Offset && (Base - Offset) != 0  -->  Base < Offset
  if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
      IsAnd)
    return Builder.CreateICmpULT(Base, Offset);

  // Base > Offset || (Base - Offset) == 0  -->  Base >= Offset
  if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
      !IsAnd)
    return Builder.CreateICmpUGE(Base, Offset);

  return nullptr;
}

struct IntPart {
  Value *From;
  unsigned StartBit;
  unsigned NumBits;
};

/// Match an extraction of bits from an integer.
static Optional<IntPart> matchIntPart(Value *V) {
  Value *X;
  if (!match(V, m_OneUse(m_Trunc(m_Value(X)))))
    return None;

  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
  Value *Y;
  const APInt *Shift;
  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
  // from Y, not any shifted-in zeroes.
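  // For example (illustrative): trunc i32 (lshr i32 %y, 8) to i8 extracts bits
  // [8, 16) of %y and yields IntPart{%y, 8, 8}.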
  if (match(X, m_OneUse(m_LShr(m_Value(Y), m_APInt(Shift)))) &&
      Shift->ule(NumOriginalBits - NumExtractedBits))
    return {{Y, (unsigned)Shift->getZExtValue(), NumExtractedBits}};
  return {{X, 0, NumExtractedBits}};
}

/// Materialize an extraction of bits from an integer in IR.
static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
  Value *V = P.From;
  if (P.StartBit)
    V = Builder.CreateLShr(V, P.StartBit);
  Type *TruncTy = V->getType()->getWithNewBitWidth(P.NumBits);
  if (TruncTy != V->getType())
    V = Builder.CreateTrunc(V, TruncTy);
  return V;
}

/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
Value *InstCombinerImpl::foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                       bool IsAnd) {
  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
  if (Cmp0->getPredicate() != Pred || Cmp1->getPredicate() != Pred)
    return nullptr;

  Optional<IntPart> L0 = matchIntPart(Cmp0->getOperand(0));
  Optional<IntPart> R0 = matchIntPart(Cmp0->getOperand(1));
  Optional<IntPart> L1 = matchIntPart(Cmp1->getOperand(0));
  Optional<IntPart> R1 = matchIntPart(Cmp1->getOperand(1));
  if (!L0 || !R0 || !L1 || !R1)
    return nullptr;

  // Make sure the LHS/RHS compare a part of the same value, possibly after
  // an operand swap.
  if (L0->From != L1->From || R0->From != R1->From) {
    if (L0->From != R1->From || R0->From != L1->From)
      return nullptr;
    std::swap(L1, R1);
  }

  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
  // the low part and L1/R1 being the high part.
  if (L0->StartBit + L0->NumBits != L1->StartBit ||
      R0->StartBit + R0->NumBits != R1->StartBit) {
    if (L1->StartBit + L1->NumBits != L0->StartBit ||
        R1->StartBit + R1->NumBits != R0->StartBit)
      return nullptr;
    std::swap(L0, L1);
    std::swap(R0, R1);
  }

  // We can simplify to a comparison of these larger parts of the integers.
  IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
  IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
  Value *LValue = extractIntPart(L, Builder);
  Value *RValue = extractIntPart(R, Builder);
  return Builder.CreateICmp(Pred, LValue, RValue);
}

/// Reduce logic-of-compares with equality to a constant by substituting a
/// common operand with the constant. Callers are expected to call this with
/// Cmp0/Cmp1 switched to handle logic op commutativity.
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q) {
  // Match an equality compare with a non-poison constant as Cmp0.
  // Also, give up if the compare can be constant-folded to avoid looping.
  ICmpInst::Predicate Pred0;
  Value *X;
  Constant *C;
  if (!match(Cmp0, m_ICmp(Pred0, m_Value(X), m_Constant(C))) ||
      !isGuaranteedNotToBeUndefOrPoison(C) || isa<Constant>(X))
    return nullptr;
  if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) ||
      (!IsAnd && Pred0 != ICmpInst::ICMP_NE))
    return nullptr;

  // The other compare must include a common operand (X).
  // Canonicalize the common operand as operand 1 (Pred1 is swapped if the
  // common operand was operand 0).
  Value *Y;
  ICmpInst::Predicate Pred1;
  if (!match(Cmp1, m_c_ICmp(Pred1, m_Value(Y), m_Deferred(X))))
    return nullptr;

  // Replace variable with constant value equivalence to remove a variable use:
  // (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C)
  // (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
  // Can think of the 'or' substitution with the 'and' bool equivalent:
  // A || B --> A || (!A && B)
  Value *SubstituteCmp = simplifyICmpInst(Pred1, Y, C, Q);
  if (!SubstituteCmp) {
    // If we need to create a new instruction, require that the old compare can
    // be removed.
    if (!Cmp1->hasOneUse())
      return nullptr;
    SubstituteCmp = Builder.CreateICmp(Pred1, Y, C);
  }
  return Builder.CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
                             SubstituteCmp);
}

/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
/// or   (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// NOTE: This is also used for logical and/or, must be poison-safe!
Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
                                                     ICmpInst *ICmp2,
                                                     bool IsAnd) {
  ICmpInst::Predicate Pred1, Pred2;
  Value *V1, *V2;
  const APInt *C1, *C2;
  if (!match(ICmp1, m_ICmp(Pred1, m_Value(V1), m_APInt(C1))) ||
      !match(ICmp2, m_ICmp(Pred2, m_Value(V2), m_APInt(C2))))
    return nullptr;

  // Look through add of a constant offset on V1, V2, or both operands. This
  // allows us to interpret the V + C' < C'' range idiom into a proper range.
  const APInt *Offset1 = nullptr, *Offset2 = nullptr;
  if (V1 != V2) {
    Value *X;
    if (match(V1, m_Add(m_Value(X), m_APInt(Offset1))))
      V1 = X;
    if (match(V2, m_Add(m_Value(X), m_APInt(Offset2))))
      V2 = X;
  }

  if (V1 != V2)
    return nullptr;

  ConstantRange CR1 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred1) : Pred1, *C1);
  if (Offset1)
    CR1 = CR1.subtract(*Offset1);

  ConstantRange CR2 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred2) : Pred2, *C2);
  if (Offset2)
    CR2 = CR2.subtract(*Offset2);

  Type *Ty = V1->getType();
  Value *NewV = V1;
  Optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
  if (!CR) {
    if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
        CR2.isWrappedSet())
      return nullptr;

    // Check whether we have equal-size ranges that only differ by one bit.
    // In that case we can apply a mask to map one range onto the other.
    APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
    APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
    APInt CR1Size = CR1.getUpper() - CR1.getLower();
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        CR1Size != CR2.getUpper() - CR2.getLower())
      return nullptr;
    CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
    NewV = Builder.CreateAnd(NewV, ConstantInt::get(Ty, ~LowerDiff));
  }

  if (IsAnd)
    CR = CR->inverse();

  CmpInst::Predicate NewPred;
  APInt NewC, Offset;
  CR->getEquivalentICmp(NewPred, NewC, Offset);

  if (Offset != 0)
    NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
  return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
}

Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
                                          bool IsAnd, bool IsLogicalSelect) {
  Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
  Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
  FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();

  if (LHS0 == RHS1 && RHS0 == LHS1) {
    // Swap RHS operands to match LHS.
    PredR = FCmpInst::getSwappedPredicate(PredR);
    std::swap(RHS0, RHS1);
  }

  // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
  // Suppose the relation between x and y is R, where R is one of
  // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
  // testing the desired relations.
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //    bool(R & CC0) && bool(R & CC1)
  //  = bool((R & CC0) & (R & CC1))
  //  = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
  //
  // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
  //    bool(R & CC0) || bool(R & CC1)
  //  = bool((R & CC0) | (R & CC1))
  //  = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;)
  if (LHS0 == RHS0 && LHS1 == RHS1) {
    unsigned FCmpCodeL = getFCmpCode(PredL);
    unsigned FCmpCodeR = getFCmpCode(PredR);
    unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;

    // Intersect the fast math flags.
    // TODO: We can union the fast math flags unless this is a logical select.
    IRBuilder<>::FastMathFlagGuard FMFG(Builder);
    FastMathFlags FMF = LHS->getFastMathFlags();
    FMF &= RHS->getFastMathFlags();
    Builder.setFastMathFlags(FMF);

    return getFCmpValue(NewPred, LHS0, LHS1, Builder);
  }

  // This transform is not valid for a logical select.
  if (!IsLogicalSelect &&
      ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
       (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO &&
        !IsAnd))) {
    if (LHS0->getType() != RHS0->getType())
      return nullptr;

    // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
    // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
    if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP()))
      // Ignore the constants because they are obviously not NANs:
      // (fcmp ord x, 0.0) & (fcmp ord y, 0.0)  -> (fcmp ord x, y)
      // (fcmp uno x, 0.0) | (fcmp uno y, 0.0)  -> (fcmp uno x, y)
      return Builder.CreateFCmp(PredL, LHS0, RHS0);
  }

  return nullptr;
}

/// This is a limited reassociation for a special case (see above) where we are
/// checking if two values are either both NAN (unordered) or not-NAN (ordered).
/// This could be handled more generally in '-reassociation', but it seems like
/// an unlikely pattern for a large number of logic ops and fcmps.
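/// For example (as also noted in the body below):
///   and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z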
static Instruction *reassociateFCmps(BinaryOperator &BO,
                                     InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = BO.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Expecting and/or op for fcmp transform");

  // There are 4 commuted variants of the pattern. Canonicalize operands of
  // this logic op so an fcmp is operand 0 and a matching logic op is operand 1.
  Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X;
  FCmpInst::Predicate Pred;
  if (match(Op1, m_FCmp(Pred, m_Value(), m_AnyZeroFP())))
    std::swap(Op0, Op1);

  // Match inner binop and the predicate for combining 2 NAN checks into 1.
  Value *BO10, *BO11;
  FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
                                                           : FCmpInst::FCMP_UNO;
  if (!match(Op0, m_FCmp(Pred, m_Value(X), m_AnyZeroFP())) || Pred != NanPred ||
      !match(Op1, m_BinOp(Opcode, m_Value(BO10), m_Value(BO11))))
    return nullptr;

  // The inner logic op must have a matching fcmp operand.
  Value *Y;
  if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
      Pred != NanPred || X->getType() != Y->getType())
    std::swap(BO10, BO11);

  if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
      Pred != NanPred || X->getType() != Y->getType())
    return nullptr;

  // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
  // or  (fcmp uno X, 0), (or  (fcmp uno Y, 0), Z) --> or  (fcmp uno X, Y), Z
  Value *NewFCmp = Builder.CreateFCmp(Pred, X, Y);
  if (auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) {
    // Intersect FMF from the 2 source fcmps.
    NewFCmpInst->copyIRFlags(Op0);
    NewFCmpInst->andIRFlags(BO10);
  }
  return BinaryOperator::Create(Opcode, NewFCmp, BO11);
}

/// Match variations of De Morgan's Laws:
///   (~A & ~B) == (~(A | B))
///   (~A | ~B) == (~(A & B))
static Instruction *matchDeMorgansLaws(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  const Instruction::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
         "Trying to match De Morgan's Laws with something other than and/or");

  // Flip the logic operation.
  const Instruction::BinaryOps FlippedOpcode =
      (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *A, *B;
  if (match(Op0, m_OneUse(m_Not(m_Value(A)))) &&
      match(Op1, m_OneUse(m_Not(m_Value(B)))) &&
      !InstCombiner::isFreeToInvert(A, A->hasOneUse()) &&
      !InstCombiner::isFreeToInvert(B, B->hasOneUse())) {
    Value *AndOr =
        Builder.CreateBinOp(FlippedOpcode, A, B, I.getName() + ".demorgan");
    return BinaryOperator::CreateNot(AndOr);
  }

  // The 'not' ops may require reassociation.
  // (A & ~B) & ~C --> A & ~(B | C)
  // (~B & A) & ~C --> A & ~(B | C)
  // (A | ~B) | ~C --> A | ~(B & C)
  // (~B | A) | ~C --> A | ~(B & C)
  Value *C;
  if (match(Op0, m_OneUse(m_c_BinOp(Opcode, m_Value(A), m_Not(m_Value(B))))) &&
      match(Op1, m_Not(m_Value(C)))) {
    Value *FlippedBO = Builder.CreateBinOp(FlippedOpcode, B, C);
    return BinaryOperator::Create(Opcode, A, Builder.CreateNot(FlippedBO));
  }

  return nullptr;
}

bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
  Value *CastSrc = CI->getOperand(0);

  // Noop casts and casts of constants should be eliminated trivially.
  if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
    return false;

  // If this cast is paired with another cast that can be eliminated, we prefer
  // to have it eliminated.
  if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
    if (isEliminableCastPair(PrecedingCI, CI))
      return false;

  return true;
}

/// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
                                          InstCombiner::BuilderTy &Builder) {
  Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
  if (!C)
    return nullptr;

  auto LogicOpc = Logic.getOpcode();
  Type *DestTy = Logic.getType();
  Type *SrcTy = Cast->getSrcTy();

  // Move the logic operation ahead of a zext or sext if the constant is
  // unchanged in the smaller source type. Performing the logic in a smaller
  // type may provide more information to later folds, and the smaller logic
  // instruction may be cheaper (particularly in the case of vectors).
  Value *X;
  if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
    Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
    Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
    if (ZextTruncC == C) {
      // LogicOpc (zext X), C --> zext (LogicOpc X, C)
      Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
      return new ZExtInst(NewOp, DestTy);
    }
  }

  if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) {
    Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy);
    Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy);
    if (SextTruncC == C) {
      // LogicOpc (sext X), C --> sext (LogicOpc X, C)
      Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
      return new SExtInst(NewOp, DestTy);
    }
  }

  return nullptr;
}

/// Fold {and,or,xor} (cast X), Y.
Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
  auto LogicOpc = I.getOpcode();
  assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  CastInst *Cast0 = dyn_cast<CastInst>(Op0);
  if (!Cast0)
    return nullptr;

  // This must be a cast from an integer or integer vector source type to allow
  // transformation of the logic operation to the source type.
  Type *DestTy = I.getType();
  Type *SrcTy = Cast0->getSrcTy();
  if (!SrcTy->isIntOrIntVectorTy())
    return nullptr;

  if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder))
    return Ret;

  CastInst *Cast1 = dyn_cast<CastInst>(Op1);
  if (!Cast1)
    return nullptr;

  // Both operands of the logic operation are casts. The casts must be of the
  // same type for reduction.
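  // For example (illustrative of the fold below):
  //   and (zext i8 %a to i32), (zext i8 %b to i32) --> zext (and i8 %a, %b) to i32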
  auto CastOpcode = Cast0->getOpcode();
  if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy())
    return nullptr;

  Value *Cast0Src = Cast0->getOperand(0);
  Value *Cast1Src = Cast1->getOperand(0);

  // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
  if ((Cast0->hasOneUse() || Cast1->hasOneUse()) &&
      shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
    Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
                                       I.getName());
    return CastInst::Create(CastOpcode, NewOp, DestTy);
  }

  // For now, only 'and'/'or' have optimizations after this.
  if (LogicOpc == Instruction::Xor)
    return nullptr;

  // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the
  // cast is otherwise not optimizable. This happens for vector sexts.
  ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src);
  ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src);
  if (ICmp0 && ICmp1) {
    if (Value *Res =
            foldAndOrOfICmps(ICmp0, ICmp1, I, LogicOpc == Instruction::And))
      return CastInst::Create(CastOpcode, Res, DestTy);
    return nullptr;
  }

  // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the
  // cast is otherwise not optimizable. This happens for vector sexts.
  FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src);
  FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src);
  if (FCmp0 && FCmp1)
    if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And))
      return CastInst::Create(CastOpcode, R, DestTy);

  return nullptr;
}

static Instruction *foldAndToXor(BinaryOperator &I,
                                 InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::And);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'or' is Op0.
  // (A | B) & ~(A & B) --> A ^ B
  // (A | B) & ~(B & A) --> A ^ B
  if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)),
                        m_Not(m_c_And(m_Deferred(A), m_Deferred(B))))))
    return BinaryOperator::CreateXor(A, B);

  // (A | ~B) & (~A | B) --> ~(A ^ B)
  // (A | ~B) & (B | ~A) --> ~(A ^ B)
  // (~B | A) & (~A | B) --> ~(A ^ B)
  // (~B | A) & (B | ~A) --> ~(A ^ B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))),
                          m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
      return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  return nullptr;
}

static Instruction *foldOrToXor(BinaryOperator &I,
                                InstCombiner::BuilderTy &Builder) {
  assert(I.getOpcode() == Instruction::Or);
  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *A, *B;

  // Operand complexity canonicalization guarantees that the 'and' is Op0.
  // (A & B) | ~(A | B) --> ~(A ^ B)
  // (A & B) | ~(B | A) --> ~(A ^ B)
  if (Op0->hasOneUse() || Op1->hasOneUse())
    if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
        match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return BinaryOperator::CreateNot(Builder.CreateXor(A, B));

  // Operand complexity canonicalization guarantees that the 'xor' is Op0.
1518 // (A ^ B) | ~(A | B) --> ~(A & B) 1519 // (A ^ B) | ~(B | A) --> ~(A & B) 1520 if (Op0->hasOneUse() || Op1->hasOneUse()) 1521 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && 1522 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))) 1523 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B)); 1524 1525 // (A & ~B) | (~A & B) --> A ^ B 1526 // (A & ~B) | (B & ~A) --> A ^ B 1527 // (~B & A) | (~A & B) --> A ^ B 1528 // (~B & A) | (B & ~A) --> A ^ B 1529 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) && 1530 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))) 1531 return BinaryOperator::CreateXor(A, B); 1532 1533 return nullptr; 1534 } 1535 1536 /// Return true if a constant shift amount is always less than the specified 1537 /// bit-width. If not, the shift could create poison in the narrower type. 1538 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) { 1539 APInt Threshold(C->getType()->getScalarSizeInBits(), BitWidth); 1540 return match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold)); 1541 } 1542 1543 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and 1544 /// a common zext operand: and (binop (zext X), C), (zext X). 1545 Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) { 1546 // This transform could also apply to {or, and, xor}, but there are better 1547 // folds for those cases, so we don't expect those patterns here. AShr is not 1548 // handled because it should always be transformed to LShr in this sequence. 1549 // The subtract transform is different because it has a constant on the left. 1550 // Add/mul commute the constant to RHS; sub with constant RHS becomes add. 1551 Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1); 1552 Constant *C; 1553 if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) && 1554 !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) && 1555 !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) && 1556 !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) && 1557 !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1))))) 1558 return nullptr; 1559 1560 Value *X; 1561 if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3)) 1562 return nullptr; 1563 1564 Type *Ty = And.getType(); 1565 if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType())) 1566 return nullptr; 1567 1568 // If we're narrowing a shift, the shift amount must be safe (less than the 1569 // width) in the narrower type. If the shift amount is greater, instsimplify 1570 // usually handles that case, but we can't guarantee/assert it. 1571 Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode(); 1572 if (Opc == Instruction::LShr || Opc == Instruction::Shl) 1573 if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits())) 1574 return nullptr; 1575 1576 // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X) 1577 // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X) 1578 Value *NewC = ConstantExpr::getTrunc(C, X->getType()); 1579 Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X) 1580 : Builder.CreateBinOp(Opc, X, NewC); 1581 return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty); 1582 } 1583 1584 /// Try folding relatively complex patterns for both And and Or operations 1585 /// with all And and Or swapped. 
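/// For example, one of the shapes handled below (with its Or/And dual) is:
///   (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A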
1586 static Instruction *foldComplexAndOrPatterns(BinaryOperator &I, 1587 InstCombiner::BuilderTy &Builder) { 1588 const Instruction::BinaryOps Opcode = I.getOpcode(); 1589 assert(Opcode == Instruction::And || Opcode == Instruction::Or); 1590 1591 // Flip the logic operation. 1592 const Instruction::BinaryOps FlippedOpcode = 1593 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And; 1594 1595 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1596 Value *A, *B, *C, *X, *Y, *Dummy; 1597 1598 // Match the following expressions: 1599 // (~(A | B) & C) 1600 // (~(A & B) | C) 1601 // Captures X = ~(A | B) or ~(A & B) 1602 const auto matchNotOrAnd = 1603 [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C, 1604 Value *&X, bool CountUses = false) -> bool { 1605 if (CountUses && !Op->hasOneUse()) 1606 return false; 1607 1608 if (match(Op, m_c_BinOp(FlippedOpcode, 1609 m_CombineAnd(m_Value(X), 1610 m_Not(m_c_BinOp(Opcode, m_A, m_B))), 1611 m_C))) 1612 return !CountUses || X->hasOneUse(); 1613 1614 return false; 1615 }; 1616 1617 // (~(A | B) & C) | ... --> ... 1618 // (~(A & B) | C) & ... --> ... 1619 // TODO: The one-use checks are conservative. We only need to check that the 1620 // total number of multiply-used values does not exceed the reduction 1621 // in operations. 1622 if (matchNotOrAnd(Op0, m_Value(A), m_Value(B), m_Value(C), X)) { 1623 // (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A 1624 // (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A) 1625 if (matchNotOrAnd(Op1, m_Specific(A), m_Specific(C), m_Specific(B), Dummy, 1626 true)) { 1627 Value *Xor = Builder.CreateXor(B, C); 1628 return (Opcode == Instruction::Or) 1629 ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(A)) 1630 : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, A)); 1631 } 1632 1633 // (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B 1634 // (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B) 1635 if (matchNotOrAnd(Op1, m_Specific(B), m_Specific(C), m_Specific(A), Dummy, 1636 true)) { 1637 Value *Xor = Builder.CreateXor(A, C); 1638 return (Opcode == Instruction::Or) 1639 ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(B)) 1640 : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, B)); 1641 } 1642 1643 // (~(A | B) & C) | ~(A | C) --> ~((B & C) | A) 1644 // (~(A & B) | C) & ~(A & C) --> ~((B | C) & A) 1645 if (match(Op1, m_OneUse(m_Not(m_OneUse( 1646 m_c_BinOp(Opcode, m_Specific(A), m_Specific(C))))))) 1647 return BinaryOperator::CreateNot(Builder.CreateBinOp( 1648 Opcode, Builder.CreateBinOp(FlippedOpcode, B, C), A)); 1649 1650 // (~(A | B) & C) | ~(B | C) --> ~((A & C) | B) 1651 // (~(A & B) | C) & ~(B & C) --> ~((A | C) & B) 1652 if (match(Op1, m_OneUse(m_Not(m_OneUse( 1653 m_c_BinOp(Opcode, m_Specific(B), m_Specific(C))))))) 1654 return BinaryOperator::CreateNot(Builder.CreateBinOp( 1655 Opcode, Builder.CreateBinOp(FlippedOpcode, A, C), B)); 1656 1657 // (~(A | B) & C) | ~(C | (A ^ B)) --> ~((A | B) & (C | (A ^ B))) 1658 // Note, the pattern with swapped and/or is not handled because the 1659 // result is more undefined than the source: 1660 // (~(A & B) | C) & ~(C & (A ^ B)) --> (A ^ B ^ C) | ~(A | C) is invalid.
1661 if (Opcode == Instruction::Or && Op0->hasOneUse() && 1662 match(Op1, m_OneUse(m_Not(m_CombineAnd( 1663 m_Value(Y), 1664 m_c_BinOp(Opcode, m_Specific(C), 1665 m_c_Xor(m_Specific(A), m_Specific(B)))))))) { 1666 // X = ~(A | B) 1667 // Y = (C | (A ^ B)) 1668 Value *Or = cast<BinaryOperator>(X)->getOperand(0); 1669 return BinaryOperator::CreateNot(Builder.CreateAnd(Or, Y)); 1670 } 1671 } 1672 1673 // (~A & B & C) | ... --> ... 1674 // (~A | B | C) & ... --> ... 1675 // TODO: The one-use checks are conservative. We only need to check that the 1676 // total number of multiply-used values does not exceed the reduction 1677 // in operations. 1678 if (match(Op0, 1679 m_OneUse(m_c_BinOp(FlippedOpcode, 1680 m_BinOp(FlippedOpcode, m_Value(B), m_Value(C)), 1681 m_CombineAnd(m_Value(X), m_Not(m_Value(A)))))) || 1682 match(Op0, m_OneUse(m_c_BinOp( 1683 FlippedOpcode, 1684 m_c_BinOp(FlippedOpcode, m_Value(C), 1685 m_CombineAnd(m_Value(X), m_Not(m_Value(A)))), 1686 m_Value(B))))) { 1687 // X = ~A 1688 // (~A & B & C) | ~(A | B | C) --> ~(A | (B ^ C)) 1689 // (~A | B | C) & ~(A & B & C) --> (~A | (B ^ C)) 1690 if (match(Op1, m_OneUse(m_Not(m_c_BinOp( 1691 Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)), 1692 m_Specific(C))))) || 1693 match(Op1, m_OneUse(m_Not(m_c_BinOp( 1694 Opcode, m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)), 1695 m_Specific(A))))) || 1696 match(Op1, m_OneUse(m_Not(m_c_BinOp( 1697 Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)), 1698 m_Specific(B)))))) { 1699 Value *Xor = Builder.CreateXor(B, C); 1700 return (Opcode == Instruction::Or) 1701 ? BinaryOperator::CreateNot(Builder.CreateOr(Xor, A)) 1702 : BinaryOperator::CreateOr(Xor, X); 1703 } 1704 1705 // (~A & B & C) | ~(A | B) --> (C | ~B) & ~A 1706 // (~A | B | C) & ~(A & B) --> (C & ~B) | ~A 1707 if (match(Op1, m_OneUse(m_Not(m_OneUse( 1708 m_c_BinOp(Opcode, m_Specific(A), m_Specific(B))))))) 1709 return BinaryOperator::Create( 1710 FlippedOpcode, Builder.CreateBinOp(Opcode, C, Builder.CreateNot(B)), 1711 X); 1712 1713 // (~A & B & C) | ~(A | C) --> (B | ~C) & ~A 1714 // (~A | B | C) & ~(A & C) --> (B & ~C) | ~A 1715 if (match(Op1, m_OneUse(m_Not(m_OneUse( 1716 m_c_BinOp(Opcode, m_Specific(A), m_Specific(C))))))) 1717 return BinaryOperator::Create( 1718 FlippedOpcode, Builder.CreateBinOp(Opcode, B, Builder.CreateNot(C)), 1719 X); 1720 } 1721 1722 return nullptr; 1723 } 1724 1725 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 1726 // here. We should standardize that construct where it is needed or choose some 1727 // other way to ensure that commutated variants of patterns are not missed. 1728 Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) { 1729 Type *Ty = I.getType(); 1730 1731 if (Value *V = simplifyAndInst(I.getOperand(0), I.getOperand(1), 1732 SQ.getWithInstruction(&I))) 1733 return replaceInstUsesWith(I, V); 1734 1735 if (SimplifyAssociativeOrCommutative(I)) 1736 return &I; 1737 1738 if (Instruction *X = foldVectorBinop(I)) 1739 return X; 1740 1741 if (Instruction *Phi = foldBinopWithPhiOperands(I)) 1742 return Phi; 1743 1744 // See if we can simplify any instructions used by the instruction whose sole 1745 // purpose is to compute bits we don't care about. 1746 if (SimplifyDemandedInstructionBits(I)) 1747 return &I; 1748 1749 // Do this before using distributive laws to catch simple and/or/not patterns.
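// e.g. foldAndToXor rewrites (A | B) & ~(A & B) into A ^ B.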
1750 if (Instruction *Xor = foldAndToXor(I, Builder)) 1751 return Xor; 1752 1753 if (Instruction *X = foldComplexAndOrPatterns(I, Builder)) 1754 return X; 1755 1756 // (A|B)&(A|C) -> A|(B&C) etc 1757 if (Value *V = SimplifyUsingDistributiveLaws(I)) 1758 return replaceInstUsesWith(I, V); 1759 1760 if (Value *V = SimplifyBSwap(I, Builder)) 1761 return replaceInstUsesWith(I, V); 1762 1763 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1764 1765 Value *X, *Y; 1766 if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) && 1767 match(Op1, m_One())) { 1768 // (1 << X) & 1 --> zext(X == 0) 1769 // (1 >> X) & 1 --> zext(X == 0) 1770 Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, 0)); 1771 return new ZExtInst(IsZero, Ty); 1772 } 1773 1774 // (-(X & 1)) & Y --> (X & 1) == 0 ? 0 : Y 1775 Value *Neg; 1776 if (match(&I, 1777 m_c_And(m_CombineAnd(m_Value(Neg), 1778 m_OneUse(m_Neg(m_And(m_Value(), m_One())))), 1779 m_Value(Y)))) { 1780 Value *Cmp = Builder.CreateIsNull(Neg); 1781 return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Y); 1782 } 1783 1784 const APInt *C; 1785 if (match(Op1, m_APInt(C))) { 1786 const APInt *XorC; 1787 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) { 1788 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2) 1789 Constant *NewC = ConstantInt::get(Ty, *C & *XorC); 1790 Value *And = Builder.CreateAnd(X, Op1); 1791 And->takeName(Op0); 1792 return BinaryOperator::CreateXor(And, NewC); 1793 } 1794 1795 const APInt *OrC; 1796 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) { 1797 // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2) 1798 // NOTE: This reduces the number of bits set in the & mask, which 1799 // can expose opportunities for store narrowing for scalars. 1800 // NOTE: SimplifyDemandedBits should have already removed bits from C1 1801 // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in 1802 // above, but this feels safer. 
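// For example, with illustrative i8 constants C1 = 0x0C and C2 = 0x0A:
//   (X | 0x0C) & 0x0A --> (X & 0x02) | 0x08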
1803 APInt Together = *C & *OrC; 1804 Value *And = Builder.CreateAnd(X, ConstantInt::get(Ty, Together ^ *C)); 1805 And->takeName(Op0); 1806 return BinaryOperator::CreateOr(And, ConstantInt::get(Ty, Together)); 1807 } 1808 1809 unsigned Width = Ty->getScalarSizeInBits(); 1810 const APInt *ShiftC; 1811 if (match(Op0, m_OneUse(m_SExt(m_AShr(m_Value(X), m_APInt(ShiftC))))) && 1812 ShiftC->ult(Width)) { 1813 if (*C == APInt::getLowBitsSet(Width, Width - ShiftC->getZExtValue())) { 1814 // We are clearing high bits that were potentially set by sext+ashr: 1815 // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC 1816 Value *Sext = Builder.CreateSExt(X, Ty); 1817 Constant *ShAmtC = ConstantInt::get(Ty, ShiftC->zext(Width)); 1818 return BinaryOperator::CreateLShr(Sext, ShAmtC); 1819 } 1820 } 1821 1822 // If this 'and' clears the sign-bits added by ashr, replace with lshr: 1823 // and (ashr X, ShiftC), C --> lshr X, ShiftC 1824 if (match(Op0, m_AShr(m_Value(X), m_APInt(ShiftC))) && ShiftC->ult(Width) && 1825 C->isMask(Width - ShiftC->getZExtValue())) 1826 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, *ShiftC)); 1827 1828 const APInt *AddC; 1829 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC)))) { 1830 // If we add zeros to every bit below a mask, the add has no effect: 1831 // (X + AddC) & LowMaskC --> X & LowMaskC 1832 unsigned Ctlz = C->countLeadingZeros(); 1833 APInt LowMask(APInt::getLowBitsSet(Width, Width - Ctlz)); 1834 if ((*AddC & LowMask).isZero()) 1835 return BinaryOperator::CreateAnd(X, Op1); 1836 1837 // If we are masking the result of the add down to exactly one bit and 1838 // the constant we are adding has no bits set below that bit, then the 1839 // add is flipping a single bit. Example: 1840 // (X + 4) & 4 --> (X & 4) ^ 4 1841 if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) { 1842 assert((*C & *AddC) != 0 && "Expected common bit"); 1843 Value *NewAnd = Builder.CreateAnd(X, Op1); 1844 return BinaryOperator::CreateXor(NewAnd, Op1); 1845 } 1846 } 1847 1848 // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the 1849 // bitwidth of X and OP behaves well when given trunc(C1) and X. 1850 auto isNarrowableBinOpcode = [](BinaryOperator *B) { 1851 switch (B->getOpcode()) { 1852 case Instruction::Xor: 1853 case Instruction::Or: 1854 case Instruction::Mul: 1855 case Instruction::Add: 1856 case Instruction::Sub: 1857 return true; 1858 default: 1859 return false; 1860 } 1861 }; 1862 BinaryOperator *BO; 1863 if (match(Op0, m_OneUse(m_BinOp(BO))) && isNarrowableBinOpcode(BO)) { 1864 Instruction::BinaryOps BOpcode = BO->getOpcode(); 1865 Value *X; 1866 const APInt *C1; 1867 // TODO: The one-use restrictions could be relaxed a little if the AND 1868 // is going to be removed. 1869 // Try to narrow the 'and' and a binop with constant operand: 1870 // and (bo (zext X), C1), C --> zext (and (bo X, TruncC1), TruncC) 1871 if (match(BO, m_c_BinOp(m_OneUse(m_ZExt(m_Value(X))), m_APInt(C1))) && 1872 C->isIntN(X->getType()->getScalarSizeInBits())) { 1873 unsigned XWidth = X->getType()->getScalarSizeInBits(); 1874 Constant *TruncC1 = ConstantInt::get(X->getType(), C1->trunc(XWidth)); 1875 Value *BinOp = isa<ZExtInst>(BO->getOperand(0)) 1876 ? 
Builder.CreateBinOp(BOpcode, X, TruncC1) 1877 : Builder.CreateBinOp(BOpcode, TruncC1, X); 1878 Constant *TruncC = ConstantInt::get(X->getType(), C->trunc(XWidth)); 1879 Value *And = Builder.CreateAnd(BinOp, TruncC); 1880 return new ZExtInst(And, Ty); 1881 } 1882 1883 // Similar to above: if the mask matches the zext input width, then the 1884 // 'and' can be eliminated, so we can truncate the other variable op: 1885 // and (bo (zext X), Y), C --> zext (bo X, (trunc Y)) 1886 if (isa<Instruction>(BO->getOperand(0)) && 1887 match(BO->getOperand(0), m_OneUse(m_ZExt(m_Value(X)))) && 1888 C->isMask(X->getType()->getScalarSizeInBits())) { 1889 Y = BO->getOperand(1); 1890 Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr"); 1891 Value *NewBO = 1892 Builder.CreateBinOp(BOpcode, X, TrY, BO->getName() + ".narrow"); 1893 return new ZExtInst(NewBO, Ty); 1894 } 1895 // and (bo Y, (zext X)), C --> zext (bo (trunc Y), X) 1896 if (isa<Instruction>(BO->getOperand(1)) && 1897 match(BO->getOperand(1), m_OneUse(m_ZExt(m_Value(X)))) && 1898 C->isMask(X->getType()->getScalarSizeInBits())) { 1899 Y = BO->getOperand(0); 1900 Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr"); 1901 Value *NewBO = 1902 Builder.CreateBinOp(BOpcode, TrY, X, BO->getName() + ".narrow"); 1903 return new ZExtInst(NewBO, Ty); 1904 } 1905 } 1906 1907 // This is intentionally placed after the narrowing transforms for 1908 // efficiency (transform directly to the narrow logic op if possible). 1909 // If the mask is only needed on one incoming arm, push the 'and' op up. 1910 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) || 1911 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { 1912 APInt NotAndMask(~(*C)); 1913 BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode(); 1914 if (MaskedValueIsZero(X, NotAndMask, 0, &I)) { 1915 // Not masking anything out for the LHS, move mask to RHS. 1916 // and ({x}or X, Y), C --> {x}or X, (and Y, C) 1917 Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked"); 1918 return BinaryOperator::Create(BinOp, X, NewRHS); 1919 } 1920 if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) { 1921 // Not masking anything out for the RHS, move mask to LHS. 1922 // and ({x}or X, Y), C --> {x}or (and X, C), Y 1923 Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked"); 1924 return BinaryOperator::Create(BinOp, NewLHS, Y); 1925 } 1926 } 1927 1928 // When the mask is a power-of-2 constant and op0 is a shifted-power-of-2 1929 // constant, test if the shift amount equals the offset bit index: 1930 // (ShiftC << X) & C --> X == (log2(C) - log2(ShiftC)) ? C : 0 1931 // (ShiftC >> X) & C --> X == (log2(ShiftC) - log2(C)) ? C : 0 1932 if (C->isPowerOf2() && 1933 match(Op0, m_OneUse(m_LogicalShift(m_Power2(ShiftC), m_Value(X))))) { 1934 int Log2ShiftC = ShiftC->exactLogBase2(); 1935 int Log2C = C->exactLogBase2(); 1936 bool IsShiftLeft = 1937 cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl; 1938 int BitNum = IsShiftLeft ? 
Log2C - Log2ShiftC : Log2ShiftC - Log2C; 1939 assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask"); 1940 Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, BitNum)); 1941 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C), 1942 ConstantInt::getNullValue(Ty)); 1943 } 1944 1945 Constant *C1, *C2; 1946 const APInt *C3 = C; 1947 Value *X; 1948 if (C3->isPowerOf2()) { 1949 Constant *Log2C3 = ConstantInt::get(Ty, C3->countTrailingZeros()); 1950 if (match(Op0, m_OneUse(m_LShr(m_Shl(m_ImmConstant(C1), m_Value(X)), 1951 m_ImmConstant(C2)))) && 1952 match(C1, m_Power2())) { 1953 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1); 1954 Constant *LshrC = ConstantExpr::getAdd(C2, Log2C3); 1955 KnownBits KnownLShrc = computeKnownBits(LshrC, 0, nullptr); 1956 if (KnownLShrc.getMaxValue().ult(Width)) { 1957 // iff C1,C3 is pow2 and C2 + cttz(C3) < BitWidth: 1958 // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0 1959 Constant *CmpC = ConstantExpr::getSub(LshrC, Log2C1); 1960 Value *Cmp = Builder.CreateICmpEQ(X, CmpC); 1961 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3), 1962 ConstantInt::getNullValue(Ty)); 1963 } 1964 } 1965 1966 if (match(Op0, m_OneUse(m_Shl(m_LShr(m_ImmConstant(C1), m_Value(X)), 1967 m_ImmConstant(C2)))) && 1968 match(C1, m_Power2())) { 1969 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1); 1970 Constant *Cmp = 1971 ConstantExpr::getCompare(ICmpInst::ICMP_ULT, Log2C3, C2); 1972 if (Cmp->isZeroValue()) { 1973 // iff C1,C3 is pow2 and Log2(C3) >= C2: 1974 // ((C1 >> X) << C2) & C3 -> X == (cttz(C1)+C2-cttz(C3)) ? C3 : 0 1975 Constant *ShlC = ConstantExpr::getAdd(C2, Log2C1); 1976 Constant *CmpC = ConstantExpr::getSub(ShlC, Log2C3); 1977 Value *Cmp = Builder.CreateICmpEQ(X, CmpC); 1978 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3), 1979 ConstantInt::getNullValue(Ty)); 1980 } 1981 } 1982 } 1983 } 1984 1985 if (match(&I, m_And(m_OneUse(m_Shl(m_ZExt(m_Value(X)), m_Value(Y))), 1986 m_SignMask())) && 1987 match(Y, m_SpecificInt_ICMP( 1988 ICmpInst::Predicate::ICMP_EQ, 1989 APInt(Ty->getScalarSizeInBits(), 1990 Ty->getScalarSizeInBits() - 1991 X->getType()->getScalarSizeInBits())))) { 1992 auto *SExt = Builder.CreateSExt(X, Ty, X->getName() + ".signext"); 1993 auto *SanitizedSignMask = cast<Constant>(Op1); 1994 // We must be careful with the undef elements of the sign bit mask, however: 1995 // the mask elt can be undef iff the shift amount for that lane was undef, 1996 // otherwise we need to sanitize undef masks to zero. 
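// e.g. for an i8 value zero-extended to i32 and shifted left by 24:
//   ((zext i8 %x to i32) << 24) & 0x80000000 --> (sext i8 %x to i32) & 0x80000000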
1997 SanitizedSignMask = Constant::replaceUndefsWith( 1998 SanitizedSignMask, ConstantInt::getNullValue(Ty->getScalarType())); 1999 SanitizedSignMask = 2000 Constant::mergeUndefsWith(SanitizedSignMask, cast<Constant>(Y)); 2001 return BinaryOperator::CreateAnd(SExt, SanitizedSignMask); 2002 } 2003 2004 if (Instruction *Z = narrowMaskedBinOp(I)) 2005 return Z; 2006 2007 if (I.getType()->isIntOrIntVectorTy(1)) { 2008 if (auto *SI0 = dyn_cast<SelectInst>(Op0)) { 2009 if (auto *I = 2010 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ true)) 2011 return I; 2012 } 2013 if (auto *SI1 = dyn_cast<SelectInst>(Op1)) { 2014 if (auto *I = 2015 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ true)) 2016 return I; 2017 } 2018 } 2019 2020 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 2021 return FoldedLogic; 2022 2023 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder)) 2024 return DeMorgan; 2025 2026 { 2027 Value *A, *B, *C; 2028 // A & (A ^ B) --> A & ~B 2029 if (match(Op1, m_OneUse(m_c_Xor(m_Specific(Op0), m_Value(B))))) 2030 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(B)); 2031 // (A ^ B) & A --> A & ~B 2032 if (match(Op0, m_OneUse(m_c_Xor(m_Specific(Op1), m_Value(B))))) 2033 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(B)); 2034 2035 // A & ~(A ^ B) --> A & B 2036 if (match(Op1, m_Not(m_c_Xor(m_Specific(Op0), m_Value(B))))) 2037 return BinaryOperator::CreateAnd(Op0, B); 2038 // ~(A ^ B) & A --> A & B 2039 if (match(Op0, m_Not(m_c_Xor(m_Specific(Op1), m_Value(B))))) 2040 return BinaryOperator::CreateAnd(Op1, B); 2041 2042 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C 2043 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) 2044 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) 2045 if (Op1->hasOneUse() || isFreeToInvert(C, C->hasOneUse())) 2046 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C)); 2047 2048 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C 2049 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) 2050 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) 2051 if (Op0->hasOneUse() || isFreeToInvert(C, C->hasOneUse())) 2052 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C)); 2053 2054 // (A | B) & (~A ^ B) -> A & B 2055 // (A | B) & (B ^ ~A) -> A & B 2056 // (B | A) & (~A ^ B) -> A & B 2057 // (B | A) & (B ^ ~A) -> A & B 2058 if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) && 2059 match(Op0, m_c_Or(m_Specific(A), m_Specific(B)))) 2060 return BinaryOperator::CreateAnd(A, B); 2061 2062 // (~A ^ B) & (A | B) -> A & B 2063 // (~A ^ B) & (B | A) -> A & B 2064 // (B ^ ~A) & (A | B) -> A & B 2065 // (B ^ ~A) & (B | A) -> A & B 2066 if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) && 2067 match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) 2068 return BinaryOperator::CreateAnd(A, B); 2069 2070 // (~A | B) & (A ^ B) -> ~A & B 2071 // (~A | B) & (B ^ A) -> ~A & B 2072 // (B | ~A) & (A ^ B) -> ~A & B 2073 // (B | ~A) & (B ^ A) -> ~A & B 2074 if (match(Op0, m_c_Or(m_Not(m_Value(A)), m_Value(B))) && 2075 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B)))) 2076 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B); 2077 2078 // (A ^ B) & (~A | B) -> ~A & B 2079 // (B ^ A) & (~A | B) -> ~A & B 2080 // (A ^ B) & (B | ~A) -> ~A & B 2081 // (B ^ A) & (B | ~A) -> ~A & B 2082 if (match(Op1, m_c_Or(m_Not(m_Value(A)), m_Value(B))) && 2083 match(Op0, m_c_Xor(m_Specific(A), m_Specific(B)))) 2084 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B); 2085 } 2086 2087 { 2088 ICmpInst *LHS = 
dyn_cast<ICmpInst>(Op0); 2089 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1); 2090 if (LHS && RHS) 2091 if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ true)) 2092 return replaceInstUsesWith(I, Res); 2093 2094 // TODO: Make this recursive; it's a little tricky because an arbitrary 2095 // number of 'and' instructions might have to be created. 2096 if (LHS && match(Op1, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) { 2097 bool IsLogical = isa<SelectInst>(Op1); 2098 // LHS & (X && Y) --> (LHS && X) && Y 2099 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2100 if (Value *Res = 2101 foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true, IsLogical)) 2102 return replaceInstUsesWith(I, IsLogical 2103 ? Builder.CreateLogicalAnd(Res, Y) 2104 : Builder.CreateAnd(Res, Y)); 2105 // LHS & (X && Y) --> X && (LHS & Y) 2106 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2107 if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true, 2108 /* IsLogical */ false)) 2109 return replaceInstUsesWith(I, IsLogical 2110 ? Builder.CreateLogicalAnd(X, Res) 2111 : Builder.CreateAnd(X, Res)); 2112 } 2113 if (RHS && match(Op0, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) { 2114 bool IsLogical = isa<SelectInst>(Op0); 2115 // (X && Y) & RHS --> (X && RHS) && Y 2116 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2117 if (Value *Res = 2118 foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true, IsLogical)) 2119 return replaceInstUsesWith(I, IsLogical 2120 ? Builder.CreateLogicalAnd(Res, Y) 2121 : Builder.CreateAnd(Res, Y)); 2122 // (X && Y) & RHS --> X && (Y & RHS) 2123 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2124 if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true, 2125 /* IsLogical */ false)) 2126 return replaceInstUsesWith(I, IsLogical 2127 ? Builder.CreateLogicalAnd(X, Res) 2128 : Builder.CreateAnd(X, Res)); 2129 } 2130 } 2131 2132 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) 2133 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) 2134 if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ true)) 2135 return replaceInstUsesWith(I, Res); 2136 2137 if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder)) 2138 return FoldedFCmps; 2139 2140 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I)) 2141 return CastedAnd; 2142 2143 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I)) 2144 return Sel; 2145 2146 // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>. 2147 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold 2148 // with binop identity constant. But creating a select with non-constant 2149 // arm may not be reversible due to poison semantics. Is that a good 2150 // canonicalization? 2151 Value *A; 2152 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) && 2153 A->getType()->isIntOrIntVectorTy(1)) 2154 return SelectInst::Create(A, Op1, Constant::getNullValue(Ty)); 2155 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) && 2156 A->getType()->isIntOrIntVectorTy(1)) 2157 return SelectInst::Create(A, Op0, Constant::getNullValue(Ty)); 2158 2159 // (iN X s>> (N-1)) & Y --> (X s< 0) ? Y : 0 2160 unsigned FullShift = Ty->getScalarSizeInBits() - 1; 2161 if (match(&I, m_c_And(m_OneUse(m_AShr(m_Value(X), m_SpecificInt(FullShift))), 2162 m_Value(Y)))) { 2163 Value *IsNeg = Builder.CreateIsNeg(X, "isneg"); 2164 return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty)); 2165 } 2166 // If there's a 'not' of the shifted value, swap the select operands: 2167 // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 
0 : Y 2168 if (match(&I, m_c_And(m_OneUse(m_Not( 2169 m_AShr(m_Value(X), m_SpecificInt(FullShift)))), 2170 m_Value(Y)))) { 2171 Value *IsNeg = Builder.CreateIsNeg(X, "isneg"); 2172 return SelectInst::Create(IsNeg, ConstantInt::getNullValue(Ty), Y); 2173 } 2174 2175 // (~x) & y --> ~(x | (~y)) iff that gets rid of inversions 2176 if (sinkNotIntoOtherHandOfAndOrOr(I)) 2177 return &I; 2178 2179 // An and recurrence w/loop invariant step is equivalent to (and start, step) 2180 PHINode *PN = nullptr; 2181 Value *Start = nullptr, *Step = nullptr; 2182 if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN)) 2183 return replaceInstUsesWith(I, Builder.CreateAnd(Start, Step)); 2184 2185 return nullptr; 2186 } 2187 2188 Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I, 2189 bool MatchBSwaps, 2190 bool MatchBitReversals) { 2191 SmallVector<Instruction *, 4> Insts; 2192 if (!recognizeBSwapOrBitReverseIdiom(&I, MatchBSwaps, MatchBitReversals, 2193 Insts)) 2194 return nullptr; 2195 Instruction *LastInst = Insts.pop_back_val(); 2196 LastInst->removeFromParent(); 2197 2198 for (auto *Inst : Insts) 2199 Worklist.push(Inst); 2200 return LastInst; 2201 } 2202 2203 /// Match UB-safe variants of the funnel shift intrinsic. 2204 static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) { 2205 // TODO: Can we reduce the code duplication between this and the related 2206 // rotate matching code under visitSelect and visitTrunc? 2207 unsigned Width = Or.getType()->getScalarSizeInBits(); 2208 2209 // First, find an or'd pair of opposite shifts: 2210 // or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1) 2211 BinaryOperator *Or0, *Or1; 2212 if (!match(Or.getOperand(0), m_BinOp(Or0)) || 2213 !match(Or.getOperand(1), m_BinOp(Or1))) 2214 return nullptr; 2215 2216 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1; 2217 if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) || 2218 !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) || 2219 Or0->getOpcode() == Or1->getOpcode()) 2220 return nullptr; 2221 2222 // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)). 2223 if (Or0->getOpcode() == BinaryOperator::LShr) { 2224 std::swap(Or0, Or1); 2225 std::swap(ShVal0, ShVal1); 2226 std::swap(ShAmt0, ShAmt1); 2227 } 2228 assert(Or0->getOpcode() == BinaryOperator::Shl && 2229 Or1->getOpcode() == BinaryOperator::LShr && 2230 "Illegal or(shift,shift) pair"); 2231 2232 // Match the shift amount operands for a funnel shift pattern. This always 2233 // matches a subtraction on the R operand. 2234 auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * { 2235 // Check for constant shift amounts that sum to the bitwidth. 2236 const APInt *LI, *RI; 2237 if (match(L, m_APIntAllowUndef(LI)) && match(R, m_APIntAllowUndef(RI))) 2238 if (LI->ult(Width) && RI->ult(Width) && (*LI + *RI) == Width) 2239 return ConstantInt::get(L->getType(), *LI); 2240 2241 Constant *LC, *RC; 2242 if (match(L, m_Constant(LC)) && match(R, m_Constant(RC)) && 2243 match(L, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) && 2244 match(R, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) && 2245 match(ConstantExpr::getAdd(LC, RC), m_SpecificIntAllowUndef(Width))) 2246 return ConstantExpr::mergeUndefsWith(LC, RC); 2247 2248 // (shl ShVal, X) | (lshr ShVal, (Width - X)) iff X < Width.
2249 // We limit this to X < Width in case the backend re-expands the intrinsic, 2250 // and has to reintroduce a shift modulo operation (InstCombine might remove 2251 // it after this fold). This still doesn't guarantee that the final codegen 2252 // will match this original pattern. 2253 if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) { 2254 KnownBits KnownL = IC.computeKnownBits(L, /*Depth*/ 0, &Or); 2255 return KnownL.getMaxValue().ult(Width) ? L : nullptr; 2256 } 2257 2258 // For non-constant cases, the following patterns currently only work for 2259 // rotation patterns. 2260 // TODO: Add general funnel-shift compatible patterns. 2261 if (ShVal0 != ShVal1) 2262 return nullptr; 2263 2264 // For non-constant cases we don't support non-pow2 shift masks. 2265 // TODO: Is it worth matching urem as well? 2266 if (!isPowerOf2_32(Width)) 2267 return nullptr; 2268 2269 // The shift amount may be masked with negation: 2270 // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1))) 2271 Value *X; 2272 unsigned Mask = Width - 1; 2273 if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) && 2274 match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))) 2275 return X; 2276 2277 // Similar to above, but the shift amount may be extended after masking, 2278 // so return the extended value as the parameter for the intrinsic. 2279 if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) && 2280 match(R, m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))), 2281 m_SpecificInt(Mask)))) 2282 return L; 2283 2284 if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) && 2285 match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))) 2286 return L; 2287 2288 return nullptr; 2289 }; 2290 2291 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width); 2292 bool IsFshl = true; // Sub on LSHR. 2293 if (!ShAmt) { 2294 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width); 2295 IsFshl = false; // Sub on SHL. 2296 } 2297 if (!ShAmt) 2298 return nullptr; 2299 2300 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr; 2301 Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType()); 2302 return CallInst::Create(F, {ShVal0, ShVal1, ShAmt}); 2303 } 2304 2305 /// Attempt to combine or(zext(x),shl(zext(y),bw/2)) concat packing patterns. 2306 static Instruction *matchOrConcat(Instruction &Or, 2307 InstCombiner::BuilderTy &Builder) { 2308 assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'"); 2309 Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1); 2310 Type *Ty = Or.getType(); 2311 2312 unsigned Width = Ty->getScalarSizeInBits(); 2313 if ((Width & 1) != 0) 2314 return nullptr; 2315 unsigned HalfWidth = Width / 2; 2316 2317 // Canonicalize zext (lower half) to LHS. 2318 if (!isa<ZExtInst>(Op0)) 2319 std::swap(Op0, Op1); 2320 2321 // Find lower/upper half.
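// e.g. or (zext i8 %lo to i16), (shl (zext i8 %hi to i16), 8) packs %hi and
// %lo into the upper and lower halves of an i16; the bswap/bitreverse folds
// below rely on this concat shape.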
2322 Value *LowerSrc, *ShlVal, *UpperSrc; 2323 const APInt *C; 2324 if (!match(Op0, m_OneUse(m_ZExt(m_Value(LowerSrc)))) || 2325 !match(Op1, m_OneUse(m_Shl(m_Value(ShlVal), m_APInt(C)))) || 2326 !match(ShlVal, m_OneUse(m_ZExt(m_Value(UpperSrc))))) 2327 return nullptr; 2328 if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() || 2329 LowerSrc->getType()->getScalarSizeInBits() != HalfWidth) 2330 return nullptr; 2331 2332 auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) { 2333 Value *NewLower = Builder.CreateZExt(Lo, Ty); 2334 Value *NewUpper = Builder.CreateZExt(Hi, Ty); 2335 NewUpper = Builder.CreateShl(NewUpper, HalfWidth); 2336 Value *BinOp = Builder.CreateOr(NewLower, NewUpper); 2337 Function *F = Intrinsic::getDeclaration(Or.getModule(), id, Ty); 2338 return Builder.CreateCall(F, BinOp); 2339 }; 2340 2341 // BSWAP: Push the concat down, swapping the lower/upper sources. 2342 // concat(bswap(x),bswap(y)) -> bswap(concat(x,y)) 2343 Value *LowerBSwap, *UpperBSwap; 2344 if (match(LowerSrc, m_BSwap(m_Value(LowerBSwap))) && 2345 match(UpperSrc, m_BSwap(m_Value(UpperBSwap)))) 2346 return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap); 2347 2348 // BITREVERSE: Push the concat down, swapping the lower/upper sources. 2349 // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y)) 2350 Value *LowerBRev, *UpperBRev; 2351 if (match(LowerSrc, m_BitReverse(m_Value(LowerBRev))) && 2352 match(UpperSrc, m_BitReverse(m_Value(UpperBRev)))) 2353 return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev); 2354 2355 return nullptr; 2356 } 2357 2358 /// If all elements of two constant vectors are 0/-1 and inverses, return true. 2359 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) { 2360 unsigned NumElts = cast<FixedVectorType>(C1->getType())->getNumElements(); 2361 for (unsigned i = 0; i != NumElts; ++i) { 2362 Constant *EltC1 = C1->getAggregateElement(i); 2363 Constant *EltC2 = C2->getAggregateElement(i); 2364 if (!EltC1 || !EltC2) 2365 return false; 2366 2367 // One element must be all ones, and the other must be all zeros. 2368 if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) || 2369 (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes())))) 2370 return false; 2371 } 2372 return true; 2373 } 2374 2375 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or 2376 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of 2377 /// B, it can be used as the condition operand of a select instruction. 2378 Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B) { 2379 // We may have peeked through bitcasts in the caller. 2380 // Exit immediately if we don't have (vector) integer types. 2381 Type *Ty = A->getType(); 2382 if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy()) 2383 return nullptr; 2384 2385 // If A is the 'not' operand of B and has enough signbits, we have our answer. 2386 if (match(B, m_Not(m_Specific(A)))) { 2387 // If these are scalars or vectors of i1, A can be used directly. 2388 if (Ty->isIntOrIntVectorTy(1)) 2389 return A; 2390 2391 // If we look through a vector bitcast, the caller will bitcast the operands 2392 // to match the condition's number of bits (N x i1). 2393 // To make this poison-safe, disallow bitcast from wide element to narrow 2394 // element. That could allow poison in lanes where it was not present in the 2395 // original code. 
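// e.g. if A is a sign-extension of a <4 x i1> condition, every element is
// all-ones or all-zero, so truncating A back to <4 x i1> recovers the
// condition exactly.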
2396 A = peekThroughBitcast(A); 2397 if (A->getType()->isIntOrIntVectorTy()) { 2398 unsigned NumSignBits = ComputeNumSignBits(A); 2399 if (NumSignBits == A->getType()->getScalarSizeInBits() && 2400 NumSignBits <= Ty->getScalarSizeInBits()) 2401 return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(A->getType())); 2402 } 2403 return nullptr; 2404 } 2405 2406 // If both operands are constants, see if the constants are inverse bitmasks. 2407 Constant *AConst, *BConst; 2408 if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst))) 2409 if (AConst == ConstantExpr::getNot(BConst) && 2410 ComputeNumSignBits(A) == Ty->getScalarSizeInBits()) 2411 return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty)); 2412 2413 // Look for more complex patterns. The 'not' op may be hidden behind various 2414 // casts. Look through sexts and bitcasts to find the booleans. 2415 Value *Cond; 2416 Value *NotB; 2417 if (match(A, m_SExt(m_Value(Cond))) && 2418 Cond->getType()->isIntOrIntVectorTy(1)) { 2419 // A = sext i1 Cond; B = sext (not (i1 Cond)) 2420 if (match(B, m_SExt(m_Not(m_Specific(Cond))))) 2421 return Cond; 2422 2423 // A = sext i1 Cond; B = not ({bitcast} (sext (i1 Cond))) 2424 // TODO: The one-use checks are unnecessary or misplaced. If the caller 2425 // checked for uses on logic ops/casts, that should be enough to 2426 // make this transform worthwhile. 2427 if (match(B, m_OneUse(m_Not(m_Value(NotB))))) { 2428 NotB = peekThroughBitcast(NotB, true); 2429 if (match(NotB, m_SExt(m_Specific(Cond)))) 2430 return Cond; 2431 } 2432 } 2433 2434 // All scalar (and most vector) possibilities should be handled now. 2435 // Try more matches that only apply to non-splat constant vectors. 2436 if (!Ty->isVectorTy()) 2437 return nullptr; 2438 2439 // If both operands are xor'd with constants using the same sexted boolean 2440 // operand, see if the constants are inverse bitmasks. 2441 // TODO: Use ConstantExpr::getNot()? 2442 if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) && 2443 match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) && 2444 Cond->getType()->isIntOrIntVectorTy(1) && 2445 areInverseVectorBitmasks(AConst, BConst)) { 2446 AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty)); 2447 return Builder.CreateXor(Cond, AConst); 2448 } 2449 return nullptr; 2450 } 2451 2452 /// We have an expression of the form (A & C) | (B & D). Try to simplify this 2453 /// to "A' ? C : D", where A' is a boolean or vector of booleans. 2454 Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *C, Value *B, 2455 Value *D) { 2456 // The potential condition of the select may be bitcasted. In that case, look 2457 // through its bitcast and the corresponding bitcast of the 'not' condition. 2458 Type *OrigType = A->getType(); 2459 A = peekThroughBitcast(A, true); 2460 B = peekThroughBitcast(B, true); 2461 if (Value *Cond = getSelectCondition(A, B)) { 2462 // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D)) 2463 // If this is a vector, we may need to cast to match the condition's length. 2464 // The bitcasts will either all exist or all not exist. The builder will 2465 // not create unnecessary casts if the types already match. 
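// For illustration, with a <4 x i1> condition %cond and no bitcasts involved:
//   (sext %cond & %c) | (sext (xor %cond, true) & %d)
//     --> select <4 x i1> %cond, %c, %d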
2466 Type *SelTy = A->getType(); 2467 if (auto *VecTy = dyn_cast<VectorType>(Cond->getType())) { 2468 // For a fixed or scalable vector get N from <{vscale x} N x iM> 2469 unsigned Elts = VecTy->getElementCount().getKnownMinValue(); 2470 // For a fixed or scalable vector, get the size in bits of N x iM; for a 2471 // scalar this is just M. 2472 unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinSize(); 2473 Type *EltTy = Builder.getIntNTy(SelEltSize / Elts); 2474 SelTy = VectorType::get(EltTy, VecTy->getElementCount()); 2475 } 2476 Value *BitcastC = Builder.CreateBitCast(C, SelTy); 2477 Value *BitcastD = Builder.CreateBitCast(D, SelTy); 2478 Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD); 2479 return Builder.CreateBitCast(Select, OrigType); 2480 } 2481 2482 return nullptr; 2483 } 2484 2485 // (icmp eq X, 0) | (icmp ult Other, X) -> (icmp ule Other, X-1) 2486 // (icmp ne X, 0) & (icmp uge Other, X) -> (icmp ugt Other, X-1) 2487 Value *foldAndOrOfICmpEqZeroAndICmp(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, 2488 IRBuilderBase &Builder) { 2489 ICmpInst::Predicate LPred = 2490 IsAnd ? LHS->getInversePredicate() : LHS->getPredicate(); 2491 ICmpInst::Predicate RPred = 2492 IsAnd ? RHS->getInversePredicate() : RHS->getPredicate(); 2493 Value *LHS0 = LHS->getOperand(0); 2494 if (LPred != ICmpInst::ICMP_EQ || !match(LHS->getOperand(1), m_Zero()) || 2495 !LHS0->getType()->isIntOrIntVectorTy() || 2496 !(LHS->hasOneUse() || RHS->hasOneUse())) 2497 return nullptr; 2498 2499 Value *Other; 2500 if (RPred == ICmpInst::ICMP_ULT && RHS->getOperand(1) == LHS0) 2501 Other = RHS->getOperand(0); 2502 else if (RPred == ICmpInst::ICMP_UGT && RHS->getOperand(0) == LHS0) 2503 Other = RHS->getOperand(1); 2504 else 2505 return nullptr; 2506 2507 return Builder.CreateICmp( 2508 IsAnd ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE, 2509 Builder.CreateAdd(LHS0, Constant::getAllOnesValue(LHS0->getType())), 2510 Other); 2511 } 2512 2513 /// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible. 2514 /// If IsLogical is true, then the and/or is in select form and the transform 2515 /// must be poison-safe. 2516 Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, 2517 Instruction &I, bool IsAnd, 2518 bool IsLogical) { 2519 const SimplifyQuery Q = SQ.getWithInstruction(&I); 2520 2521 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2) 2522 // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2) 2523 // if K1 and K2 are a one-bit mask. 2524 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, &I, IsAnd, IsLogical)) 2525 return V; 2526 2527 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 2528 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0); 2529 Value *LHS1 = LHS->getOperand(1), *RHS1 = RHS->getOperand(1); 2530 const APInt *LHSC = nullptr, *RHSC = nullptr; 2531 match(LHS1, m_APInt(LHSC)); 2532 match(RHS1, m_APInt(RHSC)); 2533 2534 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B) 2535 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B) 2536 if (predicatesFoldable(PredL, PredR)) { 2537 if (LHS0 == RHS1 && LHS1 == RHS0) { 2538 PredL = ICmpInst::getSwappedPredicate(PredL); 2539 std::swap(LHS0, LHS1); 2540 } 2541 if (LHS0 == RHS0 && LHS1 == RHS1) { 2542 unsigned Code = IsAnd ? 
getICmpCode(PredL) & getICmpCode(PredR) 2543 : getICmpCode(PredL) | getICmpCode(PredR); 2544 bool IsSigned = LHS->isSigned() || RHS->isSigned(); 2545 return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder); 2546 } 2547 } 2548 2549 // handle (roughly): 2550 // (icmp ne (A & B), C) | (icmp ne (A & D), E) 2551 // (icmp eq (A & B), C) & (icmp eq (A & D), E) 2552 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, IsLogical, Builder)) 2553 return V; 2554 2555 // TODO: One of these directions is fine with logical and/or, the other could 2556 // be supported by inserting freeze. 2557 if (!IsLogical) { 2558 if (Value *V = foldAndOrOfICmpEqZeroAndICmp(LHS, RHS, IsAnd, Builder)) 2559 return V; 2560 if (Value *V = foldAndOrOfICmpEqZeroAndICmp(RHS, LHS, IsAnd, Builder)) 2561 return V; 2562 } 2563 2564 // TODO: Verify whether this is safe for logical and/or. 2565 if (!IsLogical) { 2566 if (Value *V = foldAndOrOfICmpsWithConstEq(LHS, RHS, IsAnd, Builder, Q)) 2567 return V; 2568 if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, IsAnd, Builder, Q)) 2569 return V; 2570 } 2571 2572 if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, IsAnd, Builder)) 2573 return V; 2574 if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, IsAnd, Builder)) 2575 return V; 2576 2577 // TODO: One of these directions is fine with logical and/or, the other could 2578 // be supported by inserting freeze. 2579 if (!IsLogical) { 2580 // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n 2581 // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n 2582 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/!IsAnd)) 2583 return V; 2584 2585 // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n 2586 // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n 2587 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/!IsAnd)) 2588 return V; 2589 } 2590 2591 // TODO: Add conjugated or fold, check whether it is safe for logical and/or. 2592 if (IsAnd && !IsLogical) 2593 if (Value *V = foldSignedTruncationCheck(LHS, RHS, I, Builder)) 2594 return V; 2595 2596 if (Value *V = foldIsPowerOf2(LHS, RHS, IsAnd, Builder)) 2597 return V; 2598 2599 // TODO: Verify whether this is safe for logical and/or. 2600 if (!IsLogical) { 2601 if (Value *X = foldUnsignedUnderflowCheck(LHS, RHS, IsAnd, Q, Builder)) 2602 return X; 2603 if (Value *X = foldUnsignedUnderflowCheck(RHS, LHS, IsAnd, Q, Builder)) 2604 return X; 2605 } 2606 2607 if (Value *X = foldEqOfParts(LHS, RHS, IsAnd)) 2608 return X; 2609 2610 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0) 2611 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0) 2612 // TODO: Remove this when foldLogOpOfMaskedICmps can handle undefs. 2613 if (!IsLogical && PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) && 2614 PredL == PredR && match(LHS1, m_ZeroInt()) && match(RHS1, m_ZeroInt()) && 2615 LHS0->getType() == RHS0->getType()) { 2616 Value *NewOr = Builder.CreateOr(LHS0, RHS0); 2617 return Builder.CreateICmp(PredL, NewOr, 2618 Constant::getNullValue(NewOr->getType())); 2619 } 2620 2621 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2). 2622 if (!LHSC || !RHSC) 2623 return nullptr; 2624 2625 // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2 2626 // (trunc x) != C1 | (and x, CA) != C2 -> (and x, CA|CMAX) != C1|C2 2627 // where CMAX is the all ones value for the truncated type, 2628 // iff the lower bits of C2 and CA are zero. 2629 if (PredL == (IsAnd ? 
ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) && 2630 PredL == PredR && LHS->hasOneUse() && RHS->hasOneUse()) { 2631 Value *V; 2632 const APInt *AndC, *SmallC = nullptr, *BigC = nullptr; 2633 2634 // (trunc x) == C1 & (and x, CA) == C2 2635 // (and x, CA) == C2 & (trunc x) == C1 2636 if (match(RHS0, m_Trunc(m_Value(V))) && 2637 match(LHS0, m_And(m_Specific(V), m_APInt(AndC)))) { 2638 SmallC = RHSC; 2639 BigC = LHSC; 2640 } else if (match(LHS0, m_Trunc(m_Value(V))) && 2641 match(RHS0, m_And(m_Specific(V), m_APInt(AndC)))) { 2642 SmallC = LHSC; 2643 BigC = RHSC; 2644 } 2645 2646 if (SmallC && BigC) { 2647 unsigned BigBitSize = BigC->getBitWidth(); 2648 unsigned SmallBitSize = SmallC->getBitWidth(); 2649 2650 // Check that the low bits are zero. 2651 APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize); 2652 if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) { 2653 Value *NewAnd = Builder.CreateAnd(V, Low | *AndC); 2654 APInt N = SmallC->zext(BigBitSize) | *BigC; 2655 Value *NewVal = ConstantInt::get(NewAnd->getType(), N); 2656 return Builder.CreateICmp(PredL, NewAnd, NewVal); 2657 } 2658 } 2659 } 2660 2661 // Match naive pattern (and its inverted form) for checking if two values 2662 // share same sign. An example of the pattern: 2663 // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1) 2664 // Inverted form (example): 2665 // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0) 2666 bool TrueIfSignedL, TrueIfSignedR; 2667 if (isSignBitCheck(PredL, *LHSC, TrueIfSignedL) && 2668 isSignBitCheck(PredR, *RHSC, TrueIfSignedR) && 2669 (RHS->hasOneUse() || LHS->hasOneUse())) { 2670 Value *X, *Y; 2671 if (IsAnd) { 2672 if ((TrueIfSignedL && !TrueIfSignedR && 2673 match(LHS0, m_Or(m_Value(X), m_Value(Y))) && 2674 match(RHS0, m_c_And(m_Specific(X), m_Specific(Y)))) || 2675 (!TrueIfSignedL && TrueIfSignedR && 2676 match(LHS0, m_And(m_Value(X), m_Value(Y))) && 2677 match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y))))) { 2678 Value *NewXor = Builder.CreateXor(X, Y); 2679 return Builder.CreateIsNeg(NewXor); 2680 } 2681 } else { 2682 if ((TrueIfSignedL && !TrueIfSignedR && 2683 match(LHS0, m_And(m_Value(X), m_Value(Y))) && 2684 match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))) || 2685 (!TrueIfSignedL && TrueIfSignedR && 2686 match(LHS0, m_Or(m_Value(X), m_Value(Y))) && 2687 match(RHS0, m_c_And(m_Specific(X), m_Specific(Y))))) { 2688 Value *NewXor = Builder.CreateXor(X, Y); 2689 return Builder.CreateIsNotNeg(NewXor); 2690 } 2691 } 2692 } 2693 2694 return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd); 2695 } 2696 2697 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 2698 // here. We should standardize that construct where it is needed or choose some 2699 // other way to ensure that commutated variants of patterns are not missed. 2700 Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) { 2701 if (Value *V = simplifyOrInst(I.getOperand(0), I.getOperand(1), 2702 SQ.getWithInstruction(&I))) 2703 return replaceInstUsesWith(I, V); 2704 2705 if (SimplifyAssociativeOrCommutative(I)) 2706 return &I; 2707 2708 if (Instruction *X = foldVectorBinop(I)) 2709 return X; 2710 2711 if (Instruction *Phi = foldBinopWithPhiOperands(I)) 2712 return Phi; 2713 2714 // See if we can simplify any instructions used by the instruction whose sole 2715 // purpose is to compute bits we don't care about. 2716 if (SimplifyDemandedInstructionBits(I)) 2717 return &I; 2718 2719 // Do this before using distributive laws to catch simple and/or/not patterns. 
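// e.g. foldOrToXor rewrites (A & B) | ~(A | B) into ~(A ^ B).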
2720 if (Instruction *Xor = foldOrToXor(I, Builder)) 2721 return Xor; 2722 2723 if (Instruction *X = foldComplexAndOrPatterns(I, Builder)) 2724 return X; 2725 2726 // (A&B)|(A&C) -> A&(B|C) etc 2727 if (Value *V = SimplifyUsingDistributiveLaws(I)) 2728 return replaceInstUsesWith(I, V); 2729 2730 if (Value *V = SimplifyBSwap(I, Builder)) 2731 return replaceInstUsesWith(I, V); 2732 2733 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2734 Type *Ty = I.getType(); 2735 if (Ty->isIntOrIntVectorTy(1)) { 2736 if (auto *SI0 = dyn_cast<SelectInst>(Op0)) { 2737 if (auto *I = 2738 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ false)) 2739 return I; 2740 } 2741 if (auto *SI1 = dyn_cast<SelectInst>(Op1)) { 2742 if (auto *I = 2743 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ false)) 2744 return I; 2745 } 2746 } 2747 2748 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 2749 return FoldedLogic; 2750 2751 if (Instruction *BitOp = matchBSwapOrBitReverse(I, /*MatchBSwaps*/ true, 2752 /*MatchBitReversals*/ true)) 2753 return BitOp; 2754 2755 if (Instruction *Funnel = matchFunnelShift(I, *this)) 2756 return Funnel; 2757 2758 if (Instruction *Concat = matchOrConcat(I, Builder)) 2759 return replaceInstUsesWith(I, Concat); 2760 2761 Value *X, *Y; 2762 const APInt *CV; 2763 if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) && 2764 !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, 0, &I)) { 2765 // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0 2766 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X). 2767 Value *Or = Builder.CreateOr(X, Y); 2768 return BinaryOperator::CreateXor(Or, ConstantInt::get(Ty, *CV)); 2769 } 2770 2771 // If the operands have no common bits set: 2772 // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1) 2773 if (match(&I, 2774 m_c_Or(m_OneUse(m_Mul(m_Value(X), m_Value(Y))), m_Deferred(X))) && 2775 haveNoCommonBitsSet(Op0, Op1, DL)) { 2776 Value *IncrementY = Builder.CreateAdd(Y, ConstantInt::get(Ty, 1)); 2777 return BinaryOperator::CreateMul(X, IncrementY); 2778 } 2779 2780 // (A & C) | (B & D) 2781 Value *A, *B, *C, *D; 2782 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 2783 match(Op1, m_And(m_Value(B), m_Value(D)))) { 2784 2785 // (A & C0) | (B & C1) 2786 const APInt *C0, *C1; 2787 if (match(C, m_APInt(C0)) && match(D, m_APInt(C1))) { 2788 Value *X; 2789 if (*C0 == ~*C1) { 2790 // ((X | B) & MaskC) | (B & ~MaskC) -> (X & MaskC) | B 2791 if (match(A, m_c_Or(m_Value(X), m_Specific(B)))) 2792 return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C0), B); 2793 // (A & MaskC) | ((X | A) & ~MaskC) -> (X & ~MaskC) | A 2794 if (match(B, m_c_Or(m_Specific(A), m_Value(X)))) 2795 return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C1), A); 2796 2797 // ((X ^ B) & MaskC) | (B & ~MaskC) -> (X & MaskC) ^ B 2798 if (match(A, m_c_Xor(m_Value(X), m_Specific(B)))) 2799 return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C0), B); 2800 // (A & MaskC) | ((X ^ A) & ~MaskC) -> (X & ~MaskC) ^ A 2801 if (match(B, m_c_Xor(m_Specific(A), m_Value(X)))) 2802 return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C1), A); 2803 } 2804 2805 if ((*C0 & *C1).isZero()) { 2806 // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1) 2807 // iff (C0 & C1) == 0 and (X & ~C0) == 0 2808 if (match(A, m_c_Or(m_Value(X), m_Specific(B))) && 2809 MaskedValueIsZero(X, ~*C0, 0, &I)) { 2810 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); 2811 return BinaryOperator::CreateAnd(A, C01); 2812 } 2813 // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | 
C1) 2814 // iff (C0 & C1) == 0 and (X & ~C1) == 0 2815 if (match(B, m_c_Or(m_Value(X), m_Specific(A))) && 2816 MaskedValueIsZero(X, ~*C1, 0, &I)) { 2817 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); 2818 return BinaryOperator::CreateAnd(B, C01); 2819 } 2820 // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1) 2821 // iff (C0 & C1) == 0 and (C2 & ~C0) == 0 and (C3 & ~C1) == 0. 2822 const APInt *C2, *C3; 2823 if (match(A, m_Or(m_Value(X), m_APInt(C2))) && 2824 match(B, m_Or(m_Specific(X), m_APInt(C3))) && 2825 (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) { 2826 Value *Or = Builder.CreateOr(X, *C2 | *C3, "bitfield"); 2827 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1); 2828 return BinaryOperator::CreateAnd(Or, C01); 2829 } 2830 } 2831 } 2832 2833 // Don't try to form a select if it's unlikely that we'll get rid of at 2834 // least one of the operands. A select is generally more expensive than the 2835 // 'or' that it is replacing. 2836 if (Op0->hasOneUse() || Op1->hasOneUse()) { 2837 // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants. 2838 if (Value *V = matchSelectFromAndOr(A, C, B, D)) 2839 return replaceInstUsesWith(I, V); 2840 if (Value *V = matchSelectFromAndOr(A, C, D, B)) 2841 return replaceInstUsesWith(I, V); 2842 if (Value *V = matchSelectFromAndOr(C, A, B, D)) 2843 return replaceInstUsesWith(I, V); 2844 if (Value *V = matchSelectFromAndOr(C, A, D, B)) 2845 return replaceInstUsesWith(I, V); 2846 if (Value *V = matchSelectFromAndOr(B, D, A, C)) 2847 return replaceInstUsesWith(I, V); 2848 if (Value *V = matchSelectFromAndOr(B, D, C, A)) 2849 return replaceInstUsesWith(I, V); 2850 if (Value *V = matchSelectFromAndOr(D, B, A, C)) 2851 return replaceInstUsesWith(I, V); 2852 if (Value *V = matchSelectFromAndOr(D, B, C, A)) 2853 return replaceInstUsesWith(I, V); 2854 } 2855 } 2856 2857 // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C 2858 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) 2859 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) 2860 return BinaryOperator::CreateOr(Op0, C); 2861 2862 // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C 2863 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) 2864 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) 2865 return BinaryOperator::CreateOr(Op1, C); 2866 2867 // ((A & B) ^ C) | B -> C | B 2868 if (match(Op0, m_c_Xor(m_c_And(m_Value(A), m_Specific(Op1)), m_Value(C)))) 2869 return BinaryOperator::CreateOr(C, Op1); 2870 2871 // B | ((A & B) ^ C) -> B | C 2872 if (match(Op1, m_c_Xor(m_c_And(m_Value(A), m_Specific(Op0)), m_Value(C)))) 2873 return BinaryOperator::CreateOr(Op0, C); 2874 2875 // ((B | C) & A) | B -> B | (A & C) 2876 if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A)))) 2877 return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C)); 2878 2879 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder)) 2880 return DeMorgan; 2881 2882 // Canonicalize xor to the RHS. 2883 bool SwappedForXor = false; 2884 if (match(Op0, m_Xor(m_Value(), m_Value()))) { 2885 std::swap(Op0, Op1); 2886 SwappedForXor = true; 2887 } 2888 2889 // A | ( A ^ B) -> A | B 2890 // A | (~A ^ B) -> A | ~B 2891 // (A & B) | (A ^ B) 2892 // ~A | (A ^ B) -> ~(A & B) 2893 // The swap above should always make Op0 the 'not' for the last case. 
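// Quick check of the first identity: with A = 1, both sides are 1; with
// A = 0, A | (A ^ B) = B = A | B.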
2894 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) { 2895 if (Op0 == A || Op0 == B) 2896 return BinaryOperator::CreateOr(A, B); 2897 2898 if (match(Op0, m_And(m_Specific(A), m_Specific(B))) || 2899 match(Op0, m_And(m_Specific(B), m_Specific(A)))) 2900 return BinaryOperator::CreateOr(A, B); 2901 2902 if ((Op0->hasOneUse() || Op1->hasOneUse()) && 2903 (match(Op0, m_Not(m_Specific(A))) || match(Op0, m_Not(m_Specific(B))))) 2904 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B)); 2905 2906 if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) { 2907 Value *Not = Builder.CreateNot(B, B->getName() + ".not"); 2908 return BinaryOperator::CreateOr(Not, Op0); 2909 } 2910 if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) { 2911 Value *Not = Builder.CreateNot(A, A->getName() + ".not"); 2912 return BinaryOperator::CreateOr(Not, Op0); 2913 } 2914 } 2915 2916 // A | ~(A | B) -> A | ~B 2917 // A | ~(A ^ B) -> A | ~B 2918 if (match(Op1, m_Not(m_Value(A)))) 2919 if (BinaryOperator *B = dyn_cast<BinaryOperator>(A)) 2920 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) && 2921 Op1->hasOneUse() && (B->getOpcode() == Instruction::Or || 2922 B->getOpcode() == Instruction::Xor)) { 2923 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) : 2924 B->getOperand(0); 2925 Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not"); 2926 return BinaryOperator::CreateOr(Not, Op0); 2927 } 2928 2929 if (SwappedForXor) 2930 std::swap(Op0, Op1); 2931 2932 { 2933 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0); 2934 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1); 2935 if (LHS && RHS) 2936 if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ false)) 2937 return replaceInstUsesWith(I, Res); 2938 2939 // TODO: Make this recursive; it's a little tricky because an arbitrary 2940 // number of 'or' instructions might have to be created. 2941 Value *X, *Y; 2942 if (LHS && match(Op1, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) { 2943 bool IsLogical = isa<SelectInst>(Op1); 2944 // LHS | (X || Y) --> (LHS || X) || Y 2945 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2946 if (Value *Res = 2947 foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false, IsLogical)) 2948 return replaceInstUsesWith(I, IsLogical 2949 ? Builder.CreateLogicalOr(Res, Y) 2950 : Builder.CreateOr(Res, Y)); 2951 // LHS | (X || Y) --> X || (LHS | Y) 2952 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2953 if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false, 2954 /* IsLogical */ false)) 2955 return replaceInstUsesWith(I, IsLogical 2956 ? Builder.CreateLogicalOr(X, Res) 2957 : Builder.CreateOr(X, Res)); 2958 } 2959 if (RHS && match(Op0, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) { 2960 bool IsLogical = isa<SelectInst>(Op0); 2961 // (X || Y) | RHS --> (X || RHS) || Y 2962 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2963 if (Value *Res = 2964 foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false, IsLogical)) 2965 return replaceInstUsesWith(I, IsLogical 2966 ? Builder.CreateLogicalOr(Res, Y) 2967 : Builder.CreateOr(Res, Y)); 2968 // (X || Y) | RHS --> X || (Y | RHS) 2969 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2970 if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false, 2971 /* IsLogical */ false)) 2972 return replaceInstUsesWith(I, IsLogical 2973 ? 
Builder.CreateLogicalOr(X, Res) 2974 : Builder.CreateOr(X, Res)); 2975 } 2976 } 2977 2978 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) 2979 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) 2980 if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ false)) 2981 return replaceInstUsesWith(I, Res); 2982 2983 if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder)) 2984 return FoldedFCmps; 2985 2986 if (Instruction *CastedOr = foldCastedBitwiseLogic(I)) 2987 return CastedOr; 2988 2989 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I)) 2990 return Sel; 2991 2992 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>. 2993 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold 2994 // with binop identity constant. But creating a select with non-constant 2995 // arm may not be reversible due to poison semantics. Is that a good 2996 // canonicalization? 2997 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) && 2998 A->getType()->isIntOrIntVectorTy(1)) 2999 return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), Op1); 3000 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) && 3001 A->getType()->isIntOrIntVectorTy(1)) 3002 return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), Op0); 3003 3004 // Note: If we've gotten to the point of visiting the outer OR, then the 3005 // inner one couldn't be simplified. If it was a constant, then it won't 3006 // be simplified by a later pass either, so we try swapping the inner/outer 3007 // ORs in the hopes that we'll be able to simplify it this way. 3008 // (X|C) | V --> (X|V) | C 3009 ConstantInt *CI; 3010 if (Op0->hasOneUse() && !match(Op1, m_ConstantInt()) && 3011 match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) { 3012 Value *Inner = Builder.CreateOr(A, Op1); 3013 Inner->takeName(Op0); 3014 return BinaryOperator::CreateOr(Inner, CI); 3015 } 3016 3017 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D)) 3018 // Since this OR statement hasn't been optimized further yet, we hope 3019 // that this transformation will allow the new ORs to be optimized. 3020 { 3021 Value *X = nullptr, *Y = nullptr; 3022 if (Op0->hasOneUse() && Op1->hasOneUse() && 3023 match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) && 3024 match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) { 3025 Value *orTrue = Builder.CreateOr(A, C); 3026 Value *orFalse = Builder.CreateOr(B, D); 3027 return SelectInst::Create(X, orTrue, orFalse); 3028 } 3029 } 3030 3031 // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X) --> X s> Y ? -1 : X. 3032 { 3033 Value *X, *Y; 3034 if (match(&I, m_c_Or(m_OneUse(m_AShr( 3035 m_NSWSub(m_Value(Y), m_Value(X)), 3036 m_SpecificInt(Ty->getScalarSizeInBits() - 1))), 3037 m_Deferred(X)))) { 3038 Value *NewICmpInst = Builder.CreateICmpSGT(X, Y); 3039 Value *AllOnes = ConstantInt::getAllOnesValue(Ty); 3040 return SelectInst::Create(NewICmpInst, AllOnes, X); 3041 } 3042 } 3043 3044 if (Instruction *V = 3045 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I)) 3046 return V; 3047 3048 CmpInst::Predicate Pred; 3049 Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv; 3050 // Check if the OR weakens the overflow condition for umul.with.overflow by 3051 // treating any non-zero result as overflow. In that case, we overflow if both 3052 // umul.with.overflow operands are != 0, as in that case the result can only 3053 // be 0, iff the multiplication overflows. 
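  // Illustrative example (hypothetical IR, names invented) of the fold
  // described above:
  //   %m   = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
  //   %mul = extractvalue { i32, i1 } %m, 0
  //   %ov  = extractvalue { i32, i1 } %m, 1
  //   %nz  = icmp ne i32 %mul, 0
  //   %r   = or i1 %ov, %nz
  // becomes:
  //   %az = icmp ne i32 %a, 0
  //   %bz = icmp ne i32 %b, 0
  //   %r  = and i1 %az, %bz
  // If both operands are non-zero, the wrapped product can only be zero when
  // the multiplication overflowed, and that case is already covered by %ov.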
3054   if (match(&I,
3055             m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_Value(UMulWithOv)),
3056                                 m_Value(Ov)),
3057                    m_CombineAnd(m_ICmp(Pred,
3058                                        m_CombineAnd(m_ExtractValue<0>(
3059                                                         m_Deferred(UMulWithOv)),
3060                                                     m_Value(Mul)),
3061                                        m_ZeroInt()),
3062                                 m_Value(MulIsNotZero)))) &&
3063       (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse())) &&
3064       Pred == CmpInst::ICMP_NE) {
3065     Value *A, *B;
3066     if (match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>(
3067                               m_Value(A), m_Value(B)))) {
3068       Value *NotNullA = Builder.CreateIsNotNull(A);
3069       Value *NotNullB = Builder.CreateIsNotNull(B);
3070       return BinaryOperator::CreateAnd(NotNullA, NotNullB);
3071     }
3072   }
3073
3074   // (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
3075   if (sinkNotIntoOtherHandOfAndOrOr(I))
3076     return &I;
3077
3078   // Improve "get low bit mask up to and including bit X" pattern:
3079   // (1 << X) | ((1 << X) + -1) --> -1 l>> (bitwidth(x) - 1 - X)
3080   if (match(&I, m_c_Or(m_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()),
3081                        m_Shl(m_One(), m_Deferred(X)))) &&
3082       match(&I, m_c_Or(m_OneUse(m_Value()), m_Value()))) {
3083     Value *Sub = Builder.CreateSub(
3084         ConstantInt::get(Ty, Ty->getScalarSizeInBits() - 1), X);
3085     return BinaryOperator::CreateLShr(Constant::getAllOnesValue(Ty), Sub);
3086   }
3087
3088   // An 'or' recurrence with a loop-invariant step is equivalent to (or start, step).
3089   PHINode *PN = nullptr;
3090   Value *Start = nullptr, *Step = nullptr;
3091   if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
3092     return replaceInstUsesWith(I, Builder.CreateOr(Start, Step));
3093
3094   // (A & B) | (C | D) or (C | D) | (A & B)
3095   // Can be combined if C or D is of type (A/B & X)
3096   if (match(&I, m_c_Or(m_OneUse(m_And(m_Value(A), m_Value(B))),
3097                        m_OneUse(m_Or(m_Value(C), m_Value(D)))))) {
3098     // (A & B) | (C | ?) -> C | (? | (A & B))
3099     // (A & B) | (C | ?) -> C | (? | (A & B))
3100     // (A & B) | (C | ?) -> C | (? | (A & B))
3101     // (A & B) | (C | ?) -> C | (? | (A & B))
3102     // (C | ?) | (A & B) -> C | (? | (A & B))
3103     // (C | ?) | (A & B) -> C | (? | (A & B))
3104     // (C | ?) | (A & B) -> C | (? | (A & B))
3105     // (C | ?) | (A & B) -> C | (? | (A & B))
3106     if (match(D, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
3107         match(D, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
3108       return BinaryOperator::CreateOr(
3109           C, Builder.CreateOr(D, Builder.CreateAnd(A, B)));
3110     // (A & B) | (? | D) -> (? | (A & B)) | D
3111     // (A & B) | (? | D) -> (? | (A & B)) | D
3112     // (A & B) | (? | D) -> (? | (A & B)) | D
3113     // (A & B) | (? | D) -> (? | (A & B)) | D
3114     // (? | D) | (A & B) -> (? | (A & B)) | D
3115     // (? | D) | (A & B) -> (? | (A & B)) | D
3116     // (? | D) | (A & B) -> (? | (A & B)) | D
3117     // (? | D) | (A & B) -> (? | (A & B)) | D
3118     if (match(C, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
3119         match(C, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
3120       return BinaryOperator::CreateOr(
3121           Builder.CreateOr(C, Builder.CreateAnd(A, B)), D);
3122   }
3123
3124   return nullptr;
3125 }
3126
3127 /// A ^ B can be specified using other logic ops in a variety of patterns. We
3128 /// can fold these early and efficiently by morphing an existing instruction.
3129 static Instruction *foldXorToXor(BinaryOperator &I,
3130                                  InstCombiner::BuilderTy &Builder) {
3131   assert(I.getOpcode() == Instruction::Xor);
3132   Value *Op0 = I.getOperand(0);
3133   Value *Op1 = I.getOperand(1);
3134   Value *A, *B;
3135
3136   // There are 4 commuted variants for each of the basic patterns.
3137 3138 // (A & B) ^ (A | B) -> A ^ B 3139 // (A & B) ^ (B | A) -> A ^ B 3140 // (A | B) ^ (A & B) -> A ^ B 3141 // (A | B) ^ (B & A) -> A ^ B 3142 if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)), 3143 m_c_Or(m_Deferred(A), m_Deferred(B))))) 3144 return BinaryOperator::CreateXor(A, B); 3145 3146 // (A | ~B) ^ (~A | B) -> A ^ B 3147 // (~B | A) ^ (~A | B) -> A ^ B 3148 // (~A | B) ^ (A | ~B) -> A ^ B 3149 // (B | ~A) ^ (A | ~B) -> A ^ B 3150 if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))), 3151 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B))))) 3152 return BinaryOperator::CreateXor(A, B); 3153 3154 // (A & ~B) ^ (~A & B) -> A ^ B 3155 // (~B & A) ^ (~A & B) -> A ^ B 3156 // (~A & B) ^ (A & ~B) -> A ^ B 3157 // (B & ~A) ^ (A & ~B) -> A ^ B 3158 if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))), 3159 m_c_And(m_Not(m_Deferred(A)), m_Deferred(B))))) 3160 return BinaryOperator::CreateXor(A, B); 3161 3162 // For the remaining cases we need to get rid of one of the operands. 3163 if (!Op0->hasOneUse() && !Op1->hasOneUse()) 3164 return nullptr; 3165 3166 // (A | B) ^ ~(A & B) -> ~(A ^ B) 3167 // (A | B) ^ ~(B & A) -> ~(A ^ B) 3168 // (A & B) ^ ~(A | B) -> ~(A ^ B) 3169 // (A & B) ^ ~(B | A) -> ~(A ^ B) 3170 // Complexity sorting ensures the not will be on the right side. 3171 if ((match(Op0, m_Or(m_Value(A), m_Value(B))) && 3172 match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) || 3173 (match(Op0, m_And(m_Value(A), m_Value(B))) && 3174 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))) 3175 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 3176 3177 return nullptr; 3178 } 3179 3180 Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, 3181 BinaryOperator &I) { 3182 assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS && 3183 I.getOperand(1) == RHS && "Should be 'xor' with these operands"); 3184 3185 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 3186 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); 3187 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); 3188 3189 if (predicatesFoldable(PredL, PredR)) { 3190 if (LHS0 == RHS1 && LHS1 == RHS0) { 3191 std::swap(LHS0, LHS1); 3192 PredL = ICmpInst::getSwappedPredicate(PredL); 3193 } 3194 if (LHS0 == RHS0 && LHS1 == RHS1) { 3195 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B) 3196 unsigned Code = getICmpCode(PredL) ^ getICmpCode(PredR); 3197 bool IsSigned = LHS->isSigned() || RHS->isSigned(); 3198 return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder); 3199 } 3200 } 3201 3202 // TODO: This can be generalized to compares of non-signbits using 3203 // decomposeBitTestICmp(). It could be enhanced more by using (something like) 3204 // foldLogOpOfMaskedICmps(). 3205 const APInt *LC, *RC; 3206 if (match(LHS1, m_APInt(LC)) && match(RHS1, m_APInt(RC)) && 3207 LHS0->getType() == RHS0->getType() && 3208 LHS0->getType()->isIntOrIntVectorTy() && 3209 (LHS->hasOneUse() || RHS->hasOneUse())) { 3210 // Convert xor of signbit tests to signbit test of xor'd values: 3211 // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0 3212 // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0 3213 // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1 3214 // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1 3215 bool TrueIfSignedL, TrueIfSignedR; 3216 if (isSignBitCheck(PredL, *LC, TrueIfSignedL) && 3217 isSignBitCheck(PredR, *RC, TrueIfSignedR)) { 3218 Value *XorLR = Builder.CreateXor(LHS0, RHS0); 3219 return TrueIfSignedL == TrueIfSignedR ? 
Builder.CreateIsNeg(XorLR) : 3220 Builder.CreateIsNotNeg(XorLR); 3221 } 3222 3223 // (X > C) ^ (X < C + 2) --> X != C + 1 3224 // (X < C + 2) ^ (X > C) --> X != C + 1 3225 // Considering the correctness of this pattern, we should avoid that C is 3226 // non-negative and C + 2 is negative, although it will be matched by other 3227 // patterns. 3228 const APInt *C1, *C2; 3229 if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_APInt(C1)) && 3230 PredR == CmpInst::ICMP_SLT && match(RHS1, m_APInt(C2))) || 3231 (PredL == CmpInst::ICMP_SLT && match(LHS1, m_APInt(C2)) && 3232 PredR == CmpInst::ICMP_SGT && match(RHS1, m_APInt(C1)))) 3233 if (LHS0 == RHS0 && *C1 + 2 == *C2 && 3234 (C1->isNegative() || C2->isNonNegative())) 3235 return Builder.CreateICmpNE(LHS0, 3236 ConstantInt::get(LHS0->getType(), *C1 + 1)); 3237 } 3238 3239 // Instead of trying to imitate the folds for and/or, decompose this 'xor' 3240 // into those logic ops. That is, try to turn this into an and-of-icmps 3241 // because we have many folds for that pattern. 3242 // 3243 // This is based on a truth table definition of xor: 3244 // X ^ Y --> (X | Y) & !(X & Y) 3245 if (Value *OrICmp = simplifyBinOp(Instruction::Or, LHS, RHS, SQ)) { 3246 // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y). 3247 // TODO: If OrICmp is false, the whole thing is false (InstSimplify?). 3248 if (Value *AndICmp = simplifyBinOp(Instruction::And, LHS, RHS, SQ)) { 3249 // TODO: Independently handle cases where the 'and' side is a constant. 3250 ICmpInst *X = nullptr, *Y = nullptr; 3251 if (OrICmp == LHS && AndICmp == RHS) { 3252 // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS --> X & !Y 3253 X = LHS; 3254 Y = RHS; 3255 } 3256 if (OrICmp == RHS && AndICmp == LHS) { 3257 // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS --> !Y & X 3258 X = RHS; 3259 Y = LHS; 3260 } 3261 if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) { 3262 // Invert the predicate of 'Y', thus inverting its output. 3263 Y->setPredicate(Y->getInversePredicate()); 3264 // So, are there other uses of Y? 3265 if (!Y->hasOneUse()) { 3266 // We need to adapt other uses of Y though. Get a value that matches 3267 // the original value of Y before inversion. While this increases 3268 // immediate instruction count, we have just ensured that all the 3269 // users are freely-invertible, so that 'not' *will* get folded away. 3270 BuilderTy::InsertPointGuard Guard(Builder); 3271 // Set insertion point to right after the Y. 3272 Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator())); 3273 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3274 // Replace all uses of Y (excluding the one in NotY!) with NotY. 3275 Worklist.pushUsersToWorkList(*Y); 3276 Y->replaceUsesWithIf(NotY, 3277 [NotY](Use &U) { return U.getUser() != NotY; }); 3278 } 3279 // All done. 3280 return Builder.CreateAnd(LHS, RHS); 3281 } 3282 } 3283 } 3284 3285 return nullptr; 3286 } 3287 3288 /// If we have a masked merge, in the canonical form of: 3289 /// (assuming that A only has one use.) 3290 /// | A | |B| 3291 /// ((x ^ y) & M) ^ y 3292 /// | D | 3293 /// * If M is inverted: 3294 /// | D | 3295 /// ((x ^ y) & ~M) ^ y 3296 /// We can canonicalize by swapping the final xor operand 3297 /// to eliminate the 'not' of the mask. 
3298 /// ((x ^ y) & M) ^ x 3299 /// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops 3300 /// because that shortens the dependency chain and improves analysis: 3301 /// (x & M) | (y & ~M) 3302 static Instruction *visitMaskedMerge(BinaryOperator &I, 3303 InstCombiner::BuilderTy &Builder) { 3304 Value *B, *X, *D; 3305 Value *M; 3306 if (!match(&I, m_c_Xor(m_Value(B), 3307 m_OneUse(m_c_And( 3308 m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)), 3309 m_Value(D)), 3310 m_Value(M)))))) 3311 return nullptr; 3312 3313 Value *NotM; 3314 if (match(M, m_Not(m_Value(NotM)))) { 3315 // De-invert the mask and swap the value in B part. 3316 Value *NewA = Builder.CreateAnd(D, NotM); 3317 return BinaryOperator::CreateXor(NewA, X); 3318 } 3319 3320 Constant *C; 3321 if (D->hasOneUse() && match(M, m_Constant(C))) { 3322 // Propagating undef is unsafe. Clamp undef elements to -1. 3323 Type *EltTy = C->getType()->getScalarType(); 3324 C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy)); 3325 // Unfold. 3326 Value *LHS = Builder.CreateAnd(X, C); 3327 Value *NotC = Builder.CreateNot(C); 3328 Value *RHS = Builder.CreateAnd(B, NotC); 3329 return BinaryOperator::CreateOr(LHS, RHS); 3330 } 3331 3332 return nullptr; 3333 } 3334 3335 // Transform 3336 // ~(x ^ y) 3337 // into: 3338 // (~x) ^ y 3339 // or into 3340 // x ^ (~y) 3341 static Instruction *sinkNotIntoXor(BinaryOperator &I, 3342 InstCombiner::BuilderTy &Builder) { 3343 Value *X, *Y; 3344 // FIXME: one-use check is not needed in general, but currently we are unable 3345 // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182) 3346 if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y)))))) 3347 return nullptr; 3348 3349 // We only want to do the transform if it is free to do. 3350 if (InstCombiner::isFreeToInvert(X, X->hasOneUse())) { 3351 // Ok, good. 3352 } else if (InstCombiner::isFreeToInvert(Y, Y->hasOneUse())) { 3353 std::swap(X, Y); 3354 } else 3355 return nullptr; 3356 3357 Value *NotX = Builder.CreateNot(X, X->getName() + ".not"); 3358 return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan"); 3359 } 3360 3361 /// Canonicalize a shifty way to code absolute value to the more common pattern 3362 /// that uses negation and select. 3363 static Instruction *canonicalizeAbs(BinaryOperator &Xor, 3364 InstCombiner::BuilderTy &Builder) { 3365 assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction."); 3366 3367 // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1. 3368 // We're relying on the fact that we only do this transform when the shift has 3369 // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase 3370 // instructions). 3371 Value *Op0 = Xor.getOperand(0), *Op1 = Xor.getOperand(1); 3372 if (Op0->hasNUses(2)) 3373 std::swap(Op0, Op1); 3374 3375 Type *Ty = Xor.getType(); 3376 Value *A; 3377 const APInt *ShAmt; 3378 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) && 3379 Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 && 3380 match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) { 3381 // Op1 = ashr i32 A, 31 ; smear the sign bit 3382 // xor (add A, Op1), Op1 ; add -1 and flip bits if negative 3383 // --> (A < 0) ? -A : A 3384 Value *IsNeg = Builder.CreateIsNeg(A); 3385 // Copy the nuw/nsw flags from the add to the negate. 
3386 auto *Add = cast<BinaryOperator>(Op0); 3387 Value *NegA = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(), 3388 Add->hasNoSignedWrap()); 3389 return SelectInst::Create(IsNeg, NegA, A); 3390 } 3391 return nullptr; 3392 } 3393 3394 // Transform 3395 // z = (~x) &/| y 3396 // into: 3397 // z = ~(x |/& (~y)) 3398 // iff y is free to invert and all uses of z can be freely updated. 3399 bool InstCombinerImpl::sinkNotIntoOtherHandOfAndOrOr(BinaryOperator &I) { 3400 Instruction::BinaryOps NewOpc; 3401 switch (I.getOpcode()) { 3402 case Instruction::And: 3403 NewOpc = Instruction::Or; 3404 break; 3405 case Instruction::Or: 3406 NewOpc = Instruction::And; 3407 break; 3408 default: 3409 return false; 3410 }; 3411 3412 Value *X, *Y; 3413 if (!match(&I, m_c_BinOp(m_Not(m_Value(X)), m_Value(Y)))) 3414 return false; 3415 3416 // Will we be able to fold the `not` into Y eventually? 3417 if (!InstCombiner::isFreeToInvert(Y, Y->hasOneUse())) 3418 return false; 3419 3420 // And can our users be adapted? 3421 if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr)) 3422 return false; 3423 3424 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3425 Value *NewBinOp = 3426 BinaryOperator::Create(NewOpc, X, NotY, I.getName() + ".not"); 3427 Builder.Insert(NewBinOp); 3428 replaceInstUsesWith(I, NewBinOp); 3429 // We can not just create an outer `not`, it will most likely be immediately 3430 // folded back, reconstructing our initial pattern, and causing an 3431 // infinite combine loop, so immediately manually fold it away. 3432 freelyInvertAllUsersOf(NewBinOp); 3433 return true; 3434 } 3435 3436 Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) { 3437 Value *NotOp; 3438 if (!match(&I, m_Not(m_Value(NotOp)))) 3439 return nullptr; 3440 3441 // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand. 3442 // We must eliminate the and/or (one-use) for these transforms to not increase 3443 // the instruction count. 3444 // 3445 // ~(~X & Y) --> (X | ~Y) 3446 // ~(Y & ~X) --> (X | ~Y) 3447 // 3448 // Note: The logical matches do not check for the commuted patterns because 3449 // those are handled via SimplifySelectsFeedingBinaryOp(). 3450 Type *Ty = I.getType(); 3451 Value *X, *Y; 3452 if (match(NotOp, m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y))))) { 3453 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3454 return BinaryOperator::CreateOr(X, NotY); 3455 } 3456 if (match(NotOp, m_OneUse(m_LogicalAnd(m_Not(m_Value(X)), m_Value(Y))))) { 3457 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3458 return SelectInst::Create(X, ConstantInt::getTrue(Ty), NotY); 3459 } 3460 3461 // ~(~X | Y) --> (X & ~Y) 3462 // ~(Y | ~X) --> (X & ~Y) 3463 if (match(NotOp, m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y))))) { 3464 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3465 return BinaryOperator::CreateAnd(X, NotY); 3466 } 3467 if (match(NotOp, m_OneUse(m_LogicalOr(m_Not(m_Value(X)), m_Value(Y))))) { 3468 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3469 return SelectInst::Create(X, NotY, ConstantInt::getFalse(Ty)); 3470 } 3471 3472 // Is this a 'not' (~) fed by a binary operator? 
3473 BinaryOperator *NotVal; 3474 if (match(NotOp, m_BinOp(NotVal))) { 3475 if (NotVal->getOpcode() == Instruction::And || 3476 NotVal->getOpcode() == Instruction::Or) { 3477 // Apply DeMorgan's Law when inverts are free: 3478 // ~(X & Y) --> (~X | ~Y) 3479 // ~(X | Y) --> (~X & ~Y) 3480 if (isFreeToInvert(NotVal->getOperand(0), 3481 NotVal->getOperand(0)->hasOneUse()) && 3482 isFreeToInvert(NotVal->getOperand(1), 3483 NotVal->getOperand(1)->hasOneUse())) { 3484 Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs"); 3485 Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs"); 3486 if (NotVal->getOpcode() == Instruction::And) 3487 return BinaryOperator::CreateOr(NotX, NotY); 3488 return BinaryOperator::CreateAnd(NotX, NotY); 3489 } 3490 } 3491 3492 // ~((-X) | Y) --> (X - 1) & (~Y) 3493 if (match(NotVal, 3494 m_OneUse(m_c_Or(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))) { 3495 Value *DecX = Builder.CreateAdd(X, ConstantInt::getAllOnesValue(Ty)); 3496 Value *NotY = Builder.CreateNot(Y); 3497 return BinaryOperator::CreateAnd(DecX, NotY); 3498 } 3499 3500 // ~(~X >>s Y) --> (X >>s Y) 3501 if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y)))) 3502 return BinaryOperator::CreateAShr(X, Y); 3503 3504 // If we are inverting a right-shifted constant, we may be able to eliminate 3505 // the 'not' by inverting the constant and using the opposite shift type. 3506 // Canonicalization rules ensure that only a negative constant uses 'ashr', 3507 // but we must check that in case that transform has not fired yet. 3508 3509 // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits) 3510 Constant *C; 3511 if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) && 3512 match(C, m_Negative())) { 3513 // We matched a negative constant, so propagating undef is unsafe. 3514 // Clamp undef elements to -1. 3515 Type *EltTy = Ty->getScalarType(); 3516 C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy)); 3517 return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y); 3518 } 3519 3520 // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits) 3521 if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) && 3522 match(C, m_NonNegative())) { 3523 // We matched a non-negative constant, so propagating undef is unsafe. 3524 // Clamp undef elements to 0. 3525 Type *EltTy = Ty->getScalarType(); 3526 C = Constant::replaceUndefsWith(C, ConstantInt::getNullValue(EltTy)); 3527 return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y); 3528 } 3529 3530 // ~(X + C) --> ~C - X 3531 if (match(NotVal, m_c_Add(m_Value(X), m_ImmConstant(C)))) 3532 return BinaryOperator::CreateSub(ConstantExpr::getNot(C), X); 3533 3534 // ~(X - Y) --> ~X + Y 3535 // FIXME: is it really beneficial to sink the `not` here? 
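    // Derivation, for reference: since ~Z == -Z - 1, we have
    // ~(X - Y) == Y - X - 1 == (-X - 1) + Y == ~X + Y.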
3536 if (match(NotVal, m_Sub(m_Value(X), m_Value(Y)))) 3537 if (isa<Constant>(X) || NotVal->hasOneUse()) 3538 return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y); 3539 3540 // ~(~X + Y) --> X - Y 3541 if (match(NotVal, m_c_Add(m_Not(m_Value(X)), m_Value(Y)))) 3542 return BinaryOperator::CreateWithCopiedFlags(Instruction::Sub, X, Y, 3543 NotVal); 3544 } 3545 3546 // not (cmp A, B) = !cmp A, B 3547 CmpInst::Predicate Pred; 3548 if (match(NotOp, m_OneUse(m_Cmp(Pred, m_Value(), m_Value())))) { 3549 cast<CmpInst>(NotOp)->setPredicate(CmpInst::getInversePredicate(Pred)); 3550 return replaceInstUsesWith(I, NotOp); 3551 } 3552 3553 // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max: 3554 // ~min(~X, ~Y) --> max(X, Y) 3555 // ~max(~X, Y) --> min(X, ~Y) 3556 auto *II = dyn_cast<IntrinsicInst>(NotOp); 3557 if (II && II->hasOneUse()) { 3558 if (match(NotOp, m_MaxOrMin(m_Value(X), m_Value(Y))) && 3559 isFreeToInvert(X, X->hasOneUse()) && 3560 isFreeToInvert(Y, Y->hasOneUse())) { 3561 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID()); 3562 Value *NotX = Builder.CreateNot(X); 3563 Value *NotY = Builder.CreateNot(Y); 3564 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, NotX, NotY); 3565 return replaceInstUsesWith(I, InvMaxMin); 3566 } 3567 if (match(NotOp, m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y)))) { 3568 Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID()); 3569 Value *NotY = Builder.CreateNot(Y); 3570 Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotY); 3571 return replaceInstUsesWith(I, InvMaxMin); 3572 } 3573 } 3574 3575 if (NotOp->hasOneUse()) { 3576 // Pull 'not' into operands of select if both operands are one-use compares 3577 // or one is one-use compare and the other one is a constant. 3578 // Inverting the predicates eliminates the 'not' operation. 3579 // Example: 3580 // not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?) --> 3581 // select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?) 3582 // not (select ?, (cmp TPred, ?, ?), true --> 3583 // select ?, (cmp InvTPred, ?, ?), false 3584 if (auto *Sel = dyn_cast<SelectInst>(NotOp)) { 3585 Value *TV = Sel->getTrueValue(); 3586 Value *FV = Sel->getFalseValue(); 3587 auto *CmpT = dyn_cast<CmpInst>(TV); 3588 auto *CmpF = dyn_cast<CmpInst>(FV); 3589 bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(TV); 3590 bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(FV); 3591 if (InvertibleT && InvertibleF) { 3592 if (CmpT) 3593 CmpT->setPredicate(CmpT->getInversePredicate()); 3594 else 3595 Sel->setTrueValue(ConstantExpr::getNot(cast<Constant>(TV))); 3596 if (CmpF) 3597 CmpF->setPredicate(CmpF->getInversePredicate()); 3598 else 3599 Sel->setFalseValue(ConstantExpr::getNot(cast<Constant>(FV))); 3600 return replaceInstUsesWith(I, Sel); 3601 } 3602 } 3603 } 3604 3605 if (Instruction *NewXor = sinkNotIntoXor(I, Builder)) 3606 return NewXor; 3607 3608 return nullptr; 3609 } 3610 3611 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 3612 // here. We should standardize that construct where it is needed or choose some 3613 // other way to ensure that commutated variants of patterns are not missed. 
3614 Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
3615   if (Value *V = simplifyXorInst(I.getOperand(0), I.getOperand(1),
3616                                  SQ.getWithInstruction(&I)))
3617     return replaceInstUsesWith(I, V);
3618
3619   if (SimplifyAssociativeOrCommutative(I))
3620     return &I;
3621
3622   if (Instruction *X = foldVectorBinop(I))
3623     return X;
3624
3625   if (Instruction *Phi = foldBinopWithPhiOperands(I))
3626     return Phi;
3627
3628   if (Instruction *NewXor = foldXorToXor(I, Builder))
3629     return NewXor;
3630
3631   // (A&B)^(A&C) -> A&(B^C) etc
3632   if (Value *V = SimplifyUsingDistributiveLaws(I))
3633     return replaceInstUsesWith(I, V);
3634
3635   // See if we can simplify any instructions used by the instruction whose sole
3636   // purpose is to compute bits we don't care about.
3637   if (SimplifyDemandedInstructionBits(I))
3638     return &I;
3639
3640   if (Value *V = SimplifyBSwap(I, Builder))
3641     return replaceInstUsesWith(I, V);
3642
3643   if (Instruction *R = foldNot(I))
3644     return R;
3645
3646   // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
3647   // This is a special case in haveNoCommonBitsSet, but the computeKnownBits
3648   // calls in there are unnecessary as SimplifyDemandedInstructionBits should
3649   // have already taken care of those cases.
3650   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3651   Value *M;
3652   if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
3653                         m_c_And(m_Deferred(M), m_Value()))))
3654     return BinaryOperator::CreateOr(Op0, Op1);
3655
3656   if (Instruction *Xor = visitMaskedMerge(I, Builder))
3657     return Xor;
3658
3659   Value *X, *Y;
3660   Constant *C1;
3661   if (match(Op1, m_Constant(C1))) {
3662     Constant *C2;
3663
3664     if (match(Op0, m_OneUse(m_Or(m_Value(X), m_ImmConstant(C2)))) &&
3665         match(C1, m_ImmConstant())) {
3666       // (X | C2) ^ C1 --> (X & ~C2) ^ (C1^C2)
3667       C2 = Constant::replaceUndefsWith(
3668           C2, Constant::getAllOnesValue(C2->getType()->getScalarType()));
3669       Value *And = Builder.CreateAnd(
3670           X, Constant::mergeUndefsWith(ConstantExpr::getNot(C2), C1));
3671       return BinaryOperator::CreateXor(
3672           And, Constant::mergeUndefsWith(ConstantExpr::getXor(C1, C2), C1));
3673     }
3674
3675     // Use DeMorgan and reassociation to eliminate a 'not' op.
3676     if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) {
3677       // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
3678       Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2));
3679       return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1));
3680     }
3681     if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) {
3682       // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
3683       Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2));
3684       return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1));
3685     }
3686
3687     // Convert xor ([trunc] (ashr X, BW-1)), C =>
3688     //   select(X >s -1, C, ~C)
3689     // The ashr creates an all-zeros or all-ones value, which then optionally
3690     // inverts the constant depending on whether the input is negative.
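    // Worked example (values are hypothetical): for i32 %x and C1 == 5,
    //   %s = ashr i32 %x, 31
    //   %r = xor i32 %s, 5
    // yields 5 when %x is non-negative (%s == 0) and ~5 == -6 when %x is
    // negative (%s == -1), i.e. select (%x >s -1), 5, -6.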
3691 const APInt *CA; 3692 if (match(Op0, m_OneUse(m_TruncOrSelf( 3693 m_AShr(m_Value(X), m_APIntAllowUndef(CA))))) && 3694 *CA == X->getType()->getScalarSizeInBits() - 1 && 3695 !match(C1, m_AllOnes())) { 3696 assert(!C1->isZeroValue() && "Unexpected xor with 0"); 3697 Value *IsNotNeg = Builder.CreateIsNotNeg(X); 3698 return SelectInst::Create(IsNotNeg, Op1, Builder.CreateNot(Op1)); 3699 } 3700 } 3701 3702 Type *Ty = I.getType(); 3703 { 3704 const APInt *RHSC; 3705 if (match(Op1, m_APInt(RHSC))) { 3706 Value *X; 3707 const APInt *C; 3708 // (C - X) ^ signmaskC --> (C + signmaskC) - X 3709 if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X)))) 3710 return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C + *RHSC), X); 3711 3712 // (X + C) ^ signmaskC --> X + (C + signmaskC) 3713 if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C)))) 3714 return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C + *RHSC)); 3715 3716 // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0 3717 if (match(Op0, m_Or(m_Value(X), m_APInt(C))) && 3718 MaskedValueIsZero(X, *C, 0, &I)) 3719 return BinaryOperator::CreateXor(X, ConstantInt::get(Ty, *C ^ *RHSC)); 3720 3721 // If RHSC is inverting the remaining bits of shifted X, 3722 // canonicalize to a 'not' before the shift to help SCEV and codegen: 3723 // (X << C) ^ RHSC --> ~X << C 3724 if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_APInt(C)))) && 3725 *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).shl(*C)) { 3726 Value *NotX = Builder.CreateNot(X); 3727 return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *C)); 3728 } 3729 // (X >>u C) ^ RHSC --> ~X >>u C 3730 if (match(Op0, m_OneUse(m_LShr(m_Value(X), m_APInt(C)))) && 3731 *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).lshr(*C)) { 3732 Value *NotX = Builder.CreateNot(X); 3733 return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *C)); 3734 } 3735 // TODO: We could handle 'ashr' here as well. That would be matching 3736 // a 'not' op and moving it before the shift. Doing that requires 3737 // preventing the inverse fold in canShiftBinOpWithConstantRHS(). 3738 } 3739 } 3740 3741 // FIXME: This should not be limited to scalar (pull into APInt match above). 3742 { 3743 Value *X; 3744 ConstantInt *C1, *C2, *C3; 3745 // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3) 3746 if (match(Op1, m_ConstantInt(C3)) && 3747 match(Op0, m_LShr(m_Xor(m_Value(X), m_ConstantInt(C1)), 3748 m_ConstantInt(C2))) && 3749 Op0->hasOneUse()) { 3750 // fold (C1 >> C2) ^ C3 3751 APInt FoldConst = C1->getValue().lshr(C2->getValue()); 3752 FoldConst ^= C3->getValue(); 3753 // Prepare the two operands. 
3754 auto *Opnd0 = Builder.CreateLShr(X, C2); 3755 Opnd0->takeName(Op0); 3756 return BinaryOperator::CreateXor(Opnd0, ConstantInt::get(Ty, FoldConst)); 3757 } 3758 } 3759 3760 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 3761 return FoldedLogic; 3762 3763 // Y ^ (X | Y) --> X & ~Y 3764 // Y ^ (Y | X) --> X & ~Y 3765 if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0))))) 3766 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0)); 3767 // (X | Y) ^ Y --> X & ~Y 3768 // (Y | X) ^ Y --> X & ~Y 3769 if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1))))) 3770 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1)); 3771 3772 // Y ^ (X & Y) --> ~X & Y 3773 // Y ^ (Y & X) --> ~X & Y 3774 if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0))))) 3775 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X)); 3776 // (X & Y) ^ Y --> ~X & Y 3777 // (Y & X) ^ Y --> ~X & Y 3778 // Canonical form is (X & C) ^ C; don't touch that. 3779 // TODO: A 'not' op is better for analysis and codegen, but demanded bits must 3780 // be fixed to prefer that (otherwise we get infinite looping). 3781 if (!match(Op1, m_Constant()) && 3782 match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1))))) 3783 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X)); 3784 3785 Value *A, *B, *C; 3786 // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants. 3787 if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))), 3788 m_OneUse(m_c_Or(m_Deferred(A), m_Value(C)))))) 3789 return BinaryOperator::CreateXor( 3790 Builder.CreateAnd(Builder.CreateNot(A), C), B); 3791 3792 // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants. 3793 if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))), 3794 m_OneUse(m_c_Or(m_Deferred(B), m_Value(C)))))) 3795 return BinaryOperator::CreateXor( 3796 Builder.CreateAnd(Builder.CreateNot(B), C), A); 3797 3798 // (A & B) ^ (A ^ B) -> (A | B) 3799 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 3800 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B)))) 3801 return BinaryOperator::CreateOr(A, B); 3802 // (A ^ B) ^ (A & B) -> (A | B) 3803 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && 3804 match(Op1, m_c_And(m_Specific(A), m_Specific(B)))) 3805 return BinaryOperator::CreateOr(A, B); 3806 3807 // (A & ~B) ^ ~A -> ~(A & B) 3808 // (~B & A) ^ ~A -> ~(A & B) 3809 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) && 3810 match(Op1, m_Not(m_Specific(A)))) 3811 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B)); 3812 3813 // (~A & B) ^ A --> A | B -- There are 4 commuted variants. 3814 if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(A)), m_Value(B)), m_Deferred(A)))) 3815 return BinaryOperator::CreateOr(A, B); 3816 3817 // (~A | B) ^ A --> ~(A & B) 3818 if (match(Op0, m_OneUse(m_c_Or(m_Not(m_Specific(Op1)), m_Value(B))))) 3819 return BinaryOperator::CreateNot(Builder.CreateAnd(Op1, B)); 3820 3821 // A ^ (~A | B) --> ~(A & B) 3822 if (match(Op1, m_OneUse(m_c_Or(m_Not(m_Specific(Op0)), m_Value(B))))) 3823 return BinaryOperator::CreateNot(Builder.CreateAnd(Op0, B)); 3824 3825 // (A | B) ^ (A | C) --> (B ^ C) & ~A -- There are 4 commuted variants. 3826 // TODO: Loosen one-use restriction if common operand is a constant. 
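  // Rationale, for reference: wherever A is set, both sides of the xor are 1
  // and cancel; wherever A is clear, the xor reduces to B ^ C, hence the
  // result is (B ^ C) & ~A.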
3827   Value *D;
3828   if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B)))) &&
3829       match(Op1, m_OneUse(m_Or(m_Value(C), m_Value(D))))) {
3830     if (B == C || B == D)
3831       std::swap(A, B);
3832     if (A == C)
3833       std::swap(C, D);
3834     if (A == D) {
3835       Value *NotA = Builder.CreateNot(A);
3836       return BinaryOperator::CreateAnd(Builder.CreateXor(B, C), NotA);
3837     }
3838   }
3839
3840   if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
3841     if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
3842       if (Value *V = foldXorOfICmps(LHS, RHS, I))
3843         return replaceInstUsesWith(I, V);
3844
3845   if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
3846     return CastedXor;
3847
3848   if (Instruction *Abs = canonicalizeAbs(I, Builder))
3849     return Abs;
3850
3851   // Otherwise, if all else fails, try to hoist the xor-by-constant:
3852   //   (X ^ C) ^ Y --> (X ^ Y) ^ C
3853   // Just like we do in other places, we completely avoid the fold
3854   // for constantexprs, at least to avoid an endless combine loop.
3855   if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_CombineAnd(m_Value(X),
3856                                                     m_Unless(m_ConstantExpr())),
3857                                        m_ImmConstant(C1))),
3858                         m_Value(Y))))
3859     return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);
3860
3861   return nullptr;
3862 }
3863