1 //===- InstCombineAndOrXor.cpp --------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file implements the visitAnd, visitOr, and visitXor functions. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "InstCombineInternal.h" 14 #include "llvm/Analysis/CmpInstAnalysis.h" 15 #include "llvm/Analysis/InstructionSimplify.h" 16 #include "llvm/Transforms/Utils/Local.h" 17 #include "llvm/IR/ConstantRange.h" 18 #include "llvm/IR/Intrinsics.h" 19 #include "llvm/IR/PatternMatch.h" 20 using namespace llvm; 21 using namespace PatternMatch; 22 23 #define DEBUG_TYPE "instcombine" 24 25 /// Similar to getICmpCode but for FCmpInst. This encodes a fcmp predicate into 26 /// a four bit mask. 27 static unsigned getFCmpCode(FCmpInst::Predicate CC) { 28 assert(FCmpInst::FCMP_FALSE <= CC && CC <= FCmpInst::FCMP_TRUE && 29 "Unexpected FCmp predicate!"); 30 // Take advantage of the bit pattern of FCmpInst::Predicate here. 31 // U L G E 32 static_assert(FCmpInst::FCMP_FALSE == 0, ""); // 0 0 0 0 33 static_assert(FCmpInst::FCMP_OEQ == 1, ""); // 0 0 0 1 34 static_assert(FCmpInst::FCMP_OGT == 2, ""); // 0 0 1 0 35 static_assert(FCmpInst::FCMP_OGE == 3, ""); // 0 0 1 1 36 static_assert(FCmpInst::FCMP_OLT == 4, ""); // 0 1 0 0 37 static_assert(FCmpInst::FCMP_OLE == 5, ""); // 0 1 0 1 38 static_assert(FCmpInst::FCMP_ONE == 6, ""); // 0 1 1 0 39 static_assert(FCmpInst::FCMP_ORD == 7, ""); // 0 1 1 1 40 static_assert(FCmpInst::FCMP_UNO == 8, ""); // 1 0 0 0 41 static_assert(FCmpInst::FCMP_UEQ == 9, ""); // 1 0 0 1 42 static_assert(FCmpInst::FCMP_UGT == 10, ""); // 1 0 1 0 43 static_assert(FCmpInst::FCMP_UGE == 11, ""); // 1 0 1 1 44 static_assert(FCmpInst::FCMP_ULT == 12, ""); // 1 1 0 0 45 static_assert(FCmpInst::FCMP_ULE == 13, ""); // 1 1 0 1 46 static_assert(FCmpInst::FCMP_UNE == 14, ""); // 1 1 1 0 47 static_assert(FCmpInst::FCMP_TRUE == 15, ""); // 1 1 1 1 48 return CC; 49 } 50 51 /// This is the complement of getICmpCode, which turns an opcode and two 52 /// operands into either a constant true or false, or a brand new ICmp 53 /// instruction. The sign is passed in to determine which kind of predicate to 54 /// use in the new icmp instruction. 55 static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS, 56 InstCombiner::BuilderTy &Builder) { 57 ICmpInst::Predicate NewPred; 58 if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred)) 59 return TorF; 60 return Builder.CreateICmp(NewPred, LHS, RHS); 61 } 62 63 /// This is the complement of getFCmpCode, which turns an opcode and two 64 /// operands into either a FCmp instruction, or a true/false constant. 
65 static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS, 66 InstCombiner::BuilderTy &Builder) { 67 const auto Pred = static_cast<FCmpInst::Predicate>(Code); 68 assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE && 69 "Unexpected FCmp predicate!"); 70 if (Pred == FCmpInst::FCMP_FALSE) 71 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0); 72 if (Pred == FCmpInst::FCMP_TRUE) 73 return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1); 74 return Builder.CreateFCmp(Pred, LHS, RHS); 75 } 76 77 /// Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or 78 /// BITWISE_OP(BSWAP(A), Constant) to BSWAP(BITWISE_OP(A, B)) 79 /// \param I Binary operator to transform. 80 /// \return Pointer to node that must replace the original binary operator, or 81 /// null pointer if no transformation was made. 82 static Value *SimplifyBSwap(BinaryOperator &I, 83 InstCombiner::BuilderTy &Builder) { 84 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying"); 85 86 Value *OldLHS = I.getOperand(0); 87 Value *OldRHS = I.getOperand(1); 88 89 Value *NewLHS; 90 if (!match(OldLHS, m_BSwap(m_Value(NewLHS)))) 91 return nullptr; 92 93 Value *NewRHS; 94 const APInt *C; 95 96 if (match(OldRHS, m_BSwap(m_Value(NewRHS)))) { 97 // OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) ) 98 if (!OldLHS->hasOneUse() && !OldRHS->hasOneUse()) 99 return nullptr; 100 // NewRHS initialized by the matcher. 101 } else if (match(OldRHS, m_APInt(C))) { 102 // OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) ) 103 if (!OldLHS->hasOneUse()) 104 return nullptr; 105 NewRHS = ConstantInt::get(I.getType(), C->byteSwap()); 106 } else 107 return nullptr; 108 109 Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS); 110 Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap, 111 I.getType()); 112 return Builder.CreateCall(F, BinOp); 113 } 114 115 /// This handles expressions of the form ((val OP C1) & C2). Where 116 /// the Op parameter is 'OP', OpRHS is 'C1', and AndRHS is 'C2'. 117 Instruction *InstCombiner::OptAndOp(BinaryOperator *Op, 118 ConstantInt *OpRHS, 119 ConstantInt *AndRHS, 120 BinaryOperator &TheAnd) { 121 Value *X = Op->getOperand(0); 122 123 switch (Op->getOpcode()) { 124 default: break; 125 case Instruction::Add: 126 if (Op->hasOneUse()) { 127 // Adding a one to a single bit bit-field should be turned into an XOR 128 // of the bit. First thing to check is to see if this AND is with a 129 // single bit constant. 130 const APInt &AndRHSV = AndRHS->getValue(); 131 132 // If there is only one bit set. 133 if (AndRHSV.isPowerOf2()) { 134 // Ok, at this point, we know that we are masking the result of the 135 // ADD down to exactly one bit. If the constant we are adding has 136 // no bits set below this bit, then we can eliminate the ADD. 137 const APInt& AddRHS = OpRHS->getValue(); 138 139 // Check to see if any bits below the one bit set in AndRHSV are set. 140 if ((AddRHS & (AndRHSV - 1)).isNullValue()) { 141 // If not, the only thing that can effect the output of the AND is 142 // the bit specified by AndRHSV. If that bit is set, the effect of 143 // the XOR is to toggle the bit. If it is clear, then the ADD has 144 // no effect. 145 if ((AddRHS & AndRHSV).isNullValue()) { // Bit is not set, noop 146 return replaceOperand(TheAnd, 0, X); 147 } else { 148 // Pull the XOR out of the AND. 
149 Value *NewAnd = Builder.CreateAnd(X, AndRHS); 150 NewAnd->takeName(Op); 151 return BinaryOperator::CreateXor(NewAnd, AndRHS); 152 } 153 } 154 } 155 } 156 break; 157 } 158 return nullptr; 159 } 160 161 /// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise 162 /// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates 163 /// whether to treat V, Lo, and Hi as signed or not. 164 Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi, 165 bool isSigned, bool Inside) { 166 assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) && 167 "Lo is not < Hi in range emission code!"); 168 169 Type *Ty = V->getType(); 170 171 // V >= Min && V < Hi --> V < Hi 172 // V < Min || V >= Hi --> V >= Hi 173 ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE; 174 if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) { 175 Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred; 176 return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi)); 177 } 178 179 // V >= Lo && V < Hi --> V - Lo u< Hi - Lo 180 // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo 181 Value *VMinusLo = 182 Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off"); 183 Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo); 184 return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo); 185 } 186 187 /// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns 188 /// that can be simplified. 189 /// One of A and B is considered the mask. The other is the value. This is 190 /// described as the "AMask" or "BMask" part of the enum. If the enum contains 191 /// only "Mask", then both A and B can be considered masks. If A is the mask, 192 /// then it was proven that (A & C) == C. This is trivial if C == A or C == 0. 193 /// If both A and C are constants, this proof is also easy. 194 /// For the following explanations, we assume that A is the mask. 195 /// 196 /// "AllOnes" declares that the comparison is true only if (A & B) == A or all 197 /// bits of A are set in B. 198 /// Example: (icmp eq (A & 3), 3) -> AMask_AllOnes 199 /// 200 /// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all 201 /// bits of A are cleared in B. 202 /// Example: (icmp eq (A & 3), 0) -> Mask_AllZeroes 203 /// 204 /// "Mixed" declares that (A & B) == C and C might or might not contain any 205 /// number of one bits and zero bits. 206 /// Example: (icmp eq (A & 3), 1) -> AMask_Mixed 207 /// 208 /// "Not" means that in above descriptions "==" should be replaced by "!=". 209 /// Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes 210 /// 211 /// If the mask A contains a single bit, then the following is equivalent: 212 /// (icmp eq (A & B), A) equals (icmp ne (A & B), 0) 213 /// (icmp ne (A & B), A) equals (icmp eq (A & B), 0) 214 enum MaskedICmpType { 215 AMask_AllOnes = 1, 216 AMask_NotAllOnes = 2, 217 BMask_AllOnes = 4, 218 BMask_NotAllOnes = 8, 219 Mask_AllZeros = 16, 220 Mask_NotAllZeros = 32, 221 AMask_Mixed = 64, 222 AMask_NotMixed = 128, 223 BMask_Mixed = 256, 224 BMask_NotMixed = 512 225 }; 226 227 /// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C) 228 /// satisfies. 
229 static unsigned getMaskedICmpType(Value *A, Value *B, Value *C, 230 ICmpInst::Predicate Pred) { 231 ConstantInt *ACst = dyn_cast<ConstantInt>(A); 232 ConstantInt *BCst = dyn_cast<ConstantInt>(B); 233 ConstantInt *CCst = dyn_cast<ConstantInt>(C); 234 bool IsEq = (Pred == ICmpInst::ICMP_EQ); 235 bool IsAPow2 = (ACst && !ACst->isZero() && ACst->getValue().isPowerOf2()); 236 bool IsBPow2 = (BCst && !BCst->isZero() && BCst->getValue().isPowerOf2()); 237 unsigned MaskVal = 0; 238 if (CCst && CCst->isZero()) { 239 // if C is zero, then both A and B qualify as mask 240 MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed) 241 : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed)); 242 if (IsAPow2) 243 MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed) 244 : (AMask_AllOnes | AMask_Mixed)); 245 if (IsBPow2) 246 MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed) 247 : (BMask_AllOnes | BMask_Mixed)); 248 return MaskVal; 249 } 250 251 if (A == C) { 252 MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed) 253 : (AMask_NotAllOnes | AMask_NotMixed)); 254 if (IsAPow2) 255 MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed) 256 : (Mask_AllZeros | AMask_Mixed)); 257 } else if (ACst && CCst && ConstantExpr::getAnd(ACst, CCst) == CCst) { 258 MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed); 259 } 260 261 if (B == C) { 262 MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed) 263 : (BMask_NotAllOnes | BMask_NotMixed)); 264 if (IsBPow2) 265 MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed) 266 : (Mask_AllZeros | BMask_Mixed)); 267 } else if (BCst && CCst && ConstantExpr::getAnd(BCst, CCst) == CCst) { 268 MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed); 269 } 270 271 return MaskVal; 272 } 273 274 /// Convert an analysis of a masked ICmp into its equivalent if all boolean 275 /// operations had the opposite sense. Since each "NotXXX" flag (recording !=) 276 /// is adjacent to the corresponding normal flag (recording ==), this just 277 /// involves swapping those bits over. 278 static unsigned conjugateICmpMask(unsigned Mask) { 279 unsigned NewMask; 280 NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros | 281 AMask_Mixed | BMask_Mixed)) 282 << 1; 283 284 NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros | 285 AMask_NotMixed | BMask_NotMixed)) 286 >> 1; 287 288 return NewMask; 289 } 290 291 // Adapts the external decomposeBitTestICmp for local use. 292 static bool decomposeBitTestICmp(Value *LHS, Value *RHS, CmpInst::Predicate &Pred, 293 Value *&X, Value *&Y, Value *&Z) { 294 APInt Mask; 295 if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask)) 296 return false; 297 298 Y = ConstantInt::get(X->getType(), Mask); 299 Z = ConstantInt::get(X->getType(), 0); 300 return true; 301 } 302 303 /// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E). 304 /// Return the pattern classes (from MaskedICmpType) for the left hand side and 305 /// the right hand side as a pair. 306 /// LHS and RHS are the left hand side and the right hand side ICmps and PredL 307 /// and PredR are their predicates, respectively. 308 static 309 Optional<std::pair<unsigned, unsigned>> 310 getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C, 311 Value *&D, Value *&E, ICmpInst *LHS, 312 ICmpInst *RHS, 313 ICmpInst::Predicate &PredL, 314 ICmpInst::Predicate &PredR) { 315 // vectors are not (yet?) supported. Don't support pointers either. 
316 if (!LHS->getOperand(0)->getType()->isIntegerTy() || 317 !RHS->getOperand(0)->getType()->isIntegerTy()) 318 return None; 319 320 // Here comes the tricky part: 321 // LHS might be of the form L11 & L12 == X, X == L21 & L22, 322 // and L11 & L12 == L21 & L22. The same goes for RHS. 323 // Now we must find those components L** and R**, that are equal, so 324 // that we can extract the parameters A, B, C, D, and E for the canonical 325 // above. 326 Value *L1 = LHS->getOperand(0); 327 Value *L2 = LHS->getOperand(1); 328 Value *L11, *L12, *L21, *L22; 329 // Check whether the icmp can be decomposed into a bit test. 330 if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) { 331 L21 = L22 = L1 = nullptr; 332 } else { 333 // Look for ANDs in the LHS icmp. 334 if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) { 335 // Any icmp can be viewed as being trivially masked; if it allows us to 336 // remove one, it's worth it. 337 L11 = L1; 338 L12 = Constant::getAllOnesValue(L1->getType()); 339 } 340 341 if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) { 342 L21 = L2; 343 L22 = Constant::getAllOnesValue(L2->getType()); 344 } 345 } 346 347 // Bail if LHS was a icmp that can't be decomposed into an equality. 348 if (!ICmpInst::isEquality(PredL)) 349 return None; 350 351 Value *R1 = RHS->getOperand(0); 352 Value *R2 = RHS->getOperand(1); 353 Value *R11, *R12; 354 bool Ok = false; 355 if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) { 356 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) { 357 A = R11; 358 D = R12; 359 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) { 360 A = R12; 361 D = R11; 362 } else { 363 return None; 364 } 365 E = R2; 366 R1 = nullptr; 367 Ok = true; 368 } else { 369 if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) { 370 // As before, model no mask as a trivial mask if it'll let us do an 371 // optimization. 372 R11 = R1; 373 R12 = Constant::getAllOnesValue(R1->getType()); 374 } 375 376 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) { 377 A = R11; 378 D = R12; 379 E = R2; 380 Ok = true; 381 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) { 382 A = R12; 383 D = R11; 384 E = R2; 385 Ok = true; 386 } 387 } 388 389 // Bail if RHS was a icmp that can't be decomposed into an equality. 390 if (!ICmpInst::isEquality(PredR)) 391 return None; 392 393 // Look for ANDs on the right side of the RHS icmp. 394 if (!Ok) { 395 if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) { 396 R11 = R2; 397 R12 = Constant::getAllOnesValue(R2->getType()); 398 } 399 400 if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) { 401 A = R11; 402 D = R12; 403 E = R1; 404 Ok = true; 405 } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) { 406 A = R12; 407 D = R11; 408 E = R1; 409 Ok = true; 410 } else { 411 return None; 412 } 413 } 414 if (!Ok) 415 return None; 416 417 if (L11 == A) { 418 B = L12; 419 C = L2; 420 } else if (L12 == A) { 421 B = L11; 422 C = L2; 423 } else if (L21 == A) { 424 B = L22; 425 C = L1; 426 } else if (L22 == A) { 427 B = L21; 428 C = L1; 429 } 430 431 unsigned LeftType = getMaskedICmpType(A, B, C, PredL); 432 unsigned RightType = getMaskedICmpType(A, D, E, PredR); 433 return Optional<std::pair<unsigned, unsigned>>(std::make_pair(LeftType, RightType)); 434 } 435 436 /// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single 437 /// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros 438 /// and the right hand side is of type BMask_Mixed. 
For example, 439 /// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8). 440 static Value * foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed( 441 ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, 442 Value *A, Value *B, Value *C, Value *D, Value *E, 443 ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, 444 llvm::InstCombiner::BuilderTy &Builder) { 445 // We are given the canonical form: 446 // (icmp ne (A & B), 0) & (icmp eq (A & D), E). 447 // where D & E == E. 448 // 449 // If IsAnd is false, we get it in negated form: 450 // (icmp eq (A & B), 0) | (icmp ne (A & D), E) -> 451 // !((icmp ne (A & B), 0) & (icmp eq (A & D), E)). 452 // 453 // We currently handle the case of B, C, D, E are constant. 454 // 455 ConstantInt *BCst = dyn_cast<ConstantInt>(B); 456 if (!BCst) 457 return nullptr; 458 ConstantInt *CCst = dyn_cast<ConstantInt>(C); 459 if (!CCst) 460 return nullptr; 461 ConstantInt *DCst = dyn_cast<ConstantInt>(D); 462 if (!DCst) 463 return nullptr; 464 ConstantInt *ECst = dyn_cast<ConstantInt>(E); 465 if (!ECst) 466 return nullptr; 467 468 ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 469 470 // Update E to the canonical form when D is a power of two and RHS is 471 // canonicalized as, 472 // (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or 473 // (icmp ne (A & D), D) -> (icmp eq (A & D), 0). 474 if (PredR != NewCC) 475 ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst)); 476 477 // If B or D is zero, skip because if LHS or RHS can be trivially folded by 478 // other folding rules and this pattern won't apply any more. 479 if (BCst->getValue() == 0 || DCst->getValue() == 0) 480 return nullptr; 481 482 // If B and D don't intersect, ie. (B & D) == 0, no folding because we can't 483 // deduce anything from it. 484 // For example, 485 // (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding. 486 if ((BCst->getValue() & DCst->getValue()) == 0) 487 return nullptr; 488 489 // If the following two conditions are met: 490 // 491 // 1. mask B covers only a single bit that's not covered by mask D, that is, 492 // (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection of 493 // B and D has only one bit set) and, 494 // 495 // 2. RHS (and E) indicates that the rest of B's bits are zero (in other 496 // words, the intersection of B and D is zero), that is, ((B & D) & E) == 0 497 // 498 // then that single bit in B must be one and thus the whole expression can be 499 // folded to 500 // (A & (B | D)) == (B & (B ^ D)) | E. 
501 // 502 // For example, 503 // (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9) 504 // (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8) 505 if ((((BCst->getValue() & DCst->getValue()) & ECst->getValue()) == 0) && 506 (BCst->getValue() & (BCst->getValue() ^ DCst->getValue())).isPowerOf2()) { 507 APInt BorD = BCst->getValue() | DCst->getValue(); 508 APInt BandBxorDorE = (BCst->getValue() & (BCst->getValue() ^ DCst->getValue())) | 509 ECst->getValue(); 510 Value *NewMask = ConstantInt::get(BCst->getType(), BorD); 511 Value *NewMaskedValue = ConstantInt::get(BCst->getType(), BandBxorDorE); 512 Value *NewAnd = Builder.CreateAnd(A, NewMask); 513 return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue); 514 } 515 516 auto IsSubSetOrEqual = [](ConstantInt *C1, ConstantInt *C2) { 517 return (C1->getValue() & C2->getValue()) == C1->getValue(); 518 }; 519 auto IsSuperSetOrEqual = [](ConstantInt *C1, ConstantInt *C2) { 520 return (C1->getValue() & C2->getValue()) == C2->getValue(); 521 }; 522 523 // In the following, we consider only the cases where B is a superset of D, B 524 // is a subset of D, or B == D because otherwise there's at least one bit 525 // covered by B but not D, in which case we can't deduce much from it, so 526 // no folding (aside from the single must-be-one bit case right above.) 527 // For example, 528 // (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding. 529 if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst)) 530 return nullptr; 531 532 // At this point, either B is a superset of D, B is a subset of D or B == D. 533 534 // If E is zero, if B is a subset of (or equal to) D, LHS and RHS contradict 535 // and the whole expression becomes false (or true if negated), otherwise, no 536 // folding. 537 // For example, 538 // (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false. 539 // (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding. 540 if (ECst->isZero()) { 541 if (IsSubSetOrEqual(BCst, DCst)) 542 return ConstantInt::get(LHS->getType(), !IsAnd); 543 return nullptr; 544 } 545 546 // At this point, B, D, E aren't zero and (B & D) == B, (B & D) == D or B == 547 // D. If B is a superset of (or equal to) D, since E is not zero, LHS is 548 // subsumed by RHS (RHS implies LHS.) So the whole expression becomes 549 // RHS. For example, 550 // (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8). 551 // (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8). 552 if (IsSuperSetOrEqual(BCst, DCst)) 553 return RHS; 554 // Otherwise, B is a subset of D. If B and E have a common bit set, 555 // ie. (B & E) != 0, then LHS is subsumed by RHS. For example. 556 // (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8). 557 assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code"); 558 if ((BCst->getValue() & ECst->getValue()) != 0) 559 return RHS; 560 // Otherwise, LHS and RHS contradict and the whole expression becomes false 561 // (or true if negated.) For example, 562 // (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false. 563 // (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false. 564 return ConstantInt::get(LHS->getType(), !IsAnd); 565 } 566 567 /// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single 568 /// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side 569 /// aren't of the common mask pattern type. 
570 static Value *foldLogOpOfMaskedICmpsAsymmetric( 571 ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, 572 Value *A, Value *B, Value *C, Value *D, Value *E, 573 ICmpInst::Predicate PredL, ICmpInst::Predicate PredR, 574 unsigned LHSMask, unsigned RHSMask, 575 llvm::InstCombiner::BuilderTy &Builder) { 576 assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) && 577 "Expected equality predicates for masked type of icmps."); 578 // Handle Mask_NotAllZeros-BMask_Mixed cases. 579 // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or 580 // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E) 581 // which gets swapped to 582 // (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C). 583 if (!IsAnd) { 584 LHSMask = conjugateICmpMask(LHSMask); 585 RHSMask = conjugateICmpMask(RHSMask); 586 } 587 if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) { 588 if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed( 589 LHS, RHS, IsAnd, A, B, C, D, E, 590 PredL, PredR, Builder)) { 591 return V; 592 } 593 } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) { 594 if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed( 595 RHS, LHS, IsAnd, A, D, E, B, C, 596 PredR, PredL, Builder)) { 597 return V; 598 } 599 } 600 return nullptr; 601 } 602 603 /// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) 604 /// into a single (icmp(A & X) ==/!= Y). 605 static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, 606 llvm::InstCombiner::BuilderTy &Builder) { 607 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr; 608 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 609 Optional<std::pair<unsigned, unsigned>> MaskPair = 610 getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR); 611 if (!MaskPair) 612 return nullptr; 613 assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) && 614 "Expected equality predicates for masked type of icmps."); 615 unsigned LHSMask = MaskPair->first; 616 unsigned RHSMask = MaskPair->second; 617 unsigned Mask = LHSMask & RHSMask; 618 if (Mask == 0) { 619 // Even if the two sides don't share a common pattern, check if folding can 620 // still happen. 621 if (Value *V = foldLogOpOfMaskedICmpsAsymmetric( 622 LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask, 623 Builder)) 624 return V; 625 return nullptr; 626 } 627 628 // In full generality: 629 // (icmp (A & B) Op C) | (icmp (A & D) Op E) 630 // == ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ] 631 // 632 // If the latter can be converted into (icmp (A & X) Op Y) then the former is 633 // equivalent to (icmp (A & X) !Op Y). 634 // 635 // Therefore, we can pretend for the rest of this function that we're dealing 636 // with the conjunction, provided we flip the sense of any comparisons (both 637 // input and output). 638 639 // In most cases we're going to produce an EQ for the "&&" case. 640 ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 641 if (!IsAnd) { 642 // Convert the masking analysis into its equivalent with negated 643 // comparisons. 644 Mask = conjugateICmpMask(Mask); 645 } 646 647 if (Mask & Mask_AllZeros) { 648 // (icmp eq (A & B), 0) & (icmp eq (A & D), 0) 649 // -> (icmp eq (A & (B|D)), 0) 650 Value *NewOr = Builder.CreateOr(B, D); 651 Value *NewAnd = Builder.CreateAnd(A, NewOr); 652 // We can't use C as zero because we might actually handle 653 // (icmp ne (A & B), B) & (icmp ne (A & D), D) 654 // with B and D, having a single bit set. 
655 Value *Zero = Constant::getNullValue(A->getType()); 656 return Builder.CreateICmp(NewCC, NewAnd, Zero); 657 } 658 if (Mask & BMask_AllOnes) { 659 // (icmp eq (A & B), B) & (icmp eq (A & D), D) 660 // -> (icmp eq (A & (B|D)), (B|D)) 661 Value *NewOr = Builder.CreateOr(B, D); 662 Value *NewAnd = Builder.CreateAnd(A, NewOr); 663 return Builder.CreateICmp(NewCC, NewAnd, NewOr); 664 } 665 if (Mask & AMask_AllOnes) { 666 // (icmp eq (A & B), A) & (icmp eq (A & D), A) 667 // -> (icmp eq (A & (B&D)), A) 668 Value *NewAnd1 = Builder.CreateAnd(B, D); 669 Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1); 670 return Builder.CreateICmp(NewCC, NewAnd2, A); 671 } 672 673 // Remaining cases assume at least that B and D are constant, and depend on 674 // their actual values. This isn't strictly necessary, just a "handle the 675 // easy cases for now" decision. 676 ConstantInt *BCst = dyn_cast<ConstantInt>(B); 677 if (!BCst) 678 return nullptr; 679 ConstantInt *DCst = dyn_cast<ConstantInt>(D); 680 if (!DCst) 681 return nullptr; 682 683 if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) { 684 // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and 685 // (icmp ne (A & B), B) & (icmp ne (A & D), D) 686 // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0) 687 // Only valid if one of the masks is a superset of the other (check "B&D" is 688 // the same as either B or D). 689 APInt NewMask = BCst->getValue() & DCst->getValue(); 690 691 if (NewMask == BCst->getValue()) 692 return LHS; 693 else if (NewMask == DCst->getValue()) 694 return RHS; 695 } 696 697 if (Mask & AMask_NotAllOnes) { 698 // (icmp ne (A & B), B) & (icmp ne (A & D), D) 699 // -> (icmp ne (A & B), A) or (icmp ne (A & D), A) 700 // Only valid if one of the masks is a superset of the other (check "B|D" is 701 // the same as either B or D). 702 APInt NewMask = BCst->getValue() | DCst->getValue(); 703 704 if (NewMask == BCst->getValue()) 705 return LHS; 706 else if (NewMask == DCst->getValue()) 707 return RHS; 708 } 709 710 if (Mask & BMask_Mixed) { 711 // (icmp eq (A & B), C) & (icmp eq (A & D), E) 712 // We already know that B & C == C && D & E == E. 713 // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of 714 // C and E, which are shared by both the mask B and the mask D, don't 715 // contradict, then we can transform to 716 // -> (icmp eq (A & (B|D)), (C|E)) 717 // Currently, we only handle the case of B, C, D, and E being constant. 718 // We can't simply use C and E because we might actually handle 719 // (icmp ne (A & B), B) & (icmp eq (A & D), D) 720 // with B and D, having a single bit set. 721 ConstantInt *CCst = dyn_cast<ConstantInt>(C); 722 if (!CCst) 723 return nullptr; 724 ConstantInt *ECst = dyn_cast<ConstantInt>(E); 725 if (!ECst) 726 return nullptr; 727 if (PredL != NewCC) 728 CCst = cast<ConstantInt>(ConstantExpr::getXor(BCst, CCst)); 729 if (PredR != NewCC) 730 ECst = cast<ConstantInt>(ConstantExpr::getXor(DCst, ECst)); 731 732 // If there is a conflict, we should actually return a false for the 733 // whole construct. 734 if (((BCst->getValue() & DCst->getValue()) & 735 (CCst->getValue() ^ ECst->getValue())).getBoolValue()) 736 return ConstantInt::get(LHS->getType(), !IsAnd); 737 738 Value *NewOr1 = Builder.CreateOr(B, D); 739 Value *NewOr2 = ConstantExpr::getOr(CCst, ECst); 740 Value *NewAnd = Builder.CreateAnd(A, NewOr1); 741 return Builder.CreateICmp(NewCC, NewAnd, NewOr2); 742 } 743 744 return nullptr; 745 } 746 747 /// Try to fold a signed range checked with lower bound 0 to an unsigned icmp. 
748 /// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n 749 /// If \p Inverted is true then the check is for the inverted range, e.g. 750 /// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n 751 Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1, 752 bool Inverted) { 753 // Check the lower range comparison, e.g. x >= 0 754 // InstCombine already ensured that if there is a constant it's on the RHS. 755 ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1)); 756 if (!RangeStart) 757 return nullptr; 758 759 ICmpInst::Predicate Pred0 = (Inverted ? Cmp0->getInversePredicate() : 760 Cmp0->getPredicate()); 761 762 // Accept x > -1 or x >= 0 (after potentially inverting the predicate). 763 if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) || 764 (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero()))) 765 return nullptr; 766 767 ICmpInst::Predicate Pred1 = (Inverted ? Cmp1->getInversePredicate() : 768 Cmp1->getPredicate()); 769 770 Value *Input = Cmp0->getOperand(0); 771 Value *RangeEnd; 772 if (Cmp1->getOperand(0) == Input) { 773 // For the upper range compare we have: icmp x, n 774 RangeEnd = Cmp1->getOperand(1); 775 } else if (Cmp1->getOperand(1) == Input) { 776 // For the upper range compare we have: icmp n, x 777 RangeEnd = Cmp1->getOperand(0); 778 Pred1 = ICmpInst::getSwappedPredicate(Pred1); 779 } else { 780 return nullptr; 781 } 782 783 // Check the upper range comparison, e.g. x < n 784 ICmpInst::Predicate NewPred; 785 switch (Pred1) { 786 case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break; 787 case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break; 788 default: return nullptr; 789 } 790 791 // This simplification is only valid if the upper range is not negative. 792 KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1); 793 if (!Known.isNonNegative()) 794 return nullptr; 795 796 if (Inverted) 797 NewPred = ICmpInst::getInversePredicate(NewPred); 798 799 return Builder.CreateICmp(NewPred, Input, RangeEnd); 800 } 801 802 static Value * 803 foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS, 804 bool JoinedByAnd, 805 InstCombiner::BuilderTy &Builder) { 806 Value *X = LHS->getOperand(0); 807 if (X != RHS->getOperand(0)) 808 return nullptr; 809 810 const APInt *C1, *C2; 811 if (!match(LHS->getOperand(1), m_APInt(C1)) || 812 !match(RHS->getOperand(1), m_APInt(C2))) 813 return nullptr; 814 815 // We only handle (X != C1 && X != C2) and (X == C1 || X == C2). 816 ICmpInst::Predicate Pred = LHS->getPredicate(); 817 if (Pred != RHS->getPredicate()) 818 return nullptr; 819 if (JoinedByAnd && Pred != ICmpInst::ICMP_NE) 820 return nullptr; 821 if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ) 822 return nullptr; 823 824 // The larger unsigned constant goes on the right. 825 if (C1->ugt(*C2)) 826 std::swap(C1, C2); 827 828 APInt Xor = *C1 ^ *C2; 829 if (Xor.isPowerOf2()) { 830 // If LHSC and RHSC differ by only one bit, then set that bit in X and 831 // compare against the larger constant: 832 // (X == C1 || X == C2) --> (X | (C1 ^ C2)) == C2 833 // (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2 834 // We choose an 'or' with a Pow2 constant rather than the inverse mask with 835 // 'and' because that may lead to smaller codegen from a smaller constant. 836 Value *Or = Builder.CreateOr(X, ConstantInt::get(X->getType(), Xor)); 837 return Builder.CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2)); 838 } 839 840 // Special case: get the ordering right when the values wrap around zero. 
841 // Ie, we assumed the constants were unsigned when swapping earlier. 842 if (C1->isNullValue() && C2->isAllOnesValue()) 843 std::swap(C1, C2); 844 845 if (*C1 == *C2 - 1) { 846 // (X == 13 || X == 14) --> X - 13 <=u 1 847 // (X != 13 && X != 14) --> X - 13 >u 1 848 // An 'add' is the canonical IR form, so favor that over a 'sub'. 849 Value *Add = Builder.CreateAdd(X, ConstantInt::get(X->getType(), -(*C1))); 850 auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE; 851 return Builder.CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1)); 852 } 853 854 return nullptr; 855 } 856 857 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2) 858 // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2) 859 Value *InstCombiner::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS, 860 BinaryOperator &Logic) { 861 bool JoinedByAnd = Logic.getOpcode() == Instruction::And; 862 assert((JoinedByAnd || Logic.getOpcode() == Instruction::Or) && 863 "Wrong opcode"); 864 ICmpInst::Predicate Pred = LHS->getPredicate(); 865 if (Pred != RHS->getPredicate()) 866 return nullptr; 867 if (JoinedByAnd && Pred != ICmpInst::ICMP_NE) 868 return nullptr; 869 if (!JoinedByAnd && Pred != ICmpInst::ICMP_EQ) 870 return nullptr; 871 872 // TODO support vector splats 873 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1)); 874 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1)); 875 if (!LHSC || !RHSC || !LHSC->isZero() || !RHSC->isZero()) 876 return nullptr; 877 878 Value *A, *B, *C, *D; 879 if (match(LHS->getOperand(0), m_And(m_Value(A), m_Value(B))) && 880 match(RHS->getOperand(0), m_And(m_Value(C), m_Value(D)))) { 881 if (A == D || B == D) 882 std::swap(C, D); 883 if (B == C) 884 std::swap(A, B); 885 886 if (A == C && 887 isKnownToBeAPowerOfTwo(B, false, 0, &Logic) && 888 isKnownToBeAPowerOfTwo(D, false, 0, &Logic)) { 889 Value *Mask = Builder.CreateOr(B, D); 890 Value *Masked = Builder.CreateAnd(A, Mask); 891 auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; 892 return Builder.CreateICmp(NewPred, Masked, Mask); 893 } 894 } 895 896 return nullptr; 897 } 898 899 /// General pattern: 900 /// X & Y 901 /// 902 /// Where Y is checking that all the high bits (covered by a mask 4294967168) 903 /// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0 904 /// Pattern can be one of: 905 /// %t = add i32 %arg, 128 906 /// %r = icmp ult i32 %t, 256 907 /// Or 908 /// %t0 = shl i32 %arg, 24 909 /// %t1 = ashr i32 %t0, 24 910 /// %r = icmp eq i32 %t1, %arg 911 /// Or 912 /// %t0 = trunc i32 %arg to i8 913 /// %t1 = sext i8 %t0 to i32 914 /// %r = icmp eq i32 %t1, %arg 915 /// This pattern is a signed truncation check. 916 /// 917 /// And X is checking that some bit in that same mask is zero. 918 /// I.e. can be one of: 919 /// %r = icmp sgt i32 %arg, -1 920 /// Or 921 /// %t = and i32 %arg, 2147483648 922 /// %r = icmp eq i32 %t, 0 923 /// 924 /// Since we are checking that all the bits in that mask are the same, 925 /// and a particular bit is zero, what we are really checking is that all the 926 /// masked bits are zero. 
927 /// So this should be transformed to: 928 /// %r = icmp ult i32 %arg, 128 929 static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1, 930 Instruction &CxtI, 931 InstCombiner::BuilderTy &Builder) { 932 assert(CxtI.getOpcode() == Instruction::And); 933 934 // Match icmp ult (add %arg, C01), C1 (C1 == C01 << 1; powers of two) 935 auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X, 936 APInt &SignBitMask) -> bool { 937 CmpInst::Predicate Pred; 938 const APInt *I01, *I1; // powers of two; I1 == I01 << 1 939 if (!(match(ICmp, 940 m_ICmp(Pred, m_Add(m_Value(X), m_Power2(I01)), m_Power2(I1))) && 941 Pred == ICmpInst::ICMP_ULT && I1->ugt(*I01) && I01->shl(1) == *I1)) 942 return false; 943 // Which bit is the new sign bit as per the 'signed truncation' pattern? 944 SignBitMask = *I01; 945 return true; 946 }; 947 948 // One icmp needs to be 'signed truncation check'. 949 // We need to match this first, else we will mismatch commutative cases. 950 Value *X1; 951 APInt HighestBit; 952 ICmpInst *OtherICmp; 953 if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit)) 954 OtherICmp = ICmp0; 955 else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit)) 956 OtherICmp = ICmp1; 957 else 958 return nullptr; 959 960 assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)"); 961 962 // Try to match/decompose into: icmp eq (X & Mask), 0 963 auto tryToDecompose = [](ICmpInst *ICmp, Value *&X, 964 APInt &UnsetBitsMask) -> bool { 965 CmpInst::Predicate Pred = ICmp->getPredicate(); 966 // Can it be decomposed into icmp eq (X & Mask), 0 ? 967 if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1), 968 Pred, X, UnsetBitsMask, 969 /*LookThroughTrunc=*/false) && 970 Pred == ICmpInst::ICMP_EQ) 971 return true; 972 // Is it icmp eq (X & Mask), 0 already? 973 const APInt *Mask; 974 if (match(ICmp, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) && 975 Pred == ICmpInst::ICMP_EQ) { 976 UnsetBitsMask = *Mask; 977 return true; 978 } 979 return false; 980 }; 981 982 // And the other icmp needs to be decomposable into a bit test. 983 Value *X0; 984 APInt UnsetBitsMask; 985 if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask)) 986 return nullptr; 987 988 assert(!UnsetBitsMask.isNullValue() && "empty mask makes no sense."); 989 990 // Are they working on the same value? 991 Value *X; 992 if (X1 == X0) { 993 // Ok as is. 994 X = X1; 995 } else if (match(X0, m_Trunc(m_Specific(X1)))) { 996 UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits()); 997 X = X1; 998 } else 999 return nullptr; 1000 1001 // So which bits should be uniform as per the 'signed truncation check'? 1002 // (all the bits starting with (i.e. including) HighestBit) 1003 APInt SignBitsMask = ~(HighestBit - 1U); 1004 1005 // UnsetBitsMask must have some common bits with SignBitsMask, 1006 if (!UnsetBitsMask.intersects(SignBitsMask)) 1007 return nullptr; 1008 1009 // Does UnsetBitsMask contain any bits outside of SignBitsMask? 1010 if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) { 1011 APInt OtherHighestBit = (~UnsetBitsMask) + 1U; 1012 if (!OtherHighestBit.isPowerOf2()) 1013 return nullptr; 1014 HighestBit = APIntOps::umin(HighestBit, OtherHighestBit); 1015 } 1016 // Else, if it does not, then all is ok as-is. 1017 1018 // %r = icmp ult %X, SignBit 1019 return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit), 1020 CxtI.getName() + ".simplified"); 1021 } 1022 1023 /// Reduce a pair of compares that check if a value has exactly 1 bit set. 
1024 static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd, 1025 InstCombiner::BuilderTy &Builder) { 1026 // Handle 'and' / 'or' commutation: make the equality check the first operand. 1027 if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE) 1028 std::swap(Cmp0, Cmp1); 1029 else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ) 1030 std::swap(Cmp0, Cmp1); 1031 1032 // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1 1033 CmpInst::Predicate Pred0, Pred1; 1034 Value *X; 1035 if (JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) && 1036 match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)), 1037 m_SpecificInt(2))) && 1038 Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT) { 1039 Value *CtPop = Cmp1->getOperand(0); 1040 return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1)); 1041 } 1042 // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1 1043 if (!JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) && 1044 match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)), 1045 m_SpecificInt(1))) && 1046 Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_UGT) { 1047 Value *CtPop = Cmp1->getOperand(0); 1048 return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1)); 1049 } 1050 return nullptr; 1051 } 1052 1053 /// Commuted variants are assumed to be handled by calling this function again 1054 /// with the parameters swapped. 1055 static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp, 1056 ICmpInst *UnsignedICmp, bool IsAnd, 1057 const SimplifyQuery &Q, 1058 InstCombiner::BuilderTy &Builder) { 1059 Value *ZeroCmpOp; 1060 ICmpInst::Predicate EqPred; 1061 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) || 1062 !ICmpInst::isEquality(EqPred)) 1063 return nullptr; 1064 1065 auto IsKnownNonZero = [&](Value *V) { 1066 return isKnownNonZero(V, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT); 1067 }; 1068 1069 ICmpInst::Predicate UnsignedPred; 1070 1071 Value *A, *B; 1072 if (match(UnsignedICmp, 1073 m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) && 1074 match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) && 1075 (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) { 1076 auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) { 1077 if (!IsKnownNonZero(NonZero)) 1078 std::swap(NonZero, Other); 1079 return IsKnownNonZero(NonZero); 1080 }; 1081 1082 // Given ZeroCmpOp = (A + B) 1083 // ZeroCmpOp <= A && ZeroCmpOp != 0 --> (0-B) < A 1084 // ZeroCmpOp > A || ZeroCmpOp == 0 --> (0-B) >= A 1085 // 1086 // ZeroCmpOp < A && ZeroCmpOp != 0 --> (0-X) < Y iff 1087 // ZeroCmpOp >= A || ZeroCmpOp == 0 --> (0-X) >= Y iff 1088 // with X being the value (A/B) that is known to be non-zero, 1089 // and Y being remaining value. 
1090 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && 1091 IsAnd) 1092 return Builder.CreateICmpULT(Builder.CreateNeg(B), A); 1093 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE && 1094 IsAnd && GetKnownNonZeroAndOther(B, A)) 1095 return Builder.CreateICmpULT(Builder.CreateNeg(B), A); 1096 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && 1097 !IsAnd) 1098 return Builder.CreateICmpUGE(Builder.CreateNeg(B), A); 1099 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ && 1100 !IsAnd && GetKnownNonZeroAndOther(B, A)) 1101 return Builder.CreateICmpUGE(Builder.CreateNeg(B), A); 1102 } 1103 1104 Value *Base, *Offset; 1105 if (!match(ZeroCmpOp, m_Sub(m_Value(Base), m_Value(Offset)))) 1106 return nullptr; 1107 1108 if (!match(UnsignedICmp, 1109 m_c_ICmp(UnsignedPred, m_Specific(Base), m_Specific(Offset))) || 1110 !ICmpInst::isUnsigned(UnsignedPred)) 1111 return nullptr; 1112 1113 // Base >=/> Offset && (Base - Offset) != 0 <--> Base > Offset 1114 // (no overflow and not null) 1115 if ((UnsignedPred == ICmpInst::ICMP_UGE || 1116 UnsignedPred == ICmpInst::ICMP_UGT) && 1117 EqPred == ICmpInst::ICMP_NE && IsAnd) 1118 return Builder.CreateICmpUGT(Base, Offset); 1119 1120 // Base <=/< Offset || (Base - Offset) == 0 <--> Base <= Offset 1121 // (overflow or null) 1122 if ((UnsignedPred == ICmpInst::ICMP_ULE || 1123 UnsignedPred == ICmpInst::ICMP_ULT) && 1124 EqPred == ICmpInst::ICMP_EQ && !IsAnd) 1125 return Builder.CreateICmpULE(Base, Offset); 1126 1127 // Base <= Offset && (Base - Offset) != 0 --> Base < Offset 1128 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && 1129 IsAnd) 1130 return Builder.CreateICmpULT(Base, Offset); 1131 1132 // Base > Offset || (Base - Offset) == 0 --> Base >= Offset 1133 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && 1134 !IsAnd) 1135 return Builder.CreateICmpUGE(Base, Offset); 1136 1137 return nullptr; 1138 } 1139 1140 /// Reduce logic-of-compares with equality to a constant by substituting a 1141 /// common operand with the constant. Callers are expected to call this with 1142 /// Cmp0/Cmp1 switched to handle logic op commutativity. 1143 static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1, 1144 BinaryOperator &Logic, 1145 InstCombiner::BuilderTy &Builder, 1146 const SimplifyQuery &Q) { 1147 bool IsAnd = Logic.getOpcode() == Instruction::And; 1148 assert((IsAnd || Logic.getOpcode() == Instruction::Or) && "Wrong logic op"); 1149 1150 // Match an equality compare with a non-poison constant as Cmp0. 1151 // Also, give up if the compare can be constant-folded to avoid looping. 1152 ICmpInst::Predicate Pred0; 1153 Value *X; 1154 Constant *C; 1155 if (!match(Cmp0, m_ICmp(Pred0, m_Value(X), m_Constant(C))) || 1156 !isGuaranteedNotToBeUndefOrPoison(C) || isa<Constant>(X)) 1157 return nullptr; 1158 if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) || 1159 (!IsAnd && Pred0 != ICmpInst::ICMP_NE)) 1160 return nullptr; 1161 1162 // The other compare must include a common operand (X). Canonicalize the 1163 // common operand as operand 1 (Pred1 is swapped if the common operand was 1164 // operand 0). 
1165 Value *Y; 1166 ICmpInst::Predicate Pred1; 1167 if (!match(Cmp1, m_c_ICmp(Pred1, m_Value(Y), m_Deferred(X)))) 1168 return nullptr; 1169 1170 // Replace variable with constant value equivalence to remove a variable use: 1171 // (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C) 1172 // (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C) 1173 // Can think of the 'or' substitution with the 'and' bool equivalent: 1174 // A || B --> A || (!A && B) 1175 Value *SubstituteCmp = SimplifyICmpInst(Pred1, Y, C, Q); 1176 if (!SubstituteCmp) { 1177 // If we need to create a new instruction, require that the old compare can 1178 // be removed. 1179 if (!Cmp1->hasOneUse()) 1180 return nullptr; 1181 SubstituteCmp = Builder.CreateICmp(Pred1, Y, C); 1182 } 1183 return Builder.CreateBinOp(Logic.getOpcode(), Cmp0, SubstituteCmp); 1184 } 1185 1186 /// Fold (icmp)&(icmp) if possible. 1187 Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS, 1188 BinaryOperator &And) { 1189 const SimplifyQuery Q = SQ.getWithInstruction(&And); 1190 1191 // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2) 1192 // if K1 and K2 are a one-bit mask. 1193 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, And)) 1194 return V; 1195 1196 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 1197 1198 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B) 1199 if (predicatesFoldable(PredL, PredR)) { 1200 if (LHS->getOperand(0) == RHS->getOperand(1) && 1201 LHS->getOperand(1) == RHS->getOperand(0)) 1202 LHS->swapOperands(); 1203 if (LHS->getOperand(0) == RHS->getOperand(0) && 1204 LHS->getOperand(1) == RHS->getOperand(1)) { 1205 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1); 1206 unsigned Code = getICmpCode(LHS) & getICmpCode(RHS); 1207 bool IsSigned = LHS->isSigned() || RHS->isSigned(); 1208 return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder); 1209 } 1210 } 1211 1212 // handle (roughly): (icmp eq (A & B), C) & (icmp eq (A & D), E) 1213 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, true, Builder)) 1214 return V; 1215 1216 if (Value *V = foldAndOrOfICmpsWithConstEq(LHS, RHS, And, Builder, Q)) 1217 return V; 1218 if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, And, Builder, Q)) 1219 return V; 1220 1221 // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n 1222 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/false)) 1223 return V; 1224 1225 // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n 1226 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/false)) 1227 return V; 1228 1229 if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, true, Builder)) 1230 return V; 1231 1232 if (Value *V = foldSignedTruncationCheck(LHS, RHS, And, Builder)) 1233 return V; 1234 1235 if (Value *V = foldIsPowerOf2(LHS, RHS, true /* JoinedByAnd */, Builder)) 1236 return V; 1237 1238 if (Value *X = 1239 foldUnsignedUnderflowCheck(LHS, RHS, /*IsAnd=*/true, Q, Builder)) 1240 return X; 1241 if (Value *X = 1242 foldUnsignedUnderflowCheck(RHS, LHS, /*IsAnd=*/true, Q, Builder)) 1243 return X; 1244 1245 // This only handles icmp of constants: (icmp1 A, C1) & (icmp2 B, C2). 
1246 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0); 1247 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1)); 1248 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1)); 1249 if (!LHSC || !RHSC) 1250 return nullptr; 1251 1252 if (LHSC == RHSC && PredL == PredR) { 1253 // (icmp ult A, C) & (icmp ult B, C) --> (icmp ult (A|B), C) 1254 // where C is a power of 2 or 1255 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0) 1256 if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) || 1257 (PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) { 1258 Value *NewOr = Builder.CreateOr(LHS0, RHS0); 1259 return Builder.CreateICmp(PredL, NewOr, LHSC); 1260 } 1261 } 1262 1263 // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2 1264 // where CMAX is the all ones value for the truncated type, 1265 // iff the lower bits of C2 and CA are zero. 1266 if (PredL == ICmpInst::ICMP_EQ && PredL == PredR && LHS->hasOneUse() && 1267 RHS->hasOneUse()) { 1268 Value *V; 1269 ConstantInt *AndC, *SmallC = nullptr, *BigC = nullptr; 1270 1271 // (trunc x) == C1 & (and x, CA) == C2 1272 // (and x, CA) == C2 & (trunc x) == C1 1273 if (match(RHS0, m_Trunc(m_Value(V))) && 1274 match(LHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) { 1275 SmallC = RHSC; 1276 BigC = LHSC; 1277 } else if (match(LHS0, m_Trunc(m_Value(V))) && 1278 match(RHS0, m_And(m_Specific(V), m_ConstantInt(AndC)))) { 1279 SmallC = LHSC; 1280 BigC = RHSC; 1281 } 1282 1283 if (SmallC && BigC) { 1284 unsigned BigBitSize = BigC->getType()->getBitWidth(); 1285 unsigned SmallBitSize = SmallC->getType()->getBitWidth(); 1286 1287 // Check that the low bits are zero. 1288 APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize); 1289 if ((Low & AndC->getValue()).isNullValue() && 1290 (Low & BigC->getValue()).isNullValue()) { 1291 Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue()); 1292 APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue(); 1293 Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N); 1294 return Builder.CreateICmp(PredL, NewAnd, NewVal); 1295 } 1296 } 1297 } 1298 1299 // From here on, we only handle: 1300 // (icmp1 A, C1) & (icmp2 A, C2) --> something simpler. 1301 if (LHS0 != RHS0) 1302 return nullptr; 1303 1304 // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere. 1305 if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE || 1306 PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE || 1307 PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE || 1308 PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE) 1309 return nullptr; 1310 1311 // We can't fold (ugt x, C) & (sgt x, C2). 1312 if (!predicatesFoldable(PredL, PredR)) 1313 return nullptr; 1314 1315 // Ensure that the larger constant is on the RHS. 1316 bool ShouldSwap; 1317 if (CmpInst::isSigned(PredL) || 1318 (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR))) 1319 ShouldSwap = LHSC->getValue().sgt(RHSC->getValue()); 1320 else 1321 ShouldSwap = LHSC->getValue().ugt(RHSC->getValue()); 1322 1323 if (ShouldSwap) { 1324 std::swap(LHS, RHS); 1325 std::swap(LHSC, RHSC); 1326 std::swap(PredL, PredR); 1327 } 1328 1329 // At this point, we know we have two icmp instructions 1330 // comparing a value against two constants and and'ing the result 1331 // together. Because of the above check, we know that we only have 1332 // icmp eq, icmp ne, icmp [su]lt, and icmp [SU]gt here. 
We also know 1333 // (from the icmp folding check above), that the two constants 1334 // are not equal and that the larger constant is on the RHS 1335 assert(LHSC != RHSC && "Compares not folded above?"); 1336 1337 switch (PredL) { 1338 default: 1339 llvm_unreachable("Unknown integer condition code!"); 1340 case ICmpInst::ICMP_NE: 1341 switch (PredR) { 1342 default: 1343 llvm_unreachable("Unknown integer condition code!"); 1344 case ICmpInst::ICMP_ULT: 1345 // (X != 13 & X u< 14) -> X < 13 1346 if (LHSC->getValue() == (RHSC->getValue() - 1)) 1347 return Builder.CreateICmpULT(LHS0, LHSC); 1348 if (LHSC->isZero()) // (X != 0 & X u< C) -> X-1 u< C-1 1349 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), 1350 false, true); 1351 break; // (X != 13 & X u< 15) -> no change 1352 case ICmpInst::ICMP_SLT: 1353 // (X != 13 & X s< 14) -> X < 13 1354 if (LHSC->getValue() == (RHSC->getValue() - 1)) 1355 return Builder.CreateICmpSLT(LHS0, LHSC); 1356 // (X != INT_MIN & X s< C) -> X-(INT_MIN+1) u< (C-(INT_MIN+1)) 1357 if (LHSC->isMinValue(true)) 1358 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), 1359 true, true); 1360 break; // (X != 13 & X s< 15) -> no change 1361 case ICmpInst::ICMP_NE: 1362 // Potential folds for this case should already be handled. 1363 break; 1364 } 1365 break; 1366 case ICmpInst::ICMP_UGT: 1367 switch (PredR) { 1368 default: 1369 llvm_unreachable("Unknown integer condition code!"); 1370 case ICmpInst::ICMP_NE: 1371 // (X u> 13 & X != 14) -> X u> 14 1372 if (RHSC->getValue() == (LHSC->getValue() + 1)) 1373 return Builder.CreateICmp(PredL, LHS0, RHSC); 1374 // X u> C & X != UINT_MAX -> (X-(C+1)) u< UINT_MAX-(C+1) 1375 if (RHSC->isMaxValue(false)) 1376 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), 1377 false, true); 1378 break; // (X u> 13 & X != 15) -> no change 1379 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) u< 1 1380 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), 1381 false, true); 1382 } 1383 break; 1384 case ICmpInst::ICMP_SGT: 1385 switch (PredR) { 1386 default: 1387 llvm_unreachable("Unknown integer condition code!"); 1388 case ICmpInst::ICMP_NE: 1389 // (X s> 13 & X != 14) -> X s> 14 1390 if (RHSC->getValue() == (LHSC->getValue() + 1)) 1391 return Builder.CreateICmp(PredL, LHS0, RHSC); 1392 // X s> C & X != INT_MAX -> (X-(C+1)) u< INT_MAX-(C+1) 1393 if (RHSC->isMaxValue(true)) 1394 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), 1395 true, true); 1396 break; // (X s> 13 & X != 15) -> no change 1397 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) u< 1 1398 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true, 1399 true); 1400 } 1401 break; 1402 } 1403 1404 return nullptr; 1405 } 1406 1407 Value *InstCombiner::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) { 1408 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); 1409 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); 1410 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 1411 1412 if (LHS0 == RHS1 && RHS0 == LHS1) { 1413 // Swap RHS operands to match LHS. 1414 PredR = FCmpInst::getSwappedPredicate(PredR); 1415 std::swap(RHS0, RHS1); 1416 } 1417 1418 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y). 1419 // Suppose the relation between x and y is R, where R is one of 1420 // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for 1421 // testing the desired relations. 
1422 // 1423 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this: 1424 // bool(R & CC0) && bool(R & CC1) 1425 // = bool((R & CC0) & (R & CC1)) 1426 // = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency 1427 // 1428 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this: 1429 // bool(R & CC0) || bool(R & CC1) 1430 // = bool((R & CC0) | (R & CC1)) 1431 // = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;) 1432 if (LHS0 == RHS0 && LHS1 == RHS1) { 1433 unsigned FCmpCodeL = getFCmpCode(PredL); 1434 unsigned FCmpCodeR = getFCmpCode(PredR); 1435 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR; 1436 return getFCmpValue(NewPred, LHS0, LHS1, Builder); 1437 } 1438 1439 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) || 1440 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) { 1441 if (LHS0->getType() != RHS0->getType()) 1442 return nullptr; 1443 1444 // FCmp canonicalization ensures that (fcmp ord/uno X, X) and 1445 // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0). 1446 if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP())) 1447 // Ignore the constants because they are obviously not NANs: 1448 // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y) 1449 // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y) 1450 return Builder.CreateFCmp(PredL, LHS0, RHS0); 1451 } 1452 1453 return nullptr; 1454 } 1455 1456 /// This a limited reassociation for a special case (see above) where we are 1457 /// checking if two values are either both NAN (unordered) or not-NAN (ordered). 1458 /// This could be handled more generally in '-reassociation', but it seems like 1459 /// an unlikely pattern for a large number of logic ops and fcmps. 1460 static Instruction *reassociateFCmps(BinaryOperator &BO, 1461 InstCombiner::BuilderTy &Builder) { 1462 Instruction::BinaryOps Opcode = BO.getOpcode(); 1463 assert((Opcode == Instruction::And || Opcode == Instruction::Or) && 1464 "Expecting and/or op for fcmp transform"); 1465 1466 // There are 4 commuted variants of the pattern. Canonicalize operands of this 1467 // logic op so an fcmp is operand 0 and a matching logic op is operand 1. 1468 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X; 1469 FCmpInst::Predicate Pred; 1470 if (match(Op1, m_FCmp(Pred, m_Value(), m_AnyZeroFP()))) 1471 std::swap(Op0, Op1); 1472 1473 // Match inner binop and the predicate for combining 2 NAN checks into 1. 1474 BinaryOperator *BO1; 1475 FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD 1476 : FCmpInst::FCMP_UNO; 1477 if (!match(Op0, m_FCmp(Pred, m_Value(X), m_AnyZeroFP())) || Pred != NanPred || 1478 !match(Op1, m_BinOp(BO1)) || BO1->getOpcode() != Opcode) 1479 return nullptr; 1480 1481 // The inner logic op must have a matching fcmp operand. 
1482 Value *BO10 = BO1->getOperand(0), *BO11 = BO1->getOperand(1), *Y; 1483 if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) || 1484 Pred != NanPred || X->getType() != Y->getType()) 1485 std::swap(BO10, BO11); 1486 1487 if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) || 1488 Pred != NanPred || X->getType() != Y->getType()) 1489 return nullptr; 1490 1491 // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z 1492 // or (fcmp uno X, 0), (or (fcmp uno Y, 0), Z) --> or (fcmp uno X, Y), Z 1493 Value *NewFCmp = Builder.CreateFCmp(Pred, X, Y); 1494 if (auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) { 1495 // Intersect FMF from the 2 source fcmps. 1496 NewFCmpInst->copyIRFlags(Op0); 1497 NewFCmpInst->andIRFlags(BO10); 1498 } 1499 return BinaryOperator::Create(Opcode, NewFCmp, BO11); 1500 } 1501 1502 /// Match De Morgan's Laws: 1503 /// (~A & ~B) == (~(A | B)) 1504 /// (~A | ~B) == (~(A & B)) 1505 static Instruction *matchDeMorgansLaws(BinaryOperator &I, 1506 InstCombiner::BuilderTy &Builder) { 1507 auto Opcode = I.getOpcode(); 1508 assert((Opcode == Instruction::And || Opcode == Instruction::Or) && 1509 "Trying to match De Morgan's Laws with something other than and/or"); 1510 1511 // Flip the logic operation. 1512 Opcode = (Opcode == Instruction::And) ? Instruction::Or : Instruction::And; 1513 1514 Value *A, *B; 1515 if (match(I.getOperand(0), m_OneUse(m_Not(m_Value(A)))) && 1516 match(I.getOperand(1), m_OneUse(m_Not(m_Value(B)))) && 1517 !isFreeToInvert(A, A->hasOneUse()) && 1518 !isFreeToInvert(B, B->hasOneUse())) { 1519 Value *AndOr = Builder.CreateBinOp(Opcode, A, B, I.getName() + ".demorgan"); 1520 return BinaryOperator::CreateNot(AndOr); 1521 } 1522 1523 return nullptr; 1524 } 1525 1526 bool InstCombiner::shouldOptimizeCast(CastInst *CI) { 1527 Value *CastSrc = CI->getOperand(0); 1528 1529 // Noop casts and casts of constants should be eliminated trivially. 1530 if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc)) 1531 return false; 1532 1533 // If this cast is paired with another cast that can be eliminated, we prefer 1534 // to have it eliminated. 1535 if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc)) 1536 if (isEliminableCastPair(PrecedingCI, CI)) 1537 return false; 1538 1539 return true; 1540 } 1541 1542 /// Fold {and,or,xor} (cast X), C. 1543 static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast, 1544 InstCombiner::BuilderTy &Builder) { 1545 Constant *C = dyn_cast<Constant>(Logic.getOperand(1)); 1546 if (!C) 1547 return nullptr; 1548 1549 auto LogicOpc = Logic.getOpcode(); 1550 Type *DestTy = Logic.getType(); 1551 Type *SrcTy = Cast->getSrcTy(); 1552 1553 // Move the logic operation ahead of a zext or sext if the constant is 1554 // unchanged in the smaller source type. Performing the logic in a smaller 1555 // type may provide more information to later folds, and the smaller logic 1556 // instruction may be cheaper (particularly in the case of vectors). 
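  // A minimal illustrative sketch (operand names assumed, not from a test):
  //   %z = zext i8 %x to i32
  //   %r = and i32 %z, 15
  // can become
  //   %n = and i8 %x, 15
  //   %r = zext i8 %n to i32
  // because truncating the constant 15 to i8 and zero-extending it back
  // reproduces the original constant, so the mask is unchanged in i8.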
1557 Value *X; 1558 if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) { 1559 Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy); 1560 Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy); 1561 if (ZextTruncC == C) { 1562 // LogicOpc (zext X), C --> zext (LogicOpc X, C) 1563 Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC); 1564 return new ZExtInst(NewOp, DestTy); 1565 } 1566 } 1567 1568 if (match(Cast, m_OneUse(m_SExt(m_Value(X))))) { 1569 Constant *TruncC = ConstantExpr::getTrunc(C, SrcTy); 1570 Constant *SextTruncC = ConstantExpr::getSExt(TruncC, DestTy); 1571 if (SextTruncC == C) { 1572 // LogicOpc (sext X), C --> sext (LogicOpc X, C) 1573 Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC); 1574 return new SExtInst(NewOp, DestTy); 1575 } 1576 } 1577 1578 return nullptr; 1579 } 1580 1581 /// Fold {and,or,xor} (cast X), Y. 1582 Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) { 1583 auto LogicOpc = I.getOpcode(); 1584 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding"); 1585 1586 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1587 CastInst *Cast0 = dyn_cast<CastInst>(Op0); 1588 if (!Cast0) 1589 return nullptr; 1590 1591 // This must be a cast from an integer or integer vector source type to allow 1592 // transformation of the logic operation to the source type. 1593 Type *DestTy = I.getType(); 1594 Type *SrcTy = Cast0->getSrcTy(); 1595 if (!SrcTy->isIntOrIntVectorTy()) 1596 return nullptr; 1597 1598 if (Instruction *Ret = foldLogicCastConstant(I, Cast0, Builder)) 1599 return Ret; 1600 1601 CastInst *Cast1 = dyn_cast<CastInst>(Op1); 1602 if (!Cast1) 1603 return nullptr; 1604 1605 // Both operands of the logic operation are casts. The casts must be of the 1606 // same type for reduction. 1607 auto CastOpcode = Cast0->getOpcode(); 1608 if (CastOpcode != Cast1->getOpcode() || SrcTy != Cast1->getSrcTy()) 1609 return nullptr; 1610 1611 Value *Cast0Src = Cast0->getOperand(0); 1612 Value *Cast1Src = Cast1->getOperand(0); 1613 1614 // fold logic(cast(A), cast(B)) -> cast(logic(A, B)) 1615 if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) { 1616 Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src, 1617 I.getName()); 1618 return CastInst::Create(CastOpcode, NewOp, DestTy); 1619 } 1620 1621 // For now, only 'and'/'or' have optimizations after this. 1622 if (LogicOpc == Instruction::Xor) 1623 return nullptr; 1624 1625 // If this is logic(cast(icmp), cast(icmp)), try to fold this even if the 1626 // cast is otherwise not optimizable. This happens for vector sexts. 1627 ICmpInst *ICmp0 = dyn_cast<ICmpInst>(Cast0Src); 1628 ICmpInst *ICmp1 = dyn_cast<ICmpInst>(Cast1Src); 1629 if (ICmp0 && ICmp1) { 1630 Value *Res = LogicOpc == Instruction::And ? foldAndOfICmps(ICmp0, ICmp1, I) 1631 : foldOrOfICmps(ICmp0, ICmp1, I); 1632 if (Res) 1633 return CastInst::Create(CastOpcode, Res, DestTy); 1634 return nullptr; 1635 } 1636 1637 // If this is logic(cast(fcmp), cast(fcmp)), try to fold this even if the 1638 // cast is otherwise not optimizable. This happens for vector sexts. 
1639 FCmpInst *FCmp0 = dyn_cast<FCmpInst>(Cast0Src); 1640 FCmpInst *FCmp1 = dyn_cast<FCmpInst>(Cast1Src); 1641 if (FCmp0 && FCmp1) 1642 if (Value *R = foldLogicOfFCmps(FCmp0, FCmp1, LogicOpc == Instruction::And)) 1643 return CastInst::Create(CastOpcode, R, DestTy); 1644 1645 return nullptr; 1646 } 1647 1648 static Instruction *foldAndToXor(BinaryOperator &I, 1649 InstCombiner::BuilderTy &Builder) { 1650 assert(I.getOpcode() == Instruction::And); 1651 Value *Op0 = I.getOperand(0); 1652 Value *Op1 = I.getOperand(1); 1653 Value *A, *B; 1654 1655 // Operand complexity canonicalization guarantees that the 'or' is Op0. 1656 // (A | B) & ~(A & B) --> A ^ B 1657 // (A | B) & ~(B & A) --> A ^ B 1658 if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)), 1659 m_Not(m_c_And(m_Deferred(A), m_Deferred(B)))))) 1660 return BinaryOperator::CreateXor(A, B); 1661 1662 // (A | ~B) & (~A | B) --> ~(A ^ B) 1663 // (A | ~B) & (B | ~A) --> ~(A ^ B) 1664 // (~B | A) & (~A | B) --> ~(A ^ B) 1665 // (~B | A) & (B | ~A) --> ~(A ^ B) 1666 if (Op0->hasOneUse() || Op1->hasOneUse()) 1667 if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))), 1668 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B))))) 1669 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 1670 1671 return nullptr; 1672 } 1673 1674 static Instruction *foldOrToXor(BinaryOperator &I, 1675 InstCombiner::BuilderTy &Builder) { 1676 assert(I.getOpcode() == Instruction::Or); 1677 Value *Op0 = I.getOperand(0); 1678 Value *Op1 = I.getOperand(1); 1679 Value *A, *B; 1680 1681 // Operand complexity canonicalization guarantees that the 'and' is Op0. 1682 // (A & B) | ~(A | B) --> ~(A ^ B) 1683 // (A & B) | ~(B | A) --> ~(A ^ B) 1684 if (Op0->hasOneUse() || Op1->hasOneUse()) 1685 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 1686 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))) 1687 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 1688 1689 // (A & ~B) | (~A & B) --> A ^ B 1690 // (A & ~B) | (B & ~A) --> A ^ B 1691 // (~B & A) | (~A & B) --> A ^ B 1692 // (~B & A) | (B & ~A) --> A ^ B 1693 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) && 1694 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))) 1695 return BinaryOperator::CreateXor(A, B); 1696 1697 return nullptr; 1698 } 1699 1700 /// Return true if a constant shift amount is always less than the specified 1701 /// bit-width. If not, the shift could create poison in the narrower type. 1702 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) { 1703 if (auto *ScalarC = dyn_cast<ConstantInt>(C)) 1704 return ScalarC->getZExtValue() < BitWidth; 1705 1706 if (C->getType()->isVectorTy()) { 1707 // Check each element of a constant vector. 1708 unsigned NumElts = cast<VectorType>(C->getType())->getNumElements(); 1709 for (unsigned i = 0; i != NumElts; ++i) { 1710 Constant *Elt = C->getAggregateElement(i); 1711 if (!Elt) 1712 return false; 1713 if (isa<UndefValue>(Elt)) 1714 continue; 1715 auto *CI = dyn_cast<ConstantInt>(Elt); 1716 if (!CI || CI->getZExtValue() >= BitWidth) 1717 return false; 1718 } 1719 return true; 1720 } 1721 1722 // The constant is a constant expression or unknown. 1723 return false; 1724 } 1725 1726 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and 1727 /// a common zext operand: and (binop (zext X), C), (zext X). 
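/// As an illustrative sketch (types and names assumed, not from a test), with
/// %X : i8:
///   %z = zext i8 %X to i32
///   %b = add i32 %z, 12
///   %r = and i32 %b, %z
/// may become
///   %n = add i8 %X, 12
///   %m = and i8 %n, %X
///   %r = zext i8 %m to i32
/// since the constant 12 survives truncation to the narrow type.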
1728 Instruction *InstCombiner::narrowMaskedBinOp(BinaryOperator &And) { 1729 // This transform could also apply to {or, and, xor}, but there are better 1730 // folds for those cases, so we don't expect those patterns here. AShr is not 1731 // handled because it should always be transformed to LShr in this sequence. 1732 // The subtract transform is different because it has a constant on the left. 1733 // Add/mul commute the constant to RHS; sub with constant RHS becomes add. 1734 Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1); 1735 Constant *C; 1736 if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) && 1737 !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) && 1738 !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) && 1739 !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) && 1740 !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1))))) 1741 return nullptr; 1742 1743 Value *X; 1744 if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3)) 1745 return nullptr; 1746 1747 Type *Ty = And.getType(); 1748 if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType())) 1749 return nullptr; 1750 1751 // If we're narrowing a shift, the shift amount must be safe (less than the 1752 // width) in the narrower type. If the shift amount is greater, instsimplify 1753 // usually handles that case, but we can't guarantee/assert it. 1754 Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode(); 1755 if (Opc == Instruction::LShr || Opc == Instruction::Shl) 1756 if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits())) 1757 return nullptr; 1758 1759 // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X) 1760 // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X) 1761 Value *NewC = ConstantExpr::getTrunc(C, X->getType()); 1762 Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X) 1763 : Builder.CreateBinOp(Opc, X, NewC); 1764 return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty); 1765 } 1766 1767 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 1768 // here. We should standardize that construct where it is needed or choose some 1769 // other way to ensure that commutated variants of patterns are not missed. 1770 Instruction *InstCombiner::visitAnd(BinaryOperator &I) { 1771 if (Value *V = SimplifyAndInst(I.getOperand(0), I.getOperand(1), 1772 SQ.getWithInstruction(&I))) 1773 return replaceInstUsesWith(I, V); 1774 1775 if (SimplifyAssociativeOrCommutative(I)) 1776 return &I; 1777 1778 if (Instruction *X = foldVectorBinop(I)) 1779 return X; 1780 1781 // See if we can simplify any instructions used by the instruction whose sole 1782 // purpose is to compute bits we don't care about. 1783 if (SimplifyDemandedInstructionBits(I)) 1784 return &I; 1785 1786 // Do this before using distributive laws to catch simple and/or/not patterns. 
1787 if (Instruction *Xor = foldAndToXor(I, Builder)) 1788 return Xor; 1789 1790 // (A|B)&(A|C) -> A|(B&C) etc 1791 if (Value *V = SimplifyUsingDistributiveLaws(I)) 1792 return replaceInstUsesWith(I, V); 1793 1794 if (Value *V = SimplifyBSwap(I, Builder)) 1795 return replaceInstUsesWith(I, V); 1796 1797 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 1798 const APInt *C; 1799 if (match(Op1, m_APInt(C))) { 1800 Value *X, *Y; 1801 if (match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) && 1802 C->isOneValue()) { 1803 // (1 << X) & 1 --> zext(X == 0) 1804 // (1 >> X) & 1 --> zext(X == 0) 1805 Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(I.getType(), 0)); 1806 return new ZExtInst(IsZero, I.getType()); 1807 } 1808 1809 const APInt *XorC; 1810 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) { 1811 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2) 1812 Constant *NewC = ConstantInt::get(I.getType(), *C & *XorC); 1813 Value *And = Builder.CreateAnd(X, Op1); 1814 And->takeName(Op0); 1815 return BinaryOperator::CreateXor(And, NewC); 1816 } 1817 1818 const APInt *OrC; 1819 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) { 1820 // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2) 1821 // NOTE: This reduces the number of bits set in the & mask, which 1822 // can expose opportunities for store narrowing for scalars. 1823 // NOTE: SimplifyDemandedBits should have already removed bits from C1 1824 // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in 1825 // above, but this feels safer. 1826 APInt Together = *C & *OrC; 1827 Value *And = Builder.CreateAnd(X, ConstantInt::get(I.getType(), 1828 Together ^ *C)); 1829 And->takeName(Op0); 1830 return BinaryOperator::CreateOr(And, ConstantInt::get(I.getType(), 1831 Together)); 1832 } 1833 1834 // If the mask is only needed on one incoming arm, push the 'and' op up. 1835 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) || 1836 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { 1837 APInt NotAndMask(~(*C)); 1838 BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode(); 1839 if (MaskedValueIsZero(X, NotAndMask, 0, &I)) { 1840 // Not masking anything out for the LHS, move mask to RHS. 1841 // and ({x}or X, Y), C --> {x}or X, (and Y, C) 1842 Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked"); 1843 return BinaryOperator::Create(BinOp, X, NewRHS); 1844 } 1845 if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) { 1846 // Not masking anything out for the RHS, move mask to LHS. 1847 // and ({x}or X, Y), C --> {x}or (and X, C), Y 1848 Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked"); 1849 return BinaryOperator::Create(BinOp, NewLHS, Y); 1850 } 1851 } 1852 const APInt *ShiftC; 1853 if (match(Op0, m_OneUse(m_SExt(m_AShr(m_Value(X), m_APInt(ShiftC)))))) { 1854 unsigned Width = I.getType()->getScalarSizeInBits(); 1855 if (*C == APInt::getLowBitsSet(Width, Width - ShiftC->getZExtValue())) { 1856 // We are clearing high bits that were potentially set by sext+ashr: 1857 // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC 1858 Value *Sext = Builder.CreateSExt(X, I.getType()); 1859 Constant *ShAmtC = ConstantInt::get(I.getType(), ShiftC->zext(Width)); 1860 return BinaryOperator::CreateLShr(Sext, ShAmtC); 1861 } 1862 } 1863 } 1864 1865 if (ConstantInt *AndRHS = dyn_cast<ConstantInt>(Op1)) { 1866 const APInt &AndRHSMask = AndRHS->getValue(); 1867 1868 // Optimize a variety of ((val OP C1) & C2) combinations... 
1869 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) {
1870 // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the bitwidth
1871 // of X and OP behaves well when given trunc(C1) and X.
1872 // TODO: Do this for vectors by using m_APInt instead of m_ConstantInt.
1873 switch (Op0I->getOpcode()) {
1874 default:
1875 break;
1876 case Instruction::Xor:
1877 case Instruction::Or:
1878 case Instruction::Mul:
1879 case Instruction::Add:
1880 case Instruction::Sub:
1881 Value *X;
1882 ConstantInt *C1;
1883 // TODO: The one use restrictions could be relaxed a little if the AND
1884 // is going to be removed.
1885 if (match(Op0I, m_OneUse(m_c_BinOp(m_OneUse(m_ZExt(m_Value(X))),
1886 m_ConstantInt(C1))))) {
1887 if (AndRHSMask.isIntN(X->getType()->getScalarSizeInBits())) {
1888 auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType());
1889 Value *BinOp;
1890 Value *Op0LHS = Op0I->getOperand(0);
1891 if (isa<ZExtInst>(Op0LHS))
1892 BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1);
1893 else
1894 BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X);
1895 auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType());
1896 auto *And = Builder.CreateAnd(BinOp, TruncC2);
1897 return new ZExtInst(And, I.getType());
1898 }
1899 }
1900 }
1901
1902 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1)))
1903 if (Instruction *Res = OptAndOp(Op0I, Op0CI, AndRHS, I))
1904 return Res;
1905 }
1906
1907 // If this is an integer truncation, and if the source is an 'and' with
1908 // immediate, transform it. This frequently occurs for bitfield accesses.
1909 {
1910 Value *X = nullptr; ConstantInt *YC = nullptr;
1911 if (match(Op0, m_Trunc(m_And(m_Value(X), m_ConstantInt(YC))))) {
1912 // Change: and (trunc (and X, YC) to T), C2
1913 // into : and (trunc X to T), trunc(YC) & C2
1914 // This will fold the two constants together, which may allow
1915 // other simplifications.
1916 Value *NewCast = Builder.CreateTrunc(X, I.getType(), "and.shrunk"); 1917 Constant *C3 = ConstantExpr::getTrunc(YC, I.getType()); 1918 C3 = ConstantExpr::getAnd(C3, AndRHS); 1919 return BinaryOperator::CreateAnd(NewCast, C3); 1920 } 1921 } 1922 } 1923 1924 if (Instruction *Z = narrowMaskedBinOp(I)) 1925 return Z; 1926 1927 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 1928 return FoldedLogic; 1929 1930 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder)) 1931 return DeMorgan; 1932 1933 { 1934 Value *A, *B, *C; 1935 // A & (A ^ B) --> A & ~B 1936 if (match(Op1, m_OneUse(m_c_Xor(m_Specific(Op0), m_Value(B))))) 1937 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(B)); 1938 // (A ^ B) & A --> A & ~B 1939 if (match(Op0, m_OneUse(m_c_Xor(m_Specific(Op1), m_Value(B))))) 1940 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(B)); 1941 1942 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C 1943 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) 1944 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) 1945 if (Op1->hasOneUse() || isFreeToInvert(C, C->hasOneUse())) 1946 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C)); 1947 1948 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C 1949 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) 1950 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) 1951 if (Op0->hasOneUse() || isFreeToInvert(C, C->hasOneUse())) 1952 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C)); 1953 1954 // (A | B) & ((~A) ^ B) -> (A & B) 1955 // (A | B) & (B ^ (~A)) -> (A & B) 1956 // (B | A) & ((~A) ^ B) -> (A & B) 1957 // (B | A) & (B ^ (~A)) -> (A & B) 1958 if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) && 1959 match(Op0, m_c_Or(m_Specific(A), m_Specific(B)))) 1960 return BinaryOperator::CreateAnd(A, B); 1961 1962 // ((~A) ^ B) & (A | B) -> (A & B) 1963 // ((~A) ^ B) & (B | A) -> (A & B) 1964 // (B ^ (~A)) & (A | B) -> (A & B) 1965 // (B ^ (~A)) & (B | A) -> (A & B) 1966 if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) && 1967 match(Op1, m_c_Or(m_Specific(A), m_Specific(B)))) 1968 return BinaryOperator::CreateAnd(A, B); 1969 } 1970 1971 { 1972 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0); 1973 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1); 1974 if (LHS && RHS) 1975 if (Value *Res = foldAndOfICmps(LHS, RHS, I)) 1976 return replaceInstUsesWith(I, Res); 1977 1978 // TODO: Make this recursive; it's a little tricky because an arbitrary 1979 // number of 'and' instructions might have to be created. 
1980 Value *X, *Y; 1981 if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) { 1982 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 1983 if (Value *Res = foldAndOfICmps(LHS, Cmp, I)) 1984 return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y)); 1985 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 1986 if (Value *Res = foldAndOfICmps(LHS, Cmp, I)) 1987 return replaceInstUsesWith(I, Builder.CreateAnd(Res, X)); 1988 } 1989 if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) { 1990 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 1991 if (Value *Res = foldAndOfICmps(Cmp, RHS, I)) 1992 return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y)); 1993 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 1994 if (Value *Res = foldAndOfICmps(Cmp, RHS, I)) 1995 return replaceInstUsesWith(I, Builder.CreateAnd(Res, X)); 1996 } 1997 } 1998 1999 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) 2000 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) 2001 if (Value *Res = foldLogicOfFCmps(LHS, RHS, true)) 2002 return replaceInstUsesWith(I, Res); 2003 2004 if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder)) 2005 return FoldedFCmps; 2006 2007 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I)) 2008 return CastedAnd; 2009 2010 // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>. 2011 Value *A; 2012 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) && 2013 A->getType()->isIntOrIntVectorTy(1)) 2014 return SelectInst::Create(A, Op1, Constant::getNullValue(I.getType())); 2015 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) && 2016 A->getType()->isIntOrIntVectorTy(1)) 2017 return SelectInst::Create(A, Op0, Constant::getNullValue(I.getType())); 2018 2019 // and(ashr(subNSW(Y, X), ScalarSizeInBits(Y)-1), X) --> X s> Y ? X : 0. 2020 { 2021 Value *X, *Y; 2022 const APInt *ShAmt; 2023 Type *Ty = I.getType(); 2024 if (match(&I, m_c_And(m_OneUse(m_AShr(m_NSWSub(m_Value(Y), m_Value(X)), 2025 m_APInt(ShAmt))), 2026 m_Deferred(X))) && 2027 *ShAmt == Ty->getScalarSizeInBits() - 1) { 2028 Value *NewICmpInst = Builder.CreateICmpSGT(X, Y); 2029 return SelectInst::Create(NewICmpInst, X, ConstantInt::getNullValue(Ty)); 2030 } 2031 } 2032 2033 return nullptr; 2034 } 2035 2036 Instruction *InstCombiner::matchBSwap(BinaryOperator &Or) { 2037 assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'"); 2038 Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1); 2039 2040 // Look through zero extends. 2041 if (Instruction *Ext = dyn_cast<ZExtInst>(Op0)) 2042 Op0 = Ext->getOperand(0); 2043 2044 if (Instruction *Ext = dyn_cast<ZExtInst>(Op1)) 2045 Op1 = Ext->getOperand(0); 2046 2047 // (A | B) | C and A | (B | C) -> bswap if possible. 2048 bool OrOfOrs = match(Op0, m_Or(m_Value(), m_Value())) || 2049 match(Op1, m_Or(m_Value(), m_Value())); 2050 2051 // (A >> B) | (C << D) and (A << B) | (B >> C) -> bswap if possible. 2052 bool OrOfShifts = match(Op0, m_LogicalShift(m_Value(), m_Value())) && 2053 match(Op1, m_LogicalShift(m_Value(), m_Value())); 2054 2055 // (A & B) | (C & D) -> bswap if possible. 2056 bool OrOfAnds = match(Op0, m_And(m_Value(), m_Value())) && 2057 match(Op1, m_And(m_Value(), m_Value())); 2058 2059 // (A << B) | (C & D) -> bswap if possible. 2060 // The bigger pattern here is ((A & C1) << C2) | ((B >> C2) & C1), which is a 2061 // part of the bswap idiom for specific values of C1, C2 (e.g. C1 = 16711935, 2062 // C2 = 8 for i32). 2063 // This pattern can occur when the operands of the 'or' are not canonicalized 2064 // for some reason (not having only one use, for example). 
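  // A concrete (illustrative) i32 instance of that idiom:
  //   or (shl (and %x, 16711935), 8), (and (lshr %x, 8), 16711935)
  // swaps the bytes within each 16-bit half; together with a 16-bit rotate it
  // makes up a full bswap.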
2065 bool OrOfAndAndSh = (match(Op0, m_LogicalShift(m_Value(), m_Value())) && 2066 match(Op1, m_And(m_Value(), m_Value()))) || 2067 (match(Op0, m_And(m_Value(), m_Value())) && 2068 match(Op1, m_LogicalShift(m_Value(), m_Value()))); 2069 2070 if (!OrOfOrs && !OrOfShifts && !OrOfAnds && !OrOfAndAndSh) 2071 return nullptr; 2072 2073 SmallVector<Instruction*, 4> Insts; 2074 if (!recognizeBSwapOrBitReverseIdiom(&Or, true, false, Insts)) 2075 return nullptr; 2076 Instruction *LastInst = Insts.pop_back_val(); 2077 LastInst->removeFromParent(); 2078 2079 for (auto *Inst : Insts) 2080 Worklist.push(Inst); 2081 return LastInst; 2082 } 2083 2084 /// Transform UB-safe variants of bitwise rotate to the funnel shift intrinsic. 2085 static Instruction *matchRotate(Instruction &Or) { 2086 // TODO: Can we reduce the code duplication between this and the related 2087 // rotate matching code under visitSelect and visitTrunc? 2088 unsigned Width = Or.getType()->getScalarSizeInBits(); 2089 if (!isPowerOf2_32(Width)) 2090 return nullptr; 2091 2092 // First, find an or'd pair of opposite shifts with the same shifted operand: 2093 // or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1) 2094 BinaryOperator *Or0, *Or1; 2095 if (!match(Or.getOperand(0), m_BinOp(Or0)) || 2096 !match(Or.getOperand(1), m_BinOp(Or1))) 2097 return nullptr; 2098 2099 Value *ShVal, *ShAmt0, *ShAmt1; 2100 if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) || 2101 !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1))))) 2102 return nullptr; 2103 2104 BinaryOperator::BinaryOps ShiftOpcode0 = Or0->getOpcode(); 2105 BinaryOperator::BinaryOps ShiftOpcode1 = Or1->getOpcode(); 2106 if (ShiftOpcode0 == ShiftOpcode1) 2107 return nullptr; 2108 2109 // Match the shift amount operands for a rotate pattern. This always matches 2110 // a subtraction on the R operand. 2111 auto matchShiftAmount = [](Value *L, Value *R, unsigned Width) -> Value * { 2112 // The shift amount may be masked with negation: 2113 // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1))) 2114 Value *X; 2115 unsigned Mask = Width - 1; 2116 if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) && 2117 match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))) 2118 return X; 2119 2120 // Similar to above, but the shift amount may be extended after masking, 2121 // so return the extended value as the parameter for the intrinsic. 2122 if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) && 2123 match(R, m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))), 2124 m_SpecificInt(Mask)))) 2125 return L; 2126 2127 return nullptr; 2128 }; 2129 2130 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width); 2131 bool SubIsOnLHS = false; 2132 if (!ShAmt) { 2133 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width); 2134 SubIsOnLHS = true; 2135 } 2136 if (!ShAmt) 2137 return nullptr; 2138 2139 bool IsFshl = (!SubIsOnLHS && ShiftOpcode0 == BinaryOperator::Shl) || 2140 (SubIsOnLHS && ShiftOpcode1 == BinaryOperator::Shl); 2141 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr; 2142 Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType()); 2143 return IntrinsicInst::Create(F, { ShVal, ShVal, ShAmt }); 2144 } 2145 2146 /// Attempt to combine or(zext(x),shl(zext(y),bw/2) concat packing patterns. 
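/// Illustrative example (assumed types): for two i8 values packed into an i16,
///   or (zext i8 %x to i16), (shl (zext i8 %y to i16), 8)
/// is the concatenation %y:%x; if %x and %y are themselves bswap (or
/// bitreverse) results, the concat can instead be pushed below a single wide
/// intrinsic call.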
2147 static Instruction *matchOrConcat(Instruction &Or, 2148 InstCombiner::BuilderTy &Builder) { 2149 assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'"); 2150 Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1); 2151 Type *Ty = Or.getType(); 2152 2153 unsigned Width = Ty->getScalarSizeInBits(); 2154 if ((Width & 1) != 0) 2155 return nullptr; 2156 unsigned HalfWidth = Width / 2; 2157 2158 // Canonicalize zext (lower half) to LHS. 2159 if (!isa<ZExtInst>(Op0)) 2160 std::swap(Op0, Op1); 2161 2162 // Find lower/upper half. 2163 Value *LowerSrc, *ShlVal, *UpperSrc; 2164 const APInt *C; 2165 if (!match(Op0, m_OneUse(m_ZExt(m_Value(LowerSrc)))) || 2166 !match(Op1, m_OneUse(m_Shl(m_Value(ShlVal), m_APInt(C)))) || 2167 !match(ShlVal, m_OneUse(m_ZExt(m_Value(UpperSrc))))) 2168 return nullptr; 2169 if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() || 2170 LowerSrc->getType()->getScalarSizeInBits() != HalfWidth) 2171 return nullptr; 2172 2173 auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) { 2174 Value *NewLower = Builder.CreateZExt(Lo, Ty); 2175 Value *NewUpper = Builder.CreateZExt(Hi, Ty); 2176 NewUpper = Builder.CreateShl(NewUpper, HalfWidth); 2177 Value *BinOp = Builder.CreateOr(NewLower, NewUpper); 2178 Function *F = Intrinsic::getDeclaration(Or.getModule(), id, Ty); 2179 return Builder.CreateCall(F, BinOp); 2180 }; 2181 2182 // BSWAP: Push the concat down, swapping the lower/upper sources. 2183 // concat(bswap(x),bswap(y)) -> bswap(concat(x,y)) 2184 Value *LowerBSwap, *UpperBSwap; 2185 if (match(LowerSrc, m_BSwap(m_Value(LowerBSwap))) && 2186 match(UpperSrc, m_BSwap(m_Value(UpperBSwap)))) 2187 return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap); 2188 2189 // BITREVERSE: Push the concat down, swapping the lower/upper sources. 2190 // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y)) 2191 Value *LowerBRev, *UpperBRev; 2192 if (match(LowerSrc, m_BitReverse(m_Value(LowerBRev))) && 2193 match(UpperSrc, m_BitReverse(m_Value(UpperBRev)))) 2194 return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev); 2195 2196 return nullptr; 2197 } 2198 2199 /// If all elements of two constant vectors are 0/-1 and inverses, return true. 2200 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) { 2201 unsigned NumElts = cast<VectorType>(C1->getType())->getNumElements(); 2202 for (unsigned i = 0; i != NumElts; ++i) { 2203 Constant *EltC1 = C1->getAggregateElement(i); 2204 Constant *EltC2 = C2->getAggregateElement(i); 2205 if (!EltC1 || !EltC2) 2206 return false; 2207 2208 // One element must be all ones, and the other must be all zeros. 2209 if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) || 2210 (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes())))) 2211 return false; 2212 } 2213 return true; 2214 } 2215 2216 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or 2217 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of 2218 /// B, it can be used as the condition operand of a select instruction. 2219 Value *InstCombiner::getSelectCondition(Value *A, Value *B) { 2220 // Step 1: We may have peeked through bitcasts in the caller. 2221 // Exit immediately if we don't have (vector) integer types. 2222 Type *Ty = A->getType(); 2223 if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy()) 2224 return nullptr; 2225 2226 // Step 2: We need 0 or all-1's bitmasks. 
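  // If every bit of A is a copy of its sign bit (i.e. the number of sign bits
  // equals the bit width), each lane of A must be 0 or -1. A sext of an i1
  // condition is the typical (illustrative) way such a mask arises.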
2227 if (ComputeNumSignBits(A) != Ty->getScalarSizeInBits()) 2228 return nullptr; 2229 2230 // Step 3: If B is the 'not' value of A, we have our answer. 2231 if (match(A, m_Not(m_Specific(B)))) { 2232 // If these are scalars or vectors of i1, A can be used directly. 2233 if (Ty->isIntOrIntVectorTy(1)) 2234 return A; 2235 return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(Ty)); 2236 } 2237 2238 // If both operands are constants, see if the constants are inverse bitmasks. 2239 Constant *AConst, *BConst; 2240 if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst))) 2241 if (AConst == ConstantExpr::getNot(BConst)) 2242 return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty)); 2243 2244 // Look for more complex patterns. The 'not' op may be hidden behind various 2245 // casts. Look through sexts and bitcasts to find the booleans. 2246 Value *Cond; 2247 Value *NotB; 2248 if (match(A, m_SExt(m_Value(Cond))) && 2249 Cond->getType()->isIntOrIntVectorTy(1) && 2250 match(B, m_OneUse(m_Not(m_Value(NotB))))) { 2251 NotB = peekThroughBitcast(NotB, true); 2252 if (match(NotB, m_SExt(m_Specific(Cond)))) 2253 return Cond; 2254 } 2255 2256 // All scalar (and most vector) possibilities should be handled now. 2257 // Try more matches that only apply to non-splat constant vectors. 2258 if (!Ty->isVectorTy()) 2259 return nullptr; 2260 2261 // If both operands are xor'd with constants using the same sexted boolean 2262 // operand, see if the constants are inverse bitmasks. 2263 // TODO: Use ConstantExpr::getNot()? 2264 if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) && 2265 match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) && 2266 Cond->getType()->isIntOrIntVectorTy(1) && 2267 areInverseVectorBitmasks(AConst, BConst)) { 2268 AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty)); 2269 return Builder.CreateXor(Cond, AConst); 2270 } 2271 return nullptr; 2272 } 2273 2274 /// We have an expression of the form (A & C) | (B & D). Try to simplify this 2275 /// to "A' ? C : D", where A' is a boolean or vector of booleans. 2276 Value *InstCombiner::matchSelectFromAndOr(Value *A, Value *C, Value *B, 2277 Value *D) { 2278 // The potential condition of the select may be bitcasted. In that case, look 2279 // through its bitcast and the corresponding bitcast of the 'not' condition. 2280 Type *OrigType = A->getType(); 2281 A = peekThroughBitcast(A, true); 2282 B = peekThroughBitcast(B, true); 2283 if (Value *Cond = getSelectCondition(A, B)) { 2284 // ((bc Cond) & C) | ((bc ~Cond) & D) --> bc (select Cond, (bc C), (bc D)) 2285 // The bitcasts will either all exist or all not exist. The builder will 2286 // not create unnecessary casts if the types already match. 2287 Value *BitcastC = Builder.CreateBitCast(C, A->getType()); 2288 Value *BitcastD = Builder.CreateBitCast(D, A->getType()); 2289 Value *Select = Builder.CreateSelect(Cond, BitcastC, BitcastD); 2290 return Builder.CreateBitCast(Select, OrigType); 2291 } 2292 2293 return nullptr; 2294 } 2295 2296 /// Fold (icmp)|(icmp) if possible. 2297 Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS, 2298 BinaryOperator &Or) { 2299 const SimplifyQuery Q = SQ.getWithInstruction(&Or); 2300 2301 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2) 2302 // if K1 and K2 are a one-bit mask. 
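  // For example (illustrative): ((X & 4) == 0) | ((X & 8) == 0)
  //   --> (X & 12) != 12
  // i.e. the 'or' of the two single-bit tests is false only when both bits
  // are set.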
2303 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, Or)) 2304 return V; 2305 2306 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 2307 2308 ConstantInt *LHSC = dyn_cast<ConstantInt>(LHS->getOperand(1)); 2309 ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS->getOperand(1)); 2310 2311 // Fold (icmp ult/ule (A + C1), C3) | (icmp ult/ule (A + C2), C3) 2312 // --> (icmp ult/ule ((A & ~(C1 ^ C2)) + max(C1, C2)), C3) 2313 // The original condition actually refers to the following two ranges: 2314 // [MAX_UINT-C1+1, MAX_UINT-C1+1+C3] and [MAX_UINT-C2+1, MAX_UINT-C2+1+C3] 2315 // We can fold these two ranges if: 2316 // 1) C1 and C2 is unsigned greater than C3. 2317 // 2) The two ranges are separated. 2318 // 3) C1 ^ C2 is one-bit mask. 2319 // 4) LowRange1 ^ LowRange2 and HighRange1 ^ HighRange2 are one-bit mask. 2320 // This implies all values in the two ranges differ by exactly one bit. 2321 2322 if ((PredL == ICmpInst::ICMP_ULT || PredL == ICmpInst::ICMP_ULE) && 2323 PredL == PredR && LHSC && RHSC && LHS->hasOneUse() && RHS->hasOneUse() && 2324 LHSC->getType() == RHSC->getType() && 2325 LHSC->getValue() == (RHSC->getValue())) { 2326 2327 Value *LAdd = LHS->getOperand(0); 2328 Value *RAdd = RHS->getOperand(0); 2329 2330 Value *LAddOpnd, *RAddOpnd; 2331 ConstantInt *LAddC, *RAddC; 2332 if (match(LAdd, m_Add(m_Value(LAddOpnd), m_ConstantInt(LAddC))) && 2333 match(RAdd, m_Add(m_Value(RAddOpnd), m_ConstantInt(RAddC))) && 2334 LAddC->getValue().ugt(LHSC->getValue()) && 2335 RAddC->getValue().ugt(LHSC->getValue())) { 2336 2337 APInt DiffC = LAddC->getValue() ^ RAddC->getValue(); 2338 if (LAddOpnd == RAddOpnd && DiffC.isPowerOf2()) { 2339 ConstantInt *MaxAddC = nullptr; 2340 if (LAddC->getValue().ult(RAddC->getValue())) 2341 MaxAddC = RAddC; 2342 else 2343 MaxAddC = LAddC; 2344 2345 APInt RRangeLow = -RAddC->getValue(); 2346 APInt RRangeHigh = RRangeLow + LHSC->getValue(); 2347 APInt LRangeLow = -LAddC->getValue(); 2348 APInt LRangeHigh = LRangeLow + LHSC->getValue(); 2349 APInt LowRangeDiff = RRangeLow ^ LRangeLow; 2350 APInt HighRangeDiff = RRangeHigh ^ LRangeHigh; 2351 APInt RangeDiff = LRangeLow.sgt(RRangeLow) ? 
LRangeLow - RRangeLow 2352 : RRangeLow - LRangeLow; 2353 2354 if (LowRangeDiff.isPowerOf2() && LowRangeDiff == HighRangeDiff && 2355 RangeDiff.ugt(LHSC->getValue())) { 2356 Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC); 2357 2358 Value *NewAnd = Builder.CreateAnd(LAddOpnd, MaskC); 2359 Value *NewAdd = Builder.CreateAdd(NewAnd, MaxAddC); 2360 return Builder.CreateICmp(LHS->getPredicate(), NewAdd, LHSC); 2361 } 2362 } 2363 } 2364 } 2365 2366 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B) 2367 if (predicatesFoldable(PredL, PredR)) { 2368 if (LHS->getOperand(0) == RHS->getOperand(1) && 2369 LHS->getOperand(1) == RHS->getOperand(0)) 2370 LHS->swapOperands(); 2371 if (LHS->getOperand(0) == RHS->getOperand(0) && 2372 LHS->getOperand(1) == RHS->getOperand(1)) { 2373 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1); 2374 unsigned Code = getICmpCode(LHS) | getICmpCode(RHS); 2375 bool IsSigned = LHS->isSigned() || RHS->isSigned(); 2376 return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder); 2377 } 2378 } 2379 2380 // handle (roughly): 2381 // (icmp ne (A & B), C) | (icmp ne (A & D), E) 2382 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, false, Builder)) 2383 return V; 2384 2385 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0); 2386 if (LHS->hasOneUse() || RHS->hasOneUse()) { 2387 // (icmp eq B, 0) | (icmp ult A, B) -> (icmp ule A, B-1) 2388 // (icmp eq B, 0) | (icmp ugt B, A) -> (icmp ule A, B-1) 2389 Value *A = nullptr, *B = nullptr; 2390 if (PredL == ICmpInst::ICMP_EQ && LHSC && LHSC->isZero()) { 2391 B = LHS0; 2392 if (PredR == ICmpInst::ICMP_ULT && LHS0 == RHS->getOperand(1)) 2393 A = RHS0; 2394 else if (PredR == ICmpInst::ICMP_UGT && LHS0 == RHS0) 2395 A = RHS->getOperand(1); 2396 } 2397 // (icmp ult A, B) | (icmp eq B, 0) -> (icmp ule A, B-1) 2398 // (icmp ugt B, A) | (icmp eq B, 0) -> (icmp ule A, B-1) 2399 else if (PredR == ICmpInst::ICMP_EQ && RHSC && RHSC->isZero()) { 2400 B = RHS0; 2401 if (PredL == ICmpInst::ICMP_ULT && RHS0 == LHS->getOperand(1)) 2402 A = LHS0; 2403 else if (PredL == ICmpInst::ICMP_UGT && LHS0 == RHS0) 2404 A = LHS->getOperand(1); 2405 } 2406 if (A && B) 2407 return Builder.CreateICmp( 2408 ICmpInst::ICMP_UGE, 2409 Builder.CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A); 2410 } 2411 2412 if (Value *V = foldAndOrOfICmpsWithConstEq(LHS, RHS, Or, Builder, Q)) 2413 return V; 2414 if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, Or, Builder, Q)) 2415 return V; 2416 2417 // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n 2418 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/true)) 2419 return V; 2420 2421 // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n 2422 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/true)) 2423 return V; 2424 2425 if (Value *V = foldAndOrOfEqualityCmpsWithConstants(LHS, RHS, false, Builder)) 2426 return V; 2427 2428 if (Value *V = foldIsPowerOf2(LHS, RHS, false /* JoinedByAnd */, Builder)) 2429 return V; 2430 2431 if (Value *X = 2432 foldUnsignedUnderflowCheck(LHS, RHS, /*IsAnd=*/false, Q, Builder)) 2433 return X; 2434 if (Value *X = 2435 foldUnsignedUnderflowCheck(RHS, LHS, /*IsAnd=*/false, Q, Builder)) 2436 return X; 2437 2438 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2). 
2439 if (!LHSC || !RHSC) 2440 return nullptr; 2441 2442 if (LHSC == RHSC && PredL == PredR) { 2443 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0) 2444 if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) { 2445 Value *NewOr = Builder.CreateOr(LHS0, RHS0); 2446 return Builder.CreateICmp(PredL, NewOr, LHSC); 2447 } 2448 } 2449 2450 // (icmp ult (X + CA), C1) | (icmp eq X, C2) -> (icmp ule (X + CA), C1) 2451 // iff C2 + CA == C1. 2452 if (PredL == ICmpInst::ICMP_ULT && PredR == ICmpInst::ICMP_EQ) { 2453 ConstantInt *AddC; 2454 if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC)))) 2455 if (RHSC->getValue() + AddC->getValue() == LHSC->getValue()) 2456 return Builder.CreateICmpULE(LHS0, LHSC); 2457 } 2458 2459 // From here on, we only handle: 2460 // (icmp1 A, C1) | (icmp2 A, C2) --> something simpler. 2461 if (LHS0 != RHS0) 2462 return nullptr; 2463 2464 // ICMP_[US][GL]E X, C is folded to ICMP_[US][GL]T elsewhere. 2465 if (PredL == ICmpInst::ICMP_UGE || PredL == ICmpInst::ICMP_ULE || 2466 PredR == ICmpInst::ICMP_UGE || PredR == ICmpInst::ICMP_ULE || 2467 PredL == ICmpInst::ICMP_SGE || PredL == ICmpInst::ICMP_SLE || 2468 PredR == ICmpInst::ICMP_SGE || PredR == ICmpInst::ICMP_SLE) 2469 return nullptr; 2470 2471 // We can't fold (ugt x, C) | (sgt x, C2). 2472 if (!predicatesFoldable(PredL, PredR)) 2473 return nullptr; 2474 2475 // Ensure that the larger constant is on the RHS. 2476 bool ShouldSwap; 2477 if (CmpInst::isSigned(PredL) || 2478 (ICmpInst::isEquality(PredL) && CmpInst::isSigned(PredR))) 2479 ShouldSwap = LHSC->getValue().sgt(RHSC->getValue()); 2480 else 2481 ShouldSwap = LHSC->getValue().ugt(RHSC->getValue()); 2482 2483 if (ShouldSwap) { 2484 std::swap(LHS, RHS); 2485 std::swap(LHSC, RHSC); 2486 std::swap(PredL, PredR); 2487 } 2488 2489 // At this point, we know we have two icmp instructions 2490 // comparing a value against two constants and or'ing the result 2491 // together. Because of the above check, we know that we only have 2492 // ICMP_EQ, ICMP_NE, ICMP_LT, and ICMP_GT here. We also know (from the 2493 // icmp folding check above), that the two constants are not 2494 // equal. 2495 assert(LHSC != RHSC && "Compares not folded above?"); 2496 2497 switch (PredL) { 2498 default: 2499 llvm_unreachable("Unknown integer condition code!"); 2500 case ICmpInst::ICMP_EQ: 2501 switch (PredR) { 2502 default: 2503 llvm_unreachable("Unknown integer condition code!"); 2504 case ICmpInst::ICMP_EQ: 2505 // Potential folds for this case should already be handled. 
2506 break; 2507 case ICmpInst::ICMP_UGT: 2508 // (X == 0 || X u> C) -> (X-1) u>= C 2509 if (LHSC->isMinValue(false)) 2510 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue() + 1, 2511 false, false); 2512 // (X == 13 | X u> 14) -> no change 2513 break; 2514 case ICmpInst::ICMP_SGT: 2515 // (X == INT_MIN || X s> C) -> (X-(INT_MIN+1)) u>= C-INT_MIN 2516 if (LHSC->isMinValue(true)) 2517 return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue() + 1, 2518 true, false); 2519 // (X == 13 | X s> 14) -> no change 2520 break; 2521 } 2522 break; 2523 case ICmpInst::ICMP_ULT: 2524 switch (PredR) { 2525 default: 2526 llvm_unreachable("Unknown integer condition code!"); 2527 case ICmpInst::ICMP_EQ: // (X u< 13 | X == 14) -> no change 2528 // (X u< C || X == UINT_MAX) => (X-C) u>= UINT_MAX-C 2529 if (RHSC->isMaxValue(false)) 2530 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue(), 2531 false, false); 2532 break; 2533 case ICmpInst::ICMP_UGT: // (X u< 13 | X u> 15) -> (X-13) u> 2 2534 assert(!RHSC->isMaxValue(false) && "Missed icmp simplification"); 2535 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, 2536 false, false); 2537 } 2538 break; 2539 case ICmpInst::ICMP_SLT: 2540 switch (PredR) { 2541 default: 2542 llvm_unreachable("Unknown integer condition code!"); 2543 case ICmpInst::ICMP_EQ: 2544 // (X s< C || X == INT_MAX) => (X-C) u>= INT_MAX-C 2545 if (RHSC->isMaxValue(true)) 2546 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue(), 2547 true, false); 2548 // (X s< 13 | X == 14) -> no change 2549 break; 2550 case ICmpInst::ICMP_SGT: // (X s< 13 | X s> 15) -> (X-13) u> 2 2551 assert(!RHSC->isMaxValue(true) && "Missed icmp simplification"); 2552 return insertRangeTest(LHS0, LHSC->getValue(), RHSC->getValue() + 1, true, 2553 false); 2554 } 2555 break; 2556 } 2557 return nullptr; 2558 } 2559 2560 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 2561 // here. We should standardize that construct where it is needed or choose some 2562 // other way to ensure that commutated variants of patterns are not missed. 2563 Instruction *InstCombiner::visitOr(BinaryOperator &I) { 2564 if (Value *V = SimplifyOrInst(I.getOperand(0), I.getOperand(1), 2565 SQ.getWithInstruction(&I))) 2566 return replaceInstUsesWith(I, V); 2567 2568 if (SimplifyAssociativeOrCommutative(I)) 2569 return &I; 2570 2571 if (Instruction *X = foldVectorBinop(I)) 2572 return X; 2573 2574 // See if we can simplify any instructions used by the instruction whose sole 2575 // purpose is to compute bits we don't care about. 2576 if (SimplifyDemandedInstructionBits(I)) 2577 return &I; 2578 2579 // Do this before using distributive laws to catch simple and/or/not patterns. 
2580 if (Instruction *Xor = foldOrToXor(I, Builder)) 2581 return Xor; 2582 2583 // (A&B)|(A&C) -> A&(B|C) etc 2584 if (Value *V = SimplifyUsingDistributiveLaws(I)) 2585 return replaceInstUsesWith(I, V); 2586 2587 if (Value *V = SimplifyBSwap(I, Builder)) 2588 return replaceInstUsesWith(I, V); 2589 2590 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 2591 return FoldedLogic; 2592 2593 if (Instruction *BSwap = matchBSwap(I)) 2594 return BSwap; 2595 2596 if (Instruction *Rotate = matchRotate(I)) 2597 return Rotate; 2598 2599 if (Instruction *Concat = matchOrConcat(I, Builder)) 2600 return replaceInstUsesWith(I, Concat); 2601 2602 Value *X, *Y; 2603 const APInt *CV; 2604 if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) && 2605 !CV->isAllOnesValue() && MaskedValueIsZero(Y, *CV, 0, &I)) { 2606 // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0 2607 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X). 2608 Value *Or = Builder.CreateOr(X, Y); 2609 return BinaryOperator::CreateXor(Or, ConstantInt::get(I.getType(), *CV)); 2610 } 2611 2612 // (A & C)|(B & D) 2613 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 2614 Value *A, *B, *C, *D; 2615 if (match(Op0, m_And(m_Value(A), m_Value(C))) && 2616 match(Op1, m_And(m_Value(B), m_Value(D)))) { 2617 ConstantInt *C1 = dyn_cast<ConstantInt>(C); 2618 ConstantInt *C2 = dyn_cast<ConstantInt>(D); 2619 if (C1 && C2) { // (A & C1)|(B & C2) 2620 Value *V1 = nullptr, *V2 = nullptr; 2621 if ((C1->getValue() & C2->getValue()).isNullValue()) { 2622 // ((V | N) & C1) | (V & C2) --> (V|N) & (C1|C2) 2623 // iff (C1&C2) == 0 and (N&~C1) == 0 2624 if (match(A, m_Or(m_Value(V1), m_Value(V2))) && 2625 ((V1 == B && 2626 MaskedValueIsZero(V2, ~C1->getValue(), 0, &I)) || // (V|N) 2627 (V2 == B && 2628 MaskedValueIsZero(V1, ~C1->getValue(), 0, &I)))) // (N|V) 2629 return BinaryOperator::CreateAnd(A, 2630 Builder.getInt(C1->getValue()|C2->getValue())); 2631 // Or commutes, try both ways. 2632 if (match(B, m_Or(m_Value(V1), m_Value(V2))) && 2633 ((V1 == A && 2634 MaskedValueIsZero(V2, ~C2->getValue(), 0, &I)) || // (V|N) 2635 (V2 == A && 2636 MaskedValueIsZero(V1, ~C2->getValue(), 0, &I)))) // (N|V) 2637 return BinaryOperator::CreateAnd(B, 2638 Builder.getInt(C1->getValue()|C2->getValue())); 2639 2640 // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2) 2641 // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0. 
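        // A concrete (illustrative) i8 instance of that bitfield merge:
        //   ((V | 1) & 15) | ((V | 16) & 240) --> (V | 17) & 255
        // where the two masked inserts touch disjoint bit ranges.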
2642 ConstantInt *C3 = nullptr, *C4 = nullptr; 2643 if (match(A, m_Or(m_Value(V1), m_ConstantInt(C3))) && 2644 (C3->getValue() & ~C1->getValue()).isNullValue() && 2645 match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) && 2646 (C4->getValue() & ~C2->getValue()).isNullValue()) { 2647 V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield"); 2648 return BinaryOperator::CreateAnd(V2, 2649 Builder.getInt(C1->getValue()|C2->getValue())); 2650 } 2651 } 2652 2653 if (C1->getValue() == ~C2->getValue()) { 2654 Value *X; 2655 2656 // ((X|B)&C1)|(B&C2) -> (X&C1) | B iff C1 == ~C2 2657 if (match(A, m_c_Or(m_Value(X), m_Specific(B)))) 2658 return BinaryOperator::CreateOr(Builder.CreateAnd(X, C1), B); 2659 // (A&C2)|((X|A)&C1) -> (X&C2) | A iff C1 == ~C2 2660 if (match(B, m_c_Or(m_Specific(A), m_Value(X)))) 2661 return BinaryOperator::CreateOr(Builder.CreateAnd(X, C2), A); 2662 2663 // ((X^B)&C1)|(B&C2) -> (X&C1) ^ B iff C1 == ~C2 2664 if (match(A, m_c_Xor(m_Value(X), m_Specific(B)))) 2665 return BinaryOperator::CreateXor(Builder.CreateAnd(X, C1), B); 2666 // (A&C2)|((X^A)&C1) -> (X&C2) ^ A iff C1 == ~C2 2667 if (match(B, m_c_Xor(m_Specific(A), m_Value(X)))) 2668 return BinaryOperator::CreateXor(Builder.CreateAnd(X, C2), A); 2669 } 2670 } 2671 2672 // Don't try to form a select if it's unlikely that we'll get rid of at 2673 // least one of the operands. A select is generally more expensive than the 2674 // 'or' that it is replacing. 2675 if (Op0->hasOneUse() || Op1->hasOneUse()) { 2676 // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants. 2677 if (Value *V = matchSelectFromAndOr(A, C, B, D)) 2678 return replaceInstUsesWith(I, V); 2679 if (Value *V = matchSelectFromAndOr(A, C, D, B)) 2680 return replaceInstUsesWith(I, V); 2681 if (Value *V = matchSelectFromAndOr(C, A, B, D)) 2682 return replaceInstUsesWith(I, V); 2683 if (Value *V = matchSelectFromAndOr(C, A, D, B)) 2684 return replaceInstUsesWith(I, V); 2685 if (Value *V = matchSelectFromAndOr(B, D, A, C)) 2686 return replaceInstUsesWith(I, V); 2687 if (Value *V = matchSelectFromAndOr(B, D, C, A)) 2688 return replaceInstUsesWith(I, V); 2689 if (Value *V = matchSelectFromAndOr(D, B, A, C)) 2690 return replaceInstUsesWith(I, V); 2691 if (Value *V = matchSelectFromAndOr(D, B, C, A)) 2692 return replaceInstUsesWith(I, V); 2693 } 2694 } 2695 2696 // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C 2697 if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) 2698 if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) 2699 return BinaryOperator::CreateOr(Op0, C); 2700 2701 // ((A ^ C) ^ B) | (B ^ A) -> (B ^ A) | C 2702 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) 2703 if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) 2704 return BinaryOperator::CreateOr(Op1, C); 2705 2706 // ((B | C) & A) | B -> B | (A & C) 2707 if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A)))) 2708 return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C)); 2709 2710 if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder)) 2711 return DeMorgan; 2712 2713 // Canonicalize xor to the RHS. 
2714 bool SwappedForXor = false; 2715 if (match(Op0, m_Xor(m_Value(), m_Value()))) { 2716 std::swap(Op0, Op1); 2717 SwappedForXor = true; 2718 } 2719 2720 // A | ( A ^ B) -> A | B 2721 // A | (~A ^ B) -> A | ~B 2722 // (A & B) | (A ^ B) 2723 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) { 2724 if (Op0 == A || Op0 == B) 2725 return BinaryOperator::CreateOr(A, B); 2726 2727 if (match(Op0, m_And(m_Specific(A), m_Specific(B))) || 2728 match(Op0, m_And(m_Specific(B), m_Specific(A)))) 2729 return BinaryOperator::CreateOr(A, B); 2730 2731 if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) { 2732 Value *Not = Builder.CreateNot(B, B->getName() + ".not"); 2733 return BinaryOperator::CreateOr(Not, Op0); 2734 } 2735 if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) { 2736 Value *Not = Builder.CreateNot(A, A->getName() + ".not"); 2737 return BinaryOperator::CreateOr(Not, Op0); 2738 } 2739 } 2740 2741 // A | ~(A | B) -> A | ~B 2742 // A | ~(A ^ B) -> A | ~B 2743 if (match(Op1, m_Not(m_Value(A)))) 2744 if (BinaryOperator *B = dyn_cast<BinaryOperator>(A)) 2745 if ((Op0 == B->getOperand(0) || Op0 == B->getOperand(1)) && 2746 Op1->hasOneUse() && (B->getOpcode() == Instruction::Or || 2747 B->getOpcode() == Instruction::Xor)) { 2748 Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) : 2749 B->getOperand(0); 2750 Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not"); 2751 return BinaryOperator::CreateOr(Not, Op0); 2752 } 2753 2754 if (SwappedForXor) 2755 std::swap(Op0, Op1); 2756 2757 { 2758 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0); 2759 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1); 2760 if (LHS && RHS) 2761 if (Value *Res = foldOrOfICmps(LHS, RHS, I)) 2762 return replaceInstUsesWith(I, Res); 2763 2764 // TODO: Make this recursive; it's a little tricky because an arbitrary 2765 // number of 'or' instructions might have to be created. 2766 Value *X, *Y; 2767 if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { 2768 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2769 if (Value *Res = foldOrOfICmps(LHS, Cmp, I)) 2770 return replaceInstUsesWith(I, Builder.CreateOr(Res, Y)); 2771 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2772 if (Value *Res = foldOrOfICmps(LHS, Cmp, I)) 2773 return replaceInstUsesWith(I, Builder.CreateOr(Res, X)); 2774 } 2775 if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { 2776 if (auto *Cmp = dyn_cast<ICmpInst>(X)) 2777 if (Value *Res = foldOrOfICmps(Cmp, RHS, I)) 2778 return replaceInstUsesWith(I, Builder.CreateOr(Res, Y)); 2779 if (auto *Cmp = dyn_cast<ICmpInst>(Y)) 2780 if (Value *Res = foldOrOfICmps(Cmp, RHS, I)) 2781 return replaceInstUsesWith(I, Builder.CreateOr(Res, X)); 2782 } 2783 } 2784 2785 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0))) 2786 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1))) 2787 if (Value *Res = foldLogicOfFCmps(LHS, RHS, false)) 2788 return replaceInstUsesWith(I, Res); 2789 2790 if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder)) 2791 return FoldedFCmps; 2792 2793 if (Instruction *CastedOr = foldCastedBitwiseLogic(I)) 2794 return CastedOr; 2795 2796 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>. 
2797 if (match(Op0, m_OneUse(m_SExt(m_Value(A)))) && 2798 A->getType()->isIntOrIntVectorTy(1)) 2799 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op1); 2800 if (match(Op1, m_OneUse(m_SExt(m_Value(A)))) && 2801 A->getType()->isIntOrIntVectorTy(1)) 2802 return SelectInst::Create(A, ConstantInt::getSigned(I.getType(), -1), Op0); 2803 2804 // Note: If we've gotten to the point of visiting the outer OR, then the 2805 // inner one couldn't be simplified. If it was a constant, then it won't 2806 // be simplified by a later pass either, so we try swapping the inner/outer 2807 // ORs in the hopes that we'll be able to simplify it this way. 2808 // (X|C) | V --> (X|V) | C 2809 ConstantInt *CI; 2810 if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) && 2811 match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) { 2812 Value *Inner = Builder.CreateOr(A, Op1); 2813 Inner->takeName(Op0); 2814 return BinaryOperator::CreateOr(Inner, CI); 2815 } 2816 2817 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D)) 2818 // Since this OR statement hasn't been optimized further yet, we hope 2819 // that this transformation will allow the new ORs to be optimized. 2820 { 2821 Value *X = nullptr, *Y = nullptr; 2822 if (Op0->hasOneUse() && Op1->hasOneUse() && 2823 match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) && 2824 match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) { 2825 Value *orTrue = Builder.CreateOr(A, C); 2826 Value *orFalse = Builder.CreateOr(B, D); 2827 return SelectInst::Create(X, orTrue, orFalse); 2828 } 2829 } 2830 2831 // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y)-1), X) --> X s> Y ? -1 : X. 2832 { 2833 Value *X, *Y; 2834 const APInt *ShAmt; 2835 Type *Ty = I.getType(); 2836 if (match(&I, m_c_Or(m_OneUse(m_AShr(m_NSWSub(m_Value(Y), m_Value(X)), 2837 m_APInt(ShAmt))), 2838 m_Deferred(X))) && 2839 *ShAmt == Ty->getScalarSizeInBits() - 1) { 2840 Value *NewICmpInst = Builder.CreateICmpSGT(X, Y); 2841 return SelectInst::Create(NewICmpInst, ConstantInt::getAllOnesValue(Ty), 2842 X); 2843 } 2844 } 2845 2846 if (Instruction *V = 2847 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I)) 2848 return V; 2849 2850 CmpInst::Predicate Pred; 2851 Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv; 2852 // Check if the OR weakens the overflow condition for umul.with.overflow by 2853 // treating any non-zero result as overflow. In that case, we overflow if both 2854 // umul.with.overflow operands are != 0, as in that case the result can only 2855 // be 0, iff the multiplication overflows. 2856 if (match(&I, 2857 m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_Value(UMulWithOv)), 2858 m_Value(Ov)), 2859 m_CombineAnd(m_ICmp(Pred, 2860 m_CombineAnd(m_ExtractValue<0>( 2861 m_Deferred(UMulWithOv)), 2862 m_Value(Mul)), 2863 m_ZeroInt()), 2864 m_Value(MulIsNotZero)))) && 2865 (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse())) && 2866 Pred == CmpInst::ICMP_NE) { 2867 Value *A, *B; 2868 if (match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>( 2869 m_Value(A), m_Value(B)))) { 2870 Value *NotNullA = Builder.CreateIsNotNull(A); 2871 Value *NotNullB = Builder.CreateIsNotNull(B); 2872 return BinaryOperator::CreateAnd(NotNullA, NotNullB); 2873 } 2874 } 2875 2876 return nullptr; 2877 } 2878 2879 /// A ^ B can be specified using other logic ops in a variety of patterns. We 2880 /// can fold these early and efficiently by morphing an existing instruction. 
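/// For instance (illustrative), (A & B) ^ (A | B) is always A ^ B: bits where
/// A and B agree are equal in both operands and cancel in the xor, while bits
/// where they differ survive only on the 'or' side.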
2881 static Instruction *foldXorToXor(BinaryOperator &I, 2882 InstCombiner::BuilderTy &Builder) { 2883 assert(I.getOpcode() == Instruction::Xor); 2884 Value *Op0 = I.getOperand(0); 2885 Value *Op1 = I.getOperand(1); 2886 Value *A, *B; 2887 2888 // There are 4 commuted variants for each of the basic patterns. 2889 2890 // (A & B) ^ (A | B) -> A ^ B 2891 // (A & B) ^ (B | A) -> A ^ B 2892 // (A | B) ^ (A & B) -> A ^ B 2893 // (A | B) ^ (B & A) -> A ^ B 2894 if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)), 2895 m_c_Or(m_Deferred(A), m_Deferred(B))))) 2896 return BinaryOperator::CreateXor(A, B); 2897 2898 // (A | ~B) ^ (~A | B) -> A ^ B 2899 // (~B | A) ^ (~A | B) -> A ^ B 2900 // (~A | B) ^ (A | ~B) -> A ^ B 2901 // (B | ~A) ^ (A | ~B) -> A ^ B 2902 if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))), 2903 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B))))) 2904 return BinaryOperator::CreateXor(A, B); 2905 2906 // (A & ~B) ^ (~A & B) -> A ^ B 2907 // (~B & A) ^ (~A & B) -> A ^ B 2908 // (~A & B) ^ (A & ~B) -> A ^ B 2909 // (B & ~A) ^ (A & ~B) -> A ^ B 2910 if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))), 2911 m_c_And(m_Not(m_Deferred(A)), m_Deferred(B))))) 2912 return BinaryOperator::CreateXor(A, B); 2913 2914 // For the remaining cases we need to get rid of one of the operands. 2915 if (!Op0->hasOneUse() && !Op1->hasOneUse()) 2916 return nullptr; 2917 2918 // (A | B) ^ ~(A & B) -> ~(A ^ B) 2919 // (A | B) ^ ~(B & A) -> ~(A ^ B) 2920 // (A & B) ^ ~(A | B) -> ~(A ^ B) 2921 // (A & B) ^ ~(B | A) -> ~(A ^ B) 2922 // Complexity sorting ensures the not will be on the right side. 2923 if ((match(Op0, m_Or(m_Value(A), m_Value(B))) && 2924 match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) || 2925 (match(Op0, m_And(m_Value(A), m_Value(B))) && 2926 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))) 2927 return BinaryOperator::CreateNot(Builder.CreateXor(A, B)); 2928 2929 return nullptr; 2930 } 2931 2932 Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS, 2933 BinaryOperator &I) { 2934 assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS && 2935 I.getOperand(1) == RHS && "Should be 'xor' with these operands"); 2936 2937 if (predicatesFoldable(LHS->getPredicate(), RHS->getPredicate())) { 2938 if (LHS->getOperand(0) == RHS->getOperand(1) && 2939 LHS->getOperand(1) == RHS->getOperand(0)) 2940 LHS->swapOperands(); 2941 if (LHS->getOperand(0) == RHS->getOperand(0) && 2942 LHS->getOperand(1) == RHS->getOperand(1)) { 2943 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B) 2944 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1); 2945 unsigned Code = getICmpCode(LHS) ^ getICmpCode(RHS); 2946 bool IsSigned = LHS->isSigned() || RHS->isSigned(); 2947 return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder); 2948 } 2949 } 2950 2951 // TODO: This can be generalized to compares of non-signbits using 2952 // decomposeBitTestICmp(). It could be enhanced more by using (something like) 2953 // foldLogOpOfMaskedICmps(). 
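// The sign-bit folds below rely on (X s> -1) and (X s< 0) testing only the
// sign bit, so xor'ing the i1 results is the same as testing the sign bit of
// X ^ Y. A sketch of the first case, assuming i32 operands:
//   %a = icmp sgt i32 %x, -1
//   %b = icmp sgt i32 %y, -1
//   %r = xor i1 %a, %b
// -->
//   %t = xor i32 %x, %y
//   %r = icmp slt i32 %t, 0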
2954 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 2955 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); 2956 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); 2957 if ((LHS->hasOneUse() || RHS->hasOneUse()) && 2958 LHS0->getType() == RHS0->getType() && 2959 LHS0->getType()->isIntOrIntVectorTy()) { 2960 // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0 2961 // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0 2962 if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_AllOnes()) && 2963 PredR == CmpInst::ICMP_SGT && match(RHS1, m_AllOnes())) || 2964 (PredL == CmpInst::ICMP_SLT && match(LHS1, m_Zero()) && 2965 PredR == CmpInst::ICMP_SLT && match(RHS1, m_Zero()))) { 2966 Value *Zero = ConstantInt::getNullValue(LHS0->getType()); 2967 return Builder.CreateICmpSLT(Builder.CreateXor(LHS0, RHS0), Zero); 2968 } 2969 // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1 2970 // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1 2971 if ((PredL == CmpInst::ICMP_SGT && match(LHS1, m_AllOnes()) && 2972 PredR == CmpInst::ICMP_SLT && match(RHS1, m_Zero())) || 2973 (PredL == CmpInst::ICMP_SLT && match(LHS1, m_Zero()) && 2974 PredR == CmpInst::ICMP_SGT && match(RHS1, m_AllOnes()))) { 2975 Value *MinusOne = ConstantInt::getAllOnesValue(LHS0->getType()); 2976 return Builder.CreateICmpSGT(Builder.CreateXor(LHS0, RHS0), MinusOne); 2977 } 2978 } 2979 2980 // Instead of trying to imitate the folds for and/or, decompose this 'xor' 2981 // into those logic ops. That is, try to turn this into an and-of-icmps 2982 // because we have many folds for that pattern. 2983 // 2984 // This is based on a truth table definition of xor: 2985 // X ^ Y --> (X | Y) & !(X & Y) 2986 if (Value *OrICmp = SimplifyBinOp(Instruction::Or, LHS, RHS, SQ)) { 2987 // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y). 2988 // TODO: If OrICmp is false, the whole thing is false (InstSimplify?). 2989 if (Value *AndICmp = SimplifyBinOp(Instruction::And, LHS, RHS, SQ)) { 2990 // TODO: Independently handle cases where the 'and' side is a constant. 2991 ICmpInst *X = nullptr, *Y = nullptr; 2992 if (OrICmp == LHS && AndICmp == RHS) { 2993 // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS --> X & !Y 2994 X = LHS; 2995 Y = RHS; 2996 } 2997 if (OrICmp == RHS && AndICmp == LHS) { 2998 // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS --> !Y & X 2999 X = RHS; 3000 Y = LHS; 3001 } 3002 if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) { 3003 // Invert the predicate of 'Y', thus inverting its output. 3004 Y->setPredicate(Y->getInversePredicate()); 3005 // So, are there other uses of Y? 3006 if (!Y->hasOneUse()) { 3007 // We need to adapt other uses of Y though. Get a value that matches 3008 // the original value of Y before inversion. While this increases 3009 // immediate instruction count, we have just ensured that all the 3010 // users are freely-invertible, so that 'not' *will* get folded away. 3011 BuilderTy::InsertPointGuard Guard(Builder); 3012 // Set insertion point to right after the Y. 3013 Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator())); 3014 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3015 // Replace all uses of Y (excluding the one in NotY!) with NotY. 3016 Worklist.pushUsersToWorkList(*Y); 3017 Y->replaceUsesWithIf(NotY, 3018 [NotY](Use &U) { return U.getUser() != NotY; }); 3019 } 3020 // All done. 
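// With Y's predicate inverted, 'and LHS, RHS' now evaluates X & !Y (for the
// original Y), which is the truth-table expansion of the xor being folded.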
3021 return Builder.CreateAnd(LHS, RHS); 3022 } 3023 } 3024 } 3025 3026 return nullptr; 3027 } 3028 3029 /// If we have a masked merge, in the canonical form of: 3030 /// (assuming that A only has one use.) 3031 /// | A | |B| 3032 /// ((x ^ y) & M) ^ y 3033 /// | D | 3034 /// * If M is inverted: 3035 /// | D | 3036 /// ((x ^ y) & ~M) ^ y 3037 /// We can canonicalize by swapping the final xor operand 3038 /// to eliminate the 'not' of the mask. 3039 /// ((x ^ y) & M) ^ x 3040 /// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops 3041 /// because that shortens the dependency chain and improves analysis: 3042 /// (x & M) | (y & ~M) 3043 static Instruction *visitMaskedMerge(BinaryOperator &I, 3044 InstCombiner::BuilderTy &Builder) { 3045 Value *B, *X, *D; 3046 Value *M; 3047 if (!match(&I, m_c_Xor(m_Value(B), 3048 m_OneUse(m_c_And( 3049 m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)), 3050 m_Value(D)), 3051 m_Value(M)))))) 3052 return nullptr; 3053 3054 Value *NotM; 3055 if (match(M, m_Not(m_Value(NotM)))) { 3056 // De-invert the mask and swap the value in B part. 3057 Value *NewA = Builder.CreateAnd(D, NotM); 3058 return BinaryOperator::CreateXor(NewA, X); 3059 } 3060 3061 Constant *C; 3062 if (D->hasOneUse() && match(M, m_Constant(C))) { 3063 // Propagating undef is unsafe. Clamp undef elements to -1. 3064 Type *EltTy = C->getType()->getScalarType(); 3065 C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy)); 3066 // Unfold. 3067 Value *LHS = Builder.CreateAnd(X, C); 3068 Value *NotC = Builder.CreateNot(C); 3069 Value *RHS = Builder.CreateAnd(B, NotC); 3070 return BinaryOperator::CreateOr(LHS, RHS); 3071 } 3072 3073 return nullptr; 3074 } 3075 3076 // Transform 3077 // ~(x ^ y) 3078 // into: 3079 // (~x) ^ y 3080 // or into 3081 // x ^ (~y) 3082 static Instruction *sinkNotIntoXor(BinaryOperator &I, 3083 InstCombiner::BuilderTy &Builder) { 3084 Value *X, *Y; 3085 // FIXME: one-use check is not needed in general, but currently we are unable 3086 // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182) 3087 if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y)))))) 3088 return nullptr; 3089 3090 // We only want to do the transform if it is free to do. 3091 if (isFreeToInvert(X, X->hasOneUse())) { 3092 // Ok, good. 3093 } else if (isFreeToInvert(Y, Y->hasOneUse())) { 3094 std::swap(X, Y); 3095 } else 3096 return nullptr; 3097 3098 Value *NotX = Builder.CreateNot(X, X->getName() + ".not"); 3099 return BinaryOperator::CreateXor(NotX, Y, I.getName() + ".demorgan"); 3100 } 3101 3102 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches 3103 // here. We should standardize that construct where it is needed or choose some 3104 // other way to ensure that commutated variants of patterns are not missed. 3105 Instruction *InstCombiner::visitXor(BinaryOperator &I) { 3106 if (Value *V = SimplifyXorInst(I.getOperand(0), I.getOperand(1), 3107 SQ.getWithInstruction(&I))) 3108 return replaceInstUsesWith(I, V); 3109 3110 if (SimplifyAssociativeOrCommutative(I)) 3111 return &I; 3112 3113 if (Instruction *X = foldVectorBinop(I)) 3114 return X; 3115 3116 if (Instruction *NewXor = foldXorToXor(I, Builder)) 3117 return NewXor; 3118 3119 // (A&B)^(A&C) -> A&(B^C) etc 3120 if (Value *V = SimplifyUsingDistributiveLaws(I)) 3121 return replaceInstUsesWith(I, V); 3122 3123 // See if we can simplify any instructions used by the instruction whose sole 3124 // purpose is to compute bits we don't care about. 
3125 if (SimplifyDemandedInstructionBits(I)) 3126 return &I; 3127 3128 if (Value *V = SimplifyBSwap(I, Builder)) 3129 return replaceInstUsesWith(I, V); 3130 3131 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); 3132 3133 // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M) 3134 // This it a special case in haveNoCommonBitsSet, but the computeKnownBits 3135 // calls in there are unnecessary as SimplifyDemandedInstructionBits should 3136 // have already taken care of those cases. 3137 Value *M; 3138 if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()), 3139 m_c_And(m_Deferred(M), m_Value())))) 3140 return BinaryOperator::CreateOr(Op0, Op1); 3141 3142 // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand. 3143 Value *X, *Y; 3144 3145 // We must eliminate the and/or (one-use) for these transforms to not increase 3146 // the instruction count. 3147 // ~(~X & Y) --> (X | ~Y) 3148 // ~(Y & ~X) --> (X | ~Y) 3149 if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) { 3150 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3151 return BinaryOperator::CreateOr(X, NotY); 3152 } 3153 // ~(~X | Y) --> (X & ~Y) 3154 // ~(Y | ~X) --> (X & ~Y) 3155 if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) { 3156 Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not"); 3157 return BinaryOperator::CreateAnd(X, NotY); 3158 } 3159 3160 if (Instruction *Xor = visitMaskedMerge(I, Builder)) 3161 return Xor; 3162 3163 // Is this a 'not' (~) fed by a binary operator? 3164 BinaryOperator *NotVal; 3165 if (match(&I, m_Not(m_BinOp(NotVal)))) { 3166 if (NotVal->getOpcode() == Instruction::And || 3167 NotVal->getOpcode() == Instruction::Or) { 3168 // Apply DeMorgan's Law when inverts are free: 3169 // ~(X & Y) --> (~X | ~Y) 3170 // ~(X | Y) --> (~X & ~Y) 3171 if (isFreeToInvert(NotVal->getOperand(0), 3172 NotVal->getOperand(0)->hasOneUse()) && 3173 isFreeToInvert(NotVal->getOperand(1), 3174 NotVal->getOperand(1)->hasOneUse())) { 3175 Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs"); 3176 Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs"); 3177 if (NotVal->getOpcode() == Instruction::And) 3178 return BinaryOperator::CreateOr(NotX, NotY); 3179 return BinaryOperator::CreateAnd(NotX, NotY); 3180 } 3181 } 3182 3183 // ~(X - Y) --> ~X + Y 3184 if (match(NotVal, m_Sub(m_Value(X), m_Value(Y)))) 3185 if (isa<Constant>(X) || NotVal->hasOneUse()) 3186 return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y); 3187 3188 // ~(~X >>s Y) --> (X >>s Y) 3189 if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y)))) 3190 return BinaryOperator::CreateAShr(X, Y); 3191 3192 // If we are inverting a right-shifted constant, we may be able to eliminate 3193 // the 'not' by inverting the constant and using the opposite shift type. 3194 // Canonicalization rules ensure that only a negative constant uses 'ashr', 3195 // but we must check that in case that transform has not fired yet. 3196 3197 // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits) 3198 Constant *C; 3199 if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) && 3200 match(C, m_Negative())) { 3201 // We matched a negative constant, so propagating undef is unsafe. 3202 // Clamp undef elements to -1. 
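// For a vector constant such as <2 x i8> <i8 -2, i8 undef>, the undef lane is
// clamped to -1 here so that the inverted constant used below is fully defined.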
3203 Type *EltTy = C->getType()->getScalarType(); 3204 C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy)); 3205 return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y); 3206 } 3207 3208 // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits) 3209 if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) && 3210 match(C, m_NonNegative())) { 3211 // We matched a non-negative constant, so propagating undef is unsafe. 3212 // Clamp undef elements to 0. 3213 Type *EltTy = C->getType()->getScalarType(); 3214 C = Constant::replaceUndefsWith(C, ConstantInt::getNullValue(EltTy)); 3215 return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y); 3216 } 3217 3218 // ~(X + C) --> -(C + 1) - X 3219 if (match(Op0, m_Add(m_Value(X), m_Constant(C)))) 3220 return BinaryOperator::CreateSub(ConstantExpr::getNeg(AddOne(C)), X); 3221 } 3222 3223 // Use DeMorgan and reassociation to eliminate a 'not' op. 3224 Constant *C1; 3225 if (match(Op1, m_Constant(C1))) { 3226 Constant *C2; 3227 if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) { 3228 // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1 3229 Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2)); 3230 return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1)); 3231 } 3232 if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) { 3233 // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1 3234 Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2)); 3235 return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1)); 3236 } 3237 } 3238 3239 // not (cmp A, B) = !cmp A, B 3240 CmpInst::Predicate Pred; 3241 if (match(&I, m_Not(m_OneUse(m_Cmp(Pred, m_Value(), m_Value()))))) { 3242 cast<CmpInst>(Op0)->setPredicate(CmpInst::getInversePredicate(Pred)); 3243 return replaceInstUsesWith(I, Op0); 3244 } 3245 3246 { 3247 const APInt *RHSC; 3248 if (match(Op1, m_APInt(RHSC))) { 3249 Value *X; 3250 const APInt *C; 3251 if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X)))) { 3252 // (C - X) ^ signmask -> (C + signmask - X) 3253 Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC); 3254 return BinaryOperator::CreateSub(NewC, X); 3255 } 3256 if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C)))) { 3257 // (X + C) ^ signmask -> (X + C + signmask) 3258 Constant *NewC = ConstantInt::get(I.getType(), *C + *RHSC); 3259 return BinaryOperator::CreateAdd(X, NewC); 3260 } 3261 3262 // (X|C1)^C2 -> X^(C1^C2) iff X&~C1 == 0 3263 if (match(Op0, m_Or(m_Value(X), m_APInt(C))) && 3264 MaskedValueIsZero(X, *C, 0, &I)) { 3265 Constant *NewC = ConstantInt::get(I.getType(), *C ^ *RHSC); 3266 return BinaryOperator::CreateXor(X, NewC); 3267 } 3268 } 3269 } 3270 3271 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(Op1)) { 3272 if (BinaryOperator *Op0I = dyn_cast<BinaryOperator>(Op0)) { 3273 if (ConstantInt *Op0CI = dyn_cast<ConstantInt>(Op0I->getOperand(1))) { 3274 if (Op0I->getOpcode() == Instruction::LShr) { 3275 // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3) 3276 // E1 = "X ^ C1" 3277 BinaryOperator *E1; 3278 ConstantInt *C1; 3279 if (Op0I->hasOneUse() && 3280 (E1 = dyn_cast<BinaryOperator>(Op0I->getOperand(0))) && 3281 E1->getOpcode() == Instruction::Xor && 3282 (C1 = dyn_cast<ConstantInt>(E1->getOperand(1)))) { 3283 // fold (C1 >> C2) ^ C3 3284 ConstantInt *C2 = Op0CI, *C3 = RHSC; 3285 APInt FoldConst = C1->getValue().lshr(C2->getValue()); 3286 FoldConst ^= C3->getValue(); 3287 // Prepare the two operands. 
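// For example, with i8 constants: ((X ^ 0xF0) >>u 4) ^ 0x01 becomes
// (X >>u 4) ^ 0x0E, since lshr distributes over xor and
// (0xF0 >>u 4) ^ 0x01 == 0x0E. Opnd0 is the new shift, FoldVal the constant.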
3288 Value *Opnd0 = Builder.CreateLShr(E1->getOperand(0), C2); 3289 Opnd0->takeName(Op0I); 3290 cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc()); 3291 Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst); 3292 3293 return BinaryOperator::CreateXor(Opnd0, FoldVal); 3294 } 3295 } 3296 } 3297 } 3298 } 3299 3300 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I)) 3301 return FoldedLogic; 3302 3303 // Y ^ (X | Y) --> X & ~Y 3304 // Y ^ (Y | X) --> X & ~Y 3305 if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0))))) 3306 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0)); 3307 // (X | Y) ^ Y --> X & ~Y 3308 // (Y | X) ^ Y --> X & ~Y 3309 if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1))))) 3310 return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1)); 3311 3312 // Y ^ (X & Y) --> ~X & Y 3313 // Y ^ (Y & X) --> ~X & Y 3314 if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0))))) 3315 return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X)); 3316 // (X & Y) ^ Y --> ~X & Y 3317 // (Y & X) ^ Y --> ~X & Y 3318 // Canonical form is (X & C) ^ C; don't touch that. 3319 // TODO: A 'not' op is better for analysis and codegen, but demanded bits must 3320 // be fixed to prefer that (otherwise we get infinite looping). 3321 if (!match(Op1, m_Constant()) && 3322 match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1))))) 3323 return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X)); 3324 3325 Value *A, *B, *C; 3326 // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants. 3327 if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))), 3328 m_OneUse(m_c_Or(m_Deferred(A), m_Value(C)))))) 3329 return BinaryOperator::CreateXor( 3330 Builder.CreateAnd(Builder.CreateNot(A), C), B); 3331 3332 // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants. 3333 if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))), 3334 m_OneUse(m_c_Or(m_Deferred(B), m_Value(C)))))) 3335 return BinaryOperator::CreateXor( 3336 Builder.CreateAnd(Builder.CreateNot(B), C), A); 3337 3338 // (A & B) ^ (A ^ B) -> (A | B) 3339 if (match(Op0, m_And(m_Value(A), m_Value(B))) && 3340 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B)))) 3341 return BinaryOperator::CreateOr(A, B); 3342 // (A ^ B) ^ (A & B) -> (A | B) 3343 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) && 3344 match(Op1, m_c_And(m_Specific(A), m_Specific(B)))) 3345 return BinaryOperator::CreateOr(A, B); 3346 3347 // (A & ~B) ^ ~A -> ~(A & B) 3348 // (~B & A) ^ ~A -> ~(A & B) 3349 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) && 3350 match(Op1, m_Not(m_Specific(A)))) 3351 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B)); 3352 3353 if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0))) 3354 if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) 3355 if (Value *V = foldXorOfICmps(LHS, RHS, I)) 3356 return replaceInstUsesWith(I, V); 3357 3358 if (Instruction *CastedXor = foldCastedBitwiseLogic(I)) 3359 return CastedXor; 3360 3361 // Canonicalize a shifty way to code absolute value to the common pattern. 3362 // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1. 3363 // We're relying on the fact that we only do this transform when the shift has 3364 // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase 3365 // instructions). 
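// The ashr candidate feeds both the add and this xor, so it is the operand
// with exactly 2 uses; if it is currently Op0, move it to Op1 so a single
// match below handles both commuted forms.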
3366 if (Op0->hasNUses(2)) 3367 std::swap(Op0, Op1); 3368 3369 const APInt *ShAmt; 3370 Type *Ty = I.getType(); 3371 if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) && 3372 Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 && 3373 match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) { 3374 // B = ashr i32 A, 31 ; smear the sign bit 3375 // xor (add A, B), B ; add -1 and flip bits if negative 3376 // --> (A < 0) ? -A : A 3377 Value *Cmp = Builder.CreateICmpSLT(A, ConstantInt::getNullValue(Ty)); 3378 // Copy the nuw/nsw flags from the add to the negate. 3379 auto *Add = cast<BinaryOperator>(Op0); 3380 Value *Neg = Builder.CreateNeg(A, "", Add->hasNoUnsignedWrap(), 3381 Add->hasNoSignedWrap()); 3382 return SelectInst::Create(Cmp, Neg, A); 3383 } 3384 3385 // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max: 3386 // 3387 // %notx = xor i32 %x, -1 3388 // %cmp1 = icmp sgt i32 %notx, %y 3389 // %smax = select i1 %cmp1, i32 %notx, i32 %y 3390 // %res = xor i32 %smax, -1 3391 // => 3392 // %noty = xor i32 %y, -1 3393 // %cmp2 = icmp slt %x, %noty 3394 // %res = select i1 %cmp2, i32 %x, i32 %noty 3395 // 3396 // Same is applicable for smin/umax/umin. 3397 if (match(Op1, m_AllOnes()) && Op0->hasOneUse()) { 3398 Value *LHS, *RHS; 3399 SelectPatternFlavor SPF = matchSelectPattern(Op0, LHS, RHS).Flavor; 3400 if (SelectPatternResult::isMinOrMax(SPF)) { 3401 // It's possible we get here before the not has been simplified, so make 3402 // sure the input to the not isn't freely invertible. 3403 if (match(LHS, m_Not(m_Value(X))) && !isFreeToInvert(X, X->hasOneUse())) { 3404 Value *NotY = Builder.CreateNot(RHS); 3405 return SelectInst::Create( 3406 Builder.CreateICmp(getInverseMinMaxPred(SPF), X, NotY), X, NotY); 3407 } 3408 3409 // It's possible we get here before the not has been simplified, so make 3410 // sure the input to the not isn't freely invertible. 3411 if (match(RHS, m_Not(m_Value(Y))) && !isFreeToInvert(Y, Y->hasOneUse())) { 3412 Value *NotX = Builder.CreateNot(LHS); 3413 return SelectInst::Create( 3414 Builder.CreateICmp(getInverseMinMaxPred(SPF), NotX, Y), NotX, Y); 3415 } 3416 3417 // If both sides are freely invertible, then we can get rid of the xor 3418 // completely. 3419 if (isFreeToInvert(LHS, !LHS->hasNUsesOrMore(3)) && 3420 isFreeToInvert(RHS, !RHS->hasNUsesOrMore(3))) { 3421 Value *NotLHS = Builder.CreateNot(LHS); 3422 Value *NotRHS = Builder.CreateNot(RHS); 3423 return SelectInst::Create( 3424 Builder.CreateICmp(getInverseMinMaxPred(SPF), NotLHS, NotRHS), 3425 NotLHS, NotRHS); 3426 } 3427 } 3428 3429 // Pull 'not' into operands of select if both operands are one-use compares. 3430 // Inverting the predicates eliminates the 'not' operation. 3431 // Example: 3432 // not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?) --> 3433 // select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?) 3434 // TODO: Canonicalize by hoisting 'not' into an arm of the select if only 3435 // 1 select operand is a cmp? 3436 if (auto *Sel = dyn_cast<SelectInst>(Op0)) { 3437 auto *CmpT = dyn_cast<CmpInst>(Sel->getTrueValue()); 3438 auto *CmpF = dyn_cast<CmpInst>(Sel->getFalseValue()); 3439 if (CmpT && CmpF && CmpT->hasOneUse() && CmpF->hasOneUse()) { 3440 CmpT->setPredicate(CmpT->getInversePredicate()); 3441 CmpF->setPredicate(CmpF->getInversePredicate()); 3442 return replaceInstUsesWith(I, Sel); 3443 } 3444 } 3445 } 3446 3447 if (Instruction *NewXor = sinkNotIntoXor(I, Builder)) 3448 return NewXor; 3449 3450 return nullptr; 3451 } 3452