//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const SimplifyQuery &, unsigned);
static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
                              unsigned);

static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
                                     Value *FalseVal) {
  BinaryOperator::BinaryOps BinOpCode;
  if (auto *BO = dyn_cast<BinaryOperator>(Cond))
    BinOpCode = BO->getOpcode();
  else
    return nullptr;

  CmpInst::Predicate ExpectedPred, Pred1, Pred2;
  if (BinOpCode == BinaryOperator::Or) {
    ExpectedPred = ICmpInst::ICMP_NE;
  } else if (BinOpCode == BinaryOperator::And) {
    ExpectedPred = ICmpInst::ICMP_EQ;
  } else
    return nullptr;

  // %A = icmp eq %TV, %FV
  // %B = icmp eq %X, %Y (and one of these is a select operand)
  // %C = and %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %FV

  // %A = icmp ne %TV, %FV
  // %B = icmp ne %X, %Y (and one of these is a select operand)
  // %C = or %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %TV
  Value *X, *Y;
  if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
                                      m_Specific(FalseVal)),
                             m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
      Pred1 != Pred2 || Pred1 != ExpectedPred)
    return nullptr;

  if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
    return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;

  return nullptr;
}
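
// Why the 'and' form of the fold above is sound (illustrative reasoning, using
// assumed IR names that are not from an actual test):
//   %a = icmp eq i32 %tv, %fv
//   %b = icmp eq i32 %x, %tv        ; %b compares against one select arm
//   %c = and i1 %a, %b
//   %d = select i1 %c, i32 %tv, i32 %fv
// If %c is true then %a is true, so %tv == %fv and either arm equals %fv; if
// %c is false the select picks %fv directly. Either way %d is %fv. The
// 'or'/'ne' form is the mirror image and yields %tv.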

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) {
  return ConstantInt::getFalse(Ty);
}

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) {
  return ConstantInt::getTrue(Ty);
}

/// Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}

/// Simplify comparison with true or false branch of select:
///  %sel = select i1 %cond, i32 %tv, i32 %fv
///  %cmp = icmp sle i32 %sel, %rhs
/// Compose new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
                                 Value *RHS, Value *Cond,
                                 const SimplifyQuery &Q, unsigned MaxRecurse,
                                 Constant *TrueOrFalse) {
  Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if composed comparison is equivalent
    // to the select condition (%cond) then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
                                     Value *RHS, Value *Cond,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
                                      Value *RHS, Value *Cond,
                                      const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know comparison with both branches of select can be simplified, but they
/// are not equal. This routine handles some logical simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  if (match(FCmp, m_Zero()))
    if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()))
    if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = SimplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}
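
// A worked example of the last rule (hypothetical IR, for illustration only):
//   %c   = xor i1 %d, true
//   %sel = select i1 %c, i8 3, i8 7
//   %cmp = icmp eq i8 %sel, 7
// The true arm gives "icmp eq i8 3, 7" (false) and the false arm gives
// "icmp eq i8 7, 7" (true), so %cmp is equivalent to !%c. Because %c is itself
// a negation, the recursive xor query folds the whole comparison down to %d;
// for a plain %c no new xor is created and the routine simply returns null.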

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getFunction())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent() == &I->getFunction()->getEntryBlock() &&
      !isa<InvokeInst>(I))
    return true;

  return false;
}

/// Simplify "A op (B op' C)" by distributing op over op', turning it into
/// "(A op B) op' (A op C)". Here "op" is given by Opcode and "op'" is
/// given by OpcodeToExpand, while "A" corresponds to LHS and "B op' C" to RHS.
/// Also performs the transform "(A op' B) op C" -> "(A op C) op' (B op C)".
/// Returns the simplified value, or null if no simplification was performed.
static Value *ExpandBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS,
                          Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Check whether the expression has the form "(A op' B) op C".
  if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS))
    if (Op0->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op C) op' (B op C)".
      Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS;
      // Do "A op C" and "B op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "A op' B" then "L op' R" is just the LHS.
          if ((L == A && R == B) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == B &&
               R == A)) {
            ++NumExpand;
            return LHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  // Check whether the expression has the form "A op (B op' C)".
  if (BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS))
    if (Op1->getOpcode() == OpcodeToExpand) {
      // It does! Try turning it into "(A op B) op' (A op C)".
      Value *A = LHS, *B = Op1->getOperand(0), *C = Op1->getOperand(1);
      // Do "A op B" and "A op C" both simplify?
      if (Value *L = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse))
        if (Value *R = SimplifyBinOp(Opcode, A, C, Q, MaxRecurse)) {
          // They do! Return "L op' R" if it simplifies or is already available.
          // If "L op' R" equals "B op' C" then "L op' R" is just the RHS.
          if ((L == B && R == C) ||
              (Instruction::isCommutative(OpcodeToExpand) && L == C &&
               R == B)) {
            ++NumExpand;
            return RHS;
          }
          // Otherwise return "L op' R" if it simplifies.
          if (Value *V = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse)) {
            ++NumExpand;
            return V;
          }
        }
    }

  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B) return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B) return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A) return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C) return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}
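
// Illustrative (assumed) case for the first reassociation above, with Opcode
// being And:
//   (%x & %y) & %y
// "B op C" is "%y & %y", which simplifies back to %y, i.e. to B itself, so the
// whole expression is just the existing LHS "%x & %y" and no new instruction
// needs to be created.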

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && isa<UndefValue>(TV))
    return FV;
  if (FV && isa<UndefValue>(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}
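
// A minimal example of the common-value case above (hypothetical IR):
//   %sel = select i1 %c, i32 0, i32 4
//   %and = and i32 %sel, 1
// Evaluating the And on each arm gives "0 & 1" and "4 & 1", both of which
// fold to 0, so %and simplifies to 0 without looking at %c at all.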

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
///  %tmp = select i1 %cmp, i32 1, i32 2
///  %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = PI == LHS ?
        SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
        SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a value different
    // from the one seen previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the operation failed to simplify, or simplified to a value different
    // from the one seen previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
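
// Illustrative case for threading a comparison over a phi (assumed IR):
//   %p = phi i32 [ 3, %bb0 ], [ 5, %bb1 ]
//   %c = icmp ult i32 %p, 10
// The compare is true for every incoming value (3 and 5 are both below 10),
// so %c simplifies to true even though %p itself does not simplify.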

static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1   since   ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1  ->  -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
///
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                bool AllowNonInbounds = false) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
  APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());

  V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
  // As that strip may trace through `addrspacecast`, need to sext or trunc
  // the offset calculated.
  IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
  Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());

  Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
  if (V->getType()->isVectorTy())
    return ConstantVector::getSplat(V->getType()->getVectorNumElements(),
                                    OffsetIntPtr);
  return OffsetIntPtr;
}

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  return ConstantExpr::getSub(LHSOffset, RHSOffset);
}
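
// An assumed end-to-end example of how this helper feeds the ptrtoint
// subtraction fold in SimplifySubInst below:
//   %p  = getelementptr inbounds i8, i8* %base, i64 8
//   %q  = getelementptr inbounds i8, i8* %base, i64 3
//   %pi = ptrtoint i8* %p to i64
//   %qi = ptrtoint i8* %q to i64
//   %d  = sub i64 %pi, %qi
// Both pointers strip back to %base with offsets 8 and 3, so %d folds to 5.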

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - undef -> undef
  // undef - X -> undef
  if (match(Op0, m_Undef()) || match(Op1, m_Undef()))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (isNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (isNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
      // It does!  Now see if "X + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does!  Now see if "Y + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
      // It does!  Now see if "V - Z" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does!  Now see if "V - Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
      // It does!  Now see if "V + Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
        // It does!  Now see if "trunc V" simplifies.
        if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) &&
      match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * undef -> 0
  // X * 0 -> 0
  if (match(Op1, m_CombineOr(m_Undef(), m_Zero())))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  // i1 mul -> and.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = ExpandBinOp(Instruction::Mul, Op0, Op1, Instruction::Add,
                             Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
                                      MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
}
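
// Illustrative (assumed) IR for the exact-division fold in SimplifyMulInst:
//   %d = sdiv exact i32 %x, %y
//   %m = mul i32 %d, %y
// Because the division is marked 'exact' there is no remainder, so multiplying
// back by %y recovers %x and %m simplifies to %x.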

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv) {
  Type *Ty = Op0->getType();

  // X / undef -> undef
  // X % undef -> undef
  if (match(Op1, m_Undef()))
    return Op1;

  // X / 0 -> undef
  // X % 0 -> undef
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return UndefValue::get(Ty);

  // If any element of a constant divisor vector is zero or undef, the whole op
  // is undef.
  auto *Op1C = dyn_cast<Constant>(Op1);
  if (Op1C && Ty->isVectorTy()) {
    unsigned NumElts = Ty->getVectorNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (Elt && (Elt->isNullValue() || isa<UndefValue>(Elt)))
        return UndefValue::get(Ty);
    }
  }

  // undef / X -> 0
  // undef % X -> 0
  if (match(Op0, m_Undef()))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If this is a boolean op (single-bit element type), we can't have
  // division-by-zero or remainder-by-zero, so assume the divisor is 1.
  // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
  Value *X;
  if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
      (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  return nullptr;
}
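
// A small example of the boolean-divisor case above (hypothetical IR):
//   %w = zext i1 %b to i32
//   %q = udiv i32 %x, %w
// The divisor is either 0 or 1, and dividing by 0 would be immediate UB, so it
// is treated as 1: %q simplifies to %x (the matching urem would fold to 0).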

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended to
    // 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, true))
    return V;

  bool IsSigned = Opcode == Instruction::SDiv;

  // (X * Y) / Y -> X if the multiplication does not overflow.
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // If the Mul does not overflow, then we are good to go.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)))
      return X;
    // If X has the form X = A / Y, then X * Y cannot overflow.
    if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1)))))
      return X;
  }

  // (X rem Y) / Y -> 0
  if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Constant::getNullValue(Op0->getType());

  // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
  ConstantInt *C1, *C2;
  if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
      match(Op1, m_ConstantInt(C2))) {
    bool Overflow;
    (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
    if (Overflow)
      return Constant::getNullValue(Op0->getType());
  }

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}
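
// Illustrative (assumed) case for the isDivZero-based fold: if the dividend
// has a known small range, e.g.
//   %lo = and i32 %a, 7
//   %q  = udiv i32 %lo, 8
// the recursive "icmp ult %lo, 8" query can prove the dividend is always
// smaller than the divisor, so %q simplifies to 0 (and "urem i32 %lo, 8"
// would simplify to %lo itself).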

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, false))
    return V;

  // (X % Y) % Y -> X % Y
  if ((Opcode == Instruction::SRem &&
       match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (Opcode == Instruction::URem &&
       match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Op0;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo &&
      ((Opcode == Instruction::SRem &&
        match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
       (Opcode == Instruction::URem &&
        match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
    return Constant::getNullValue(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If X / Y == 0, then X % Y == X.
  if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
    return Op0;

  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If two operands are negated and no signed overflow, return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negated, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}
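
// An assumed example for the negation rule just above:
//   %n = sub i32 0, %x
//   %r = srem i32 %x, %n
// The divisor has the same magnitude as the dividend (or is 0, which would be
// UB anyway), so the remainder is always 0 and %r folds to 0.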

Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields undef.
static bool isUndefShift(Value *Amount) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> undef because it may shift by the bitwidth.
  if (isa<UndefValue>(C))
    return true;

  // Shifting by the bitwidth or more is undefined.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
    if (CI->getValue().getLimitedValue() >=
        CI->getType()->getScalarSizeInBits())
      return true;

  // If all lanes of a vector shift are undefined the whole shift is.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0, E = C->getType()->getVectorNumElements(); I != E; ++I)
      if (!isUndefShift(C->getAggregateElement(I)))
        return false;
    return true;
  }

  return false;
}
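
// For instance (hypothetical IR), "shl i32 %x, 32" shifts by the full bit
// width, so isUndefShift() reports it and the callers below fold the whole
// shift to undef rather than trying anything else.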

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
  // would be poison.
  Value *X;
  if (match(Op1, m_Zero()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return Op0;

  // Fold undefined shifts.
  if (isUndefShift(Op1))
    return UndefValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (Known.One.getLimitedValue() >= Known.getBitWidth())
    return UndefValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
  if (Known.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  return nullptr;
}

/// Given operands for an LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool isExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (match(Op0, m_Undef()))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    KnownBits Op0Known =
        computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyShift(Instruction::Shl, Op0, Op1, Q, MaxRecurse))
    return V;

  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (match(Op0, m_Undef()))
    return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());

  // (X >> A) << A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo &&
      match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;

  // shl nuw i8 C, %x -> C iff C has sign bit set.
  if (isNUW && match(Op0, m_Negative()))
    return Op0;
  // NOTE: could use computeKnownBits() / LazyValueInfo,
  // but the cost-benefit analysis suggests it isn't worth it.

  return nullptr;
}

Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}
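
// Hypothetical IR illustrating the "(X >> A) << A -> X" fold above:
//   %s = lshr exact i32 %x, 3
//   %r = shl i32 %s, 3
// The 'exact' flag guarantees no set bits were shifted out by the lshr, so
// shifting back left by the same amount reproduces %x exactly.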

/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // ((X << A) | Y) >> A -> X  if effective width of Y is not larger than A.
  // We can return X as we do in the above case since OR alters no bits in X.
  // SimplifyDemandedBits in InstCombine can do more general optimization for
  // bit manipulation. This pattern aims to provide opportunities for other
  // optimizers by supporting a simple but common case in InstSimplify.
  Value *Y;
  const APInt *ShRAmt, *ShLAmt;
  if (match(Op1, m_APInt(ShRAmt)) &&
      match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
      *ShRAmt == *ShLAmt) {
    const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    const unsigned Width = Op0->getType()->getScalarSizeInBits();
    const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
    if (ShRAmt->uge(EffWidthY))
      return X;
  }

  return nullptr;
}

Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
                              const SimplifyQuery &Q) {
  return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}

/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
                                    MaxRecurse))
    return V;

  // all ones >>a X -> -1
  // Do not return Op0 because it may contain undef elements if it's a vector.
  if (match(Op0, m_AllOnes()))
    return Constant::getAllOnesValue(Op0->getType());

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
                              const SimplifyQuery &Q) {
  return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
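
// Illustrative (assumed) case for the all-sign-bit rule above:
//   %e = sext i1 %b to i32        ; %e is either 0 or -1
//   %r = ashr i32 %e, %n
// Every bit of %e is a copy of the sign bit, so the arithmetic shift cannot
// change it and %r simplifies to %e.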
UnsignedICmp : ZeroICmp; 1451 1452 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0 1453 // A <=/>= B || (A - B) == 0 <--> A <=/>= B 1454 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE || 1455 UnsignedPred == ICmpInst::ICMP_UGE)) 1456 return IsAnd ? ZeroICmp : UnsignedICmp; 1457 } 1458 1459 // Given Y = (A - B) 1460 // Y >= A && Y != 0 --> Y >= A iff B != 0 1461 // Y < A || Y == 0 --> Y < A iff B != 0 1462 if (match(UnsignedICmp, 1463 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) { 1464 if (UnsignedICmp->getOperand(0) != Y) 1465 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred); 1466 1467 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd && 1468 EqPred == ICmpInst::ICMP_NE && 1469 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1470 return UnsignedICmp; 1471 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd && 1472 EqPred == ICmpInst::ICMP_EQ && 1473 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1474 return UnsignedICmp; 1475 } 1476 } 1477 1478 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) && 1479 ICmpInst::isUnsigned(UnsignedPred)) 1480 ; 1481 else if (match(UnsignedICmp, 1482 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) && 1483 ICmpInst::isUnsigned(UnsignedPred)) 1484 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred); 1485 else 1486 return nullptr; 1487 1488 // X < Y && Y != 0 --> X < Y 1489 // X < Y || Y != 0 --> Y != 0 1490 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE) 1491 return IsAnd ? UnsignedICmp : ZeroICmp; 1492 1493 // X <= Y && Y != 0 --> X <= Y iff X != 0 1494 // X <= Y || Y != 0 --> Y != 0 iff X != 0 1495 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE && 1496 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1497 return IsAnd ? UnsignedICmp : ZeroICmp; 1498 1499 // X >= Y && Y == 0 --> Y == 0 1500 // X >= Y || Y == 0 --> X >= Y 1501 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ) 1502 return IsAnd ? ZeroICmp : UnsignedICmp; 1503 1504 // X > Y && Y == 0 --> Y == 0 iff X != 0 1505 // X > Y || Y == 0 --> X > Y iff X != 0 1506 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ && 1507 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT)) 1508 return IsAnd ? ZeroICmp : UnsignedICmp; 1509 1510 // X < Y && Y == 0 --> false 1511 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ && 1512 IsAnd) 1513 return getFalse(UnsignedICmp->getType()); 1514 1515 // X >= Y || Y != 0 --> true 1516 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE && 1517 !IsAnd) 1518 return getTrue(UnsignedICmp->getType()); 1519 1520 return nullptr; 1521 } 1522 1523 /// Commuted variants are assumed to be handled by calling this function again 1524 /// with the parameters swapped. 1525 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1526 ICmpInst::Predicate Pred0, Pred1; 1527 Value *A ,*B; 1528 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1529 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1530 return nullptr; 1531 1532 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B). 1533 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1534 // can eliminate Op1 from this 'and'. 1535 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1536 return Op0; 1537 1538 // Check for any combination of predicates that are guaranteed to be disjoint. 
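  // For example: (icmp slt A, B) & (icmp sgt A, B) --> false, since A cannot
  // be both strictly less than and strictly greater than B.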
1539 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1540 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) || 1541 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) || 1542 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)) 1543 return getFalse(Op0->getType()); 1544 1545 return nullptr; 1546 } 1547 1548 /// Commuted variants are assumed to be handled by calling this function again 1549 /// with the parameters swapped. 1550 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) { 1551 ICmpInst::Predicate Pred0, Pred1; 1552 Value *A ,*B; 1553 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) || 1554 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B)))) 1555 return nullptr; 1556 1557 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B). 1558 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we 1559 // can eliminate Op0 from this 'or'. 1560 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1)) 1561 return Op1; 1562 1563 // Check for any combination of predicates that cover the entire range of 1564 // possibilities. 1565 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) || 1566 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) || 1567 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) || 1568 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE)) 1569 return getTrue(Op0->getType()); 1570 1571 return nullptr; 1572 } 1573 1574 /// Test if a pair of compares with a shared operand and 2 constants has an 1575 /// empty set intersection, full set union, or if one compare is a superset of 1576 /// the other. 1577 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, 1578 bool IsAnd) { 1579 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)). 1580 if (Cmp0->getOperand(0) != Cmp1->getOperand(0)) 1581 return nullptr; 1582 1583 const APInt *C0, *C1; 1584 if (!match(Cmp0->getOperand(1), m_APInt(C0)) || 1585 !match(Cmp1->getOperand(1), m_APInt(C1))) 1586 return nullptr; 1587 1588 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0); 1589 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1); 1590 1591 // For and-of-compares, check if the intersection is empty: 1592 // (icmp X, C0) && (icmp X, C1) --> empty set --> false 1593 if (IsAnd && Range0.intersectWith(Range1).isEmptySet()) 1594 return getFalse(Cmp0->getType()); 1595 1596 // For or-of-compares, check if the union is full: 1597 // (icmp X, C0) || (icmp X, C1) --> full set --> true 1598 if (!IsAnd && Range0.unionWith(Range1).isFullSet()) 1599 return getTrue(Cmp0->getType()); 1600 1601 // Is one range a superset of the other? 1602 // If this is and-of-compares, take the smaller set: 1603 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42 1604 // If this is or-of-compares, take the larger set: 1605 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4 1606 if (Range0.contains(Range1)) 1607 return IsAnd ? Cmp1 : Cmp0; 1608 if (Range1.contains(Range0)) 1609 return IsAnd ? 
Cmp0 : Cmp1; 1610 1611 return nullptr; 1612 } 1613 1614 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1, 1615 bool IsAnd) { 1616 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate(); 1617 if (!match(Cmp0->getOperand(1), m_Zero()) || 1618 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1) 1619 return nullptr; 1620 1621 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ)) 1622 return nullptr; 1623 1624 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)". 1625 Value *X = Cmp0->getOperand(0); 1626 Value *Y = Cmp1->getOperand(0); 1627 1628 // If one of the compares is a masked version of a (not) null check, then 1629 // that compare implies the other, so we eliminate the other. Optionally, look 1630 // through a pointer-to-int cast to match a null check of a pointer type. 1631 1632 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0 1633 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0 1634 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0 1635 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0 1636 if (match(Y, m_c_And(m_Specific(X), m_Value())) || 1637 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value()))) 1638 return Cmp1; 1639 1640 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0 1641 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0 1642 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0 1643 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0 1644 if (match(X, m_c_And(m_Specific(Y), m_Value())) || 1645 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value()))) 1646 return Cmp0; 1647 1648 return nullptr; 1649 } 1650 1651 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, 1652 const InstrInfoQuery &IIQ) { 1653 // (icmp (add V, C0), C1) & (icmp V, C0) 1654 ICmpInst::Predicate Pred0, Pred1; 1655 const APInt *C0, *C1; 1656 Value *V; 1657 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1658 return nullptr; 1659 1660 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1661 return nullptr; 1662 1663 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0)); 1664 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1665 return nullptr; 1666 1667 Type *ITy = Op0->getType(); 1668 bool isNSW = IIQ.hasNoSignedWrap(AddInst); 1669 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); 1670 1671 const APInt Delta = *C1 - *C0; 1672 if (C0->isStrictlyPositive()) { 1673 if (Delta == 2) { 1674 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT) 1675 return getFalse(ITy); 1676 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1677 return getFalse(ITy); 1678 } 1679 if (Delta == 1) { 1680 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT) 1681 return getFalse(ITy); 1682 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW) 1683 return getFalse(ITy); 1684 } 1685 } 1686 if (C0->getBoolValue() && isNUW) { 1687 if (Delta == 2) 1688 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT) 1689 return getFalse(ITy); 1690 if (Delta == 1) 1691 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT) 1692 return getFalse(ITy); 1693 } 1694 1695 return nullptr; 1696 } 1697 1698 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, 1699 const SimplifyQuery &Q) { 1700 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q)) 1701 return X; 1702 
if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q)) 1703 return X; 1704 1705 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1)) 1706 return X; 1707 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0)) 1708 return X; 1709 1710 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true)) 1711 return X; 1712 1713 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true)) 1714 return X; 1715 1716 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ)) 1717 return X; 1718 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ)) 1719 return X; 1720 1721 return nullptr; 1722 } 1723 1724 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, 1725 const InstrInfoQuery &IIQ) { 1726 // (icmp (add V, C0), C1) | (icmp V, C0) 1727 ICmpInst::Predicate Pred0, Pred1; 1728 const APInt *C0, *C1; 1729 Value *V; 1730 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1)))) 1731 return nullptr; 1732 1733 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value()))) 1734 return nullptr; 1735 1736 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0)); 1737 if (AddInst->getOperand(1) != Op1->getOperand(1)) 1738 return nullptr; 1739 1740 Type *ITy = Op0->getType(); 1741 bool isNSW = IIQ.hasNoSignedWrap(AddInst); 1742 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst); 1743 1744 const APInt Delta = *C1 - *C0; 1745 if (C0->isStrictlyPositive()) { 1746 if (Delta == 2) { 1747 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE) 1748 return getTrue(ITy); 1749 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1750 return getTrue(ITy); 1751 } 1752 if (Delta == 1) { 1753 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE) 1754 return getTrue(ITy); 1755 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW) 1756 return getTrue(ITy); 1757 } 1758 } 1759 if (C0->getBoolValue() && isNUW) { 1760 if (Delta == 2) 1761 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE) 1762 return getTrue(ITy); 1763 if (Delta == 1) 1764 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE) 1765 return getTrue(ITy); 1766 } 1767 1768 return nullptr; 1769 } 1770 1771 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, 1772 const SimplifyQuery &Q) { 1773 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q)) 1774 return X; 1775 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q)) 1776 return X; 1777 1778 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1)) 1779 return X; 1780 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0)) 1781 return X; 1782 1783 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false)) 1784 return X; 1785 1786 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false)) 1787 return X; 1788 1789 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ)) 1790 return X; 1791 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ)) 1792 return X; 1793 1794 return nullptr; 1795 } 1796 1797 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI, 1798 FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) { 1799 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1); 1800 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1); 1801 if (LHS0->getType() != RHS0->getType()) 1802 return nullptr; 1803 1804 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); 1805 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) || 1806 (PredL == FCmpInst::FCMP_UNO && 
PredR == FCmpInst::FCMP_UNO && !IsAnd)) { 1807 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y 1808 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X 1809 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y 1810 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X 1811 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y 1812 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X 1813 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y 1814 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X 1815 if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) || 1816 (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1))) 1817 return RHS; 1818 1819 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y 1820 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X 1821 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y 1822 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X 1823 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y 1824 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X 1825 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y 1826 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X 1827 if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) || 1828 (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1))) 1829 return LHS; 1830 } 1831 1832 return nullptr; 1833 } 1834 1835 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, 1836 Value *Op0, Value *Op1, bool IsAnd) { 1837 // Look through casts of the 'and' operands to find compares. 1838 auto *Cast0 = dyn_cast<CastInst>(Op0); 1839 auto *Cast1 = dyn_cast<CastInst>(Op1); 1840 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() && 1841 Cast0->getSrcTy() == Cast1->getSrcTy()) { 1842 Op0 = Cast0->getOperand(0); 1843 Op1 = Cast1->getOperand(0); 1844 } 1845 1846 Value *V = nullptr; 1847 auto *ICmp0 = dyn_cast<ICmpInst>(Op0); 1848 auto *ICmp1 = dyn_cast<ICmpInst>(Op1); 1849 if (ICmp0 && ICmp1) 1850 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q) 1851 : simplifyOrOfICmps(ICmp0, ICmp1, Q); 1852 1853 auto *FCmp0 = dyn_cast<FCmpInst>(Op0); 1854 auto *FCmp1 = dyn_cast<FCmpInst>(Op1); 1855 if (FCmp0 && FCmp1) 1856 V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd); 1857 1858 if (!V) 1859 return nullptr; 1860 if (!Cast0) 1861 return V; 1862 1863 // If we looked through casts, we can only handle a constant simplification 1864 // because we are not allowed to create a cast instruction here. 1865 if (auto *C = dyn_cast<Constant>(V)) 1866 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType()); 1867 1868 return nullptr; 1869 } 1870 1871 /// Check that the Op1 is in expected form, i.e.: 1872 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???) 1873 /// %Op1 = extractvalue { i4, i1 } %Agg, 1 1874 static bool omitCheckForZeroBeforeMulWithOverflowInternal(Value *Op1, 1875 Value *X) { 1876 auto *Extract = dyn_cast<ExtractValueInst>(Op1); 1877 // We should only be extracting the overflow bit. 1878 if (!Extract || !Extract->getIndices().equals(1)) 1879 return false; 1880 Value *Agg = Extract->getAggregateOperand(); 1881 // This should be a multiplication-with-overflow intrinsic. 1882 if (!match(Agg, m_CombineOr(m_Intrinsic<Intrinsic::umul_with_overflow>(), 1883 m_Intrinsic<Intrinsic::smul_with_overflow>()))) 1884 return false; 1885 // One of its multipliers should be the value we checked for zero before. 
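  // The multiplier may appear as either argument of the intrinsic, so check
  // both positions.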
1886   if (!match(Agg, m_CombineOr(m_Argument<0>(m_Specific(X)),
1887                               m_Argument<1>(m_Specific(X)))))
1888     return false;
1889   return true;
1890 }
1891
1892 /// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
1893 /// other form of check, e.g. one that was using division; it may have been
1894 /// guarded against division-by-zero. We can drop that check now.
1895 /// Look for:
1896 /// %Op0 = icmp ne i4 %X, 0
1897 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
1898 /// %Op1 = extractvalue { i4, i1 } %Agg, 1
1899 /// %??? = and i1 %Op0, %Op1
1900 /// We can just return %Op1
1901 static Value *omitCheckForZeroBeforeMulWithOverflow(Value *Op0, Value *Op1) {
1902   ICmpInst::Predicate Pred;
1903   Value *X;
1904   if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
1905       Pred != ICmpInst::Predicate::ICMP_NE)
1906     return nullptr;
1907   // Is Op1 in expected form?
1908   if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
1909     return nullptr;
1910   // Can omit 'and', and just return the overflow bit.
1911   return Op1;
1912 }
1913
1914 /// The @llvm.[us]mul.with.overflow intrinsic could have been folded from some
1915 /// other form of check, e.g. one that was using division; it may have been
1916 /// guarded against division-by-zero. We can drop that check now.
1917 /// Look for:
1918 /// %Op0 = icmp eq i4 %X, 0
1919 /// %Agg = tail call { i4, i1 } @llvm.[us]mul.with.overflow.i4(i4 %X, i4 %???)
1920 /// %Op1 = extractvalue { i4, i1 } %Agg, 1
1921 /// %NotOp1 = xor i1 %Op1, true
1922 /// %or = or i1 %Op0, %NotOp1
1923 /// We can just return %NotOp1
1924 static Value *omitCheckForZeroBeforeInvertedMulWithOverflow(Value *Op0,
1925                                                             Value *NotOp1) {
1926   ICmpInst::Predicate Pred;
1927   Value *X;
1928   if (!match(Op0, m_ICmp(Pred, m_Value(X), m_Zero())) ||
1929       Pred != ICmpInst::Predicate::ICMP_EQ)
1930     return nullptr;
1931   // We expect the other operand of the 'or' to be a 'not'.
1932   Value *Op1;
1933   if (!match(NotOp1, m_Not(m_Value(Op1))))
1934     return nullptr;
1935   // Is Op1 in expected form?
1936   if (!omitCheckForZeroBeforeMulWithOverflowInternal(Op1, X))
1937     return nullptr;
1938   // Can omit the 'or', and just return the inverted overflow bit.
1939   return NotOp1;
1940 }
1941
1942 /// Given operands for an And, see if we can fold the result.
1943 /// If not, this returns null.
1944 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1945                               unsigned MaxRecurse) {
1946   if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
1947     return C;
1948
1949   // X & undef -> 0
1950   if (match(Op1, m_Undef()))
1951     return Constant::getNullValue(Op0->getType());
1952
1953   // X & X = X
1954   if (Op0 == Op1)
1955     return Op0;
1956
1957   // X & 0 = 0
1958   if (match(Op1, m_Zero()))
1959     return Constant::getNullValue(Op0->getType());
1960
1961   // X & -1 = X
1962   if (match(Op1, m_AllOnes()))
1963     return Op0;
1964
1965   // A & ~A = ~A & A = 0
1966   if (match(Op0, m_Not(m_Specific(Op1))) ||
1967       match(Op1, m_Not(m_Specific(Op0))))
1968     return Constant::getNullValue(Op0->getType());
1969
1970   // (A | ?) & A = A
1971   if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
1972     return Op1;
1973
1974   // A & (A | ?) = A
1975   if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
1976     return Op0;
1977
1978   // A mask that only clears known zeros of a shifted value is a no-op.
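  // For example (illustrative): and (shl i8 %x, 3), -8 --> shl i8 %x, 3,
  // because the low 3 bits cleared by the mask are already known zero after
  // the shift.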
1979 Value *X; 1980 const APInt *Mask; 1981 const APInt *ShAmt; 1982 if (match(Op1, m_APInt(Mask))) { 1983 // If all bits in the inverted and shifted mask are clear: 1984 // and (shl X, ShAmt), Mask --> shl X, ShAmt 1985 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) && 1986 (~(*Mask)).lshr(*ShAmt).isNullValue()) 1987 return Op0; 1988 1989 // If all bits in the inverted and shifted mask are clear: 1990 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt 1991 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) && 1992 (~(*Mask)).shl(*ShAmt).isNullValue()) 1993 return Op0; 1994 } 1995 1996 // If we have a multiplication overflow check that is being 'and'ed with a 1997 // check that one of the multipliers is not zero, we can omit the 'and', and 1998 // only keep the overflow check. 1999 if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op0, Op1)) 2000 return V; 2001 if (Value *V = omitCheckForZeroBeforeMulWithOverflow(Op1, Op0)) 2002 return V; 2003 2004 // A & (-A) = A if A is a power of two or zero. 2005 if (match(Op0, m_Neg(m_Specific(Op1))) || 2006 match(Op1, m_Neg(m_Specific(Op0)))) { 2007 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 2008 Q.DT)) 2009 return Op0; 2010 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, 2011 Q.DT)) 2012 return Op1; 2013 } 2014 2015 // This is a similar pattern used for checking if a value is a power-of-2: 2016 // (A - 1) & A --> 0 (if A is a power-of-2 or 0) 2017 // A & (A - 1) --> 0 (if A is a power-of-2 or 0) 2018 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) && 2019 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) 2020 return Constant::getNullValue(Op1->getType()); 2021 if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) && 2022 isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT)) 2023 return Constant::getNullValue(Op0->getType()); 2024 2025 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true)) 2026 return V; 2027 2028 // Try some generic simplifications for associative operations. 2029 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, 2030 MaxRecurse)) 2031 return V; 2032 2033 // And distributes over Or. Try some generic simplifications based on this. 2034 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Or, 2035 Q, MaxRecurse)) 2036 return V; 2037 2038 // And distributes over Xor. Try some generic simplifications based on this. 2039 if (Value *V = ExpandBinOp(Instruction::And, Op0, Op1, Instruction::Xor, 2040 Q, MaxRecurse)) 2041 return V; 2042 2043 // If the operation is with the result of a select instruction, check whether 2044 // operating on either branch of the select always yields the same value. 2045 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) 2046 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q, 2047 MaxRecurse)) 2048 return V; 2049 2050 // If the operation is with the result of a phi instruction, check whether 2051 // operating on all incoming values of the phi always yields the same value. 2052 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 2053 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q, 2054 MaxRecurse)) 2055 return V; 2056 2057 // Assuming the effective width of Y is not larger than A, i.e. all bits 2058 // from X and Y are disjoint in (X << A) | Y, 2059 // if the mask of this AND op covers all bits of X or Y, while it covers 2060 // no bits from the other, we can bypass this AND op. 
E.g.,
2061   //    ((X << A) | Y) & Mask -> Y,
2062   //         if Mask = ((1 << effective_width_of(Y)) - 1)
2063   //    ((X << A) | Y) & Mask -> X << A,
2064   //         if Mask = ((1 << effective_width_of(X)) - 1) << A
2065   // SimplifyDemandedBits in InstCombine can optimize the general case.
2066   // This pattern aims to help other passes for a common case.
2067   Value *Y, *XShifted;
2068   if (match(Op1, m_APInt(Mask)) &&
2069       match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2070                                      m_Value(XShifted)),
2071                         m_Value(Y)))) {
2072     const unsigned Width = Op0->getType()->getScalarSizeInBits();
2073     const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2074     const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2075     const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
2076     if (EffWidthY <= ShftCnt) {
2077       const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
2078                                                 Q.DT);
2079       const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
2080       const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2081       const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2082       // If the mask is extracting all bits from X or Y as is, we can skip
2083       // this AND op.
2084       if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2085         return Y;
2086       if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2087         return XShifted;
2088     }
2089   }
2090
2091   return nullptr;
2092 }
2093
2094 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2095   return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
2096 }
2097
2098 /// Given operands for an Or, see if we can fold the result.
2099 /// If not, this returns null.
2100 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2101                              unsigned MaxRecurse) {
2102   if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2103     return C;
2104
2105   // X | undef -> -1
2106   // X | -1 = -1
2107   // Do not return Op1 because it may contain undef elements if it's a vector.
2108   if (match(Op1, m_Undef()) || match(Op1, m_AllOnes()))
2109     return Constant::getAllOnesValue(Op0->getType());
2110
2111   // X | X = X
2112   // X | 0 = X
2113   if (Op0 == Op1 || match(Op1, m_Zero()))
2114     return Op0;
2115
2116   // A | ~A = ~A | A = -1
2117   if (match(Op0, m_Not(m_Specific(Op1))) ||
2118       match(Op1, m_Not(m_Specific(Op0))))
2119     return Constant::getAllOnesValue(Op0->getType());
2120
2121   // (A & ?) | A = A
2122   if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
2123     return Op1;
2124
2125   // A | (A & ?) = A
2126   if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
2127     return Op0;
2128
2129   // ~(A & ?) | A = -1
2130   if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
2131     return Constant::getAllOnesValue(Op1->getType());
2132
2133   // A | ~(A & ?) = -1
2134   if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value()))))
2135     return Constant::getAllOnesValue(Op0->getType());
2136
2137   Value *A, *B;
2138   // (A & ~B) | (A ^ B) -> (A ^ B)
2139   // (~B & A) | (A ^ B) -> (A ^ B)
2140   // (A & ~B) | (B ^ A) -> (B ^ A)
2141   // (~B & A) | (B ^ A) -> (B ^ A)
2142   if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
2143       (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2144        match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2145     return Op1;
2146
2147   // Commute the 'or' operands.
2148   // (A ^ B) | (A & ~B) -> (A ^ B)
2149   // (A ^ B) | (~B & A) -> (A ^ B)
2150   // (B ^ A) | (A & ~B) -> (B ^ A)
2151   // (B ^ A) | (~B & A) -> (B ^ A)
2152   if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2153       (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2154        match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2155     return Op0;
2156
2157   // (A & B) | (~A ^ B) -> (~A ^ B)
2158   // (B & A) | (~A ^ B) -> (~A ^ B)
2159   // (A & B) | (B ^ ~A) -> (B ^ ~A)
2160   // (B & A) | (B ^ ~A) -> (B ^ ~A)
2161   if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2162       (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2163        match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2164     return Op1;
2165
2166   // (~A ^ B) | (A & B) -> (~A ^ B)
2167   // (~A ^ B) | (B & A) -> (~A ^ B)
2168   // (B ^ ~A) | (A & B) -> (B ^ ~A)
2169   // (B ^ ~A) | (B & A) -> (B ^ ~A)
2170   if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2171       (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2172        match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2173     return Op0;
2174
2175   if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2176     return V;
2177
2178   // If we have an inverted multiplication overflow check that is being 'or'ed
2179   // with a check that one of the multipliers is zero, we can omit the 'or', and
2180   // only keep the inverted overflow check.
2181   if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op0, Op1))
2182     return V;
2183   if (Value *V = omitCheckForZeroBeforeInvertedMulWithOverflow(Op1, Op0))
2184     return V;
2185
2186   // Try some generic simplifications for associative operations.
2187   if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
2188                                           MaxRecurse))
2189     return V;
2190
2191   // Or distributes over And. Try some generic simplifications based on this.
2192   if (Value *V = ExpandBinOp(Instruction::Or, Op0, Op1, Instruction::And, Q,
2193                              MaxRecurse))
2194     return V;
2195
2196   // If the operation is with the result of a select instruction, check whether
2197   // operating on either branch of the select always yields the same value.
2198   if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
2199     if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
2200                                          MaxRecurse))
2201       return V;
2202
2203   // (A & C1)|(B & C2)
2204   const APInt *C1, *C2;
2205   if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2206       match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2207     if (*C1 == ~*C2) {
2208       // (A & C1)|(B & C2)
2209       // If we have: ((V + N) & C1) | (V & C2)
2210       // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2211       // replace with V+N.
2212       Value *N;
2213       if (C2->isMask() && // C2 == 0+1+
2214           match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2215         // Add commutes, try both ways.
2216         if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2217           return A;
2218       }
2219       // Or commutes, try both ways.
2220       if (C1->isMask() &&
2221           match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2222         // Add commutes, try both ways.
2223         if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2224           return B;
2225       }
2226     }
2227   }
2228
2229   // If the operation is with the result of a phi instruction, check whether
2230   // operating on all incoming values of the phi always yields the same value.
2231 if (isa<PHINode>(Op0) || isa<PHINode>(Op1)) 2232 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse)) 2233 return V; 2234 2235 return nullptr; 2236 } 2237 2238 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 2239 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit); 2240 } 2241 2242 /// Given operands for a Xor, see if we can fold the result. 2243 /// If not, this returns null. 2244 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, 2245 unsigned MaxRecurse) { 2246 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q)) 2247 return C; 2248 2249 // A ^ undef -> undef 2250 if (match(Op1, m_Undef())) 2251 return Op1; 2252 2253 // A ^ 0 = A 2254 if (match(Op1, m_Zero())) 2255 return Op0; 2256 2257 // A ^ A = 0 2258 if (Op0 == Op1) 2259 return Constant::getNullValue(Op0->getType()); 2260 2261 // A ^ ~A = ~A ^ A = -1 2262 if (match(Op0, m_Not(m_Specific(Op1))) || 2263 match(Op1, m_Not(m_Specific(Op0)))) 2264 return Constant::getAllOnesValue(Op0->getType()); 2265 2266 // Try some generic simplifications for associative operations. 2267 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, 2268 MaxRecurse)) 2269 return V; 2270 2271 // Threading Xor over selects and phi nodes is pointless, so don't bother. 2272 // Threading over the select in "A ^ select(cond, B, C)" means evaluating 2273 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and 2274 // only if B and C are equal. If B and C are equal then (since we assume 2275 // that operands have already been simplified) "select(cond, B, C)" should 2276 // have been simplified to the common value of B and C already. Analysing 2277 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly 2278 // for threading over phi nodes. 2279 2280 return nullptr; 2281 } 2282 2283 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) { 2284 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit); 2285 } 2286 2287 2288 static Type *GetCompareTy(Value *Op) { 2289 return CmpInst::makeCmpResultType(Op->getType()); 2290 } 2291 2292 /// Rummage around inside V looking for something equivalent to the comparison 2293 /// "LHS Pred RHS". Return such a value if found, otherwise return null. 2294 /// Helper function for analyzing max/min idioms. 2295 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred, 2296 Value *LHS, Value *RHS) { 2297 SelectInst *SI = dyn_cast<SelectInst>(V); 2298 if (!SI) 2299 return nullptr; 2300 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition()); 2301 if (!Cmp) 2302 return nullptr; 2303 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1); 2304 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS) 2305 return Cmp; 2306 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) && 2307 LHS == CmpRHS && RHS == CmpLHS) 2308 return Cmp; 2309 return nullptr; 2310 } 2311 2312 // A significant optimization not implemented here is assuming that alloca 2313 // addresses are not equal to incoming argument values. They don't *alias*, 2314 // as we say, but that doesn't mean they aren't equal, so we take a 2315 // conservative approach. 2316 // 2317 // This is inspired in part by C++11 5.10p1: 2318 // "Two pointers of the same type compare equal if and only if they are both 2319 // null, both point to the same function, or both represent the same 2320 // address." 2321 // 2322 // This is pretty permissive. 
2323 //
2324 // It's also partly due to C11 6.5.9p6:
2325 // "Two pointers compare equal if and only if both are null pointers, both are
2326 // pointers to the same object (including a pointer to an object and a
2327 // subobject at its beginning) or function, both are pointers to one past the
2328 // last element of the same array object, or one is a pointer to one past the
2329 // end of one array object and the other is a pointer to the start of a
2330 // different array object that happens to immediately follow the first array
2331 // object in the address space."
2332 //
2333 // C11's version is more restrictive; however, there's no reason why an argument
2334 // couldn't be a one-past-the-end value for a stack object in the caller and be
2335 // equal to the beginning of a stack object in the callee.
2336 //
2337 // If the C and C++ standards are ever made sufficiently restrictive in this
2338 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2339 // this optimization.
2340 static Constant *
2341 computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
2342                    const DominatorTree *DT, CmpInst::Predicate Pred,
2343                    AssumptionCache *AC, const Instruction *CxtI,
2344                    const InstrInfoQuery &IIQ, Value *LHS, Value *RHS) {
2345   // First, skip past any trivial no-ops.
2346   LHS = LHS->stripPointerCasts();
2347   RHS = RHS->stripPointerCasts();
2348
2349   // A non-null pointer is not equal to a null pointer.
2350   if (llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2351                            IIQ.UseInstrInfo) &&
2352       isa<ConstantPointerNull>(RHS) &&
2353       (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
2354     return ConstantInt::get(GetCompareTy(LHS),
2355                             !CmpInst::isTrueWhenEqual(Pred));
2356
2357   // We can only fold certain predicates on pointer comparisons.
2358   switch (Pred) {
2359   default:
2360     return nullptr;
2361
2362     // Equality comparisons are easy to fold.
2363   case CmpInst::ICMP_EQ:
2364   case CmpInst::ICMP_NE:
2365     break;
2366
2367     // We can only handle unsigned relational comparisons because 'inbounds' on
2368     // a GEP only protects against unsigned wrapping.
2369   case CmpInst::ICMP_UGT:
2370   case CmpInst::ICMP_UGE:
2371   case CmpInst::ICMP_ULT:
2372   case CmpInst::ICMP_ULE:
2373     // However, we have to switch them to their signed variants to handle
2374     // negative indices from the base pointer.
2375     Pred = ICmpInst::getSignedPredicate(Pred);
2376     break;
2377   }
2378
2379   // Strip off any constant offsets so that we can reason about them.
2380   // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2381   // here and compare base addresses like AliasAnalysis does; however, there are
2382   // numerous hazards. AliasAnalysis and its utilities rely on special rules
2383   // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2384   // doesn't need to guarantee pointer inequality when it says NoAlias.
2385   Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2386   Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2387
2388   // If LHS and RHS are related via constant offsets to the same base
2389   // value, we can replace it with an icmp which just compares the offsets.
2390   if (LHS == RHS)
2391     return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2392
2393   // Various optimizations for (in)equality comparisons.
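  // For example (illustrative): an 'eq' comparison of two distinct allocas
  // folds to false (and the corresponding 'ne' to true) when the constant
  // offsets stay within the bounds of their allocations.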
2394   if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2395     // Different non-empty allocations that exist at the same time have
2396     // different addresses (if the program can tell). Global variables always
2397     // exist, so they always exist during the lifetime of each other and all
2398     // allocas. Two different allocas usually have different addresses...
2399     //
2400     // However, if there's an @llvm.stackrestore dynamically in between two
2401     // allocas, they may have the same address. It's tempting to reduce the
2402     // scope of the problem by only looking at *static* allocas here. That would
2403     // cover the majority of allocas while significantly reducing the likelihood
2404     // of having an @llvm.stackrestore pop up in the middle. However, it's not
2405     // actually impossible for an @llvm.stackrestore to pop up in the middle of
2406     // an entry block. Also, if we have a block that's not attached to a
2407     // function, we can't tell if it's "static" under the current definition.
2408     // Theoretically, this problem could be fixed by creating a new instruction
2409     // kind specifically for static allocas. Such a new instruction
2410     // could be required to be at the top of the entry block, thus preventing it
2411     // from being subject to a @llvm.stackrestore. Instcombine could even
2412     // convert regular allocas into these special allocas. It'd be nifty.
2413     // However, until then, this problem remains open.
2414     //
2415     // So, we'll assume that two non-empty allocas have different addresses
2416     // for now.
2417     //
2418     // With all that, if the offsets are within the bounds of their allocations
2419     // (and not one-past-the-end! so we can't use inbounds!), and their
2420     // allocations aren't the same, the pointers are not equal.
2421     //
2422     // Note that it's not necessary to check for LHS being a global variable
2423     // address, due to canonicalization and constant folding.
2424     if (isa<AllocaInst>(LHS) &&
2425         (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2426       ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2427       ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2428       uint64_t LHSSize, RHSSize;
2429       ObjectSizeOpts Opts;
2430       Opts.NullIsUnknownSize =
2431           NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
2432       if (LHSOffsetCI && RHSOffsetCI &&
2433           getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2434           getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2435         const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2436         const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2437         if (!LHSOffsetValue.isNegative() &&
2438             !RHSOffsetValue.isNegative() &&
2439             LHSOffsetValue.ult(LHSSize) &&
2440             RHSOffsetValue.ult(RHSSize)) {
2441           return ConstantInt::get(GetCompareTy(LHS),
2442                                   !CmpInst::isTrueWhenEqual(Pred));
2443         }
2444       }
2445
2446       // Repeat the above check but this time without depending on DataLayout
2447       // or being able to compute a precise size.
2448       if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2449           !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2450           LHSOffset->isNullValue() &&
2451           RHSOffset->isNullValue())
2452         return ConstantInt::get(GetCompareTy(LHS),
2453                                 !CmpInst::isTrueWhenEqual(Pred));
2454     }
2455
2456     // Even if a non-inbounds GEP occurs along the path we can still optimize
2457     // equality comparisons concerning the result. We avoid walking the whole
2458     // chain again by starting where the last calls to
2459     // stripAndComputeConstantOffsets left off and accumulate the offsets.
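    // E.g. (illustrative): comparing "getelementptr i8, i8* %p, i64 4" with
    // "getelementptr i8, i8* %p, i64 8" for equality folds to false once both
    // constant offsets are accumulated onto the shared base %p, even without
    // 'inbounds'.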
2460     Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2461     Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2462     if (LHS == RHS)
2463       return ConstantExpr::getICmp(Pred,
2464                                    ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2465                                    ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2466
2467     // If one side of the equality comparison must come from a noalias call
2468     // (meaning a system memory allocation function), and the other side must
2469     // come from a pointer that cannot overlap with dynamically-allocated
2470     // memory within the lifetime of the current function (allocas, byval
2471     // arguments, globals), then determine the comparison result here.
2472     SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2473     GetUnderlyingObjects(LHS, LHSUObjs, DL);
2474     GetUnderlyingObjects(RHS, RHSUObjs, DL);
2475
2476     // Is the set of underlying objects all noalias calls?
2477     auto IsNAC = [](ArrayRef<const Value *> Objects) {
2478       return all_of(Objects, isNoAliasCall);
2479     };
2480
2481     // Is the set of underlying objects all things which must be disjoint from
2482     // noalias calls? For allocas, we consider only static ones (dynamic
2483     // allocas might be transformed into calls to malloc not simultaneously
2484     // live with the compared-to allocation). For globals, we exclude symbols
2485     // that might be resolved lazily to symbols in another dynamically-loaded
2486     // library (and, thus, could be malloc'ed by the implementation).
2487     auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2488       return all_of(Objects, [](const Value *V) {
2489         if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2490           return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2491         if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2492           return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2493                   GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2494                  !GV->isThreadLocal();
2495         if (const Argument *A = dyn_cast<Argument>(V))
2496           return A->hasByValAttr();
2497         return false;
2498       });
2499     };
2500
2501     if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2502         (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2503       return ConstantInt::get(GetCompareTy(LHS),
2504                               !CmpInst::isTrueWhenEqual(Pred));
2505
2506     // Fold comparisons for a non-escaping pointer even if the allocation call
2507     // cannot be elided. We cannot fold a malloc comparison to null. Also, the
2508     // dynamic allocation call could be either of the operands.
2509     Value *MI = nullptr;
2510     if (isAllocLikeFn(LHS, TLI) &&
2511         llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2512       MI = LHS;
2513     else if (isAllocLikeFn(RHS, TLI) &&
2514              llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2515       MI = RHS;
2516     // FIXME: We should also fold the compare when the pointer escapes, but the
2517     // compare dominates the pointer escape.
2518     if (MI && !PointerMayBeCaptured(MI, true, true))
2519       return ConstantInt::get(GetCompareTy(LHS),
2520                               CmpInst::isFalseWhenEqual(Pred));
2521   }
2522
2523   // Otherwise, fail.
2524   return nullptr;
2525 }
2526
2527 /// Fold an icmp when its operands have i1 scalar type.
2528 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2529                                   Value *RHS, const SimplifyQuery &Q) {
2530   Type *ITy = GetCompareTy(LHS);  // The return type.
2531   Type *OpTy = LHS->getType();    // The operand type.
2532 if (!OpTy->isIntOrIntVectorTy(1)) 2533 return nullptr; 2534 2535 // A boolean compared to true/false can be simplified in 14 out of the 20 2536 // (10 predicates * 2 constants) possible combinations. Cases not handled here 2537 // require a 'not' of the LHS, so those must be transformed in InstCombine. 2538 if (match(RHS, m_Zero())) { 2539 switch (Pred) { 2540 case CmpInst::ICMP_NE: // X != 0 -> X 2541 case CmpInst::ICMP_UGT: // X >u 0 -> X 2542 case CmpInst::ICMP_SLT: // X <s 0 -> X 2543 return LHS; 2544 2545 case CmpInst::ICMP_ULT: // X <u 0 -> false 2546 case CmpInst::ICMP_SGT: // X >s 0 -> false 2547 return getFalse(ITy); 2548 2549 case CmpInst::ICMP_UGE: // X >=u 0 -> true 2550 case CmpInst::ICMP_SLE: // X <=s 0 -> true 2551 return getTrue(ITy); 2552 2553 default: break; 2554 } 2555 } else if (match(RHS, m_One())) { 2556 switch (Pred) { 2557 case CmpInst::ICMP_EQ: // X == 1 -> X 2558 case CmpInst::ICMP_UGE: // X >=u 1 -> X 2559 case CmpInst::ICMP_SLE: // X <=s -1 -> X 2560 return LHS; 2561 2562 case CmpInst::ICMP_UGT: // X >u 1 -> false 2563 case CmpInst::ICMP_SLT: // X <s -1 -> false 2564 return getFalse(ITy); 2565 2566 case CmpInst::ICMP_ULE: // X <=u 1 -> true 2567 case CmpInst::ICMP_SGE: // X >=s -1 -> true 2568 return getTrue(ITy); 2569 2570 default: break; 2571 } 2572 } 2573 2574 switch (Pred) { 2575 default: 2576 break; 2577 case ICmpInst::ICMP_UGE: 2578 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false)) 2579 return getTrue(ITy); 2580 break; 2581 case ICmpInst::ICMP_SGE: 2582 /// For signed comparison, the values for an i1 are 0 and -1 2583 /// respectively. This maps into a truth table of: 2584 /// LHS | RHS | LHS >=s RHS | LHS implies RHS 2585 /// 0 | 0 | 1 (0 >= 0) | 1 2586 /// 0 | 1 | 1 (0 >= -1) | 1 2587 /// 1 | 0 | 0 (-1 >= 0) | 0 2588 /// 1 | 1 | 1 (-1 >= -1) | 1 2589 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2590 return getTrue(ITy); 2591 break; 2592 case ICmpInst::ICMP_ULE: 2593 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false)) 2594 return getTrue(ITy); 2595 break; 2596 } 2597 2598 return nullptr; 2599 } 2600 2601 /// Try hard to fold icmp with zero RHS because this is a common case. 2602 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS, 2603 Value *RHS, const SimplifyQuery &Q) { 2604 if (!match(RHS, m_Zero())) 2605 return nullptr; 2606 2607 Type *ITy = GetCompareTy(LHS); // The return type. 
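  // With a zero RHS the unsigned extremes fold unconditionally, e.g.
  // "icmp ult %x, 0" is always false and "icmp uge %x, 0" is always true; the
  // remaining cases below consult known bits / known-non-zero facts about LHS.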
2608 switch (Pred) { 2609 default: 2610 llvm_unreachable("Unknown ICmp predicate!"); 2611 case ICmpInst::ICMP_ULT: 2612 return getFalse(ITy); 2613 case ICmpInst::ICMP_UGE: 2614 return getTrue(ITy); 2615 case ICmpInst::ICMP_EQ: 2616 case ICmpInst::ICMP_ULE: 2617 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) 2618 return getFalse(ITy); 2619 break; 2620 case ICmpInst::ICMP_NE: 2621 case ICmpInst::ICMP_UGT: 2622 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) 2623 return getTrue(ITy); 2624 break; 2625 case ICmpInst::ICMP_SLT: { 2626 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2627 if (LHSKnown.isNegative()) 2628 return getTrue(ITy); 2629 if (LHSKnown.isNonNegative()) 2630 return getFalse(ITy); 2631 break; 2632 } 2633 case ICmpInst::ICMP_SLE: { 2634 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2635 if (LHSKnown.isNegative()) 2636 return getTrue(ITy); 2637 if (LHSKnown.isNonNegative() && 2638 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2639 return getFalse(ITy); 2640 break; 2641 } 2642 case ICmpInst::ICMP_SGE: { 2643 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2644 if (LHSKnown.isNegative()) 2645 return getFalse(ITy); 2646 if (LHSKnown.isNonNegative()) 2647 return getTrue(ITy); 2648 break; 2649 } 2650 case ICmpInst::ICMP_SGT: { 2651 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2652 if (LHSKnown.isNegative()) 2653 return getFalse(ITy); 2654 if (LHSKnown.isNonNegative() && 2655 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) 2656 return getTrue(ITy); 2657 break; 2658 } 2659 } 2660 2661 return nullptr; 2662 } 2663 2664 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS, 2665 Value *RHS, const InstrInfoQuery &IIQ) { 2666 Type *ITy = GetCompareTy(RHS); // The return type. 2667 2668 Value *X; 2669 // Sign-bit checks can be optimized to true/false after unsigned 2670 // floating-point casts: 2671 // icmp slt (bitcast (uitofp X)), 0 --> false 2672 // icmp sgt (bitcast (uitofp X)), -1 --> true 2673 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) { 2674 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero())) 2675 return ConstantInt::getFalse(ITy); 2676 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes())) 2677 return ConstantInt::getTrue(ITy); 2678 } 2679 2680 const APInt *C; 2681 if (!match(RHS, m_APInt(C))) 2682 return nullptr; 2683 2684 // Rule out tautological comparisons (eg., ult 0 or uge 0). 2685 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C); 2686 if (RHS_CR.isEmptySet()) 2687 return ConstantInt::getFalse(ITy); 2688 if (RHS_CR.isFullSet()) 2689 return ConstantInt::getTrue(ITy); 2690 2691 ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo); 2692 if (!LHS_CR.isFullSet()) { 2693 if (RHS_CR.contains(LHS_CR)) 2694 return ConstantInt::getTrue(ITy); 2695 if (RHS_CR.inverse().contains(LHS_CR)) 2696 return ConstantInt::getFalse(ITy); 2697 } 2698 2699 return nullptr; 2700 } 2701 2702 /// TODO: A large part of this logic is duplicated in InstCombine's 2703 /// foldICmpBinOp(). We should be able to share that and avoid the code 2704 /// duplication. 2705 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS, 2706 Value *RHS, const SimplifyQuery &Q, 2707 unsigned MaxRecurse) { 2708 Type *ITy = GetCompareTy(LHS); // The return type. 
2709 2710 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS); 2711 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS); 2712 if (MaxRecurse && (LBO || RBO)) { 2713 // Analyze the case when either LHS or RHS is an add instruction. 2714 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr; 2715 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null). 2716 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false; 2717 if (LBO && LBO->getOpcode() == Instruction::Add) { 2718 A = LBO->getOperand(0); 2719 B = LBO->getOperand(1); 2720 NoLHSWrapProblem = 2721 ICmpInst::isEquality(Pred) || 2722 (CmpInst::isUnsigned(Pred) && 2723 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) || 2724 (CmpInst::isSigned(Pred) && 2725 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO))); 2726 } 2727 if (RBO && RBO->getOpcode() == Instruction::Add) { 2728 C = RBO->getOperand(0); 2729 D = RBO->getOperand(1); 2730 NoRHSWrapProblem = 2731 ICmpInst::isEquality(Pred) || 2732 (CmpInst::isUnsigned(Pred) && 2733 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) || 2734 (CmpInst::isSigned(Pred) && 2735 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO))); 2736 } 2737 2738 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow. 2739 if ((A == RHS || B == RHS) && NoLHSWrapProblem) 2740 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A, 2741 Constant::getNullValue(RHS->getType()), Q, 2742 MaxRecurse - 1)) 2743 return V; 2744 2745 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow. 2746 if ((C == LHS || D == LHS) && NoRHSWrapProblem) 2747 if (Value *V = 2748 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()), 2749 C == LHS ? D : C, Q, MaxRecurse - 1)) 2750 return V; 2751 2752 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow. 2753 if (A && C && (A == C || A == D || B == C || B == D) && NoLHSWrapProblem && 2754 NoRHSWrapProblem) { 2755 // Determine Y and Z in the form icmp (X+Y), (X+Z). 2756 Value *Y, *Z; 2757 if (A == C) { 2758 // C + B == C + D -> B == D 2759 Y = B; 2760 Z = D; 2761 } else if (A == D) { 2762 // D + B == C + D -> B == C 2763 Y = B; 2764 Z = C; 2765 } else if (B == C) { 2766 // A + C == C + D -> A == D 2767 Y = A; 2768 Z = D; 2769 } else { 2770 assert(B == D); 2771 // A + D == C + D -> A == C 2772 Y = A; 2773 Z = C; 2774 } 2775 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1)) 2776 return V; 2777 } 2778 } 2779 2780 { 2781 Value *Y = nullptr; 2782 // icmp pred (or X, Y), X 2783 if (LBO && match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) { 2784 if (Pred == ICmpInst::ICMP_ULT) 2785 return getFalse(ITy); 2786 if (Pred == ICmpInst::ICMP_UGE) 2787 return getTrue(ITy); 2788 2789 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) { 2790 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2791 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2792 if (RHSKnown.isNonNegative() && YKnown.isNegative()) 2793 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy); 2794 if (RHSKnown.isNegative() || YKnown.isNonNegative()) 2795 return Pred == ICmpInst::ICMP_SLT ? 
getFalse(ITy) : getTrue(ITy); 2796 } 2797 } 2798 // icmp pred X, (or X, Y) 2799 if (RBO && match(RBO, m_c_Or(m_Value(Y), m_Specific(LHS)))) { 2800 if (Pred == ICmpInst::ICMP_ULE) 2801 return getTrue(ITy); 2802 if (Pred == ICmpInst::ICMP_UGT) 2803 return getFalse(ITy); 2804 2805 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SLE) { 2806 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2807 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2808 if (LHSKnown.isNonNegative() && YKnown.isNegative()) 2809 return Pred == ICmpInst::ICMP_SGT ? getTrue(ITy) : getFalse(ITy); 2810 if (LHSKnown.isNegative() || YKnown.isNonNegative()) 2811 return Pred == ICmpInst::ICMP_SGT ? getFalse(ITy) : getTrue(ITy); 2812 } 2813 } 2814 } 2815 2816 // icmp pred (and X, Y), X 2817 if (LBO && match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) { 2818 if (Pred == ICmpInst::ICMP_UGT) 2819 return getFalse(ITy); 2820 if (Pred == ICmpInst::ICMP_ULE) 2821 return getTrue(ITy); 2822 } 2823 // icmp pred X, (and X, Y) 2824 if (RBO && match(RBO, m_c_And(m_Value(), m_Specific(LHS)))) { 2825 if (Pred == ICmpInst::ICMP_UGE) 2826 return getTrue(ITy); 2827 if (Pred == ICmpInst::ICMP_ULT) 2828 return getFalse(ITy); 2829 } 2830 2831 // 0 - (zext X) pred C 2832 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) { 2833 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) { 2834 if (RHSC->getValue().isStrictlyPositive()) { 2835 if (Pred == ICmpInst::ICMP_SLT) 2836 return ConstantInt::getTrue(RHSC->getContext()); 2837 if (Pred == ICmpInst::ICMP_SGE) 2838 return ConstantInt::getFalse(RHSC->getContext()); 2839 if (Pred == ICmpInst::ICMP_EQ) 2840 return ConstantInt::getFalse(RHSC->getContext()); 2841 if (Pred == ICmpInst::ICMP_NE) 2842 return ConstantInt::getTrue(RHSC->getContext()); 2843 } 2844 if (RHSC->getValue().isNonNegative()) { 2845 if (Pred == ICmpInst::ICMP_SLE) 2846 return ConstantInt::getTrue(RHSC->getContext()); 2847 if (Pred == ICmpInst::ICMP_SGT) 2848 return ConstantInt::getFalse(RHSC->getContext()); 2849 } 2850 } 2851 } 2852 2853 // icmp pred (urem X, Y), Y 2854 if (LBO && match(LBO, m_URem(m_Value(), m_Specific(RHS)))) { 2855 switch (Pred) { 2856 default: 2857 break; 2858 case ICmpInst::ICMP_SGT: 2859 case ICmpInst::ICMP_SGE: { 2860 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2861 if (!Known.isNonNegative()) 2862 break; 2863 LLVM_FALLTHROUGH; 2864 } 2865 case ICmpInst::ICMP_EQ: 2866 case ICmpInst::ICMP_UGT: 2867 case ICmpInst::ICMP_UGE: 2868 return getFalse(ITy); 2869 case ICmpInst::ICMP_SLT: 2870 case ICmpInst::ICMP_SLE: { 2871 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2872 if (!Known.isNonNegative()) 2873 break; 2874 LLVM_FALLTHROUGH; 2875 } 2876 case ICmpInst::ICMP_NE: 2877 case ICmpInst::ICMP_ULT: 2878 case ICmpInst::ICMP_ULE: 2879 return getTrue(ITy); 2880 } 2881 } 2882 2883 // icmp pred X, (urem Y, X) 2884 if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) { 2885 switch (Pred) { 2886 default: 2887 break; 2888 case ICmpInst::ICMP_SGT: 2889 case ICmpInst::ICMP_SGE: { 2890 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2891 if (!Known.isNonNegative()) 2892 break; 2893 LLVM_FALLTHROUGH; 2894 } 2895 case ICmpInst::ICMP_NE: 2896 case ICmpInst::ICMP_UGT: 2897 case ICmpInst::ICMP_UGE: 2898 return getTrue(ITy); 2899 case ICmpInst::ICMP_SLT: 2900 case ICmpInst::ICMP_SLE: { 2901 KnownBits Known = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT); 2902 if (!Known.isNonNegative()) 
2903 break; 2904 LLVM_FALLTHROUGH; 2905 } 2906 case ICmpInst::ICMP_EQ: 2907 case ICmpInst::ICMP_ULT: 2908 case ICmpInst::ICMP_ULE: 2909 return getFalse(ITy); 2910 } 2911 } 2912 2913 // x >> y <=u x 2914 // x udiv y <=u x. 2915 if (LBO && (match(LBO, m_LShr(m_Specific(RHS), m_Value())) || 2916 match(LBO, m_UDiv(m_Specific(RHS), m_Value())))) { 2917 // icmp pred (X op Y), X 2918 if (Pred == ICmpInst::ICMP_UGT) 2919 return getFalse(ITy); 2920 if (Pred == ICmpInst::ICMP_ULE) 2921 return getTrue(ITy); 2922 } 2923 2924 // x >=u x >> y 2925 // x >=u x udiv y. 2926 if (RBO && (match(RBO, m_LShr(m_Specific(LHS), m_Value())) || 2927 match(RBO, m_UDiv(m_Specific(LHS), m_Value())))) { 2928 // icmp pred X, (X op Y) 2929 if (Pred == ICmpInst::ICMP_ULT) 2930 return getFalse(ITy); 2931 if (Pred == ICmpInst::ICMP_UGE) 2932 return getTrue(ITy); 2933 } 2934 2935 // handle: 2936 // CI2 << X == CI 2937 // CI2 << X != CI 2938 // 2939 // where CI2 is a power of 2 and CI isn't 2940 if (auto *CI = dyn_cast<ConstantInt>(RHS)) { 2941 const APInt *CI2Val, *CIVal = &CI->getValue(); 2942 if (LBO && match(LBO, m_Shl(m_APInt(CI2Val), m_Value())) && 2943 CI2Val->isPowerOf2()) { 2944 if (!CIVal->isPowerOf2()) { 2945 // CI2 << X can equal zero in some circumstances, 2946 // this simplification is unsafe if CI is zero. 2947 // 2948 // We know it is safe if: 2949 // - The shift is nsw, we can't shift out the one bit. 2950 // - The shift is nuw, we can't shift out the one bit. 2951 // - CI2 is one 2952 // - CI isn't zero 2953 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) || 2954 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) || 2955 CI2Val->isOneValue() || !CI->isZero()) { 2956 if (Pred == ICmpInst::ICMP_EQ) 2957 return ConstantInt::getFalse(RHS->getContext()); 2958 if (Pred == ICmpInst::ICMP_NE) 2959 return ConstantInt::getTrue(RHS->getContext()); 2960 } 2961 } 2962 if (CIVal->isSignMask() && CI2Val->isOneValue()) { 2963 if (Pred == ICmpInst::ICMP_UGT) 2964 return ConstantInt::getFalse(RHS->getContext()); 2965 if (Pred == ICmpInst::ICMP_ULE) 2966 return ConstantInt::getTrue(RHS->getContext()); 2967 } 2968 } 2969 } 2970 2971 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() && 2972 LBO->getOperand(1) == RBO->getOperand(1)) { 2973 switch (LBO->getOpcode()) { 2974 default: 2975 break; 2976 case Instruction::UDiv: 2977 case Instruction::LShr: 2978 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) || 2979 !Q.IIQ.isExact(RBO)) 2980 break; 2981 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2982 RBO->getOperand(0), Q, MaxRecurse - 1)) 2983 return V; 2984 break; 2985 case Instruction::SDiv: 2986 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) || 2987 !Q.IIQ.isExact(RBO)) 2988 break; 2989 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2990 RBO->getOperand(0), Q, MaxRecurse - 1)) 2991 return V; 2992 break; 2993 case Instruction::AShr: 2994 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO)) 2995 break; 2996 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 2997 RBO->getOperand(0), Q, MaxRecurse - 1)) 2998 return V; 2999 break; 3000 case Instruction::Shl: { 3001 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO); 3002 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO); 3003 if (!NUW && !NSW) 3004 break; 3005 if (!NSW && ICmpInst::isSigned(Pred)) 3006 break; 3007 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0), 3008 RBO->getOperand(0), Q, MaxRecurse - 1)) 3009 return V; 3010 break; 3011 } 3012 } 3013 } 
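  // Illustrative sketch of the shared-operand cases above (value names are
  // hypothetical, not from this file): 'icmp eq (udiv exact i32 %x, %z),
  // (udiv exact i32 %y, %z)' simplifies exactly when 'icmp eq i32 %x, %y'
  // itself simplifies.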
3014 return nullptr; 3015 } 3016 3017 /// Simplify integer comparisons where at least one operand of the compare 3018 /// matches an integer min/max idiom. 3019 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS, 3020 Value *RHS, const SimplifyQuery &Q, 3021 unsigned MaxRecurse) { 3022 Type *ITy = GetCompareTy(LHS); // The return type. 3023 Value *A, *B; 3024 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE; 3025 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B". 3026 3027 // Signed variants on "max(a,b)>=a -> true". 3028 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 3029 if (A != RHS) 3030 std::swap(A, B); // smax(A, B) pred A. 3031 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 3032 // We analyze this as smax(A, B) pred A. 3033 P = Pred; 3034 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) && 3035 (A == LHS || B == LHS)) { 3036 if (A != LHS) 3037 std::swap(A, B); // A pred smax(A, B). 3038 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B". 3039 // We analyze this as smax(A, B) swapped-pred A. 3040 P = CmpInst::getSwappedPredicate(Pred); 3041 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 3042 (A == RHS || B == RHS)) { 3043 if (A != RHS) 3044 std::swap(A, B); // smin(A, B) pred A. 3045 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 3046 // We analyze this as smax(-A, -B) swapped-pred -A. 3047 // Note that we do not need to actually form -A or -B thanks to EqP. 3048 P = CmpInst::getSwappedPredicate(Pred); 3049 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) && 3050 (A == LHS || B == LHS)) { 3051 if (A != LHS) 3052 std::swap(A, B); // A pred smin(A, B). 3053 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B". 3054 // We analyze this as smax(-A, -B) pred -A. 3055 // Note that we do not need to actually form -A or -B thanks to EqP. 3056 P = Pred; 3057 } 3058 if (P != CmpInst::BAD_ICMP_PREDICATE) { 3059 // Cases correspond to "max(A, B) p A". 3060 switch (P) { 3061 default: 3062 break; 3063 case CmpInst::ICMP_EQ: 3064 case CmpInst::ICMP_SLE: 3065 // Equivalent to "A EqP B". This may be the same as the condition tested 3066 // in the max/min; if so, we can just return that. 3067 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 3068 return V; 3069 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 3070 return V; 3071 // Otherwise, see if "A EqP B" simplifies. 3072 if (MaxRecurse) 3073 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 3074 return V; 3075 break; 3076 case CmpInst::ICMP_NE: 3077 case CmpInst::ICMP_SGT: { 3078 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 3079 // Equivalent to "A InvEqP B". This may be the same as the condition 3080 // tested in the max/min; if so, we can just return that. 3081 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 3082 return V; 3083 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 3084 return V; 3085 // Otherwise, see if "A InvEqP B" simplifies. 3086 if (MaxRecurse) 3087 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 3088 return V; 3089 break; 3090 } 3091 case CmpInst::ICMP_SGE: 3092 // Always true. 3093 return getTrue(ITy); 3094 case CmpInst::ICMP_SLT: 3095 // Always false. 3096 return getFalse(ITy); 3097 } 3098 } 3099 3100 // Unsigned variants on "max(a,b)>=a -> true". 
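  // For instance (illustrative, using the same shorthand as above):
  // "umax(a,b) uge a" folds to true and "umin(a,b) ugt a" folds to false.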
3101 P = CmpInst::BAD_ICMP_PREDICATE; 3102 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) { 3103 if (A != RHS) 3104 std::swap(A, B); // umax(A, B) pred A. 3105 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 3106 // We analyze this as umax(A, B) pred A. 3107 P = Pred; 3108 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) && 3109 (A == LHS || B == LHS)) { 3110 if (A != LHS) 3111 std::swap(A, B); // A pred umax(A, B). 3112 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B". 3113 // We analyze this as umax(A, B) swapped-pred A. 3114 P = CmpInst::getSwappedPredicate(Pred); 3115 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3116 (A == RHS || B == RHS)) { 3117 if (A != RHS) 3118 std::swap(A, B); // umin(A, B) pred A. 3119 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3120 // We analyze this as umax(-A, -B) swapped-pred -A. 3121 // Note that we do not need to actually form -A or -B thanks to EqP. 3122 P = CmpInst::getSwappedPredicate(Pred); 3123 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) && 3124 (A == LHS || B == LHS)) { 3125 if (A != LHS) 3126 std::swap(A, B); // A pred umin(A, B). 3127 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B". 3128 // We analyze this as umax(-A, -B) pred -A. 3129 // Note that we do not need to actually form -A or -B thanks to EqP. 3130 P = Pred; 3131 } 3132 if (P != CmpInst::BAD_ICMP_PREDICATE) { 3133 // Cases correspond to "max(A, B) p A". 3134 switch (P) { 3135 default: 3136 break; 3137 case CmpInst::ICMP_EQ: 3138 case CmpInst::ICMP_ULE: 3139 // Equivalent to "A EqP B". This may be the same as the condition tested 3140 // in the max/min; if so, we can just return that. 3141 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B)) 3142 return V; 3143 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B)) 3144 return V; 3145 // Otherwise, see if "A EqP B" simplifies. 3146 if (MaxRecurse) 3147 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1)) 3148 return V; 3149 break; 3150 case CmpInst::ICMP_NE: 3151 case CmpInst::ICMP_UGT: { 3152 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP); 3153 // Equivalent to "A InvEqP B". This may be the same as the condition 3154 // tested in the max/min; if so, we can just return that. 3155 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B)) 3156 return V; 3157 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B)) 3158 return V; 3159 // Otherwise, see if "A InvEqP B" simplifies. 3160 if (MaxRecurse) 3161 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1)) 3162 return V; 3163 break; 3164 } 3165 case CmpInst::ICMP_UGE: 3166 // Always true. 3167 return getTrue(ITy); 3168 case CmpInst::ICMP_ULT: 3169 // Always false. 3170 return getFalse(ITy); 3171 } 3172 } 3173 3174 // Variants on "max(x,y) >= min(x,z)". 3175 Value *C, *D; 3176 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && 3177 match(RHS, m_SMin(m_Value(C), m_Value(D))) && 3178 (A == C || A == D || B == C || B == D)) { 3179 // max(x, ?) pred min(x, ?). 3180 if (Pred == CmpInst::ICMP_SGE) 3181 // Always true. 3182 return getTrue(ITy); 3183 if (Pred == CmpInst::ICMP_SLT) 3184 // Always false. 3185 return getFalse(ITy); 3186 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) && 3187 match(RHS, m_SMax(m_Value(C), m_Value(D))) && 3188 (A == C || A == D || B == C || B == D)) { 3189 // min(x, ?) pred max(x, ?). 3190 if (Pred == CmpInst::ICMP_SLE) 3191 // Always true. 
3192 return getTrue(ITy); 3193 if (Pred == CmpInst::ICMP_SGT) 3194 // Always false. 3195 return getFalse(ITy); 3196 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && 3197 match(RHS, m_UMin(m_Value(C), m_Value(D))) && 3198 (A == C || A == D || B == C || B == D)) { 3199 // max(x, ?) pred min(x, ?). 3200 if (Pred == CmpInst::ICMP_UGE) 3201 // Always true. 3202 return getTrue(ITy); 3203 if (Pred == CmpInst::ICMP_ULT) 3204 // Always false. 3205 return getFalse(ITy); 3206 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) && 3207 match(RHS, m_UMax(m_Value(C), m_Value(D))) && 3208 (A == C || A == D || B == C || B == D)) { 3209 // min(x, ?) pred max(x, ?). 3210 if (Pred == CmpInst::ICMP_ULE) 3211 // Always true. 3212 return getTrue(ITy); 3213 if (Pred == CmpInst::ICMP_UGT) 3214 // Always false. 3215 return getFalse(ITy); 3216 } 3217 3218 return nullptr; 3219 } 3220 3221 /// Given operands for an ICmpInst, see if we can fold the result. 3222 /// If not, this returns null. 3223 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3224 const SimplifyQuery &Q, unsigned MaxRecurse) { 3225 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3226 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!"); 3227 3228 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3229 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3230 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3231 3232 // If we have a constant, make sure it is on the RHS. 3233 std::swap(LHS, RHS); 3234 Pred = CmpInst::getSwappedPredicate(Pred); 3235 } 3236 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X"); 3237 3238 Type *ITy = GetCompareTy(LHS); // The return type. 3239 3240 // For EQ and NE, we can always pick a value for the undef to make the 3241 // predicate pass or fail, so we can return undef. 3242 // Matches behavior in llvm::ConstantFoldCompareInstruction. 3243 if (isa<UndefValue>(RHS) && ICmpInst::isEquality(Pred)) 3244 return UndefValue::get(ITy); 3245 3246 // icmp X, X -> true/false 3247 // icmp X, undef -> true/false because undef could be X. 3248 if (LHS == RHS || isa<UndefValue>(RHS)) 3249 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred)); 3250 3251 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q)) 3252 return V; 3253 3254 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q)) 3255 return V; 3256 3257 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ)) 3258 return V; 3259 3260 // If both operands have range metadata, use the metadata 3261 // to simplify the comparison. 
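  // Illustrative example (hypothetical values): if %a carries !range [0, 8)
  // and %b carries !range [8, 16), then 'icmp ult i8 %a, %b' folds to true.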
3262 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) { 3263 auto RHS_Instr = cast<Instruction>(RHS); 3264 auto LHS_Instr = cast<Instruction>(LHS); 3265 3266 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) && 3267 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) { 3268 auto RHS_CR = getConstantRangeFromMetadata( 3269 *RHS_Instr->getMetadata(LLVMContext::MD_range)); 3270 auto LHS_CR = getConstantRangeFromMetadata( 3271 *LHS_Instr->getMetadata(LLVMContext::MD_range)); 3272 3273 auto Satisfied_CR = ConstantRange::makeSatisfyingICmpRegion(Pred, RHS_CR); 3274 if (Satisfied_CR.contains(LHS_CR)) 3275 return ConstantInt::getTrue(RHS->getContext()); 3276 3277 auto InversedSatisfied_CR = ConstantRange::makeSatisfyingICmpRegion( 3278 CmpInst::getInversePredicate(Pred), RHS_CR); 3279 if (InversedSatisfied_CR.contains(LHS_CR)) 3280 return ConstantInt::getFalse(RHS->getContext()); 3281 } 3282 } 3283 3284 // Compare of cast, for example (zext X) != 0 -> X != 0 3285 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) { 3286 Instruction *LI = cast<CastInst>(LHS); 3287 Value *SrcOp = LI->getOperand(0); 3288 Type *SrcTy = SrcOp->getType(); 3289 Type *DstTy = LI->getType(); 3290 3291 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input 3292 // if the integer type is the same size as the pointer type. 3293 if (MaxRecurse && isa<PtrToIntInst>(LI) && 3294 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) { 3295 if (Constant *RHSC = dyn_cast<Constant>(RHS)) { 3296 // Transfer the cast to the constant. 3297 if (Value *V = SimplifyICmpInst(Pred, SrcOp, 3298 ConstantExpr::getIntToPtr(RHSC, SrcTy), 3299 Q, MaxRecurse-1)) 3300 return V; 3301 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) { 3302 if (RI->getOperand(0)->getType() == SrcTy) 3303 // Compare without the cast. 3304 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0), 3305 Q, MaxRecurse-1)) 3306 return V; 3307 } 3308 } 3309 3310 if (isa<ZExtInst>(LHS)) { 3311 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the 3312 // same type. 3313 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) { 3314 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType()) 3315 // Compare X and Y. Note that signed predicates become unsigned. 3316 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 3317 SrcOp, RI->getOperand(0), Q, 3318 MaxRecurse-1)) 3319 return V; 3320 } 3321 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended 3322 // too. If not, then try to deduce the result of the comparison. 3323 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 3324 // Compute the constant that would happen if we truncated to SrcTy then 3325 // reextended to DstTy. 3326 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy); 3327 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy); 3328 3329 // If the re-extended constant didn't change then this is effectively 3330 // also a case of comparing two zero-extended values. 3331 if (RExt == CI && MaxRecurse) 3332 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), 3333 SrcOp, Trunc, Q, MaxRecurse-1)) 3334 return V; 3335 3336 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit 3337 // there. Use this to work out the result of the comparison. 3338 if (RExt != CI) { 3339 switch (Pred) { 3340 default: llvm_unreachable("Unknown ICmp predicate!"); 3341 // LHS <u RHS. 
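          // e.g. (illustrative) 'icmp ugt (zext i8 %x to i32), 300' is always
          // false, and 'icmp ult (zext i8 %x to i32), 300' is always true.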
3342 case ICmpInst::ICMP_EQ: 3343 case ICmpInst::ICMP_UGT: 3344 case ICmpInst::ICMP_UGE: 3345 return ConstantInt::getFalse(CI->getContext()); 3346 3347 case ICmpInst::ICMP_NE: 3348 case ICmpInst::ICMP_ULT: 3349 case ICmpInst::ICMP_ULE: 3350 return ConstantInt::getTrue(CI->getContext()); 3351 3352 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS 3353 // is non-negative then LHS <s RHS. 3354 case ICmpInst::ICMP_SGT: 3355 case ICmpInst::ICMP_SGE: 3356 return CI->getValue().isNegative() ? 3357 ConstantInt::getTrue(CI->getContext()) : 3358 ConstantInt::getFalse(CI->getContext()); 3359 3360 case ICmpInst::ICMP_SLT: 3361 case ICmpInst::ICMP_SLE: 3362 return CI->getValue().isNegative() ? 3363 ConstantInt::getFalse(CI->getContext()) : 3364 ConstantInt::getTrue(CI->getContext()); 3365 } 3366 } 3367 } 3368 } 3369 3370 if (isa<SExtInst>(LHS)) { 3371 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the 3372 // same type. 3373 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) { 3374 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType()) 3375 // Compare X and Y. Note that the predicate does not change. 3376 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0), 3377 Q, MaxRecurse-1)) 3378 return V; 3379 } 3380 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended 3381 // too. If not, then try to deduce the result of the comparison. 3382 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) { 3383 // Compute the constant that would happen if we truncated to SrcTy then 3384 // reextended to DstTy. 3385 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy); 3386 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy); 3387 3388 // If the re-extended constant didn't change then this is effectively 3389 // also a case of comparing two sign-extended values. 3390 if (RExt == CI && MaxRecurse) 3391 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1)) 3392 return V; 3393 3394 // Otherwise the upper bits of LHS are all equal, while RHS has varying 3395 // bits there. Use this to work out the result of the comparison. 3396 if (RExt != CI) { 3397 switch (Pred) { 3398 default: llvm_unreachable("Unknown ICmp predicate!"); 3399 case ICmpInst::ICMP_EQ: 3400 return ConstantInt::getFalse(CI->getContext()); 3401 case ICmpInst::ICMP_NE: 3402 return ConstantInt::getTrue(CI->getContext()); 3403 3404 // If RHS is non-negative then LHS <s RHS. If RHS is negative then 3405 // LHS >s RHS. 3406 case ICmpInst::ICMP_SGT: 3407 case ICmpInst::ICMP_SGE: 3408 return CI->getValue().isNegative() ? 3409 ConstantInt::getTrue(CI->getContext()) : 3410 ConstantInt::getFalse(CI->getContext()); 3411 case ICmpInst::ICMP_SLT: 3412 case ICmpInst::ICMP_SLE: 3413 return CI->getValue().isNegative() ? 3414 ConstantInt::getFalse(CI->getContext()) : 3415 ConstantInt::getTrue(CI->getContext()); 3416 3417 // If LHS is non-negative then LHS <u RHS. If LHS is negative then 3418 // LHS >u RHS. 3419 case ICmpInst::ICMP_UGT: 3420 case ICmpInst::ICMP_UGE: 3421 // Comparison is true iff the LHS <s 0. 3422 if (MaxRecurse) 3423 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp, 3424 Constant::getNullValue(SrcTy), 3425 Q, MaxRecurse-1)) 3426 return V; 3427 break; 3428 case ICmpInst::ICMP_ULT: 3429 case ICmpInst::ICMP_ULE: 3430 // Comparison is true iff the LHS >=s 0.
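          // e.g. (illustrative) 'icmp ult (sext i8 %x to i32), 256' is
          // equivalent to 'icmp sge i8 %x, 0', so it folds whenever that
          // simpler compare folds.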
3431 if (MaxRecurse) 3432 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp, 3433 Constant::getNullValue(SrcTy), 3434 Q, MaxRecurse-1)) 3435 return V; 3436 break; 3437 } 3438 } 3439 } 3440 } 3441 } 3442 3443 // icmp eq|ne X, Y -> false|true if X != Y 3444 if (ICmpInst::isEquality(Pred) && 3445 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) { 3446 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy); 3447 } 3448 3449 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse)) 3450 return V; 3451 3452 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse)) 3453 return V; 3454 3455 // Simplify comparisons of related pointers using a powerful, recursive 3456 // GEP-walk when we have target data available.. 3457 if (LHS->getType()->isPointerTy()) 3458 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, 3459 Q.IIQ, LHS, RHS)) 3460 return C; 3461 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS)) 3462 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS)) 3463 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) == 3464 Q.DL.getTypeSizeInBits(CLHS->getType()) && 3465 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) == 3466 Q.DL.getTypeSizeInBits(CRHS->getType())) 3467 if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, 3468 Q.IIQ, CLHS->getPointerOperand(), 3469 CRHS->getPointerOperand())) 3470 return C; 3471 3472 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) { 3473 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) { 3474 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() && 3475 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() && 3476 (ICmpInst::isEquality(Pred) || 3477 (GLHS->isInBounds() && GRHS->isInBounds() && 3478 Pred == ICmpInst::getSignedPredicate(Pred)))) { 3479 // The bases are equal and the indices are constant. Build a constant 3480 // expression GEP with the same indices and a null base pointer to see 3481 // what constant folding can make out of it. 3482 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType()); 3483 SmallVector<Value *, 4> IndicesLHS(GLHS->idx_begin(), GLHS->idx_end()); 3484 Constant *NewLHS = ConstantExpr::getGetElementPtr( 3485 GLHS->getSourceElementType(), Null, IndicesLHS); 3486 3487 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end()); 3488 Constant *NewRHS = ConstantExpr::getGetElementPtr( 3489 GLHS->getSourceElementType(), Null, IndicesRHS); 3490 return ConstantExpr::getICmp(Pred, NewLHS, NewRHS); 3491 } 3492 } 3493 } 3494 3495 // If the comparison is with the result of a select instruction, check whether 3496 // comparing with either branch of the select always yields the same value. 3497 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3498 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3499 return V; 3500 3501 // If the comparison is with the result of a phi instruction, check whether 3502 // doing the compare with each incoming phi value yields a common result. 3503 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3504 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3505 return V; 3506 3507 return nullptr; 3508 } 3509 3510 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3511 const SimplifyQuery &Q) { 3512 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 3513 } 3514 3515 /// Given operands for an FCmpInst, see if we can fold the result. 3516 /// If not, this returns null. 
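/// (Illustrative: with the nnan flag, 'fcmp uno double %x, %y' folds to false
/// and 'fcmp ord double %x, %y' folds to true.)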
3517 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3518 FastMathFlags FMF, const SimplifyQuery &Q, 3519 unsigned MaxRecurse) { 3520 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate; 3521 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!"); 3522 3523 if (Constant *CLHS = dyn_cast<Constant>(LHS)) { 3524 if (Constant *CRHS = dyn_cast<Constant>(RHS)) 3525 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI); 3526 3527 // If we have a constant, make sure it is on the RHS. 3528 std::swap(LHS, RHS); 3529 Pred = CmpInst::getSwappedPredicate(Pred); 3530 } 3531 3532 // Fold trivial predicates. 3533 Type *RetTy = GetCompareTy(LHS); 3534 if (Pred == FCmpInst::FCMP_FALSE) 3535 return getFalse(RetTy); 3536 if (Pred == FCmpInst::FCMP_TRUE) 3537 return getTrue(RetTy); 3538 3539 // Fold (un)ordered comparison if we can determine there are no NaNs. 3540 if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD) 3541 if (FMF.noNaNs() || 3542 (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI))) 3543 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD); 3544 3545 // NaN is unordered; NaN is not ordered. 3546 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) && 3547 "Comparison must be either ordered or unordered"); 3548 if (match(RHS, m_NaN())) 3549 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3550 3551 // fcmp pred x, undef and fcmp pred undef, x 3552 // fold to true if unordered, false if ordered 3553 if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS)) { 3554 // Choosing NaN for the undef will always make unordered comparison succeed 3555 // and ordered comparison fail. 3556 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred)); 3557 } 3558 3559 // fcmp x,x -> true/false. Not all compares are foldable. 3560 if (LHS == RHS) { 3561 if (CmpInst::isTrueWhenEqual(Pred)) 3562 return getTrue(RetTy); 3563 if (CmpInst::isFalseWhenEqual(Pred)) 3564 return getFalse(RetTy); 3565 } 3566 3567 // Handle fcmp with constant RHS. 3568 // TODO: Use match with a specific FP value, so these work with vectors with 3569 // undef lanes. 3570 const APFloat *C; 3571 if (match(RHS, m_APFloat(C))) { 3572 // Check whether the constant is an infinity. 3573 if (C->isInfinity()) { 3574 if (C->isNegative()) { 3575 switch (Pred) { 3576 case FCmpInst::FCMP_OLT: 3577 // No value is ordered and less than negative infinity. 3578 return getFalse(RetTy); 3579 case FCmpInst::FCMP_UGE: 3580 // All values are unordered with or at least negative infinity. 3581 return getTrue(RetTy); 3582 default: 3583 break; 3584 } 3585 } else { 3586 switch (Pred) { 3587 case FCmpInst::FCMP_OGT: 3588 // No value is ordered and greater than infinity. 3589 return getFalse(RetTy); 3590 case FCmpInst::FCMP_ULE: 3591 // All values are unordered with and at most infinity. 3592 return getTrue(RetTy); 3593 default: 3594 break; 3595 } 3596 } 3597 } 3598 if (C->isNegative() && !C->isNegZero()) { 3599 assert(!C->isNaN() && "Unexpected NaN constant!"); 3600 // TODO: We can catch more cases by using a range check rather than 3601 // relying on CannotBeOrderedLessThanZero. 
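    // e.g. (illustrative) with %a = call double @llvm.fabs.f64(double %x),
    // 'fcmp ugt double %a, -1.0' folds to true and 'fcmp olt double %a, -1.0'
    // folds to false.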
3602 switch (Pred) { 3603 case FCmpInst::FCMP_UGE: 3604 case FCmpInst::FCMP_UGT: 3605 case FCmpInst::FCMP_UNE: 3606 // (X >= 0) implies (X > C) when (C < 0) 3607 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3608 return getTrue(RetTy); 3609 break; 3610 case FCmpInst::FCMP_OEQ: 3611 case FCmpInst::FCMP_OLE: 3612 case FCmpInst::FCMP_OLT: 3613 // (X >= 0) implies !(X < C) when (C < 0) 3614 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3615 return getFalse(RetTy); 3616 break; 3617 default: 3618 break; 3619 } 3620 } 3621 3622 // Check comparison of [minnum/maxnum with constant] with other constant. 3623 const APFloat *C2; 3624 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) && 3625 C2->compare(*C) == APFloat::cmpLessThan) || 3626 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) && 3627 C2->compare(*C) == APFloat::cmpGreaterThan)) { 3628 bool IsMaxNum = 3629 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum; 3630 // The ordered relationship and minnum/maxnum guarantee that we do not 3631 // have NaN constants, so ordered/unordered preds are handled the same. 3632 switch (Pred) { 3633 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ: 3634 // minnum(X, LesserC) == C --> false 3635 // maxnum(X, GreaterC) == C --> false 3636 return getFalse(RetTy); 3637 case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE: 3638 // minnum(X, LesserC) != C --> true 3639 // maxnum(X, GreaterC) != C --> true 3640 return getTrue(RetTy); 3641 case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE: 3642 case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT: 3643 // minnum(X, LesserC) >= C --> false 3644 // minnum(X, LesserC) > C --> false 3645 // maxnum(X, GreaterC) >= C --> true 3646 // maxnum(X, GreaterC) > C --> true 3647 return ConstantInt::get(RetTy, IsMaxNum); 3648 case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE: 3649 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT: 3650 // minnum(X, LesserC) <= C --> true 3651 // minnum(X, LesserC) < C --> true 3652 // maxnum(X, GreaterC) <= C --> false 3653 // maxnum(X, GreaterC) < C --> false 3654 return ConstantInt::get(RetTy, !IsMaxNum); 3655 default: 3656 // TRUE/FALSE/ORD/UNO should be handled before this. 3657 llvm_unreachable("Unexpected fcmp predicate"); 3658 } 3659 } 3660 } 3661 3662 if (match(RHS, m_AnyZeroFP())) { 3663 switch (Pred) { 3664 case FCmpInst::FCMP_OGE: 3665 case FCmpInst::FCMP_ULT: 3666 // Positive or zero X >= 0.0 --> true 3667 // Positive or zero X < 0.0 --> false 3668 if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) && 3669 CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3670 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy); 3671 break; 3672 case FCmpInst::FCMP_UGE: 3673 case FCmpInst::FCMP_OLT: 3674 // Positive or zero or nan X >= 0.0 --> true 3675 // Positive or zero or nan X < 0.0 --> false 3676 if (CannotBeOrderedLessThanZero(LHS, Q.TLI)) 3677 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy); 3678 break; 3679 default: 3680 break; 3681 } 3682 } 3683 3684 // If the comparison is with the result of a select instruction, check whether 3685 // comparing with either branch of the select always yields the same value. 3686 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS)) 3687 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse)) 3688 return V; 3689 3690 // If the comparison is with the result of a phi instruction, check whether 3691 // doing the compare with each incoming phi value yields a common result. 
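  // e.g. (illustrative) for '%p = phi double [ 1.0, %bb1 ], [ 2.0, %bb2 ]',
  // 'fcmp ogt double %p, 0.0' folds to true because both incoming values
  // satisfy the compare.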
3692 if (isa<PHINode>(LHS) || isa<PHINode>(RHS)) 3693 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse)) 3694 return V; 3695 3696 return nullptr; 3697 } 3698 3699 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 3700 FastMathFlags FMF, const SimplifyQuery &Q) { 3701 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit); 3702 } 3703 3704 /// See if V simplifies when its operand Op is replaced with RepOp. 3705 static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, 3706 const SimplifyQuery &Q, 3707 unsigned MaxRecurse) { 3708 // Trivial replacement. 3709 if (V == Op) 3710 return RepOp; 3711 3712 // We cannot replace a constant, and shouldn't even try. 3713 if (isa<Constant>(Op)) 3714 return nullptr; 3715 3716 auto *I = dyn_cast<Instruction>(V); 3717 if (!I) 3718 return nullptr; 3719 3720 // If this is a binary operator, try to simplify it with the replaced op. 3721 if (auto *B = dyn_cast<BinaryOperator>(I)) { 3722 // Consider: 3723 // %cmp = icmp eq i32 %x, 2147483647 3724 // %add = add nsw i32 %x, 1 3725 // %sel = select i1 %cmp, i32 -2147483648, i32 %add 3726 // 3727 // We can't replace %sel with %add unless we strip away the flags. 3728 // TODO: This is an unusual limitation because better analysis results in 3729 // worse simplification. InstCombine can do this fold more generally 3730 // by dropping the flags. Remove this fold to save compile-time? 3731 if (isa<OverflowingBinaryOperator>(B)) 3732 if (Q.IIQ.hasNoSignedWrap(B) || Q.IIQ.hasNoUnsignedWrap(B)) 3733 return nullptr; 3734 if (isa<PossiblyExactOperator>(B) && Q.IIQ.isExact(B)) 3735 return nullptr; 3736 3737 if (MaxRecurse) { 3738 if (B->getOperand(0) == Op) 3739 return SimplifyBinOp(B->getOpcode(), RepOp, B->getOperand(1), Q, 3740 MaxRecurse - 1); 3741 if (B->getOperand(1) == Op) 3742 return SimplifyBinOp(B->getOpcode(), B->getOperand(0), RepOp, Q, 3743 MaxRecurse - 1); 3744 } 3745 } 3746 3747 // Same for CmpInsts. 3748 if (CmpInst *C = dyn_cast<CmpInst>(I)) { 3749 if (MaxRecurse) { 3750 if (C->getOperand(0) == Op) 3751 return SimplifyCmpInst(C->getPredicate(), RepOp, C->getOperand(1), Q, 3752 MaxRecurse - 1); 3753 if (C->getOperand(1) == Op) 3754 return SimplifyCmpInst(C->getPredicate(), C->getOperand(0), RepOp, Q, 3755 MaxRecurse - 1); 3756 } 3757 } 3758 3759 // Same for GEPs. 3760 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) { 3761 if (MaxRecurse) { 3762 SmallVector<Value *, 8> NewOps(GEP->getNumOperands()); 3763 transform(GEP->operands(), NewOps.begin(), 3764 [&](Value *V) { return V == Op ? RepOp : V; }); 3765 return SimplifyGEPInst(GEP->getSourceElementType(), NewOps, Q, 3766 MaxRecurse - 1); 3767 } 3768 } 3769 3770 // TODO: We could hand off more cases to instsimplify here. 3771 3772 // If all operands are constant after substituting Op for RepOp then we can 3773 // constant fold the instruction. 3774 if (Constant *CRepOp = dyn_cast<Constant>(RepOp)) { 3775 // Build a list of all constant operands. 3776 SmallVector<Constant *, 8> ConstOps; 3777 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { 3778 if (I->getOperand(i) == Op) 3779 ConstOps.push_back(CRepOp); 3780 else if (Constant *COp = dyn_cast<Constant>(I->getOperand(i))) 3781 ConstOps.push_back(COp); 3782 else 3783 break; 3784 } 3785 3786 // All operands were constants, fold it. 
3787 if (ConstOps.size() == I->getNumOperands()) { 3788 if (CmpInst *C = dyn_cast<CmpInst>(I)) 3789 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0], 3790 ConstOps[1], Q.DL, Q.TLI); 3791 3792 if (LoadInst *LI = dyn_cast<LoadInst>(I)) 3793 if (!LI->isVolatile()) 3794 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL); 3795 3796 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI); 3797 } 3798 } 3799 3800 return nullptr; 3801 } 3802 3803 /// Try to simplify a select instruction when its condition operand is an 3804 /// integer comparison where one operand of the compare is a constant. 3805 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, 3806 const APInt *Y, bool TrueWhenUnset) { 3807 const APInt *C; 3808 3809 // (X & Y) == 0 ? X & ~Y : X --> X 3810 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y 3811 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) && 3812 *Y == ~*C) 3813 return TrueWhenUnset ? FalseVal : TrueVal; 3814 3815 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y 3816 // (X & Y) != 0 ? X : X & ~Y --> X 3817 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) && 3818 *Y == ~*C) 3819 return TrueWhenUnset ? FalseVal : TrueVal; 3820 3821 if (Y->isPowerOf2()) { 3822 // (X & Y) == 0 ? X | Y : X --> X | Y 3823 // (X & Y) != 0 ? X | Y : X --> X 3824 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) && 3825 *Y == *C) 3826 return TrueWhenUnset ? TrueVal : FalseVal; 3827 3828 // (X & Y) == 0 ? X : X | Y --> X 3829 // (X & Y) != 0 ? X : X | Y --> X | Y 3830 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) && 3831 *Y == *C) 3832 return TrueWhenUnset ? TrueVal : FalseVal; 3833 } 3834 3835 return nullptr; 3836 } 3837 3838 /// An alternative way to test if a bit is set or not uses sgt/slt instead of 3839 /// eq/ne. 3840 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS, 3841 ICmpInst::Predicate Pred, 3842 Value *TrueVal, Value *FalseVal) { 3843 Value *X; 3844 APInt Mask; 3845 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask)) 3846 return nullptr; 3847 3848 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask, 3849 Pred == ICmpInst::ICMP_EQ); 3850 } 3851 3852 /// Try to simplify a select instruction when its condition operand is an 3853 /// integer comparison. 3854 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, 3855 Value *FalseVal, const SimplifyQuery &Q, 3856 unsigned MaxRecurse) { 3857 ICmpInst::Predicate Pred; 3858 Value *CmpLHS, *CmpRHS; 3859 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS)))) 3860 return nullptr; 3861 3862 if (ICmpInst::isEquality(Pred) && match(CmpRHS, m_Zero())) { 3863 Value *X; 3864 const APInt *Y; 3865 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y)))) 3866 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y, 3867 Pred == ICmpInst::ICMP_EQ)) 3868 return V; 3869 3870 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate. 3871 Value *ShAmt; 3872 auto isFsh = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), 3873 m_Value(ShAmt)), 3874 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), 3875 m_Value(ShAmt))); 3876 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X 3877 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X 3878 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt && 3879 Pred == ICmpInst::ICMP_EQ) 3880 return X; 3881 // (ShAmt != 0) ? X : fshl(X, *, ShAmt) --> X 3882 // (ShAmt != 0) ? 
X : fshr(*, X, ShAmt) --> X 3883 if (match(FalseVal, isFsh) && TrueVal == X && CmpLHS == ShAmt && 3884 Pred == ICmpInst::ICMP_NE) 3885 return X; 3886 3887 // Test for a zero-shift-guard-op around rotates. These are used to 3888 // avoid UB from oversized shifts in raw IR rotate patterns, but the 3889 // intrinsics do not have that problem. 3890 // We do not allow this transform for the general funnel shift case because 3891 // that would not preserve the poison safety of the original code. 3892 auto isRotate = m_CombineOr(m_Intrinsic<Intrinsic::fshl>(m_Value(X), 3893 m_Deferred(X), 3894 m_Value(ShAmt)), 3895 m_Intrinsic<Intrinsic::fshr>(m_Value(X), 3896 m_Deferred(X), 3897 m_Value(ShAmt))); 3898 // (ShAmt != 0) ? fshl(X, X, ShAmt) : X --> fshl(X, X, ShAmt) 3899 // (ShAmt != 0) ? fshr(X, X, ShAmt) : X --> fshr(X, X, ShAmt) 3900 if (match(TrueVal, isRotate) && FalseVal == X && CmpLHS == ShAmt && 3901 Pred == ICmpInst::ICMP_NE) 3902 return TrueVal; 3903 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt) 3904 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt) 3905 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt && 3906 Pred == ICmpInst::ICMP_EQ) 3907 return FalseVal; 3908 } 3909 3910 // Check for other compares that behave like bit test. 3911 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, 3912 TrueVal, FalseVal)) 3913 return V; 3914 3915 // If we have an equality comparison, then we know the value in one of the 3916 // arms of the select. See if substituting this value into the arm and 3917 // simplifying the result yields the same value as the other arm. 3918 if (Pred == ICmpInst::ICMP_EQ) { 3919 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3920 TrueVal || 3921 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3922 TrueVal) 3923 return FalseVal; 3924 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3925 FalseVal || 3926 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3927 FalseVal) 3928 return FalseVal; 3929 } else if (Pred == ICmpInst::ICMP_NE) { 3930 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3931 FalseVal || 3932 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3933 FalseVal) 3934 return TrueVal; 3935 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q, MaxRecurse) == 3936 TrueVal || 3937 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q, MaxRecurse) == 3938 TrueVal) 3939 return TrueVal; 3940 } 3941 3942 return nullptr; 3943 } 3944 3945 /// Try to simplify a select instruction when its condition operand is a 3946 /// floating-point comparison. 3947 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, 3948 const SimplifyQuery &Q) { 3949 FCmpInst::Predicate Pred; 3950 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) && 3951 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T)))) 3952 return nullptr; 3953 3954 // This transform is safe if we do not have (do not care about) -0.0 or if 3955 // at least one operand is known to not be -0.0. Otherwise, the select can 3956 // change the sign of a zero operand. 3957 bool HasNoSignedZeros = Q.CxtI && isa<FPMathOperator>(Q.CxtI) && 3958 Q.CxtI->hasNoSignedZeros(); 3959 const APFloat *C; 3960 if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) || 3961 (match(F, m_APFloat(C)) && C->isNonZero())) { 3962 // (T == F) ? T : F --> F 3963 // (F == T) ? 
T : F --> F 3964 if (Pred == FCmpInst::FCMP_OEQ) 3965 return F; 3966 3967 // (T != F) ? T : F --> T 3968 // (F != T) ? T : F --> T 3969 if (Pred == FCmpInst::FCMP_UNE) 3970 return T; 3971 } 3972 3973 return nullptr; 3974 } 3975 3976 /// Given operands for a SelectInst, see if we can fold the result. 3977 /// If not, this returns null. 3978 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, 3979 const SimplifyQuery &Q, unsigned MaxRecurse) { 3980 if (auto *CondC = dyn_cast<Constant>(Cond)) { 3981 if (auto *TrueC = dyn_cast<Constant>(TrueVal)) 3982 if (auto *FalseC = dyn_cast<Constant>(FalseVal)) 3983 return ConstantFoldSelectInstruction(CondC, TrueC, FalseC); 3984 3985 // select undef, X, Y -> X or Y 3986 if (isa<UndefValue>(CondC)) 3987 return isa<Constant>(FalseVal) ? FalseVal : TrueVal; 3988 3989 // TODO: Vector constants with undef elements don't simplify. 3990 3991 // select true, X, Y -> X 3992 if (CondC->isAllOnesValue()) 3993 return TrueVal; 3994 // select false, X, Y -> Y 3995 if (CondC->isNullValue()) 3996 return FalseVal; 3997 } 3998 3999 // select i1 Cond, i1 true, i1 false --> i1 Cond 4000 assert(Cond->getType()->isIntOrIntVectorTy(1) && 4001 "Select must have bool or bool vector condition"); 4002 assert(TrueVal->getType() == FalseVal->getType() && 4003 "Select must have same types for true/false ops"); 4004 if (Cond->getType() == TrueVal->getType() && 4005 match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt())) 4006 return Cond; 4007 4008 // select ?, X, X -> X 4009 if (TrueVal == FalseVal) 4010 return TrueVal; 4011 4012 if (isa<UndefValue>(TrueVal)) // select ?, undef, X -> X 4013 return FalseVal; 4014 if (isa<UndefValue>(FalseVal)) // select ?, X, undef -> X 4015 return TrueVal; 4016 4017 if (Value *V = 4018 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse)) 4019 return V; 4020 4021 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q)) 4022 return V; 4023 4024 if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal)) 4025 return V; 4026 4027 Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL); 4028 if (Imp) 4029 return *Imp ? TrueVal : FalseVal; 4030 4031 return nullptr; 4032 } 4033 4034 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, 4035 const SimplifyQuery &Q) { 4036 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit); 4037 } 4038 4039 /// Given operands for an GetElementPtrInst, see if we can fold the result. 4040 /// If not, this returns null. 4041 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 4042 const SimplifyQuery &Q, unsigned) { 4043 // The type of the GEP pointer operand. 4044 unsigned AS = 4045 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace(); 4046 4047 // getelementptr P -> P. 4048 if (Ops.size() == 1) 4049 return Ops[0]; 4050 4051 // Compute the (pointer) type returned by the GEP instruction. 4052 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1)); 4053 Type *GEPTy = PointerType::get(LastType, AS); 4054 if (VectorType *VT = dyn_cast<VectorType>(Ops[0]->getType())) 4055 GEPTy = VectorType::get(GEPTy, VT->getNumElements()); 4056 else if (VectorType *VT = dyn_cast<VectorType>(Ops[1]->getType())) 4057 GEPTy = VectorType::get(GEPTy, VT->getNumElements()); 4058 4059 if (isa<UndefValue>(Ops[0])) 4060 return UndefValue::get(GEPTy); 4061 4062 if (Ops.size() == 2) { 4063 // getelementptr P, 0 -> P. 
4064 if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy) 4065 return Ops[0]; 4066 4067 Type *Ty = SrcTy; 4068 if (Ty->isSized()) { 4069 Value *P; 4070 uint64_t C; 4071 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty); 4072 // getelementptr P, N -> P if P points to a type of zero size. 4073 if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy) 4074 return Ops[0]; 4075 4076 // The following transforms are only safe if the ptrtoint cast 4077 // doesn't truncate the pointers. 4078 if (Ops[1]->getType()->getScalarSizeInBits() == 4079 Q.DL.getPointerSizeInBits(AS)) { 4080 auto PtrToIntOrZero = [GEPTy](Value *P) -> Value * { 4081 if (match(P, m_Zero())) 4082 return Constant::getNullValue(GEPTy); 4083 Value *Temp; 4084 if (match(P, m_PtrToInt(m_Value(Temp)))) 4085 if (Temp->getType() == GEPTy) 4086 return Temp; 4087 return nullptr; 4088 }; 4089 4090 // getelementptr V, (sub P, V) -> P if P points to a type of size 1. 4091 if (TyAllocSize == 1 && 4092 match(Ops[1], m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))))) 4093 if (Value *R = PtrToIntOrZero(P)) 4094 return R; 4095 4096 // getelementptr V, (ashr (sub P, V), C) -> Q 4097 // if P points to a type of size 1 << C. 4098 if (match(Ops[1], 4099 m_AShr(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 4100 m_ConstantInt(C))) && 4101 TyAllocSize == 1ULL << C) 4102 if (Value *R = PtrToIntOrZero(P)) 4103 return R; 4104 4105 // getelementptr V, (sdiv (sub P, V), C) -> Q 4106 // if P points to a type of size C. 4107 if (match(Ops[1], 4108 m_SDiv(m_Sub(m_Value(P), m_PtrToInt(m_Specific(Ops[0]))), 4109 m_SpecificInt(TyAllocSize)))) 4110 if (Value *R = PtrToIntOrZero(P)) 4111 return R; 4112 } 4113 } 4114 } 4115 4116 if (Q.DL.getTypeAllocSize(LastType) == 1 && 4117 all_of(Ops.slice(1).drop_back(1), 4118 [](Value *Idx) { return match(Idx, m_Zero()); })) { 4119 unsigned IdxWidth = 4120 Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace()); 4121 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) { 4122 APInt BasePtrOffset(IdxWidth, 0); 4123 Value *StrippedBasePtr = 4124 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL, 4125 BasePtrOffset); 4126 4127 // gep (gep V, C), (sub 0, V) -> C 4128 if (match(Ops.back(), 4129 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr))))) { 4130 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset); 4131 return ConstantExpr::getIntToPtr(CI, GEPTy); 4132 } 4133 // gep (gep V, C), (xor V, -1) -> C-1 4134 if (match(Ops.back(), 4135 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes()))) { 4136 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1); 4137 return ConstantExpr::getIntToPtr(CI, GEPTy); 4138 } 4139 } 4140 } 4141 4142 // Check to see if this is constant foldable. 4143 if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); })) 4144 return nullptr; 4145 4146 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]), 4147 Ops.slice(1)); 4148 if (auto *CEFolded = ConstantFoldConstant(CE, Q.DL)) 4149 return CEFolded; 4150 return CE; 4151 } 4152 4153 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops, 4154 const SimplifyQuery &Q) { 4155 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit); 4156 } 4157 4158 /// Given operands for an InsertValueInst, see if we can fold the result. 4159 /// If not, this returns null. 
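/// (Illustrative: 'insertvalue {i32, i32} %agg, i32 undef, 1' folds to %agg,
/// and re-inserting a value just extracted from %agg also folds to %agg.)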
4160 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val, 4161 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q, 4162 unsigned) { 4163 if (Constant *CAgg = dyn_cast<Constant>(Agg)) 4164 if (Constant *CVal = dyn_cast<Constant>(Val)) 4165 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs); 4166 4167 // insertvalue x, undef, n -> x 4168 if (match(Val, m_Undef())) 4169 return Agg; 4170 4171 // insertvalue x, (extractvalue y, n), n 4172 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val)) 4173 if (EV->getAggregateOperand()->getType() == Agg->getType() && 4174 EV->getIndices() == Idxs) { 4175 // insertvalue undef, (extractvalue y, n), n -> y 4176 if (match(Agg, m_Undef())) 4177 return EV->getAggregateOperand(); 4178 4179 // insertvalue y, (extractvalue y, n), n -> y 4180 if (Agg == EV->getAggregateOperand()) 4181 return Agg; 4182 } 4183 4184 return nullptr; 4185 } 4186 4187 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val, 4188 ArrayRef<unsigned> Idxs, 4189 const SimplifyQuery &Q) { 4190 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit); 4191 } 4192 4193 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx, 4194 const SimplifyQuery &Q) { 4195 // Try to constant fold. 4196 auto *VecC = dyn_cast<Constant>(Vec); 4197 auto *ValC = dyn_cast<Constant>(Val); 4198 auto *IdxC = dyn_cast<Constant>(Idx); 4199 if (VecC && ValC && IdxC) 4200 return ConstantFoldInsertElementInstruction(VecC, ValC, IdxC); 4201 4202 // Fold into undef if index is out of bounds. 4203 if (auto *CI = dyn_cast<ConstantInt>(Idx)) { 4204 uint64_t NumElements = cast<VectorType>(Vec->getType())->getNumElements(); 4205 if (CI->uge(NumElements)) 4206 return UndefValue::get(Vec->getType()); 4207 } 4208 4209 // If index is undef, it might be out of bounds (see above case) 4210 if (isa<UndefValue>(Idx)) 4211 return UndefValue::get(Vec->getType()); 4212 4213 // Inserting an undef scalar? Assume it is the same value as the existing 4214 // vector element. 4215 if (isa<UndefValue>(Val)) 4216 return Vec; 4217 4218 // If we are extracting a value from a vector, then inserting it into the same 4219 // place, that's the input vector: 4220 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec 4221 if (match(Val, m_ExtractElement(m_Specific(Vec), m_Specific(Idx)))) 4222 return Vec; 4223 4224 return nullptr; 4225 } 4226 4227 /// Given operands for an ExtractValueInst, see if we can fold the result. 4228 /// If not, this returns null. 
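/// (Illustrative: 'extractvalue (insertvalue %y, %v, 0), 0' folds to %v,
/// walking past insertvalues that touch unrelated indices.)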
4229 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 4230 const SimplifyQuery &, unsigned) { 4231 if (auto *CAgg = dyn_cast<Constant>(Agg)) 4232 return ConstantFoldExtractValueInstruction(CAgg, Idxs); 4233 4234 // extractvalue (insertvalue y, elt, n), n -> elt 4235 unsigned NumIdxs = Idxs.size(); 4236 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr; 4237 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) { 4238 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices(); 4239 unsigned NumInsertValueIdxs = InsertValueIdxs.size(); 4240 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs); 4241 if (InsertValueIdxs.slice(0, NumCommonIdxs) == 4242 Idxs.slice(0, NumCommonIdxs)) { 4243 if (NumIdxs == NumInsertValueIdxs) 4244 return IVI->getInsertedValueOperand(); 4245 break; 4246 } 4247 } 4248 4249 return nullptr; 4250 } 4251 4252 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs, 4253 const SimplifyQuery &Q) { 4254 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit); 4255 } 4256 4257 /// Given operands for an ExtractElementInst, see if we can fold the result. 4258 /// If not, this returns null. 4259 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &, 4260 unsigned) { 4261 if (auto *CVec = dyn_cast<Constant>(Vec)) { 4262 if (auto *CIdx = dyn_cast<Constant>(Idx)) 4263 return ConstantFoldExtractElementInstruction(CVec, CIdx); 4264 4265 // The index is not relevant if our vector is a splat. 4266 if (auto *Splat = CVec->getSplatValue()) 4267 return Splat; 4268 4269 if (isa<UndefValue>(Vec)) 4270 return UndefValue::get(Vec->getType()->getVectorElementType()); 4271 } 4272 4273 // If extracting a specified index from the vector, see if we can recursively 4274 // find a previously computed scalar that was inserted into the vector. 4275 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) { 4276 if (IdxC->getValue().uge(Vec->getType()->getVectorNumElements())) 4277 // Definitely out of bounds, thus undefined result. 4278 return UndefValue::get(Vec->getType()->getVectorElementType()); 4279 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue())) 4280 return Elt; 4281 } 4282 4283 // An undef extract index can be arbitrarily chosen to be an out-of-range 4284 // index value, which would result in the instruction being undef. 4285 if (isa<UndefValue>(Idx)) 4286 return UndefValue::get(Vec->getType()->getVectorElementType()); 4287 4288 return nullptr; 4289 } 4290 4291 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx, 4292 const SimplifyQuery &Q) { 4293 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit); 4294 } 4295 4296 /// See if we can fold the given phi. If not, returns null. 4297 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) { 4298 // If all of the PHI's incoming values are the same then replace the PHI node 4299 // with the common value. 4300 Value *CommonValue = nullptr; 4301 bool HasUndefInput = false; 4302 for (Value *Incoming : PN->incoming_values()) { 4303 // If the incoming value is the phi node itself, it can safely be skipped. 4304 if (Incoming == PN) continue; 4305 if (isa<UndefValue>(Incoming)) { 4306 // Remember that we saw an undef value, but otherwise ignore them. 4307 HasUndefInput = true; 4308 continue; 4309 } 4310 if (CommonValue && Incoming != CommonValue) 4311 return nullptr; // Not the same, bail out.
4312 CommonValue = Incoming; 4313 } 4314 4315 // If CommonValue is null then all of the incoming values were either undef or 4316 // equal to the phi node itself. 4317 if (!CommonValue) 4318 return UndefValue::get(PN->getType()); 4319 4320 // If we have a PHI node like phi(X, undef, X), where X is defined by some 4321 // instruction, we cannot return X as the result of the PHI node unless it 4322 // dominates the PHI block. 4323 if (HasUndefInput) 4324 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr; 4325 4326 return CommonValue; 4327 } 4328 4329 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op, 4330 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) { 4331 if (auto *C = dyn_cast<Constant>(Op)) 4332 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL); 4333 4334 if (auto *CI = dyn_cast<CastInst>(Op)) { 4335 auto *Src = CI->getOperand(0); 4336 Type *SrcTy = Src->getType(); 4337 Type *MidTy = CI->getType(); 4338 Type *DstTy = Ty; 4339 if (Src->getType() == Ty) { 4340 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode()); 4341 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc); 4342 Type *SrcIntPtrTy = 4343 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr; 4344 Type *MidIntPtrTy = 4345 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr; 4346 Type *DstIntPtrTy = 4347 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr; 4348 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy, 4349 SrcIntPtrTy, MidIntPtrTy, 4350 DstIntPtrTy) == Instruction::BitCast) 4351 return Src; 4352 } 4353 } 4354 4355 // bitcast x -> x 4356 if (CastOpc == Instruction::BitCast) 4357 if (Op->getType() == Ty) 4358 return Op; 4359 4360 return nullptr; 4361 } 4362 4363 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, 4364 const SimplifyQuery &Q) { 4365 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit); 4366 } 4367 4368 /// For the given destination element of a shuffle, peek through shuffles to 4369 /// match a root vector source operand that contains that element in the same 4370 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s). 4371 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, 4372 int MaskVal, Value *RootVec, 4373 unsigned MaxRecurse) { 4374 if (!MaxRecurse--) 4375 return nullptr; 4376 4377 // Bail out if any mask value is undefined. That kind of shuffle may be 4378 // simplified further based on demanded bits or other folds. 4379 if (MaskVal == -1) 4380 return nullptr; 4381 4382 // The mask value chooses which source operand we need to look at next. 4383 int InVecNumElts = Op0->getType()->getVectorNumElements(); 4384 int RootElt = MaskVal; 4385 Value *SourceOp = Op0; 4386 if (MaskVal >= InVecNumElts) { 4387 RootElt = MaskVal - InVecNumElts; 4388 SourceOp = Op1; 4389 } 4390 4391 // If the source operand is a shuffle itself, look through it to find the 4392 // matching root vector. 4393 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) { 4394 return foldIdentityShuffles( 4395 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1), 4396 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse); 4397 } 4398 4399 // TODO: Look through bitcasts? What if the bitcast changes the vector element 4400 // size? 4401 4402 // The source operand is not a shuffle. Initialize the root vector value for 4403 // this shuffle if that has not been done yet. 
4404 if (!RootVec) 4405 RootVec = SourceOp; 4406 4407 // Give up as soon as a source operand does not match the existing root value. 4408 if (RootVec != SourceOp) 4409 return nullptr; 4410 4411 // The element must be coming from the same lane in the source vector 4412 // (although it may have crossed lanes in intermediate shuffles). 4413 if (RootElt != DestElt) 4414 return nullptr; 4415 4416 return RootVec; 4417 } 4418 4419 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4420 Type *RetTy, const SimplifyQuery &Q, 4421 unsigned MaxRecurse) { 4422 if (isa<UndefValue>(Mask)) 4423 return UndefValue::get(RetTy); 4424 4425 Type *InVecTy = Op0->getType(); 4426 unsigned MaskNumElts = Mask->getType()->getVectorNumElements(); 4427 unsigned InVecNumElts = InVecTy->getVectorNumElements(); 4428 4429 SmallVector<int, 32> Indices; 4430 ShuffleVectorInst::getShuffleMask(Mask, Indices); 4431 assert(MaskNumElts == Indices.size() && 4432 "Size of Indices not same as number of mask elements?"); 4433 4434 // Canonicalization: If mask does not select elements from an input vector, 4435 // replace that input vector with undef. 4436 bool MaskSelects0 = false, MaskSelects1 = false; 4437 for (unsigned i = 0; i != MaskNumElts; ++i) { 4438 if (Indices[i] == -1) 4439 continue; 4440 if ((unsigned)Indices[i] < InVecNumElts) 4441 MaskSelects0 = true; 4442 else 4443 MaskSelects1 = true; 4444 } 4445 if (!MaskSelects0) 4446 Op0 = UndefValue::get(InVecTy); 4447 if (!MaskSelects1) 4448 Op1 = UndefValue::get(InVecTy); 4449 4450 auto *Op0Const = dyn_cast<Constant>(Op0); 4451 auto *Op1Const = dyn_cast<Constant>(Op1); 4452 4453 // If all operands are constant, constant fold the shuffle. 4454 if (Op0Const && Op1Const) 4455 return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask); 4456 4457 // Canonicalization: if only one input vector is constant, it shall be the 4458 // second one. 4459 if (Op0Const && !Op1Const) { 4460 std::swap(Op0, Op1); 4461 ShuffleVectorInst::commuteShuffleMask(Indices, InVecNumElts); 4462 } 4463 4464 // A splat of an inserted scalar constant becomes a vector constant: 4465 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...> 4466 // NOTE: We may have commuted above, so analyze the updated Indices, not the 4467 // original mask constant. 4468 Constant *C; 4469 ConstantInt *IndexC; 4470 if (match(Op0, m_InsertElement(m_Value(), m_Constant(C), 4471 m_ConstantInt(IndexC)))) { 4472 // Match a splat shuffle mask of the insert index allowing undef elements. 4473 int InsertIndex = IndexC->getZExtValue(); 4474 if (all_of(Indices, [InsertIndex](int MaskElt) { 4475 return MaskElt == InsertIndex || MaskElt == -1; 4476 })) { 4477 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat"); 4478 4479 // Shuffle mask undefs become undefined constant result elements. 4480 SmallVector<Constant *, 16> VecC(MaskNumElts, C); 4481 for (unsigned i = 0; i != MaskNumElts; ++i) 4482 if (Indices[i] == -1) 4483 VecC[i] = UndefValue::get(C->getType()); 4484 return ConstantVector::get(VecC); 4485 } 4486 } 4487 4488 // A shuffle of a splat is always the splat itself. Legal if the shuffle's 4489 // value type is same as the input vectors' type. 4490 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0)) 4491 if (isa<UndefValue>(Op1) && RetTy == InVecTy && 4492 OpShuf->getMask()->getSplatValue()) 4493 return Op0; 4494 4495 // Don't fold a shuffle with undef mask elements. This may get folded in a 4496 // better way using demanded bits or other analysis. 
4497 // TODO: Should we allow this? 4498 if (find(Indices, -1) != Indices.end()) 4499 return nullptr; 4500 4501 // Check if every element of this shuffle can be mapped back to the 4502 // corresponding element of a single root vector. If so, we don't need this 4503 // shuffle. This handles simple identity shuffles as well as chains of 4504 // shuffles that may widen/narrow and/or move elements across lanes and back. 4505 Value *RootVec = nullptr; 4506 for (unsigned i = 0; i != MaskNumElts; ++i) { 4507 // Note that recursion is limited for each vector element, so if any element 4508 // exceeds the limit, this will fail to simplify. 4509 RootVec = 4510 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse); 4511 4512 // We can't replace a widening/narrowing shuffle with one of its operands. 4513 if (!RootVec || RootVec->getType() != RetTy) 4514 return nullptr; 4515 } 4516 return RootVec; 4517 } 4518 4519 /// Given operands for a ShuffleVectorInst, fold the result or return null. 4520 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask, 4521 Type *RetTy, const SimplifyQuery &Q) { 4522 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit); 4523 } 4524 4525 static Constant *foldConstant(Instruction::UnaryOps Opcode, 4526 Value *&Op, const SimplifyQuery &Q) { 4527 if (auto *C = dyn_cast<Constant>(Op)) 4528 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL); 4529 return nullptr; 4530 } 4531 4532 /// Given the operand for an FNeg, see if we can fold the result. If not, this 4533 /// returns null. 4534 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF, 4535 const SimplifyQuery &Q, unsigned MaxRecurse) { 4536 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q)) 4537 return C; 4538 4539 Value *X; 4540 // fneg (fneg X) ==> X 4541 if (match(Op, m_FNeg(m_Value(X)))) 4542 return X; 4543 4544 return nullptr; 4545 } 4546 4547 Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF, 4548 const SimplifyQuery &Q) { 4549 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit); 4550 } 4551 4552 static Constant *propagateNaN(Constant *In) { 4553 // If the input is a vector with undef elements, just return a default NaN. 4554 if (!In->isNaN()) 4555 return ConstantFP::getNaN(In->getType()); 4556 4557 // Propagate the existing NaN constant when possible. 4558 // TODO: Should we quiet a signaling NaN? 4559 return In; 4560 } 4561 4562 /// Perform folds that are common to any floating-point operation. This implies 4563 /// transforms based on undef/NaN because the operation itself makes no 4564 /// difference to the result. 4565 static Constant *simplifyFPOp(ArrayRef<Value *> Ops) { 4566 if (any_of(Ops, [](Value *V) { return isa<UndefValue>(V); })) 4567 return ConstantFP::getNaN(Ops[0]->getType()); 4568 4569 for (Value *V : Ops) 4570 if (match(V, m_NaN())) 4571 return propagateNaN(cast<Constant>(V)); 4572 4573 return nullptr; 4574 } 4575 4576 /// Given operands for an FAdd, see if we can fold the result. If not, this 4577 /// returns null. 
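/// For example:
///   fadd X, -0.0          --> X
///   fadd nnan X, (fneg X) --> 0.0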
4578 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4579 const SimplifyQuery &Q, unsigned MaxRecurse) { 4580 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q)) 4581 return C; 4582 4583 if (Constant *C = simplifyFPOp({Op0, Op1})) 4584 return C; 4585 4586 // fadd X, -0 ==> X 4587 if (match(Op1, m_NegZeroFP())) 4588 return Op0; 4589 4590 // fadd X, 0 ==> X, when we know X is not -0 4591 if (match(Op1, m_PosZeroFP()) && 4592 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI))) 4593 return Op0; 4594 4595 // With nnan: -X + X --> 0.0 (and commuted variant) 4596 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN. 4597 // Negative zeros are allowed because we always end up with positive zero: 4598 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 4599 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0 4600 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0 4601 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0 4602 if (FMF.noNaNs()) { 4603 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) || 4604 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0)))) 4605 return ConstantFP::getNullValue(Op0->getType()); 4606 4607 if (match(Op0, m_FNeg(m_Specific(Op1))) || 4608 match(Op1, m_FNeg(m_Specific(Op0)))) 4609 return ConstantFP::getNullValue(Op0->getType()); 4610 } 4611 4612 // (X - Y) + Y --> X 4613 // Y + (X - Y) --> X 4614 Value *X; 4615 if (FMF.noSignedZeros() && FMF.allowReassoc() && 4616 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) || 4617 match(Op1, m_FSub(m_Value(X), m_Specific(Op0))))) 4618 return X; 4619 4620 return nullptr; 4621 } 4622 4623 /// Given operands for an FSub, see if we can fold the result. If not, this 4624 /// returns null. 4625 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4626 const SimplifyQuery &Q, unsigned MaxRecurse) { 4627 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q)) 4628 return C; 4629 4630 if (Constant *C = simplifyFPOp({Op0, Op1})) 4631 return C; 4632 4633 // fsub X, +0 ==> X 4634 if (match(Op1, m_PosZeroFP())) 4635 return Op0; 4636 4637 // fsub X, -0 ==> X, when we know X is not -0 4638 if (match(Op1, m_NegZeroFP()) && 4639 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI))) 4640 return Op0; 4641 4642 // fsub -0.0, (fsub -0.0, X) ==> X 4643 // fsub -0.0, (fneg X) ==> X 4644 Value *X; 4645 if (match(Op0, m_NegZeroFP()) && 4646 match(Op1, m_FNeg(m_Value(X)))) 4647 return X; 4648 4649 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored. 4650 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored. 
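  // (The nsz flag is needed because the fold is not exact for X == -0.0:
  //  fsub 0.0, (fneg -0.0) evaluates to +0.0, but the fold returns -0.0.)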
4651 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) && 4652 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) || 4653 match(Op1, m_FNeg(m_Value(X))))) 4654 return X; 4655 4656 // fsub nnan x, x ==> 0.0 4657 if (FMF.noNaNs() && Op0 == Op1) 4658 return Constant::getNullValue(Op0->getType()); 4659 4660 // Y - (Y - X) --> X 4661 // (X + Y) - Y --> X 4662 if (FMF.noSignedZeros() && FMF.allowReassoc() && 4663 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) || 4664 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X))))) 4665 return X; 4666 4667 return nullptr; 4668 } 4669 4670 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, 4671 const SimplifyQuery &Q, unsigned MaxRecurse) { 4672 if (Constant *C = simplifyFPOp({Op0, Op1})) 4673 return C; 4674 4675 // fmul X, 1.0 ==> X 4676 if (match(Op1, m_FPOne())) 4677 return Op0; 4678 4679 // fmul 1.0, X ==> X 4680 if (match(Op0, m_FPOne())) 4681 return Op1; 4682 4683 // fmul nnan nsz X, 0 ==> 0 4684 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP())) 4685 return ConstantFP::getNullValue(Op0->getType()); 4686 4687 // fmul nnan nsz 0, X ==> 0 4688 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP())) 4689 return ConstantFP::getNullValue(Op1->getType()); 4690 4691 // sqrt(X) * sqrt(X) --> X, if we can: 4692 // 1. Remove the intermediate rounding (reassociate). 4693 // 2. Ignore non-zero negative numbers because sqrt would produce NAN. 4694 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0. 4695 Value *X; 4696 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && 4697 FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros()) 4698 return X; 4699 4700 return nullptr; 4701 } 4702 4703 /// Given the operands for an FMul, see if we can fold the result 4704 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4705 const SimplifyQuery &Q, unsigned MaxRecurse) { 4706 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q)) 4707 return C; 4708 4709 // Now apply simplifications that do not require rounding. 4710 return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse); 4711 } 4712 4713 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4714 const SimplifyQuery &Q) { 4715 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit); 4716 } 4717 4718 4719 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4720 const SimplifyQuery &Q) { 4721 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit); 4722 } 4723 4724 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4725 const SimplifyQuery &Q) { 4726 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit); 4727 } 4728 4729 Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF, 4730 const SimplifyQuery &Q) { 4731 return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit); 4732 } 4733 4734 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4735 const SimplifyQuery &Q, unsigned) { 4736 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q)) 4737 return C; 4738 4739 if (Constant *C = simplifyFPOp({Op0, Op1})) 4740 return C; 4741 4742 // X / 1.0 -> X 4743 if (match(Op1, m_FPOne())) 4744 return Op0; 4745 4746 // 0 / X -> 0 4747 // Requires that NaNs are off (X could be zero) and signed zeroes are 4748 // ignored (X could be positive or negative, so the output sign is unknown). 
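  // (e.g. 0.0 / 0.0 is NaN rather than 0.0, and 0.0 / -1.0 is -0.0, so both
  //  nnan and nsz are required for this fold.)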
4749 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP())) 4750 return ConstantFP::getNullValue(Op0->getType()); 4751 4752 if (FMF.noNaNs()) { 4753 // X / X -> 1.0 is legal when NaNs are ignored. 4754 // We can ignore infinities because INF/INF is NaN. 4755 if (Op0 == Op1) 4756 return ConstantFP::get(Op0->getType(), 1.0); 4757 4758 // (X * Y) / Y --> X if we can reassociate to the above form. 4759 Value *X; 4760 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1)))) 4761 return X; 4762 4763 // -X / X -> -1.0 and 4764 // X / -X -> -1.0 are legal when NaNs are ignored. 4765 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored. 4766 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) || 4767 match(Op1, m_FNegNSZ(m_Specific(Op0)))) 4768 return ConstantFP::get(Op0->getType(), -1.0); 4769 } 4770 4771 return nullptr; 4772 } 4773 4774 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4775 const SimplifyQuery &Q) { 4776 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit); 4777 } 4778 4779 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4780 const SimplifyQuery &Q, unsigned) { 4781 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q)) 4782 return C; 4783 4784 if (Constant *C = simplifyFPOp({Op0, Op1})) 4785 return C; 4786 4787 // Unlike fdiv, the result of frem always matches the sign of the dividend. 4788 // The constant match may include undef elements in a vector, so return a full 4789 // zero constant as the result. 4790 if (FMF.noNaNs()) { 4791 // +0 % X -> 0 4792 if (match(Op0, m_PosZeroFP())) 4793 return ConstantFP::getNullValue(Op0->getType()); 4794 // -0 % X -> -0 4795 if (match(Op0, m_NegZeroFP())) 4796 return ConstantFP::getNegativeZero(Op0->getType()); 4797 } 4798 4799 return nullptr; 4800 } 4801 4802 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF, 4803 const SimplifyQuery &Q) { 4804 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit); 4805 } 4806 4807 //=== Helper functions for higher up the class hierarchy. 4808 4809 /// Given the operand for a UnaryOperator, see if we can fold the result. 4810 /// If not, this returns null. 4811 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q, 4812 unsigned MaxRecurse) { 4813 switch (Opcode) { 4814 case Instruction::FNeg: 4815 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse); 4816 default: 4817 llvm_unreachable("Unexpected opcode"); 4818 } 4819 } 4820 4821 /// Given the operand for a UnaryOperator, see if we can fold the result. 4822 /// If not, this returns null. 4823 /// Try to use FastMathFlags when folding the result. 4824 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op, 4825 const FastMathFlags &FMF, 4826 const SimplifyQuery &Q, unsigned MaxRecurse) { 4827 switch (Opcode) { 4828 case Instruction::FNeg: 4829 return simplifyFNegInst(Op, FMF, Q, MaxRecurse); 4830 default: 4831 return simplifyUnOp(Opcode, Op, Q, MaxRecurse); 4832 } 4833 } 4834 4835 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) { 4836 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit); 4837 } 4838 4839 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF, 4840 const SimplifyQuery &Q) { 4841 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit); 4842 } 4843 4844 /// Given operands for a BinaryOperator, see if we can fold the result. 4845 /// If not, this returns null. 
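/// This dispatches on the opcode to the opcode-specific simplifiers above;
/// floating-point opcodes are simplified with default (empty) fast-math flags.
/// Prefer the FastMathFlags overload below when flags are available.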
4846 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4847 const SimplifyQuery &Q, unsigned MaxRecurse) { 4848 switch (Opcode) { 4849 case Instruction::Add: 4850 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse); 4851 case Instruction::Sub: 4852 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse); 4853 case Instruction::Mul: 4854 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse); 4855 case Instruction::SDiv: 4856 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse); 4857 case Instruction::UDiv: 4858 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse); 4859 case Instruction::SRem: 4860 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse); 4861 case Instruction::URem: 4862 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse); 4863 case Instruction::Shl: 4864 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse); 4865 case Instruction::LShr: 4866 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse); 4867 case Instruction::AShr: 4868 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse); 4869 case Instruction::And: 4870 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse); 4871 case Instruction::Or: 4872 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse); 4873 case Instruction::Xor: 4874 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse); 4875 case Instruction::FAdd: 4876 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4877 case Instruction::FSub: 4878 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4879 case Instruction::FMul: 4880 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4881 case Instruction::FDiv: 4882 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4883 case Instruction::FRem: 4884 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4885 default: 4886 llvm_unreachable("Unexpected opcode"); 4887 } 4888 } 4889 4890 /// Given operands for a BinaryOperator, see if we can fold the result. 4891 /// If not, this returns null. 4892 /// Try to use FastMathFlags when folding the result. 4893 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4894 const FastMathFlags &FMF, const SimplifyQuery &Q, 4895 unsigned MaxRecurse) { 4896 switch (Opcode) { 4897 case Instruction::FAdd: 4898 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse); 4899 case Instruction::FSub: 4900 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse); 4901 case Instruction::FMul: 4902 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse); 4903 case Instruction::FDiv: 4904 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse); 4905 default: 4906 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse); 4907 } 4908 } 4909 4910 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4911 const SimplifyQuery &Q) { 4912 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit); 4913 } 4914 4915 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, 4916 FastMathFlags FMF, const SimplifyQuery &Q) { 4917 return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit); 4918 } 4919 4920 /// Given operands for a CmpInst, see if we can fold the result. 
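/// Integer predicates are dispatched to SimplifyICmpInst; floating-point
/// predicates go to SimplifyFCmpInst with empty fast-math flags.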
4921 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4922 const SimplifyQuery &Q, unsigned MaxRecurse) { 4923 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate)) 4924 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse); 4925 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse); 4926 } 4927 4928 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, 4929 const SimplifyQuery &Q) { 4930 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit); 4931 } 4932 4933 static bool IsIdempotent(Intrinsic::ID ID) { 4934 switch (ID) { 4935 default: return false; 4936 4937 // Unary idempotent: f(f(x)) = f(x) 4938 case Intrinsic::fabs: 4939 case Intrinsic::floor: 4940 case Intrinsic::ceil: 4941 case Intrinsic::trunc: 4942 case Intrinsic::rint: 4943 case Intrinsic::nearbyint: 4944 case Intrinsic::round: 4945 case Intrinsic::canonicalize: 4946 return true; 4947 } 4948 } 4949 4950 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset, 4951 const DataLayout &DL) { 4952 GlobalValue *PtrSym; 4953 APInt PtrOffset; 4954 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL)) 4955 return nullptr; 4956 4957 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext()); 4958 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext()); 4959 Type *Int32PtrTy = Int32Ty->getPointerTo(); 4960 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext()); 4961 4962 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset); 4963 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64) 4964 return nullptr; 4965 4966 uint64_t OffsetInt = OffsetConstInt->getSExtValue(); 4967 if (OffsetInt % 4 != 0) 4968 return nullptr; 4969 4970 Constant *C = ConstantExpr::getGetElementPtr( 4971 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy), 4972 ConstantInt::get(Int64Ty, OffsetInt / 4)); 4973 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL); 4974 if (!Loaded) 4975 return nullptr; 4976 4977 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded); 4978 if (!LoadedCE) 4979 return nullptr; 4980 4981 if (LoadedCE->getOpcode() == Instruction::Trunc) { 4982 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 4983 if (!LoadedCE) 4984 return nullptr; 4985 } 4986 4987 if (LoadedCE->getOpcode() != Instruction::Sub) 4988 return nullptr; 4989 4990 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0)); 4991 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt) 4992 return nullptr; 4993 auto *LoadedLHSPtr = LoadedLHS->getOperand(0); 4994 4995 Constant *LoadedRHS = LoadedCE->getOperand(1); 4996 GlobalValue *LoadedRHSSym; 4997 APInt LoadedRHSOffset; 4998 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset, 4999 DL) || 5000 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset) 5001 return nullptr; 5002 5003 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy); 5004 } 5005 5006 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0, 5007 const SimplifyQuery &Q) { 5008 // Idempotent functions return the same result when called repeatedly. 
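  // e.g. fabs (fabs X) --> fabs X, so the inner call can be returned directly.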
5009 Intrinsic::ID IID = F->getIntrinsicID(); 5010 if (IsIdempotent(IID)) 5011 if (auto *II = dyn_cast<IntrinsicInst>(Op0)) 5012 if (II->getIntrinsicID() == IID) 5013 return II; 5014 5015 Value *X; 5016 switch (IID) { 5017 case Intrinsic::fabs: 5018 if (SignBitMustBeZero(Op0, Q.TLI)) return Op0; 5019 break; 5020 case Intrinsic::bswap: 5021 // bswap(bswap(x)) -> x 5022 if (match(Op0, m_BSwap(m_Value(X)))) return X; 5023 break; 5024 case Intrinsic::bitreverse: 5025 // bitreverse(bitreverse(x)) -> x 5026 if (match(Op0, m_BitReverse(m_Value(X)))) return X; 5027 break; 5028 case Intrinsic::exp: 5029 // exp(log(x)) -> x 5030 if (Q.CxtI->hasAllowReassoc() && 5031 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X)))) return X; 5032 break; 5033 case Intrinsic::exp2: 5034 // exp2(log2(x)) -> x 5035 if (Q.CxtI->hasAllowReassoc() && 5036 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X)))) return X; 5037 break; 5038 case Intrinsic::log: 5039 // log(exp(x)) -> x 5040 if (Q.CxtI->hasAllowReassoc() && 5041 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X)))) return X; 5042 break; 5043 case Intrinsic::log2: 5044 // log2(exp2(x)) -> x 5045 if (Q.CxtI->hasAllowReassoc() && 5046 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) || 5047 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), 5048 m_Value(X))))) return X; 5049 break; 5050 case Intrinsic::log10: 5051 // log10(pow(10.0, x)) -> x 5052 if (Q.CxtI->hasAllowReassoc() && 5053 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), 5054 m_Value(X)))) return X; 5055 break; 5056 case Intrinsic::floor: 5057 case Intrinsic::trunc: 5058 case Intrinsic::ceil: 5059 case Intrinsic::round: 5060 case Intrinsic::nearbyint: 5061 case Intrinsic::rint: { 5062 // floor (sitofp x) -> sitofp x 5063 // floor (uitofp x) -> uitofp x 5064 // 5065 // Converting from int always results in a finite integral number or 5066 // infinity. For either of those inputs, these rounding functions always 5067 // return the same value, so the rounding can be eliminated. 
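    // (Infinity can occur when the integer value exceeds the range of the FP
    // type; rounding an infinity still yields the same infinity.)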
5068 if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value()))) 5069 return Op0; 5070 break; 5071 } 5072 default: 5073 break; 5074 } 5075 5076 return nullptr; 5077 } 5078 5079 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1, 5080 const SimplifyQuery &Q) { 5081 Intrinsic::ID IID = F->getIntrinsicID(); 5082 Type *ReturnType = F->getReturnType(); 5083 switch (IID) { 5084 case Intrinsic::usub_with_overflow: 5085 case Intrinsic::ssub_with_overflow: 5086 // X - X -> { 0, false } 5087 if (Op0 == Op1) 5088 return Constant::getNullValue(ReturnType); 5089 LLVM_FALLTHROUGH; 5090 case Intrinsic::uadd_with_overflow: 5091 case Intrinsic::sadd_with_overflow: 5092 // X - undef -> { undef, false } 5093 // undef - X -> { undef, false } 5094 // X + undef -> { undef, false } 5095 // undef + x -> { undef, false } 5096 if (isa<UndefValue>(Op0) || isa<UndefValue>(Op1)) { 5097 return ConstantStruct::get( 5098 cast<StructType>(ReturnType), 5099 {UndefValue::get(ReturnType->getStructElementType(0)), 5100 Constant::getNullValue(ReturnType->getStructElementType(1))}); 5101 } 5102 break; 5103 case Intrinsic::umul_with_overflow: 5104 case Intrinsic::smul_with_overflow: 5105 // 0 * X -> { 0, false } 5106 // X * 0 -> { 0, false } 5107 if (match(Op0, m_Zero()) || match(Op1, m_Zero())) 5108 return Constant::getNullValue(ReturnType); 5109 // undef * X -> { 0, false } 5110 // X * undef -> { 0, false } 5111 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 5112 return Constant::getNullValue(ReturnType); 5113 break; 5114 case Intrinsic::uadd_sat: 5115 // sat(MAX + X) -> MAX 5116 // sat(X + MAX) -> MAX 5117 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes())) 5118 return Constant::getAllOnesValue(ReturnType); 5119 LLVM_FALLTHROUGH; 5120 case Intrinsic::sadd_sat: 5121 // sat(X + undef) -> -1 5122 // sat(undef + X) -> -1 5123 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1). 5124 // For signed: Assume undef is ~X, in which case X + ~X = -1. 
5125 if (match(Op0, m_Undef()) || match(Op1, m_Undef())) 5126 return Constant::getAllOnesValue(ReturnType); 5127 5128 // X + 0 -> X 5129 if (match(Op1, m_Zero())) 5130 return Op0; 5131 // 0 + X -> X 5132 if (match(Op0, m_Zero())) 5133 return Op1; 5134 break; 5135 case Intrinsic::usub_sat: 5136 // sat(0 - X) -> 0, sat(X - MAX) -> 0 5137 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes())) 5138 return Constant::getNullValue(ReturnType); 5139 LLVM_FALLTHROUGH; 5140 case Intrinsic::ssub_sat: 5141 // X - X -> 0, X - undef -> 0, undef - X -> 0 5142 if (Op0 == Op1 || match(Op0, m_Undef()) || match(Op1, m_Undef())) 5143 return Constant::getNullValue(ReturnType); 5144 // X - 0 -> X 5145 if (match(Op1, m_Zero())) 5146 return Op0; 5147 break; 5148 case Intrinsic::load_relative: 5149 if (auto *C0 = dyn_cast<Constant>(Op0)) 5150 if (auto *C1 = dyn_cast<Constant>(Op1)) 5151 return SimplifyRelativeLoad(C0, C1, Q.DL); 5152 break; 5153 case Intrinsic::powi: 5154 if (auto *Power = dyn_cast<ConstantInt>(Op1)) { 5155 // powi(x, 0) -> 1.0 5156 if (Power->isZero()) 5157 return ConstantFP::get(Op0->getType(), 1.0); 5158 // powi(x, 1) -> x 5159 if (Power->isOne()) 5160 return Op0; 5161 } 5162 break; 5163 case Intrinsic::copysign: 5164 // copysign X, X --> X 5165 if (Op0 == Op1) 5166 return Op0; 5167 // copysign -X, X --> X 5168 // copysign X, -X --> -X 5169 if (match(Op0, m_FNeg(m_Specific(Op1))) || 5170 match(Op1, m_FNeg(m_Specific(Op0)))) 5171 return Op1; 5172 break; 5173 case Intrinsic::maxnum: 5174 case Intrinsic::minnum: 5175 case Intrinsic::maximum: 5176 case Intrinsic::minimum: { 5177 // If the arguments are the same, this is a no-op. 5178 if (Op0 == Op1) return Op0; 5179 5180 // If one argument is undef, return the other argument. 5181 if (match(Op0, m_Undef())) 5182 return Op1; 5183 if (match(Op1, m_Undef())) 5184 return Op0; 5185 5186 // If one argument is NaN, return other or NaN appropriately. 5187 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum; 5188 if (match(Op0, m_NaN())) 5189 return PropagateNaN ? Op0 : Op1; 5190 if (match(Op1, m_NaN())) 5191 return PropagateNaN ? Op1 : Op0; 5192 5193 // Min/max of the same operation with common operand: 5194 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants) 5195 if (auto *M0 = dyn_cast<IntrinsicInst>(Op0)) 5196 if (M0->getIntrinsicID() == IID && 5197 (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1)) 5198 return Op0; 5199 if (auto *M1 = dyn_cast<IntrinsicInst>(Op1)) 5200 if (M1->getIntrinsicID() == IID && 5201 (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0)) 5202 return Op1; 5203 5204 // min(X, -Inf) --> -Inf (and commuted variant) 5205 // max(X, +Inf) --> +Inf (and commuted variant) 5206 bool UseNegInf = IID == Intrinsic::minnum || IID == Intrinsic::minimum; 5207 const APFloat *C; 5208 if ((match(Op0, m_APFloat(C)) && C->isInfinity() && 5209 C->isNegative() == UseNegInf) || 5210 (match(Op1, m_APFloat(C)) && C->isInfinity() && 5211 C->isNegative() == UseNegInf)) 5212 return ConstantFP::getInfinity(ReturnType, UseNegInf); 5213 5214 // TODO: minnum(nnan x, inf) -> x 5215 // TODO: minnum(nnan ninf x, flt_max) -> x 5216 // TODO: maxnum(nnan x, -inf) -> x 5217 // TODO: maxnum(nnan ninf x, -flt_max) -> x 5218 break; 5219 } 5220 default: 5221 break; 5222 } 5223 5224 return nullptr; 5225 } 5226 5227 static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) { 5228 5229 // Intrinsics with no operands have some kind of side effect. Don't simplify. 
5230 unsigned NumOperands = Call->getNumArgOperands(); 5231 if (!NumOperands) 5232 return nullptr; 5233 5234 Function *F = cast<Function>(Call->getCalledFunction()); 5235 Intrinsic::ID IID = F->getIntrinsicID(); 5236 if (NumOperands == 1) 5237 return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q); 5238 5239 if (NumOperands == 2) 5240 return simplifyBinaryIntrinsic(F, Call->getArgOperand(0), 5241 Call->getArgOperand(1), Q); 5242 5243 // Handle intrinsics with 3 or more arguments. 5244 switch (IID) { 5245 case Intrinsic::masked_load: 5246 case Intrinsic::masked_gather: { 5247 Value *MaskArg = Call->getArgOperand(2); 5248 Value *PassthruArg = Call->getArgOperand(3); 5249 // If the mask is all zeros or undef, the "passthru" argument is the result. 5250 if (maskIsAllZeroOrUndef(MaskArg)) 5251 return PassthruArg; 5252 return nullptr; 5253 } 5254 case Intrinsic::fshl: 5255 case Intrinsic::fshr: { 5256 Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1), 5257 *ShAmtArg = Call->getArgOperand(2); 5258 5259 // If both operands are undef, the result is undef. 5260 if (match(Op0, m_Undef()) && match(Op1, m_Undef())) 5261 return UndefValue::get(F->getReturnType()); 5262 5263 // If shift amount is undef, assume it is zero. 5264 if (match(ShAmtArg, m_Undef())) 5265 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1); 5266 5267 const APInt *ShAmtC; 5268 if (match(ShAmtArg, m_APInt(ShAmtC))) { 5269 // If there's effectively no shift, return the 1st arg or 2nd arg. 5270 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth()); 5271 if (ShAmtC->urem(BitWidth).isNullValue()) 5272 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1); 5273 } 5274 return nullptr; 5275 } 5276 case Intrinsic::fma: 5277 case Intrinsic::fmuladd: { 5278 Value *Op0 = Call->getArgOperand(0); 5279 Value *Op1 = Call->getArgOperand(1); 5280 Value *Op2 = Call->getArgOperand(2); 5281 if (Value *V = simplifyFPOp({ Op0, Op1, Op2 })) 5282 return V; 5283 return nullptr; 5284 } 5285 default: 5286 return nullptr; 5287 } 5288 } 5289 5290 Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) { 5291 Value *Callee = Call->getCalledValue(); 5292 5293 // call undef -> undef 5294 // call null -> undef 5295 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee)) 5296 return UndefValue::get(Call->getType()); 5297 5298 Function *F = dyn_cast<Function>(Callee); 5299 if (!F) 5300 return nullptr; 5301 5302 if (F->isIntrinsic()) 5303 if (Value *Ret = simplifyIntrinsic(Call, Q)) 5304 return Ret; 5305 5306 if (!canConstantFoldCallTo(Call, F)) 5307 return nullptr; 5308 5309 SmallVector<Constant *, 4> ConstantArgs; 5310 unsigned NumArgs = Call->getNumArgOperands(); 5311 ConstantArgs.reserve(NumArgs); 5312 for (auto &Arg : Call->args()) { 5313 Constant *C = dyn_cast<Constant>(&Arg); 5314 if (!C) 5315 return nullptr; 5316 ConstantArgs.push_back(C); 5317 } 5318 5319 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI); 5320 } 5321 5322 /// Given operands for a Freeze, see if we can fold the result. 5323 static Value *SimplifyFreezeInst(Value *Op0) { 5324 // Use a utility function defined in ValueTracking. 5325 if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0)) 5326 return Op0; 5327 // We have room for improvement. 5328 return nullptr; 5329 } 5330 5331 Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) { 5332 return ::SimplifyFreezeInst(Op0); 5333 } 5334 5335 /// See if we can compute a simplified version of this instruction. 5336 /// If not, this returns null. 
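/// For example, "add i32 %x, 0" folds to "%x" without creating any new
/// instructions.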
5337 5338 Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ, 5339 OptimizationRemarkEmitter *ORE) { 5340 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I); 5341 Value *Result; 5342 5343 switch (I->getOpcode()) { 5344 default: 5345 Result = ConstantFoldInstruction(I, Q.DL, Q.TLI); 5346 break; 5347 case Instruction::FNeg: 5348 Result = SimplifyFNegInst(I->getOperand(0), I->getFastMathFlags(), Q); 5349 break; 5350 case Instruction::FAdd: 5351 Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1), 5352 I->getFastMathFlags(), Q); 5353 break; 5354 case Instruction::Add: 5355 Result = 5356 SimplifyAddInst(I->getOperand(0), I->getOperand(1), 5357 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5358 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5359 break; 5360 case Instruction::FSub: 5361 Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1), 5362 I->getFastMathFlags(), Q); 5363 break; 5364 case Instruction::Sub: 5365 Result = 5366 SimplifySubInst(I->getOperand(0), I->getOperand(1), 5367 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5368 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5369 break; 5370 case Instruction::FMul: 5371 Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1), 5372 I->getFastMathFlags(), Q); 5373 break; 5374 case Instruction::Mul: 5375 Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q); 5376 break; 5377 case Instruction::SDiv: 5378 Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q); 5379 break; 5380 case Instruction::UDiv: 5381 Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q); 5382 break; 5383 case Instruction::FDiv: 5384 Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1), 5385 I->getFastMathFlags(), Q); 5386 break; 5387 case Instruction::SRem: 5388 Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q); 5389 break; 5390 case Instruction::URem: 5391 Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q); 5392 break; 5393 case Instruction::FRem: 5394 Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1), 5395 I->getFastMathFlags(), Q); 5396 break; 5397 case Instruction::Shl: 5398 Result = 5399 SimplifyShlInst(I->getOperand(0), I->getOperand(1), 5400 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)), 5401 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q); 5402 break; 5403 case Instruction::LShr: 5404 Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1), 5405 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q); 5406 break; 5407 case Instruction::AShr: 5408 Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1), 5409 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q); 5410 break; 5411 case Instruction::And: 5412 Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q); 5413 break; 5414 case Instruction::Or: 5415 Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q); 5416 break; 5417 case Instruction::Xor: 5418 Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q); 5419 break; 5420 case Instruction::ICmp: 5421 Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), 5422 I->getOperand(0), I->getOperand(1), Q); 5423 break; 5424 case Instruction::FCmp: 5425 Result = 5426 SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0), 5427 I->getOperand(1), I->getFastMathFlags(), Q); 5428 break; 5429 case Instruction::Select: 5430 Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1), 5431 I->getOperand(2), Q); 5432 break; 5433 case Instruction::GetElementPtr: { 5434 SmallVector<Value *, 
8> Ops(I->op_begin(), I->op_end()); 5435 Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(), 5436 Ops, Q); 5437 break; 5438 } 5439 case Instruction::InsertValue: { 5440 InsertValueInst *IV = cast<InsertValueInst>(I); 5441 Result = SimplifyInsertValueInst(IV->getAggregateOperand(), 5442 IV->getInsertedValueOperand(), 5443 IV->getIndices(), Q); 5444 break; 5445 } 5446 case Instruction::InsertElement: { 5447 auto *IE = cast<InsertElementInst>(I); 5448 Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1), 5449 IE->getOperand(2), Q); 5450 break; 5451 } 5452 case Instruction::ExtractValue: { 5453 auto *EVI = cast<ExtractValueInst>(I); 5454 Result = SimplifyExtractValueInst(EVI->getAggregateOperand(), 5455 EVI->getIndices(), Q); 5456 break; 5457 } 5458 case Instruction::ExtractElement: { 5459 auto *EEI = cast<ExtractElementInst>(I); 5460 Result = SimplifyExtractElementInst(EEI->getVectorOperand(), 5461 EEI->getIndexOperand(), Q); 5462 break; 5463 } 5464 case Instruction::ShuffleVector: { 5465 auto *SVI = cast<ShuffleVectorInst>(I); 5466 Result = SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1), 5467 SVI->getMask(), SVI->getType(), Q); 5468 break; 5469 } 5470 case Instruction::PHI: 5471 Result = SimplifyPHINode(cast<PHINode>(I), Q); 5472 break; 5473 case Instruction::Call: { 5474 Result = SimplifyCall(cast<CallInst>(I), Q); 5475 break; 5476 } 5477 case Instruction::Freeze: 5478 Result = SimplifyFreezeInst(I->getOperand(0), Q); 5479 break; 5480 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc: 5481 #include "llvm/IR/Instruction.def" 5482 #undef HANDLE_CAST_INST 5483 Result = 5484 SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q); 5485 break; 5486 case Instruction::Alloca: 5487 // No simplifications for Alloca and it can't be constant folded. 5488 Result = nullptr; 5489 break; 5490 } 5491 5492 // In general, it is possible for computeKnownBits to determine all bits in a 5493 // value even when the operands are not all constants. 5494 if (!Result && I->getType()->isIntOrIntVectorTy()) { 5495 KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE); 5496 if (Known.isConstant()) 5497 Result = ConstantInt::get(I->getType(), Known.getConstant()); 5498 } 5499 5500 /// If called on unreachable code, the above logic may report that the 5501 /// instruction simplified to itself. Make life easier for users by 5502 /// detecting that case here, returning a safe value instead. 5503 return Result == I ? UndefValue::get(I->getType()) : Result; 5504 } 5505 5506 /// Implementation of recursive simplification through an instruction's 5507 /// uses. 5508 /// 5509 /// This is the common implementation of the recursive simplification routines. 5510 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to 5511 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of 5512 /// instructions to process and attempt to simplify it using 5513 /// InstructionSimplify. Recursively visited users which could not be 5514 /// simplified themselves are to the optional UnsimplifiedUsers set for 5515 /// further processing by the caller. 5516 /// 5517 /// This routine returns 'true' only when *it* simplifies something. The passed 5518 /// in simplified value does not count toward this. 
5519 static bool replaceAndRecursivelySimplifyImpl( 5520 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, 5521 const DominatorTree *DT, AssumptionCache *AC, 5522 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) { 5523 bool Simplified = false; 5524 SmallSetVector<Instruction *, 8> Worklist; 5525 const DataLayout &DL = I->getModule()->getDataLayout(); 5526 5527 // If we have an explicit value to collapse to, do that round of the 5528 // simplification loop by hand initially. 5529 if (SimpleV) { 5530 for (User *U : I->users()) 5531 if (U != I) 5532 Worklist.insert(cast<Instruction>(U)); 5533 5534 // Replace the instruction with its simplified value. 5535 I->replaceAllUsesWith(SimpleV); 5536 5537 // Gracefully handle edge cases where the instruction is not wired into any 5538 // parent block. 5539 if (I->getParent() && !I->isEHPad() && !I->isTerminator() && 5540 !I->mayHaveSideEffects()) 5541 I->eraseFromParent(); 5542 } else { 5543 Worklist.insert(I); 5544 } 5545 5546 // Note that we must test the size on each iteration, the worklist can grow. 5547 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) { 5548 I = Worklist[Idx]; 5549 5550 // See if this instruction simplifies. 5551 SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC}); 5552 if (!SimpleV) { 5553 if (UnsimplifiedUsers) 5554 UnsimplifiedUsers->insert(I); 5555 continue; 5556 } 5557 5558 Simplified = true; 5559 5560 // Stash away all the uses of the old instruction so we can check them for 5561 // recursive simplifications after a RAUW. This is cheaper than checking all 5562 // uses of To on the recursive step in most cases. 5563 for (User *U : I->users()) 5564 Worklist.insert(cast<Instruction>(U)); 5565 5566 // Replace the instruction with its simplified value. 5567 I->replaceAllUsesWith(SimpleV); 5568 5569 // Gracefully handle edge cases where the instruction is not wired into any 5570 // parent block. 5571 if (I->getParent() && !I->isEHPad() && !I->isTerminator() && 5572 !I->mayHaveSideEffects()) 5573 I->eraseFromParent(); 5574 } 5575 return Simplified; 5576 } 5577 5578 bool llvm::recursivelySimplifyInstruction(Instruction *I, 5579 const TargetLibraryInfo *TLI, 5580 const DominatorTree *DT, 5581 AssumptionCache *AC) { 5582 return replaceAndRecursivelySimplifyImpl(I, nullptr, TLI, DT, AC, nullptr); 5583 } 5584 5585 bool llvm::replaceAndRecursivelySimplify( 5586 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, 5587 const DominatorTree *DT, AssumptionCache *AC, 5588 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) { 5589 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!"); 5590 assert(SimpleV && "Must provide a simplified value."); 5591 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC, 5592 UnsimplifiedUsers); 5593 } 5594 5595 namespace llvm { 5596 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) { 5597 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>(); 5598 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr; 5599 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>(); 5600 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr; 5601 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>(); 5602 auto *AC = ACWP ? 
&ACWP->getAssumptionCache(F) : nullptr; 5603 return {F.getParent()->getDataLayout(), TLI, DT, AC}; 5604 } 5605 5606 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR, 5607 const DataLayout &DL) { 5608 return {DL, &AR.TLI, &AR.DT, &AR.AC}; 5609 } 5610 5611 template <class T, class... TArgs> 5612 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM, 5613 Function &F) { 5614 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F); 5615 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F); 5616 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F); 5617 return {F.getParent()->getDataLayout(), TLI, DT, AC}; 5618 } 5619 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &, 5620 Function &); 5621 } 5622
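// Illustrative usage sketch (assumptions: the caller owns a SimplifyQuery SQ
// and an instruction I that it is allowed to erase; the names are
// hypothetical, not part of this file):
//
//   if (Value *V = SimplifyInstruction(&I, SQ.getWithInstruction(&I))) {
//     I.replaceAllUsesWith(V);
//     I.eraseFromParent();
//   }
//
// Callers that want the replacement to cascade through users can instead call
// replaceAndRecursivelySimplify(&I, V, SQ.TLI, SQ.DT, SQ.AC).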