//===- ValueTracking.cpp - Walk computations to compute properties --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains routines that help analyze properties that chains of
// computations have.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

// Controls the number of uses of the value searched for possible
// dominating comparisons.
static cl::opt<unsigned> DomConditionsMaxUses("dom-conditions-max-uses",
                                              cl::Hidden, cl::init(20));

// According to the LangRef, branching on a poison condition is absolutely
// immediate full UB. However, historically we haven't implemented that
// consistently as we had an important transformation (non-trivial unswitch)
// which introduced instances of branch on poison/undef to otherwise well
// defined programs.
// This issue has since been fixed, but the flag is temporarily retained to
// easily diagnose potential regressions.
static cl::opt<bool> BranchOnPoisonAsUB("branch-on-poison-as-ub",
                                        cl::Hidden, cl::init(true));

/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
  if (unsigned BitWidth = Ty->getScalarSizeInBits())
    return BitWidth;

  return DL.getPointerTypeSizeInBits(Ty);
}

namespace {

// Simplifying using an assume can only be done in a particular control-flow
// context (the context instruction provides that context). If an assume and
// the context instruction are not in the same block then the DT helps in
// figuring out if we can use it.
struct Query {
  const DataLayout &DL;
  AssumptionCache *AC;
  const Instruction *CxtI;
  const DominatorTree *DT;

  // Unlike the other analyses, this may be a nullptr because not all clients
  // provide it currently.
  OptimizationRemarkEmitter *ORE;

  /// If true, it is safe to use metadata during simplification.
  InstrInfoQuery IIQ;

  Query(const DataLayout &DL, AssumptionCache *AC, const Instruction *CxtI,
        const DominatorTree *DT, bool UseInstrInfo,
        OptimizationRemarkEmitter *ORE = nullptr)
      : DL(DL), AC(AC), CxtI(CxtI), DT(DT), ORE(ORE), IIQ(UseInstrInfo) {}
};

} // end anonymous namespace

// Given the provided Value and, potentially, a context instruction, return
// the preferred context instruction (if any).
static const Instruction *safeCxtI(const Value *V, const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static const Instruction *safeCxtI(const Value *V1, const Value *V2,
                                   const Instruction *CxtI) {
  // If we've been provided with a context instruction, then use that (provided
  // it has been inserted).
  if (CxtI && CxtI->getParent())
    return CxtI;

  // If the value is really an already-inserted instruction, then use that.
  CxtI = dyn_cast<Instruction>(V1);
  if (CxtI && CxtI->getParent())
    return CxtI;

  CxtI = dyn_cast<Instruction>(V2);
  if (CxtI && CxtI->getParent())
    return CxtI;

  return nullptr;
}

static bool getShuffleDemandedElts(const ShuffleVectorInst *Shuf,
                                   const APInt &DemandedElts,
                                   APInt &DemandedLHS, APInt &DemandedRHS) {
  // The length of scalable vectors is unknown at compile time, thus we
  // cannot check their values.
  if (isa<ScalableVectorType>(Shuf->getType()))
    return false;

  int NumElts =
      cast<FixedVectorType>(Shuf->getOperand(0)->getType())->getNumElements();
  int NumMaskElts = cast<FixedVectorType>(Shuf->getType())->getNumElements();
  DemandedLHS = DemandedRHS = APInt::getZero(NumElts);
  if (DemandedElts.isZero())
    return true;
  // Simple case of a shuffle with zeroinitializer.
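  // For example,
  //   shufflevector <4 x i32> %x, <4 x i32> %y, <4 x i32> zeroinitializer
  // copies element 0 of %x into every result element, so only bit 0 of
  // DemandedLHS needs to be set.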
  if (all_of(Shuf->getShuffleMask(), [](int Elt) { return Elt == 0; })) {
    DemandedLHS.setBit(0);
    return true;
  }
  for (int i = 0; i != NumMaskElts; ++i) {
    if (!DemandedElts[i])
      continue;
    int M = Shuf->getMaskValue(i);
    assert(M < (NumElts * 2) && "Invalid shuffle mask constant");

    // For undef elements, we don't know anything about the common state of
    // the shuffle result.
    if (M == -1)
      return false;
    if (M < NumElts)
      DemandedLHS.setBit(M % NumElts);
    else
      DemandedRHS.setBit(M % NumElts);
  }

  return true;
}

static void computeKnownBits(const Value *V, const APInt &DemandedElts,
                             KnownBits &Known, unsigned Depth, const Query &Q);

static void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
                             const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType())) {
    Known.resetAll();
    return;
  }

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  computeKnownBits(V, DemandedElts, Known, Depth, Q);
}

void llvm::computeKnownBits(const Value *V, KnownBits &Known,
                            const DataLayout &DL, unsigned Depth,
                            AssumptionCache *AC, const Instruction *CxtI,
                            const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

void llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                            KnownBits &Known, const DataLayout &DL,
                            unsigned Depth, AssumptionCache *AC,
                            const Instruction *CxtI, const DominatorTree *DT,
                            OptimizationRemarkEmitter *ORE, bool UseInstrInfo) {
  ::computeKnownBits(V, DemandedElts, Known, Depth,
                     Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

static KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
                                  unsigned Depth, const Query &Q);

static KnownBits computeKnownBits(const Value *V, unsigned Depth,
                                  const Query &Q);

KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                 unsigned Depth, AssumptionCache *AC,
                                 const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

KnownBits llvm::computeKnownBits(const Value *V, const APInt &DemandedElts,
                                 const DataLayout &DL, unsigned Depth,
                                 AssumptionCache *AC, const Instruction *CxtI,
                                 const DominatorTree *DT,
                                 OptimizationRemarkEmitter *ORE,
                                 bool UseInstrInfo) {
  return ::computeKnownBits(
      V, DemandedElts, Depth,
      Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo, ORE));
}

bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
                               const DataLayout &DL, AssumptionCache *AC,
                               const Instruction *CxtI, const DominatorTree *DT,
                               bool UseInstrInfo) {
  assert(LHS->getType() == RHS->getType() &&
         "LHS and RHS should have the same type");
  assert(LHS->getType()->isIntOrIntVectorTy() &&
         "LHS and RHS should be integers");
  // Look for an inverted mask: (X & ~M) op (Y & M).
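  // For example, in (X & ~M) + (Y & M) the left side can only have bits set
  // where M is 0 and the right side only where M is 1, so no bit position can
  // be set on both sides.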
  {
    Value *M;
    if (match(LHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(RHS, m_c_And(m_Specific(M), m_Value())))
      return true;
    if (match(RHS, m_c_And(m_Not(m_Value(M)), m_Value())) &&
        match(LHS, m_c_And(m_Specific(M), m_Value())))
      return true;
  }

  // X op (Y & ~X)
  if (match(RHS, m_c_And(m_Not(m_Specific(LHS)), m_Value())) ||
      match(LHS, m_c_And(m_Not(m_Specific(RHS)), m_Value())))
    return true;

  // X op ((X & Y) ^ Y) -- this is the canonical form of the previous pattern
  // for constant Y.
  Value *Y;
  if (match(RHS,
            m_c_Xor(m_c_And(m_Specific(LHS), m_Value(Y)), m_Deferred(Y))) ||
      match(LHS, m_c_Xor(m_c_And(m_Specific(RHS), m_Value(Y)), m_Deferred(Y))))
    return true;

  // Look for: (A & B) op ~(A | B)
  {
    Value *A, *B;
    if (match(LHS, m_And(m_Value(A), m_Value(B))) &&
        match(RHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
    if (match(RHS, m_And(m_Value(A), m_Value(B))) &&
        match(LHS, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
      return true;
  }
  IntegerType *IT = cast<IntegerType>(LHS->getType()->getScalarType());
  KnownBits LHSKnown(IT->getBitWidth());
  KnownBits RHSKnown(IT->getBitWidth());
  computeKnownBits(LHS, LHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  computeKnownBits(RHS, RHSKnown, DL, 0, AC, CxtI, DT, nullptr, UseInstrInfo);
  return KnownBits::haveNoCommonBitsSet(LHSKnown, RHSKnown);
}

bool llvm::isOnlyUsedInZeroEqualityComparison(const Instruction *I) {
  return !I->user_empty() && all_of(I->users(), [](const User *U) {
    ICmpInst::Predicate P;
    return match(U, m_ICmp(P, m_Value(), m_Zero())) && ICmpInst::isEquality(P);
  });
}

static bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth,
                                   const Query &Q);

bool llvm::isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL,
                                  bool OrZero, unsigned Depth,
                                  AssumptionCache *AC, const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownToBeAPowerOfTwo(
      V, OrZero, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static bool isKnownNonZero(const Value *V, const APInt &DemandedElts,
                           unsigned Depth, const Query &Q);

static bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q);

bool llvm::isKnownNonZero(const Value *V, const DataLayout &DL, unsigned Depth,
                          AssumptionCache *AC, const Instruction *CxtI,
                          const DominatorTree *DT, bool UseInstrInfo) {
  return ::isKnownNonZero(V, Depth,
                          Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

bool llvm::isKnownNonNegative(const Value *V, const DataLayout &DL,
                              unsigned Depth, AssumptionCache *AC,
                              const Instruction *CxtI, const DominatorTree *DT,
                              bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNonNegative();
}

bool llvm::isKnownPositive(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  if (auto *CI = dyn_cast<ConstantInt>(V))
    return CI->getValue().isStrictlyPositive();

  // TODO: We're doing two recursive queries here. We should factor this such
  // that only a single query is needed.
  return isKnownNonNegative(V, DL, Depth, AC, CxtI, DT, UseInstrInfo) &&
         isKnownNonZero(V, DL, Depth, AC, CxtI, DT, UseInstrInfo);
}

bool llvm::isKnownNegative(const Value *V, const DataLayout &DL, unsigned Depth,
                           AssumptionCache *AC, const Instruction *CxtI,
                           const DominatorTree *DT, bool UseInstrInfo) {
  KnownBits Known =
      computeKnownBits(V, DL, Depth, AC, CxtI, DT, nullptr, UseInstrInfo);
  return Known.isNegative();
}

static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
                            const Query &Q);

bool llvm::isKnownNonEqual(const Value *V1, const Value *V2,
                           const DataLayout &DL, AssumptionCache *AC,
                           const Instruction *CxtI, const DominatorTree *DT,
                           bool UseInstrInfo) {
  return ::isKnownNonEqual(V1, V2, 0,
                           Query(DL, AC, safeCxtI(V2, V1, CxtI), DT,
                                 UseInstrInfo, /*ORE=*/nullptr));
}

static bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
                              const Query &Q);

bool llvm::MaskedValueIsZero(const Value *V, const APInt &Mask,
                             const DataLayout &DL, unsigned Depth,
                             AssumptionCache *AC, const Instruction *CxtI,
                             const DominatorTree *DT, bool UseInstrInfo) {
  return ::MaskedValueIsZero(
      V, Mask, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts,
                                   unsigned Depth, const Query &Q);

static unsigned ComputeNumSignBits(const Value *V, unsigned Depth,
                                   const Query &Q) {
  // FIXME: We currently have no way to represent the DemandedElts of a
  // scalable vector.
  if (isa<ScalableVectorType>(V->getType()))
    return 1;

  auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
  APInt DemandedElts =
      FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
  return ComputeNumSignBits(V, DemandedElts, Depth, Q);
}

unsigned llvm::ComputeNumSignBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
                                  const DominatorTree *DT, bool UseInstrInfo) {
  return ::ComputeNumSignBits(
      V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT, UseInstrInfo));
}

unsigned llvm::ComputeMaxSignificantBits(const Value *V, const DataLayout &DL,
                                         unsigned Depth, AssumptionCache *AC,
                                         const Instruction *CxtI,
                                         const DominatorTree *DT) {
  unsigned SignBits = ComputeNumSignBits(V, DL, Depth, AC, CxtI, DT);
  return V->getType()->getScalarSizeInBits() - SignBits + 1;
}

static void computeKnownBitsAddSub(bool Add, const Value *Op0,
                                   const Value *Op1, bool NSW,
                                   const APInt &DemandedElts,
                                   KnownBits &KnownOut, KnownBits &Known2,
                                   unsigned Depth, const Query &Q) {
  computeKnownBits(Op1, DemandedElts, KnownOut, Depth + 1, Q);

  // If one operand is unknown and we have no nowrap information,
  // the result will be unknown independently of the second operand.
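  // For example, for a plain 'add' with no nsw flag, an unknown bit in one
  // operand makes the corresponding sum bit unknown, and the unknown carries
  // it generates make every higher bit unknown as well.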
  if (KnownOut.isUnknown() && !NSW)
    return;

  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);
  KnownOut = KnownBits::computeForAddSub(Add, NSW, Known2, KnownOut);
}

static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
                                const APInt &DemandedElts, KnownBits &Known,
                                KnownBits &Known2, unsigned Depth,
                                const Query &Q) {
  computeKnownBits(Op1, DemandedElts, Known, Depth + 1, Q);
  computeKnownBits(Op0, DemandedElts, Known2, Depth + 1, Q);

  bool isKnownNegative = false;
  bool isKnownNonNegative = false;
  // If the multiplication is known not to overflow, compute the sign bit.
  if (NSW) {
    if (Op0 == Op1) {
      // The product of a number with itself is non-negative.
      isKnownNonNegative = true;
    } else {
      bool isKnownNonNegativeOp1 = Known.isNonNegative();
      bool isKnownNonNegativeOp0 = Known2.isNonNegative();
      bool isKnownNegativeOp1 = Known.isNegative();
      bool isKnownNegativeOp0 = Known2.isNegative();
      // The product of two numbers with the same sign is non-negative.
      isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
                           (isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
      // The product of a negative number and a non-negative number is either
      // negative or zero.
      if (!isKnownNonNegative)
        isKnownNegative =
            (isKnownNegativeOp1 && isKnownNonNegativeOp0 &&
             Known2.isNonZero()) ||
            (isKnownNegativeOp0 && isKnownNonNegativeOp1 && Known.isNonZero());
    }
  }

  bool SelfMultiply = Op0 == Op1;
  // TODO: SelfMultiply can be poison, but not undef.
  if (SelfMultiply)
    SelfMultiply &=
        isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT, Depth + 1);
  Known = KnownBits::mul(Known, Known2, SelfMultiply);

  // Only make use of no-wrap flags if we failed to compute the sign bit
  // directly. This matters if the multiplication always overflows, in
  // which case we prefer to follow the result of the direct computation,
  // though as the program is invoking undefined behaviour we can choose
  // whatever we like here.
  if (isKnownNonNegative && !Known.isNegative())
    Known.makeNonNegative();
  else if (isKnownNegative && !Known.isNonNegative())
    Known.makeNegative();
}

void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
                                             KnownBits &Known) {
  unsigned BitWidth = Known.getBitWidth();
  unsigned NumRanges = Ranges.getNumOperands() / 2;
  assert(NumRanges >= 1);

  Known.Zero.setAllBits();
  Known.One.setAllBits();

  for (unsigned i = 0; i < NumRanges; ++i) {
    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(Ranges.getOperand(2 * i + 1));
    ConstantRange Range(Lower->getValue(), Upper->getValue());

    // The first CommonPrefixBits of all values in Range are equal.
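    // For example, for an i8 range [0x55, 0x59): 0x55 ^ 0x58 == 0x0D, which
    // has four leading zeros, so every value in the range shares the 4-bit
    // prefix 0101 and those top bits become known.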
    unsigned CommonPrefixBits =
        (Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
    APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
    APInt UnsignedMax = Range.getUnsignedMax().zextOrTrunc(BitWidth);
    Known.One &= UnsignedMax & Mask;
    Known.Zero &= ~UnsignedMax & Mask;
  }
}

static bool isEphemeralValueOf(const Instruction *I, const Value *E) {
  SmallVector<const Value *, 16> WorkSet(1, I);
  SmallPtrSet<const Value *, 32> Visited;
  SmallPtrSet<const Value *, 16> EphValues;

  // The instruction defining an assumption's condition itself is always
  // considered ephemeral to that assumption (even if it has other
  // non-ephemeral users). See r246696's test case for an example.
  if (is_contained(I->operands(), E))
    return true;

  while (!WorkSet.empty()) {
    const Value *V = WorkSet.pop_back_val();
    if (!Visited.insert(V).second)
      continue;

    // If all uses of this value are ephemeral, then so is this value.
    if (llvm::all_of(V->users(), [&](const User *U) {
          return EphValues.count(U);
        })) {
      if (V == E)
        return true;

      if (V == I || (isa<Instruction>(V) &&
                     !cast<Instruction>(V)->mayHaveSideEffects() &&
                     !cast<Instruction>(V)->isTerminator())) {
        EphValues.insert(V);
        if (const User *U = dyn_cast<User>(V))
          append_range(WorkSet, U->operands());
      }
    }
  }

  return false;
}

// Is this an intrinsic that cannot be speculated but also cannot trap?
bool llvm::isAssumeLikeIntrinsic(const Instruction *I) {
  if (const IntrinsicInst *CI = dyn_cast<IntrinsicInst>(I))
    return CI->isAssumeLikeIntrinsic();

  return false;
}

bool llvm::isValidAssumeForContext(const Instruction *Inv,
                                   const Instruction *CxtI,
                                   const DominatorTree *DT) {
  // There are two restrictions on the use of an assume:
  //  1. The assume must dominate the context (or the control flow must
  //     reach the assume whenever it reaches the context).
  //  2. The context must not be in the assume's set of ephemeral values
  //     (otherwise we will use the assume to prove that the condition
  //     feeding the assume is trivially true, thus causing the removal of
  //     the assume).

  if (Inv->getParent() == CxtI->getParent()) {
    // If Inv and CxtI are in the same block, check if the assume (Inv) is first
    // in the BB.
    if (Inv->comesBefore(CxtI))
      return true;

    // Don't let an assume affect itself - this would cause the problems
    // `isEphemeralValueOf` is trying to prevent, and it would also make
    // the loop below go out of bounds.
    if (Inv == CxtI)
      return false;

    // The context comes first, but they're both in the same block.
    // Make sure there is nothing in between that might interrupt
    // the control flow, not even CxtI itself.
    // We limit the scan distance between the assume and its context instruction
    // to avoid a compile-time explosion. This limit is chosen arbitrarily, so
    // it can be adjusted if needed (could be turned into a cl::opt).
    auto Range = make_range(CxtI->getIterator(), Inv->getIterator());
    if (!isGuaranteedToTransferExecutionToSuccessor(Range, 15))
      return false;

    return !isEphemeralValueOf(Inv, CxtI);
  }

  // Inv and CxtI are in different blocks.
  if (DT) {
    if (DT->dominates(Inv, CxtI))
      return true;
  } else if (Inv->getParent() == CxtI->getParent()->getSinglePredecessor()) {
    // We don't have a DT, but this trivially dominates.
    return true;
  }

  return false;
}

static bool cmpExcludesZero(CmpInst::Predicate Pred, const Value *RHS) {
  // v u> y implies v != 0.
  if (Pred == ICmpInst::ICMP_UGT)
    return true;

  // Special-case v != 0 to also handle v != null.
  if (Pred == ICmpInst::ICMP_NE)
    return match(RHS, m_Zero());

  // All other predicates - rely on generic ConstantRange handling.
  const APInt *C;
  if (!match(RHS, m_APInt(C)))
    return false;

  ConstantRange TrueValues = ConstantRange::makeExactICmpRegion(Pred, *C);
  return !TrueValues.contains(APInt::getZero(C->getBitWidth()));
}

static bool isKnownNonZeroFromAssume(const Value *V, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return false;

  if (Q.CxtI && V->getType()->isPointerTy()) {
    SmallVector<Attribute::AttrKind, 2> AttrKinds{Attribute::NonNull};
    if (!NullPointerIsDefined(Q.CxtI->getFunction(),
                              V->getType()->getPointerAddressSpace()))
      AttrKinds.push_back(Attribute::Dereferenceable);

    if (getKnowledgeValidInContext(V, AttrKinds, Q.CxtI, Q.DT, Q.AC))
      return true;
  }

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getFunction() == Q.CxtI->getFunction() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *RHS;
    CmpInst::Predicate Pred;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));
    if (!match(I->getArgOperand(0), m_c_ICmp(Pred, m_V, m_Value(RHS))))
      return false;

    if (cmpExcludesZero(Pred, RHS) && isValidAssumeForContext(I, Q.CxtI, Q.DT))
      return true;
  }

  return false;
}

static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
                                       unsigned Depth, const Query &Q) {
  // Use of assumptions is context-sensitive. If we don't have a context, we
  // cannot use them!
  if (!Q.AC || !Q.CxtI)
    return;

  unsigned BitWidth = Known.getBitWidth();

  // Refine Known set if the pointer alignment is set by assume bundles.
  if (V->getType()->isPointerTy()) {
    if (RetainedKnowledge RK = getKnowledgeValidInContext(
            V, {Attribute::Alignment}, Q.CxtI, Q.DT, Q.AC)) {
      if (isPowerOf2_64(RK.ArgValue))
        Known.Zero.setLowBits(Log2_64(RK.ArgValue));
    }
  }

  // Note that the patterns below need to be kept in sync with the code
  // in AssumptionCache::updateAffectedValues.

  for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
    if (!AssumeVH)
      continue;
    CallInst *I = cast<CallInst>(AssumeVH);
    assert(I->getParent()->getParent() == Q.CxtI->getParent()->getParent() &&
           "Got assumption for the wrong function!");

    // Warning: This loop can end up being somewhat performance sensitive.
    // We're running this loop once for each value queried, resulting in a
    // runtime of ~O(#assumes * #values).

    assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume &&
           "must be an assume intrinsic");

    Value *Arg = I->getArgOperand(0);

    if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllOnes();
      return;
    }
    if (match(Arg, m_Not(m_Specific(V))) &&
        isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
      assert(BitWidth == 1 && "assume operand is not i1?");
      Known.setAllZero();
      return;
    }

    // The remaining tests are all recursive, so bail out if we hit the limit.
    if (Depth == MaxAnalysisRecursionDepth)
      continue;

    ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg);
    if (!Cmp)
      continue;

    // We are attempting to compute known bits for the operands of an assume.
    // Do not try to use other assumptions for those recursive calls because
    // that can lead to mutual recursion and a compile-time explosion.
    // An example of the mutual recursion: computeKnownBits can call
    // isKnownNonZero which calls computeKnownBitsFromAssume (this function)
    // and so on.
    Query QueryNoAC = Q;
    QueryNoAC.AC = nullptr;

    // Note that ptrtoint may change the bitwidth.
    Value *A, *B;
    auto m_V = m_CombineOr(m_Specific(V), m_PtrToInt(m_Specific(V)));

    CmpInst::Predicate Pred;
    uint64_t C;
    switch (Cmp->getPredicate()) {
    default:
      break;
    case ICmpInst::ICMP_EQ:
      // assume(v = a)
      if (match(Cmp, m_c_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        Known.Zero |= RHSKnown.Zero;
        Known.One |= RHSKnown.One;
        // assume(v & b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_And(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & MaskKnown.One;
        Known.One |= RHSKnown.One & MaskKnown.One;
        // assume(~(v & b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_And(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits MaskKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in the mask that are known to be one, we can
        // propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & MaskKnown.One;
        Known.One |= RHSKnown.Zero & MaskKnown.One;
        // assume(v | b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Or(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V.
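        // For example, assume((v | b) == a) where the top two bits of b are
        // known zero and the top two bits of a are known to be 10: v must
        // also have 10 in its top two bits.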
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        // assume(~(v | b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Or(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        // assume(v ^ b = a)
      } else if (match(Cmp,
                       m_c_ICmp(Pred, m_c_Xor(m_V, m_Value(B)), m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // known bits from the RHS to V. For those bits in B that are known to
        // be one, we can propagate inverted known bits from the RHS to V.
        Known.Zero |= RHSKnown.Zero & BKnown.Zero;
        Known.One |= RHSKnown.One & BKnown.Zero;
        Known.Zero |= RHSKnown.One & BKnown.One;
        Known.One |= RHSKnown.Zero & BKnown.One;
        // assume(~(v ^ b) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_c_Xor(m_V, m_Value(B))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        KnownBits BKnown =
            computeKnownBits(B, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in B that are known to be zero, we can propagate
        // inverted known bits from the RHS to V. For those bits in B that are
        // known to be one, we can propagate known bits from the RHS to V.
        Known.Zero |= RHSKnown.One & BKnown.Zero;
        Known.One |= RHSKnown.Zero & BKnown.Zero;
        Known.Zero |= RHSKnown.Zero & BKnown.One;
        Known.One |= RHSKnown.One & BKnown.One;
        // assume(v << c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shl(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the right by C.
        RHSKnown.Zero.lshrInPlace(C);
        Known.Zero |= RHSKnown.Zero;
        RHSKnown.One.lshrInPlace(C);
        Known.One |= RHSKnown.One;
        // assume(~(v << c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shl(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the right by C.
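        // For example, assume(~(v << 1) == a) with bit 3 of a known one means
        // bit 3 of (v << 1) is zero, i.e. bit 2 of v is known zero.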
        RHSKnown.One.lshrInPlace(C);
        Known.Zero |= RHSKnown.One;
        RHSKnown.Zero.lshrInPlace(C);
        Known.One |= RHSKnown.Zero;
        // assume(v >> c = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Shr(m_V, m_ConstantInt(C)),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them to known
        // bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.Zero << C;
        Known.One |= RHSKnown.One << C;
        // assume(~(v >> c) = a)
      } else if (match(Cmp, m_c_ICmp(Pred, m_Not(m_Shr(m_V, m_ConstantInt(C))),
                                     m_Value(A))) &&
                 isValidAssumeForContext(I, Q.CxtI, Q.DT) && C < BitWidth) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);
        // For those bits in RHS that are known, we can propagate them inverted
        // to known bits in V shifted to the left by C.
        Known.Zero |= RHSKnown.One << C;
        Known.One |= RHSKnown.Zero << C;
      }
      break;
    case ICmpInst::ICMP_SGE:
      // assume(v >=_s c) where c is non-negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SGT:
      // assume(v >_s c) where c is at least -1.
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
          // We know that the sign bit is zero.
          Known.makeNonNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLE:
      // assume(v <=_s c) where c is negative
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_SLT:
      // assume(v <_s c) where c is non-positive
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        if (RHSKnown.isZero() || RHSKnown.isNegative()) {
          // We know that the sign bit is one.
          Known.makeNegative();
        }
      }
      break;
    case ICmpInst::ICMP_ULE:
      // assume(v <=_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // Whatever high bits in c are zero are known to be zero.
        Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    case ICmpInst::ICMP_ULT:
      // assume(v <_u c)
      if (match(Cmp, m_ICmp(Pred, m_V, m_Value(A))) &&
          isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
        KnownBits RHSKnown =
            computeKnownBits(A, Depth + 1, QueryNoAC).anyextOrTrunc(BitWidth);

        // If the RHS is known zero, then this assumption must be wrong (nothing
        // is unsigned less than zero). Signal a conflict and get out of here.
        if (RHSKnown.isZero()) {
          Known.Zero.setAllBits();
          Known.One.setAllBits();
          break;
        }

        // Whatever high bits in c are zero are known to be zero (if c is a
        // power of 2, then one more).
        if (isKnownToBeAPowerOfTwo(A, false, Depth + 1, QueryNoAC))
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros() + 1);
        else
          Known.Zero.setHighBits(RHSKnown.countMinLeadingZeros());
      }
      break;
    }
  }

  // If assumptions conflict with each other or previous known bits, then we
  // have a logical fallacy. It's possible that the assumption is not reachable,
  // so this isn't a real bug. On the other hand, the program may have undefined
  // behavior, or we might have a bug in the compiler. We can't assert/crash, so
  // clear out the known bits, try to warn the user, and hope for the best.
  if (Known.Zero.intersects(Known.One)) {
    Known.resetAll();

    if (Q.ORE)
      Q.ORE->emit([&]() {
        auto *CxtI = const_cast<Instruction *>(Q.CxtI);
        return OptimizationRemarkAnalysis("value-tracking", "BadAssumption",
                                          CxtI)
               << "Detected conflicting code assumptions. Program may "
                  "have undefined behavior, or compiler may have "
                  "internal error.";
      });
  }
}

/// Compute known bits from a shift operator, including those with a
/// non-constant shift amount. Known is the output of this function. Known2 is a
/// pre-allocated temporary with the same bit width as Known and, on return,
/// contains the known bits of the shift value source. KF is an
/// operator-specific function that, given the known-bits and a shift amount,
/// computes the implied known-bits of the shift operator's result for that
/// shift amount. The results from calling KF are conservatively combined for
/// all permitted shift amounts.
static void computeKnownBitsFromShiftOperator(
    const Operator *I, const APInt &DemandedElts, KnownBits &Known,
    KnownBits &Known2, unsigned Depth, const Query &Q,
    function_ref<KnownBits(const KnownBits &, const KnownBits &)> KF) {
  unsigned BitWidth = Known.getBitWidth();
  computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
  computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);

  // Note: We cannot use Known.Zero.getLimitedValue() here, because if
  // BitWidth > 64 and any upper bits are known, we'll end up returning the
  // limit value (which implies all bits are known).
  uint64_t ShiftAmtKZ = Known.Zero.zextOrTrunc(64).getZExtValue();
  uint64_t ShiftAmtKO = Known.One.zextOrTrunc(64).getZExtValue();
  bool ShiftAmtIsConstant = Known.isConstant();
  bool MaxShiftAmtIsOutOfRange = Known.getMaxValue().uge(BitWidth);

  if (ShiftAmtIsConstant) {
    Known = KF(Known2, Known);

    // If the known bits conflict, this must be an overflowing left shift, so
    // the shift result is poison. We can return anything we want. Choose 0 for
    // the best folding opportunity.
    if (Known.hasConflict())
      Known.setAllZero();

    return;
  }

  // If the shift amount could be greater than or equal to the bit-width of the
  // LHS, the value could be poison, but bail out because the check below is
  // expensive.
  // TODO: Should we just carry on?
  if (MaxShiftAmtIsOutOfRange) {
    Known.resetAll();
    return;
  }

  // It would be more-clearly correct to use the two temporaries for this
  // calculation.
  // Reusing the APInts here to prevent unnecessary allocations.
  Known.resetAll();

  // If we know the shifter operand is nonzero, we can sometimes infer more
  // known bits. However this is expensive to compute, so be lazy about it and
  // only compute it when absolutely necessary.
  Optional<bool> ShifterOperandIsNonZero;

  // Early exit if we can't constrain any well-defined shift amount.
  if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
      !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
    ShifterOperandIsNonZero =
        isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
    if (!*ShifterOperandIsNonZero)
      return;
  }

  Known.Zero.setAllBits();
  Known.One.setAllBits();
  for (unsigned ShiftAmt = 0; ShiftAmt < BitWidth; ++ShiftAmt) {
    // Combine the shifted known input bits only for those shift amounts
    // compatible with its known constraints.
    if ((ShiftAmt & ~ShiftAmtKZ) != ShiftAmt)
      continue;
    if ((ShiftAmt | ShiftAmtKO) != ShiftAmt)
      continue;
    // If we know the shifter is nonzero, we may be able to infer more known
    // bits. This check is sunk down as far as possible to avoid the expensive
    // call to isKnownNonZero if the cheaper checks above fail.
    if (ShiftAmt == 0) {
      if (!ShifterOperandIsNonZero)
        ShifterOperandIsNonZero =
            isKnownNonZero(I->getOperand(1), DemandedElts, Depth + 1, Q);
      if (*ShifterOperandIsNonZero)
        continue;
    }

    Known = KnownBits::commonBits(
        Known, KF(Known2, KnownBits::makeConstant(APInt(32, ShiftAmt))));
  }

  // If the known bits conflict, the result is poison. Return a 0 and hope the
  // caller can further optimize that.
  if (Known.hasConflict())
    Known.setAllZero();
}

static void computeKnownBitsFromOperator(const Operator *I,
                                         const APInt &DemandedElts,
                                         KnownBits &Known, unsigned Depth,
                                         const Query &Q) {
  unsigned BitWidth = Known.getBitWidth();

  KnownBits Known2(BitWidth);
  switch (I->getOpcode()) {
  default: break;
  case Instruction::Load:
    if (MDNode *MD =
            Q.IIQ.getMetadata(cast<LoadInst>(I), LLVMContext::MD_range))
      computeKnownBitsFromRangeMetadata(*MD, Known);
    break;
  case Instruction::And: {
    // If either the LHS or the RHS are Zero, the result is zero.
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known &= Known2;

    // and(x, add (x, -1)) is a common idiom that always clears the low bit;
    // here we handle the more general case of adding any odd number by
    // matching the form and(x, add(x, y)) where y is odd.
    // TODO: This could be generalized to clearing any bit set in y where the
    // following bit is known to be unset in y.
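    // For example, in %r = and i8 %x, %a with %a = add i8 %x, 3: since 3 is
    // odd, %x and %x + 3 always differ in bit 0, so bit 0 of %r is known zero.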
    Value *X = nullptr, *Y = nullptr;
    if (!Known.Zero[0] && !Known.One[0] &&
        match(I, m_c_BinOp(m_Value(X), m_Add(m_Deferred(X), m_Value(Y))))) {
      Known2.resetAll();
      computeKnownBits(Y, DemandedElts, Known2, Depth + 1, Q);
      if (Known2.countMinTrailingOnes() > 0)
        Known.Zero.setBit(0);
    }
    break;
  }
  case Instruction::Or:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known |= Known2;
    break;
  case Instruction::Xor:
    computeKnownBits(I->getOperand(1), DemandedElts, Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);

    Known ^= Known2;
    break;
  case Instruction::Mul: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsMul(I->getOperand(0), I->getOperand(1), NSW, DemandedElts,
                        Known, Known2, Depth, Q);
    break;
  }
  case Instruction::UDiv: {
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::udiv(Known, Known2);
    break;
  }
  case Instruction::Select: {
    const Value *LHS = nullptr, *RHS = nullptr;
    SelectPatternFlavor SPF = matchSelectPattern(I, LHS, RHS).Flavor;
    if (SelectPatternResult::isMinOrMax(SPF)) {
      computeKnownBits(RHS, Known, Depth + 1, Q);
      computeKnownBits(LHS, Known2, Depth + 1, Q);
      switch (SPF) {
      default:
        llvm_unreachable("Unhandled select pattern flavor!");
      case SPF_SMAX:
        Known = KnownBits::smax(Known, Known2);
        break;
      case SPF_SMIN:
        Known = KnownBits::smin(Known, Known2);
        break;
      case SPF_UMAX:
        Known = KnownBits::umax(Known, Known2);
        break;
      case SPF_UMIN:
        Known = KnownBits::umin(Known, Known2);
        break;
      }
      break;
    }

    computeKnownBits(I->getOperand(2), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);

    // Only known if known in both the LHS and RHS.
    Known = KnownBits::commonBits(Known, Known2);

    if (SPF == SPF_ABS) {
      // RHS from matchSelectPattern returns the negation part of abs pattern.
      // If the negate has an NSW flag we can assume the sign bit of the result
      // will be 0 because that makes abs(INT_MIN) undefined.
      if (match(RHS, m_Neg(m_Specific(LHS))) &&
          Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RHS)))
        Known.Zero.setSignBit();
    }

    break;
  }
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::SIToFP:
  case Instruction::UIToFP:
    break; // Can't work with floating point.
  case Instruction::PtrToInt:
  case Instruction::IntToPtr:
    // Fall through and handle them the same as zext/trunc.
    LLVM_FALLTHROUGH;
  case Instruction::ZExt:
  case Instruction::Trunc: {
    Type *SrcTy = I->getOperand(0)->getType();

    unsigned SrcBitWidth;
    // Note that we handle pointer operands here because of inttoptr/ptrtoint
    // which fall through here.
    Type *ScalarTy = SrcTy->getScalarType();
    SrcBitWidth = ScalarTy->isPointerTy() ?
      Q.DL.getPointerTypeSizeInBits(ScalarTy) :
      Q.DL.getTypeSizeInBits(ScalarTy);

    assert(SrcBitWidth && "SrcBitWidth can't be zero");
    Known = Known.anyextOrTrunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    Known = Known.zextOrTrunc(BitWidth);
    break;
  }
  case Instruction::BitCast: {
    Type *SrcTy = I->getOperand(0)->getType();
    if (SrcTy->isIntOrPtrTy() &&
        // TODO: For now, not handling conversions like:
        // (bitcast i64 %x to <2 x i32>)
        !I->getType()->isVectorTy()) {
      computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
      break;
    }

    // Handle cast from vector integer type to scalar or vector integer.
    auto *SrcVecTy = dyn_cast<FixedVectorType>(SrcTy);
    if (!SrcVecTy || !SrcVecTy->getElementType()->isIntegerTy() ||
        !I->getType()->isIntOrIntVectorTy())
      break;

    // Look through a cast from narrow vector elements to wider type.
    // Examples: v4i32 -> v2i64, v3i8 -> i24
    unsigned SubBitWidth = SrcVecTy->getScalarSizeInBits();
    if (BitWidth % SubBitWidth == 0) {
      // Known bits are automatically intersected across demanded elements of a
      // vector. So for example, if a bit is computed as known zero, it must be
      // zero across all demanded elements of the vector.
      //
      // For this bitcast, each demanded element of the output is sub-divided
      // across a set of smaller vector elements in the source vector. To get
      // the known bits for an entire element of the output, compute the known
      // bits for each sub-element sequentially. This is done by shifting the
      // one-set-bit demanded elements parameter across the sub-elements for
      // consecutive calls to computeKnownBits. We are using the demanded
      // elements parameter as a mask operator.
      //
      // The known bits of each sub-element are then inserted into place
      // (dependent on endian) to form the full result of known bits.
      unsigned NumElts = DemandedElts.getBitWidth();
      unsigned SubScale = BitWidth / SubBitWidth;
      APInt SubDemandedElts = APInt::getZero(NumElts * SubScale);
      for (unsigned i = 0; i != NumElts; ++i) {
        if (DemandedElts[i])
          SubDemandedElts.setBit(i * SubScale);
      }

      KnownBits KnownSrc(SubBitWidth);
      for (unsigned i = 0; i != SubScale; ++i) {
        computeKnownBits(I->getOperand(0), SubDemandedElts.shl(i), KnownSrc,
                         Depth + 1, Q);
        unsigned ShiftElt = Q.DL.isLittleEndian() ? i : SubScale - 1 - i;
        Known.insertBits(KnownSrc, ShiftElt * SubBitWidth);
      }
    }
    break;
  }
  case Instruction::SExt: {
    // Compute the bits in the result that are not present in the input.
    unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();

    Known = Known.trunc(SrcBitWidth);
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // If the sign bit of the input is known set or clear, then we know the
    // top bits of the result.
    Known = Known.sext(BitWidth);
    break;
  }
  case Instruction::Shl: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    auto KF = [NSW](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      KnownBits Result = KnownBits::shl(KnownVal, KnownAmt);
      // If this shift has "nsw" keyword, then the result is either a poison
      // value or has the same sign bit as the first operand.
      if (NSW) {
        if (KnownVal.Zero.isSignBitSet())
          Result.Zero.setSignBit();
        if (KnownVal.One.isSignBitSet())
          Result.One.setSignBit();
      }
      return Result;
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Trailing zeros of a left-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setLowBits(C->countTrailingZeros());
    break;
  }
  case Instruction::LShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::lshr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    // Leading zeros of a right-shifted constant never decrease.
    const APInt *C;
    if (match(I->getOperand(0), m_APInt(C)))
      Known.Zero.setHighBits(C->countLeadingZeros());
    break;
  }
  case Instruction::AShr: {
    auto KF = [](const KnownBits &KnownVal, const KnownBits &KnownAmt) {
      return KnownBits::ashr(KnownVal, KnownAmt);
    };
    computeKnownBitsFromShiftOperator(I, DemandedElts, Known, Known2, Depth, Q,
                                      KF);
    break;
  }
  case Instruction::Sub: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(false, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::Add: {
    bool NSW = Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(I));
    computeKnownBitsAddSub(true, I->getOperand(0), I->getOperand(1), NSW,
                           DemandedElts, Known, Known2, Depth, Q);
    break;
  }
  case Instruction::SRem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::srem(Known, Known2);
    break;

  case Instruction::URem:
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
    Known = KnownBits::urem(Known, Known2);
    break;
  case Instruction::Alloca:
    Known.Zero.setLowBits(Log2(cast<AllocaInst>(I)->getAlign()));
    break;
  case Instruction::GetElementPtr: {
    // Analyze all of the subscripts of this getelementptr instruction
    // to determine if we can prove known low zero bits.
    computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
    // Accumulate the constant indices in a separate variable
    // to minimize the number of calls to computeForAddSub.
    APInt AccConstIndices(BitWidth, 0, /*IsSigned*/ true);

    gep_type_iterator GTI = gep_type_begin(I);
    for (unsigned i = 1, e = I->getNumOperands(); i != e; ++i, ++GTI) {
      // TrailZ can only become smaller, short-circuit if we hit zero.
      if (Known.isUnknown())
        break;

      Value *Index = I->getOperand(i);

      // Handle case when index is zero.
      Constant *CIndex = dyn_cast<Constant>(Index);
      if (CIndex && CIndex->isZeroValue())
        continue;

      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // Handle struct member offset arithmetic.
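        // For example, in getelementptr {i32, i64}, ptr %p, i64 0, i32 1 the
        // second field sits at a fixed offset from %p (8 bytes under a
        // typical data layout), which is accumulated into AccConstIndices.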

        assert(CIndex &&
               "Access to structure field must be known at compile time");

        if (CIndex->getType()->isVectorTy())
          Index = CIndex->getSplatValue();

        unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();
        const StructLayout *SL = Q.DL.getStructLayout(STy);
        uint64_t Offset = SL->getElementOffset(Idx);
        AccConstIndices += Offset;
        continue;
      }

      // Handle array index arithmetic.
      Type *IndexedTy = GTI.getIndexedType();
      if (!IndexedTy->isSized()) {
        Known.resetAll();
        break;
      }

      unsigned IndexBitWidth = Index->getType()->getScalarSizeInBits();
      KnownBits IndexBits(IndexBitWidth);
      computeKnownBits(Index, IndexBits, Depth + 1, Q);
      TypeSize IndexTypeSize = Q.DL.getTypeAllocSize(IndexedTy);
      uint64_t TypeSizeInBytes = IndexTypeSize.getKnownMinSize();
      KnownBits ScalingFactor(IndexBitWidth);
      // Multiply by current sizeof type.
      // &A[i] == A + i * sizeof(*A[i]).
      if (IndexTypeSize.isScalable()) {
        // For scalable types the only thing we know about sizeof is
        // that this is a multiple of the minimum size.
        ScalingFactor.Zero.setLowBits(countTrailingZeros(TypeSizeInBytes));
      } else if (IndexBits.isConstant()) {
        APInt IndexConst = IndexBits.getConstant();
        APInt ScalingFactor(IndexBitWidth, TypeSizeInBytes);
        IndexConst *= ScalingFactor;
        AccConstIndices += IndexConst.sextOrTrunc(BitWidth);
        continue;
      } else {
        ScalingFactor =
            KnownBits::makeConstant(APInt(IndexBitWidth, TypeSizeInBytes));
      }
      IndexBits = KnownBits::mul(IndexBits, ScalingFactor);

      // If the offsets have a different width from the pointer, according
      // to the language reference we need to sign-extend or truncate them
      // to the width of the pointer.
      IndexBits = IndexBits.sextOrTrunc(BitWidth);

      // Note that inbounds does *not* guarantee nsw for the addition, as only
      // the offset is signed, while the base address is unsigned.
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, IndexBits);
    }
    if (!Known.isUnknown() && !AccConstIndices.isZero()) {
      KnownBits Index = KnownBits::makeConstant(AccConstIndices);
      Known = KnownBits::computeForAddSub(
          /*Add=*/true, /*NSW=*/false, Known, Index);
    }
    break;
  }
  case Instruction::PHI: {
    const PHINode *P = cast<PHINode>(I);
    BinaryOperator *BO = nullptr;
    Value *R = nullptr, *L = nullptr;
    if (matchSimpleRecurrence(P, BO, R, L)) {
      // Handle the case of a simple two-predecessor recurrence PHI.
      // There's a lot more that could theoretically be done here, but
      // this is sufficient to catch some interesting cases.
      unsigned Opcode = BO->getOpcode();

      // If this is a shift recurrence, we know the bits being shifted in.
      // We can combine that with information about the start value of the
      // recurrence to conclude facts about the result.
      if ((Opcode == Instruction::LShr || Opcode == Instruction::AShr ||
           Opcode == Instruction::Shl) &&
          BO->getOperand(0) == I) {

        // We have matched a recurrence of the form:
        // %iv = [R, %entry], [%iv.next, %backedge]
        // %iv.next = shift_op %iv, L

        // Recurse with the phi context to avoid concern about whether facts
        // inferred hold at original context instruction. TODO: It may be
        // correct to use the original context.
        // If warranted, explore and add sufficient tests to cover.
        Query RecQ = Q;
        RecQ.CxtI = P;
        computeKnownBits(R, DemandedElts, Known2, Depth + 1, RecQ);
        switch (Opcode) {
        case Instruction::Shl:
          // A shl recurrence will only increase the trailing zeros.
          Known.Zero.setLowBits(Known2.countMinTrailingZeros());
          break;
        case Instruction::LShr:
          // A lshr recurrence will preserve the leading zeros of the
          // start value.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          break;
        case Instruction::AShr:
          // An ashr recurrence will extend the initial sign bit.
          Known.Zero.setHighBits(Known2.countMinLeadingZeros());
          Known.One.setHighBits(Known2.countMinLeadingOnes());
          break;
        }
      }

      // Check for operations that have the property that if
      // both their operands have low zero bits, the result
      // will have low zero bits.
      if (Opcode == Instruction::Add ||
          Opcode == Instruction::Sub ||
          Opcode == Instruction::And ||
          Opcode == Instruction::Or ||
          Opcode == Instruction::Mul) {
        // Change the context instruction to the "edge" that flows into the
        // phi. This is important because that is where the value is actually
        // "evaluated" even though it is used later somewhere else. (see also
        // D69571).
        Query RecQ = Q;

        unsigned OpNum = P->getOperand(0) == R ? 0 : 1;
        Instruction *RInst = P->getIncomingBlock(OpNum)->getTerminator();
        Instruction *LInst = P->getIncomingBlock(1 - OpNum)->getTerminator();

        // Ok, we have a PHI of the form L op= R. Check for low
        // zero bits.
        RecQ.CxtI = RInst;
        computeKnownBits(R, Known2, Depth + 1, RecQ);

        // We need to take the minimum number of known bits.
        KnownBits Known3(BitWidth);
        RecQ.CxtI = LInst;
        computeKnownBits(L, Known3, Depth + 1, RecQ);

        Known.Zero.setLowBits(std::min(Known2.countMinTrailingZeros(),
                                       Known3.countMinTrailingZeros()));

        auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(BO);
        if (OverflowOp && Q.IIQ.hasNoSignedWrap(OverflowOp)) {
          // If the initial value of the recurrence is non-negative, and we
          // are adding a non-negative number with nsw, the result can only
          // be non-negative or poison regardless of the number of times we
          // execute the add in the phi recurrence. If the initial value is
          // negative and we are adding a negative number with nsw, the
          // result can only be negative or poison. Similar arguments apply
          // to sub and mul.
1523 //
1524 // (add non-negative, non-negative) --> non-negative
1525 // (add negative, negative) --> negative
1526 if (Opcode == Instruction::Add) {
1527 if (Known2.isNonNegative() && Known3.isNonNegative())
1528 Known.makeNonNegative();
1529 else if (Known2.isNegative() && Known3.isNegative())
1530 Known.makeNegative();
1531 }
1532
1533 // (sub nsw non-negative, negative) --> non-negative
1534 // (sub nsw negative, non-negative) --> negative
1535 else if (Opcode == Instruction::Sub && BO->getOperand(0) == I) {
1536 if (Known2.isNonNegative() && Known3.isNegative())
1537 Known.makeNonNegative();
1538 else if (Known2.isNegative() && Known3.isNonNegative())
1539 Known.makeNegative();
1540 }
1541
1542 // (mul nsw non-negative, non-negative) --> non-negative
1543 else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
1544 Known3.isNonNegative())
1545 Known.makeNonNegative();
1546 }
1547
1548 break;
1549 }
1550 }
1551
1552 // Unreachable blocks may have zero-operand PHI nodes.
1553 if (P->getNumIncomingValues() == 0)
1554 break;
1555
1556 // Otherwise take the intersection of the known bits of all incoming
1557 // values, taking conservative care to avoid excessive recursion.
1558 if (Depth < MaxAnalysisRecursionDepth - 1 && !Known.Zero && !Known.One) {
1559 // Skip if every incoming value refers back to the PHI itself.
1560 if (isa_and_nonnull<UndefValue>(P->hasConstantValue()))
1561 break;
1562
1563 Known.Zero.setAllBits();
1564 Known.One.setAllBits();
1565 for (unsigned u = 0, e = P->getNumIncomingValues(); u < e; ++u) {
1566 Value *IncValue = P->getIncomingValue(u);
1567 // Skip direct self references.
1568 if (IncValue == P) continue;
1569
1570 // Change the context instruction to the "edge" that flows into the
1571 // phi. This is important because that is where the value is actually
1572 // "evaluated" even though it is used later somewhere else. (see also
1573 // D69571).
1574 Query RecQ = Q;
1575 RecQ.CxtI = P->getIncomingBlock(u)->getTerminator();
1576
1577 Known2 = KnownBits(BitWidth);
1578 // Recurse, but cap the recursion to one level, because we don't
1579 // want to waste time spinning around in loops.
1580 computeKnownBits(IncValue, Known2, MaxAnalysisRecursionDepth - 1, RecQ);
1581 Known = KnownBits::commonBits(Known, Known2);
1582 // If all bits have been ruled out, there's no need to check
1583 // more operands.
1584 if (Known.isUnknown())
1585 break;
1586 }
1587 }
1588 break;
1589 }
1590 case Instruction::Call:
1591 case Instruction::Invoke:
1592 // If range metadata is attached to this call, set known bits from that,
1593 // and then intersect with known bits based on other properties of the
1594 // function.
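// Editor's illustration (hypothetical IR, not from the original source): a
// call such as
//   %x = call i8 @f(), !range !0    ; with !0 = !{i8 0, i8 32}
// guarantees 0 <= %x < 32, so bits 7..5 of %x become known zero here before
// any intrinsic-specific reasoning below runs.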
1595 if (MDNode *MD =
1596 Q.IIQ.getMetadata(cast<Instruction>(I), LLVMContext::MD_range))
1597 computeKnownBitsFromRangeMetadata(*MD, Known);
1598 if (const Value *RV = cast<CallBase>(I)->getReturnedArgOperand()) {
1599 computeKnownBits(RV, Known2, Depth + 1, Q);
1600 Known.Zero |= Known2.Zero;
1601 Known.One |= Known2.One;
1602 }
1603 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1604 switch (II->getIntrinsicID()) {
1605 default: break;
1606 case Intrinsic::abs: {
1607 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1608 bool IntMinIsPoison = match(II->getArgOperand(1), m_One());
1609 Known = Known2.abs(IntMinIsPoison);
1610 break;
1611 }
1612 case Intrinsic::bitreverse:
1613 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1614 Known.Zero |= Known2.Zero.reverseBits();
1615 Known.One |= Known2.One.reverseBits();
1616 break;
1617 case Intrinsic::bswap:
1618 computeKnownBits(I->getOperand(0), DemandedElts, Known2, Depth + 1, Q);
1619 Known.Zero |= Known2.Zero.byteSwap();
1620 Known.One |= Known2.One.byteSwap();
1621 break;
1622 case Intrinsic::ctlz: {
1623 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1624 // If we have a known 1, its position is our upper bound.
1625 unsigned PossibleLZ = Known2.countMaxLeadingZeros();
1626 // If this call is poison for a zero input, the result is at most BitWidth - 1.
1627 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1628 PossibleLZ = std::min(PossibleLZ, BitWidth - 1);
1629 unsigned LowBits = Log2_32(PossibleLZ) + 1;
1630 Known.Zero.setBitsFrom(LowBits);
1631 break;
1632 }
1633 case Intrinsic::cttz: {
1634 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1635 // If we have a known 1, its position is our upper bound.
1636 unsigned PossibleTZ = Known2.countMaxTrailingZeros();
1637 // If this call is poison for a zero input, the result is at most BitWidth - 1.
1638 if (II->getArgOperand(1) == ConstantInt::getTrue(II->getContext()))
1639 PossibleTZ = std::min(PossibleTZ, BitWidth - 1);
1640 unsigned LowBits = Log2_32(PossibleTZ) + 1;
1641 Known.Zero.setBitsFrom(LowBits);
1642 break;
1643 }
1644 case Intrinsic::ctpop: {
1645 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
1646 // We can bound the number of bits needed to represent the count. Also,
1647 // bits known to be zero can't contribute to the population.
1648 unsigned BitsPossiblySet = Known2.countMaxPopulation();
1649 unsigned LowBits = Log2_32(BitsPossiblySet) + 1;
1650 Known.Zero.setBitsFrom(LowBits);
1651 // TODO: we could bound Known.One using the lower bound on the number of
1652 // bits that might be set, i.e. Known2.countMinPopulation().
1653 break;
1654 }
1655 case Intrinsic::fshr:
1656 case Intrinsic::fshl: {
1657 const APInt *SA;
1658 if (!match(I->getOperand(2), m_APInt(SA)))
1659 break;
1660
1661 // Normalize to funnel shift left.
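// Why the normalization below is sound (editor's note): a funnel shift
// selects BitWidth contiguous bits from the concatenation of its operands,
// so for i8, fshr(x, y, 3) reads bits 10..3 of the concatenation x:y,
// exactly as fshl(x, y, 5) with 5 == 8 - 3 does.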
1662 uint64_t ShiftAmt = SA->urem(BitWidth); 1663 if (II->getIntrinsicID() == Intrinsic::fshr) 1664 ShiftAmt = BitWidth - ShiftAmt; 1665 1666 KnownBits Known3(BitWidth); 1667 computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q); 1668 computeKnownBits(I->getOperand(1), Known3, Depth + 1, Q); 1669 1670 Known.Zero = 1671 Known2.Zero.shl(ShiftAmt) | Known3.Zero.lshr(BitWidth - ShiftAmt); 1672 Known.One = 1673 Known2.One.shl(ShiftAmt) | Known3.One.lshr(BitWidth - ShiftAmt); 1674 break; 1675 } 1676 case Intrinsic::uadd_sat: 1677 case Intrinsic::usub_sat: { 1678 bool IsAdd = II->getIntrinsicID() == Intrinsic::uadd_sat; 1679 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1680 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1681 1682 // Add: Leading ones of either operand are preserved. 1683 // Sub: Leading zeros of LHS and leading ones of RHS are preserved 1684 // as leading zeros in the result. 1685 unsigned LeadingKnown; 1686 if (IsAdd) 1687 LeadingKnown = std::max(Known.countMinLeadingOnes(), 1688 Known2.countMinLeadingOnes()); 1689 else 1690 LeadingKnown = std::max(Known.countMinLeadingZeros(), 1691 Known2.countMinLeadingOnes()); 1692 1693 Known = KnownBits::computeForAddSub( 1694 IsAdd, /* NSW */ false, Known, Known2); 1695 1696 // We select between the operation result and all-ones/zero 1697 // respectively, so we can preserve known ones/zeros. 1698 if (IsAdd) { 1699 Known.One.setHighBits(LeadingKnown); 1700 Known.Zero.clearAllBits(); 1701 } else { 1702 Known.Zero.setHighBits(LeadingKnown); 1703 Known.One.clearAllBits(); 1704 } 1705 break; 1706 } 1707 case Intrinsic::umin: 1708 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1709 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1710 Known = KnownBits::umin(Known, Known2); 1711 break; 1712 case Intrinsic::umax: 1713 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1714 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1715 Known = KnownBits::umax(Known, Known2); 1716 break; 1717 case Intrinsic::smin: 1718 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1719 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1720 Known = KnownBits::smin(Known, Known2); 1721 break; 1722 case Intrinsic::smax: 1723 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q); 1724 computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q); 1725 Known = KnownBits::smax(Known, Known2); 1726 break; 1727 case Intrinsic::x86_sse42_crc32_64_64: 1728 Known.Zero.setBitsFrom(32); 1729 break; 1730 case Intrinsic::riscv_vsetvli: 1731 case Intrinsic::riscv_vsetvlimax: 1732 // Assume that VL output is positive and would fit in an int32_t. 1733 // TODO: VLEN might be capped at 16 bits in a future V spec update. 1734 if (BitWidth >= 32) 1735 Known.Zero.setBitsFrom(31); 1736 break; 1737 case Intrinsic::vscale: { 1738 if (!II->getParent() || !II->getFunction() || 1739 !II->getFunction()->hasFnAttribute(Attribute::VScaleRange)) 1740 break; 1741 1742 auto Attr = II->getFunction()->getFnAttribute(Attribute::VScaleRange); 1743 Optional<unsigned> VScaleMax = Attr.getVScaleRangeMax(); 1744 1745 if (!VScaleMax) 1746 break; 1747 1748 unsigned VScaleMin = Attr.getVScaleRangeMin(); 1749 1750 // If vscale min = max then we know the exact value at compile time 1751 // and hence we know the exact bits. 
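// Editor's example: with the attribute vscale_range(4,4), min and max agree,
// vscale is exactly 4, and the code below yields Known.One = 0b100 with all
// remaining bits in Known.Zero.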
1752 if (VScaleMin == VScaleMax) { 1753 Known.One = VScaleMin; 1754 Known.Zero = VScaleMin; 1755 Known.Zero.flipAllBits(); 1756 break; 1757 } 1758 1759 unsigned FirstZeroHighBit = 32 - countLeadingZeros(*VScaleMax); 1760 if (FirstZeroHighBit < BitWidth) 1761 Known.Zero.setBitsFrom(FirstZeroHighBit); 1762 1763 break; 1764 } 1765 } 1766 } 1767 break; 1768 case Instruction::ShuffleVector: { 1769 auto *Shuf = dyn_cast<ShuffleVectorInst>(I); 1770 // FIXME: Do we need to handle ConstantExpr involving shufflevectors? 1771 if (!Shuf) { 1772 Known.resetAll(); 1773 return; 1774 } 1775 // For undef elements, we don't know anything about the common state of 1776 // the shuffle result. 1777 APInt DemandedLHS, DemandedRHS; 1778 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) { 1779 Known.resetAll(); 1780 return; 1781 } 1782 Known.One.setAllBits(); 1783 Known.Zero.setAllBits(); 1784 if (!!DemandedLHS) { 1785 const Value *LHS = Shuf->getOperand(0); 1786 computeKnownBits(LHS, DemandedLHS, Known, Depth + 1, Q); 1787 // If we don't know any bits, early out. 1788 if (Known.isUnknown()) 1789 break; 1790 } 1791 if (!!DemandedRHS) { 1792 const Value *RHS = Shuf->getOperand(1); 1793 computeKnownBits(RHS, DemandedRHS, Known2, Depth + 1, Q); 1794 Known = KnownBits::commonBits(Known, Known2); 1795 } 1796 break; 1797 } 1798 case Instruction::InsertElement: { 1799 const Value *Vec = I->getOperand(0); 1800 const Value *Elt = I->getOperand(1); 1801 auto *CIdx = dyn_cast<ConstantInt>(I->getOperand(2)); 1802 // Early out if the index is non-constant or out-of-range. 1803 unsigned NumElts = DemandedElts.getBitWidth(); 1804 if (!CIdx || CIdx->getValue().uge(NumElts)) { 1805 Known.resetAll(); 1806 return; 1807 } 1808 Known.One.setAllBits(); 1809 Known.Zero.setAllBits(); 1810 unsigned EltIdx = CIdx->getZExtValue(); 1811 // Do we demand the inserted element? 1812 if (DemandedElts[EltIdx]) { 1813 computeKnownBits(Elt, Known, Depth + 1, Q); 1814 // If we don't know any bits, early out. 1815 if (Known.isUnknown()) 1816 break; 1817 } 1818 // We don't need the base vector element that has been inserted. 1819 APInt DemandedVecElts = DemandedElts; 1820 DemandedVecElts.clearBit(EltIdx); 1821 if (!!DemandedVecElts) { 1822 computeKnownBits(Vec, DemandedVecElts, Known2, Depth + 1, Q); 1823 Known = KnownBits::commonBits(Known, Known2); 1824 } 1825 break; 1826 } 1827 case Instruction::ExtractElement: { 1828 // Look through extract element. If the index is non-constant or 1829 // out-of-range demand all elements, otherwise just the extracted element. 
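// E.g. (editor's sketch): extracting constant index 1 from a <4 x i8> vector
// demands only DemandedVecElts = 0b0010 of the source operand below.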
1830 const Value *Vec = I->getOperand(0);
1831 const Value *Idx = I->getOperand(1);
1832 auto *CIdx = dyn_cast<ConstantInt>(Idx);
1833 if (isa<ScalableVectorType>(Vec->getType())) {
1834 // FIXME: there's probably *something* we can do with scalable vectors
1835 Known.resetAll();
1836 break;
1837 }
1838 unsigned NumElts = cast<FixedVectorType>(Vec->getType())->getNumElements();
1839 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
1840 if (CIdx && CIdx->getValue().ult(NumElts))
1841 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
1842 computeKnownBits(Vec, DemandedVecElts, Known, Depth + 1, Q);
1843 break;
1844 }
1845 case Instruction::ExtractValue:
1846 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I->getOperand(0))) {
1847 const ExtractValueInst *EVI = cast<ExtractValueInst>(I);
1848 if (EVI->getNumIndices() != 1) break;
1849 if (EVI->getIndices()[0] == 0) {
1850 switch (II->getIntrinsicID()) {
1851 default: break;
1852 case Intrinsic::uadd_with_overflow:
1853 case Intrinsic::sadd_with_overflow:
1854 computeKnownBitsAddSub(true, II->getArgOperand(0),
1855 II->getArgOperand(1), false, DemandedElts,
1856 Known, Known2, Depth, Q);
1857 break;
1858 case Intrinsic::usub_with_overflow:
1859 case Intrinsic::ssub_with_overflow:
1860 computeKnownBitsAddSub(false, II->getArgOperand(0),
1861 II->getArgOperand(1), false, DemandedElts,
1862 Known, Known2, Depth, Q);
1863 break;
1864 case Intrinsic::umul_with_overflow:
1865 case Intrinsic::smul_with_overflow:
1866 computeKnownBitsMul(II->getArgOperand(0), II->getArgOperand(1), false,
1867 DemandedElts, Known, Known2, Depth, Q);
1868 break;
1869 }
1870 }
1871 }
1872 break;
1873 case Instruction::Freeze:
1874 if (isGuaranteedNotToBePoison(I->getOperand(0), Q.AC, Q.CxtI, Q.DT,
1875 Depth + 1))
1876 computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
1877 break;
1878 }
1879 }
1880
1881 /// Determine which bits of V are known to be either zero or one and return
1882 /// them.
1883 KnownBits computeKnownBits(const Value *V, const APInt &DemandedElts,
1884 unsigned Depth, const Query &Q) {
1885 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1886 computeKnownBits(V, DemandedElts, Known, Depth, Q);
1887 return Known;
1888 }
1889
1890 /// Determine which bits of V are known to be either zero or one and return
1891 /// them.
1892 KnownBits computeKnownBits(const Value *V, unsigned Depth, const Query &Q) {
1893 KnownBits Known(getBitWidth(V->getType(), Q.DL));
1894 computeKnownBits(V, Known, Depth, Q);
1895 return Known;
1896 }
1897
1898 /// Determine which bits of V are known to be either zero or one and return
1899 /// them in the Known bit set.
1900 ///
1901 /// NOTE: we cannot consider 'undef' to be "IsZero" here. The problem is that
1902 /// we cannot optimize based on the assumption that it is zero without changing
1903 /// it to be an explicit zero. If we don't change it to zero, other code could
1904 /// be optimized based on the contradictory assumption that it is non-zero.
1905 /// Because instcombine aggressively folds operations with undef args anyway,
1906 /// this won't lose us code quality.
1907 ///
1908 /// This function is defined on values with integer type, values with pointer
1909 /// type, and vectors of integers. In the case where V is a vector, the known
1910 /// zero and known one values are the same width as the vector element, and a
1911 /// bit is set only if it is true for all of the demanded elements in the
1912 /// vector specified by DemandedElts.
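// Editor's example of the DemandedElts contract above: for a <2 x i8> value
// with DemandedElts = 0b01, a bit is reported as known zero/one only if it
// has that value in element 0; element 1 is ignored.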
1913 void computeKnownBits(const Value *V, const APInt &DemandedElts, 1914 KnownBits &Known, unsigned Depth, const Query &Q) { 1915 if (!DemandedElts || isa<ScalableVectorType>(V->getType())) { 1916 // No demanded elts or V is a scalable vector, better to assume we don't 1917 // know anything. 1918 Known.resetAll(); 1919 return; 1920 } 1921 1922 assert(V && "No Value?"); 1923 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 1924 1925 #ifndef NDEBUG 1926 Type *Ty = V->getType(); 1927 unsigned BitWidth = Known.getBitWidth(); 1928 1929 assert((Ty->isIntOrIntVectorTy(BitWidth) || Ty->isPtrOrPtrVectorTy()) && 1930 "Not integer or pointer type!"); 1931 1932 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) { 1933 assert( 1934 FVTy->getNumElements() == DemandedElts.getBitWidth() && 1935 "DemandedElt width should equal the fixed vector number of elements"); 1936 } else { 1937 assert(DemandedElts == APInt(1, 1) && 1938 "DemandedElt width should be 1 for scalars"); 1939 } 1940 1941 Type *ScalarTy = Ty->getScalarType(); 1942 if (ScalarTy->isPointerTy()) { 1943 assert(BitWidth == Q.DL.getPointerTypeSizeInBits(ScalarTy) && 1944 "V and Known should have same BitWidth"); 1945 } else { 1946 assert(BitWidth == Q.DL.getTypeSizeInBits(ScalarTy) && 1947 "V and Known should have same BitWidth"); 1948 } 1949 #endif 1950 1951 const APInt *C; 1952 if (match(V, m_APInt(C))) { 1953 // We know all of the bits for a scalar constant or a splat vector constant! 1954 Known = KnownBits::makeConstant(*C); 1955 return; 1956 } 1957 // Null and aggregate-zero are all-zeros. 1958 if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) { 1959 Known.setAllZero(); 1960 return; 1961 } 1962 // Handle a constant vector by taking the intersection of the known bits of 1963 // each element. 1964 if (const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(V)) { 1965 // We know that CDV must be a vector of integers. Take the intersection of 1966 // each element. 1967 Known.Zero.setAllBits(); Known.One.setAllBits(); 1968 for (unsigned i = 0, e = CDV->getNumElements(); i != e; ++i) { 1969 if (!DemandedElts[i]) 1970 continue; 1971 APInt Elt = CDV->getElementAsAPInt(i); 1972 Known.Zero &= ~Elt; 1973 Known.One &= Elt; 1974 } 1975 return; 1976 } 1977 1978 if (const auto *CV = dyn_cast<ConstantVector>(V)) { 1979 // We know that CV must be a vector of integers. Take the intersection of 1980 // each element. 1981 Known.Zero.setAllBits(); Known.One.setAllBits(); 1982 for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) { 1983 if (!DemandedElts[i]) 1984 continue; 1985 Constant *Element = CV->getAggregateElement(i); 1986 auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element); 1987 if (!ElementCI) { 1988 Known.resetAll(); 1989 return; 1990 } 1991 const APInt &Elt = ElementCI->getValue(); 1992 Known.Zero &= ~Elt; 1993 Known.One &= Elt; 1994 } 1995 return; 1996 } 1997 1998 // Start out not knowing anything. 1999 Known.resetAll(); 2000 2001 // We can't imply anything about undefs. 2002 if (isa<UndefValue>(V)) 2003 return; 2004 2005 // There's no point in looking through other users of ConstantData for 2006 // assumptions. Confirm that we've handled them all. 2007 assert(!isa<ConstantData>(V) && "Unhandled constant data!"); 2008 2009 // All recursive calls that increase depth must come after this. 2010 if (Depth == MaxAnalysisRecursionDepth) 2011 return; 2012 2013 // A weak GlobalAlias is totally unknown. A non-weak GlobalAlias has 2014 // the bits of its aliasee. 
2015 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
2016 if (!GA->isInterposable())
2017 computeKnownBits(GA->getAliasee(), Known, Depth + 1, Q);
2018 return;
2019 }
2020
2021 if (const Operator *I = dyn_cast<Operator>(V))
2022 computeKnownBitsFromOperator(I, DemandedElts, Known, Depth, Q);
2023
2024 // Aligned pointers have trailing zeros -- refine the Known.Zero set.
2025 if (isa<PointerType>(V->getType())) {
2026 Align Alignment = V->getPointerAlignment(Q.DL);
2027 Known.Zero.setLowBits(Log2(Alignment));
2028 }
2029
2030 // computeKnownBitsFromAssume strictly refines Known.
2031 // Therefore, we run it after computeKnownBitsFromOperator.
2032
2033 // Check whether a nearby assume intrinsic can determine some known bits.
2034 computeKnownBitsFromAssume(V, Known, Depth, Q);
2035
2036 assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
2037 }
2038
2039 /// Try to detect a recurrence in which the value of the induction variable is
2040 /// always a power of two (or zero).
2041 static bool isPowerOfTwoRecurrence(const PHINode *PN, bool OrZero,
2042 unsigned Depth, Query &Q) {
2043 BinaryOperator *BO = nullptr;
2044 Value *Start = nullptr, *Step = nullptr;
2045 if (!matchSimpleRecurrence(PN, BO, Start, Step))
2046 return false;
2047
2048 // Initial value must be a power of two.
2049 for (const Use &U : PN->operands()) {
2050 if (U.get() == Start) {
2051 // The initial value comes from a different BB; we need to adjust the
2052 // context instruction for the analysis.
2053 Q.CxtI = PN->getIncomingBlock(U)->getTerminator();
2054 if (!isKnownToBeAPowerOfTwo(Start, OrZero, Depth, Q))
2055 return false;
2056 }
2057 }
2058
2059 // Except for Mul, the induction variable must be on the left side of the
2060 // increment expression, otherwise its value can be arbitrary.
2061 if (BO->getOpcode() != Instruction::Mul && BO->getOperand(1) != Step)
2062 return false;
2063
2064 Q.CxtI = BO->getParent()->getTerminator();
2065 switch (BO->getOpcode()) {
2066 case Instruction::Mul:
2067 // Power of two is closed under multiplication.
2068 return (OrZero || Q.IIQ.hasNoUnsignedWrap(BO) ||
2069 Q.IIQ.hasNoSignedWrap(BO)) &&
2070 isKnownToBeAPowerOfTwo(Step, OrZero, Depth, Q);
2071 case Instruction::SDiv:
2072 // The start value must not be the sign mask for signed division, so simply
2073 // being a power of two is not sufficient, and it has to be a constant.
2074 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2075 return false;
2076 LLVM_FALLTHROUGH;
2077 case Instruction::UDiv:
2078 // The divisor must be a power of two.
2079 // If OrZero is false, we cannot guarantee that the induction variable is
2080 // non-zero after the division (same for Shr), unless it is exact.
2081 return (OrZero || Q.IIQ.isExact(BO)) &&
2082 isKnownToBeAPowerOfTwo(Step, false, Depth, Q);
2083 case Instruction::Shl:
2084 return OrZero || Q.IIQ.hasNoUnsignedWrap(BO) || Q.IIQ.hasNoSignedWrap(BO);
2085 case Instruction::AShr:
2086 if (!match(Start, m_Power2()) || match(Start, m_SignMask()))
2087 return false;
2088 LLVM_FALLTHROUGH;
2089 case Instruction::LShr:
2090 return OrZero || Q.IIQ.isExact(BO);
2091 default:
2092 return false;
2093 }
2094 }
2095
2096 /// Return true if the given value is known to have exactly one
2097 /// bit set when defined. For vectors, return true if every element is known
2098 /// to be a power of two when defined. Supports values with integer or pointer
2099 /// types and vectors of integers.
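// A minimal sketch of the queries answered below (editor's addition,
// hypothetical IR):
//   %p = shl i32 1, %n        ; a power of two whenever it is defined
//   %m = sub i32 0, %x
//   %q = and i32 %x, %m       ; x & -x is a power of two *or zero*, so this
//                             ; form is only matched when OrZero is true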
2100 bool isKnownToBeAPowerOfTwo(const Value *V, bool OrZero, unsigned Depth, 2101 const Query &Q) { 2102 assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth"); 2103 2104 // Attempt to match against constants. 2105 if (OrZero && match(V, m_Power2OrZero())) 2106 return true; 2107 if (match(V, m_Power2())) 2108 return true; 2109 2110 // 1 << X is clearly a power of two if the one is not shifted off the end. If 2111 // it is shifted off the end then the result is undefined. 2112 if (match(V, m_Shl(m_One(), m_Value()))) 2113 return true; 2114 2115 // (signmask) >>l X is clearly a power of two if the one is not shifted off 2116 // the bottom. If it is shifted off the bottom then the result is undefined. 2117 if (match(V, m_LShr(m_SignMask(), m_Value()))) 2118 return true; 2119 2120 // The remaining tests are all recursive, so bail out if we hit the limit. 2121 if (Depth++ == MaxAnalysisRecursionDepth) 2122 return false; 2123 2124 Value *X = nullptr, *Y = nullptr; 2125 // A shift left or a logical shift right of a power of two is a power of two 2126 // or zero. 2127 if (OrZero && (match(V, m_Shl(m_Value(X), m_Value())) || 2128 match(V, m_LShr(m_Value(X), m_Value())))) 2129 return isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q); 2130 2131 if (const ZExtInst *ZI = dyn_cast<ZExtInst>(V)) 2132 return isKnownToBeAPowerOfTwo(ZI->getOperand(0), OrZero, Depth, Q); 2133 2134 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) 2135 return isKnownToBeAPowerOfTwo(SI->getTrueValue(), OrZero, Depth, Q) && 2136 isKnownToBeAPowerOfTwo(SI->getFalseValue(), OrZero, Depth, Q); 2137 2138 // Peek through min/max. 2139 if (match(V, m_MaxOrMin(m_Value(X), m_Value(Y)))) { 2140 return isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q) && 2141 isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q); 2142 } 2143 2144 if (OrZero && match(V, m_And(m_Value(X), m_Value(Y)))) { 2145 // A power of two and'd with anything is a power of two or zero. 2146 if (isKnownToBeAPowerOfTwo(X, /*OrZero*/ true, Depth, Q) || 2147 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, Depth, Q)) 2148 return true; 2149 // X & (-X) is always a power of two or zero. 2150 if (match(X, m_Neg(m_Specific(Y))) || match(Y, m_Neg(m_Specific(X)))) 2151 return true; 2152 return false; 2153 } 2154 2155 // Adding a power-of-two or zero to the same power-of-two or zero yields 2156 // either the original power-of-two, a larger power-of-two or zero. 2157 if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2158 const OverflowingBinaryOperator *VOBO = cast<OverflowingBinaryOperator>(V); 2159 if (OrZero || Q.IIQ.hasNoUnsignedWrap(VOBO) || 2160 Q.IIQ.hasNoSignedWrap(VOBO)) { 2161 if (match(X, m_And(m_Specific(Y), m_Value())) || 2162 match(X, m_And(m_Value(), m_Specific(Y)))) 2163 if (isKnownToBeAPowerOfTwo(Y, OrZero, Depth, Q)) 2164 return true; 2165 if (match(Y, m_And(m_Specific(X), m_Value())) || 2166 match(Y, m_And(m_Value(), m_Specific(X)))) 2167 if (isKnownToBeAPowerOfTwo(X, OrZero, Depth, Q)) 2168 return true; 2169 2170 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 2171 KnownBits LHSBits(BitWidth); 2172 computeKnownBits(X, LHSBits, Depth, Q); 2173 2174 KnownBits RHSBits(BitWidth); 2175 computeKnownBits(Y, RHSBits, Depth, Q); 2176 // If i8 V is a power of two or zero: 2177 // ZeroBits: 1 1 1 0 1 1 1 1 2178 // ~ZeroBits: 0 0 0 1 0 0 0 0 2179 if ((~(LHSBits.Zero & RHSBits.Zero)).isPowerOf2()) 2180 // If OrZero isn't set, we cannot give back a zero result. 2181 // Make sure either the LHS or RHS has a bit set. 
2182 if (OrZero || RHSBits.One.getBoolValue() || LHSBits.One.getBoolValue())
2183 return true;
2184 }
2185 }
2186
2187 // A PHI node is a power of two if all incoming values are powers of two, or
2188 // if it is an induction variable whose value is a power of two at each step.
2189 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
2190 Query RecQ = Q;
2191
2192 // Check if it is an induction variable that is always a power of two.
2193 if (isPowerOfTwoRecurrence(PN, OrZero, Depth, RecQ))
2194 return true;
2195
2196 // Recursively check all incoming values. Limit recursion to 2 levels, so
2197 // that the search complexity is limited to the number of operands^2.
2198 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2199 return llvm::all_of(PN->operands(), [&](const Use &U) {
2200 // The value is a power of 2 if it comes from the PHI node itself, by induction.
2201 if (U.get() == PN)
2202 return true;
2203
2204 // Change the context instruction to the incoming block where it is
2205 // evaluated.
2206 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2207 return isKnownToBeAPowerOfTwo(U.get(), OrZero, NewDepth, RecQ);
2208 });
2209 }
2210
2211 // An exact divide or right shift can only shift off zero bits, so the result
2212 // is a power of two only if the first operand is a power of two and not
2213 // copying a sign bit (sdiv int_min, 2).
2214 if (match(V, m_Exact(m_LShr(m_Value(), m_Value()))) ||
2215 match(V, m_Exact(m_UDiv(m_Value(), m_Value())))) {
2216 return isKnownToBeAPowerOfTwo(cast<Operator>(V)->getOperand(0), OrZero,
2217 Depth, Q);
2218 }
2219
2220 return false;
2221 }
2222
2223 /// Test whether a GEP's result is known to be non-null.
2224 ///
2225 /// Uses properties inherent in a GEP to try to determine whether it is known
2226 /// to be non-null.
2227 ///
2228 /// Currently this routine does not support vector GEPs.
2229 static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
2230 const Query &Q) {
2231 const Function *F = nullptr;
2232 if (const Instruction *I = dyn_cast<Instruction>(GEP))
2233 F = I->getFunction();
2234
2235 if (!GEP->isInBounds() ||
2236 NullPointerIsDefined(F, GEP->getPointerAddressSpace()))
2237 return false;
2238
2239 // FIXME: Support vector GEPs.
2240 assert(GEP->getType()->isPointerTy() && "We only support plain pointer GEP");
2241
2242 // If the base pointer is non-null, we cannot walk to a null address with an
2243 // inbounds GEP in address space zero.
2244 if (isKnownNonZero(GEP->getPointerOperand(), Depth, Q))
2245 return true;
2246
2247 // Walk the GEP operands and see if any operand introduces a non-zero offset.
2248 // If so, then the GEP cannot produce a null pointer, as doing so would
2249 // inherently violate the inbounds contract within address space zero.
2250 for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
2251 GTI != GTE; ++GTI) {
2252 // Struct types are easy -- they must always be indexed by a constant.
2253 if (StructType *STy = GTI.getStructTypeOrNull()) {
2254 ConstantInt *OpC = cast<ConstantInt>(GTI.getOperand());
2255 unsigned ElementIdx = OpC->getZExtValue();
2256 const StructLayout *SL = Q.DL.getStructLayout(STy);
2257 uint64_t ElementOffset = SL->getElementOffset(ElementIdx);
2258 if (ElementOffset > 0)
2259 return true;
2260 continue;
2261 }
2262
2263 // If we have a zero-sized type, the index doesn't matter. Keep looping.
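// (E.g., editor's note: an index over a zero-sized type such as [0 x i32]
// contributes no offset, so it cannot make the GEP non-null by itself.)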
2264 if (Q.DL.getTypeAllocSize(GTI.getIndexedType()).getKnownMinSize() == 0)
2265 continue;
2266
2267 // Fast path the constant operand case both for efficiency and so we don't
2268 // increment Depth when just zipping down an all-constant GEP.
2269 if (ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand())) {
2270 if (!OpC->isZero())
2271 return true;
2272 continue;
2273 }
2274
2275 // We post-increment Depth here because while isKnownNonZero increments it
2276 // as well, when we pop back up that increment won't persist. We don't want
2277 // to recurse 10k times just because we have 10k GEP operands. We don't
2278 // bail out completely because we want to handle constant GEPs regardless
2279 // of depth.
2280 if (Depth++ >= MaxAnalysisRecursionDepth)
2281 continue;
2282
2283 if (isKnownNonZero(GTI.getOperand(), Depth, Q))
2284 return true;
2285 }
2286
2287 return false;
2288 }
2289
2290 static bool isKnownNonNullFromDominatingCondition(const Value *V,
2291 const Instruction *CtxI,
2292 const DominatorTree *DT) {
2293 if (isa<Constant>(V))
2294 return false;
2295
2296 if (!CtxI || !DT)
2297 return false;
2298
2299 unsigned NumUsesExplored = 0;
2300 for (const auto *U : V->users()) {
2301 // Avoid massive lists.
2302 if (NumUsesExplored >= DomConditionsMaxUses)
2303 break;
2304 NumUsesExplored++;
2305
2306 // If the value is used as an argument to a call or invoke, then argument
2307 // attributes may provide an answer about null-ness.
2308 if (const auto *CB = dyn_cast<CallBase>(U))
2309 if (auto *CalledFunc = CB->getCalledFunction())
2310 for (const Argument &Arg : CalledFunc->args())
2311 if (CB->getArgOperand(Arg.getArgNo()) == V &&
2312 Arg.hasNonNullAttr(/* AllowUndefOrPoison */ false) &&
2313 DT->dominates(CB, CtxI))
2314 return true;
2315
2316 // If the value is used as a load/store, then the pointer must be non-null.
2317 if (V == getLoadStorePointerOperand(U)) {
2318 const Instruction *I = cast<Instruction>(U);
2319 if (!NullPointerIsDefined(I->getFunction(),
2320 V->getType()->getPointerAddressSpace()) &&
2321 DT->dominates(I, CtxI))
2322 return true;
2323 }
2324
2325 // Consider only compare instructions uniquely controlling a branch.
2326 Value *RHS;
2327 CmpInst::Predicate Pred;
2328 if (!match(U, m_c_ICmp(Pred, m_Specific(V), m_Value(RHS))))
2329 continue;
2330
2331 bool NonNullIfTrue;
2332 if (cmpExcludesZero(Pred, RHS))
2333 NonNullIfTrue = true;
2334 else if (cmpExcludesZero(CmpInst::getInversePredicate(Pred), RHS))
2335 NonNullIfTrue = false;
2336 else
2337 continue;
2338
2339 SmallVector<const User *, 4> WorkList;
2340 SmallPtrSet<const User *, 4> Visited;
2341 for (const auto *CmpU : U->users()) {
2342 assert(WorkList.empty() && "Should be!");
2343 if (Visited.insert(CmpU).second)
2344 WorkList.push_back(CmpU);
2345
2346 while (!WorkList.empty()) {
2347 auto *Curr = WorkList.pop_back_val();
2348
2349 // If a user is an AND, add all its users to the work list. We only
2350 // propagate the "pred != null" condition through ANDs because it is
2351 // only correct to assume that all conditions of an AND are met in the
2352 // true branch. TODO: Support similar logic for OR and the EQ predicate?
2353 if (NonNullIfTrue)
2354 if (match(Curr, m_LogicalAnd(m_Value(), m_Value()))) {
2355 for (const auto *CurrU : Curr->users())
2356 if (Visited.insert(CurrU).second)
2357 WorkList.push_back(CurrU);
2358 continue;
2359 }
2360
2361 if (const BranchInst *BI = dyn_cast<BranchInst>(Curr)) {
2362 assert(BI->isConditional() && "uses a comparison!");
2363
2364 BasicBlock *NonNullSuccessor =
2365 BI->getSuccessor(NonNullIfTrue ? 0 : 1);
2366 BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
2367 if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
2368 return true;
2369 } else if (NonNullIfTrue && isGuard(Curr) &&
2370 DT->dominates(cast<Instruction>(Curr), CtxI)) {
2371 return true;
2372 }
2373 }
2374 }
2375 }
2376
2377 return false;
2378 }
2379
2380 /// Does the 'Range' metadata (which must be a valid MD_range operand list)
2381 /// ensure that the value it's attached to is never equal to 'Value'?
2382 /// 'Value' must have the same bit width as the range's bounds.
2383 static bool rangeMetadataExcludesValue(const MDNode *Ranges, const APInt &Value) {
2384 const unsigned NumRanges = Ranges->getNumOperands() / 2;
2385 assert(NumRanges >= 1);
2386 for (unsigned i = 0; i < NumRanges; ++i) {
2387 ConstantInt *Lower =
2388 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 0));
2389 ConstantInt *Upper =
2390 mdconst::extract<ConstantInt>(Ranges->getOperand(2 * i + 1));
2391 ConstantRange Range(Lower->getValue(), Upper->getValue());
2392 if (Range.contains(Value))
2393 return false;
2394 }
2395 return true;
2396 }
2397
2398 /// Try to detect a recurrence that monotonically increases/decreases from a
2399 /// non-zero starting value. These are common as induction variables.
2400 static bool isNonZeroRecurrence(const PHINode *PN) {
2401 BinaryOperator *BO = nullptr;
2402 Value *Start = nullptr, *Step = nullptr;
2403 const APInt *StartC, *StepC;
2404 if (!matchSimpleRecurrence(PN, BO, Start, Step) ||
2405 !match(Start, m_APInt(StartC)) || StartC->isZero())
2406 return false;
2407
2408 switch (BO->getOpcode()) {
2409 case Instruction::Add:
2410 // Starting from non-zero and stepping away from zero can never wrap back
2411 // to zero.
2412 return BO->hasNoUnsignedWrap() ||
2413 (BO->hasNoSignedWrap() && match(Step, m_APInt(StepC)) &&
2414 StartC->isNegative() == StepC->isNegative());
2415 case Instruction::Mul:
2416 return (BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap()) &&
2417 match(Step, m_APInt(StepC)) && !StepC->isZero();
2418 case Instruction::Shl:
2419 return BO->hasNoUnsignedWrap() || BO->hasNoSignedWrap();
2420 case Instruction::AShr:
2421 case Instruction::LShr:
2422 return BO->isExact();
2423 default:
2424 return false;
2425 }
2426 }
2427
2428 /// Return true if the given value is known to be non-zero when defined. For
2429 /// vectors, return true if every demanded element is known to be non-zero when
2430 /// defined. For pointers, if the context instruction and dominator tree are
2431 /// specified, perform context-sensitive analysis and return true if the
2432 /// pointer couldn't possibly be null at the specified instruction.
2433 /// Supports values with integer or pointer type and vectors of integers.
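// Example query (editor's sketch, hypothetical IR):
//   %v = or i32 %x, 8
// is known non-zero because the constant operand 8 is non-zero and OR cannot
// clear its bits.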
2434 bool isKnownNonZero(const Value *V, const APInt &DemandedElts, unsigned Depth,
2435 const Query &Q) {
2436 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2437 // vector
2438 if (isa<ScalableVectorType>(V->getType()))
2439 return false;
2440
2441 if (auto *C = dyn_cast<Constant>(V)) {
2442 if (C->isNullValue())
2443 return false;
2444 if (isa<ConstantInt>(C))
2445 // Must be non-zero due to null test above.
2446 return true;
2447
2448 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
2449 // See the comment for IntToPtr/PtrToInt instructions below.
2450 if (CE->getOpcode() == Instruction::IntToPtr ||
2451 CE->getOpcode() == Instruction::PtrToInt)
2452 if (Q.DL.getTypeSizeInBits(CE->getOperand(0)->getType())
2453 .getFixedSize() <=
2454 Q.DL.getTypeSizeInBits(CE->getType()).getFixedSize())
2455 return isKnownNonZero(CE->getOperand(0), Depth, Q);
2456 }
2457
2458 // For constant vectors, check that all elements are undefined or known
2459 // non-zero to determine that the whole vector is known non-zero.
2460 if (auto *VecTy = dyn_cast<FixedVectorType>(C->getType())) {
2461 for (unsigned i = 0, e = VecTy->getNumElements(); i != e; ++i) {
2462 if (!DemandedElts[i])
2463 continue;
2464 Constant *Elt = C->getAggregateElement(i);
2465 if (!Elt || Elt->isNullValue())
2466 return false;
2467 if (!isa<UndefValue>(Elt) && !isa<ConstantInt>(Elt))
2468 return false;
2469 }
2470 return true;
2471 }
2472
2473 // A global variable in address space 0 is non-null unless it is extern weak
2474 // or an absolute symbol reference. Other address spaces may have null as a
2475 // valid address for a global, so we can't assume anything.
2476 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
2477 if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
2478 GV->getType()->getAddressSpace() == 0)
2479 return true;
2480 } else
2481 return false;
2482 }
2483
2484 if (auto *I = dyn_cast<Instruction>(V)) {
2485 if (MDNode *Ranges = Q.IIQ.getMetadata(I, LLVMContext::MD_range)) {
2486 // If the possible ranges don't contain zero, then the value is
2487 // definitely non-zero.
2488 if (auto *Ty = dyn_cast<IntegerType>(V->getType())) {
2489 const APInt ZeroValue(Ty->getBitWidth(), 0);
2490 if (rangeMetadataExcludesValue(Ranges, ZeroValue))
2491 return true;
2492 }
2493 }
2494 }
2495
2496 if (isKnownNonZeroFromAssume(V, Q))
2497 return true;
2498
2499 // Some of the tests below are recursive, so bail out if we hit the limit.
2500 if (Depth++ >= MaxAnalysisRecursionDepth)
2501 return false;
2502
2503 // Check for pointer simplifications.
2504
2505 if (PointerType *PtrTy = dyn_cast<PointerType>(V->getType())) {
2506 // Alloca never returns null, malloc might.
2507 if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
2508 return true;
2509
2510 // A byval or inalloca argument is non-null if null is not a defined
2511 // address in its address space. A nonnull argument is assumed never 0.
2512 if (const Argument *A = dyn_cast<Argument>(V)) {
2513 if (((A->hasPassPointeeByValueCopyAttr() &&
2514 !NullPointerIsDefined(A->getParent(), PtrTy->getAddressSpace())) ||
2515 A->hasNonNullAttr()))
2516 return true;
2517 }
2518
2519 // A load tagged with nonnull metadata is never null.
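// E.g. (editor's illustration, hypothetical IR):
//   %p = load i8*, i8** %pp, !nonnull !0
// is guaranteed non-null by the semantics of the !nonnull metadata.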
2520 if (const LoadInst *LI = dyn_cast<LoadInst>(V)) 2521 if (Q.IIQ.getMetadata(LI, LLVMContext::MD_nonnull)) 2522 return true; 2523 2524 if (const auto *Call = dyn_cast<CallBase>(V)) { 2525 if (Call->isReturnNonNull()) 2526 return true; 2527 if (const auto *RP = getArgumentAliasingToReturnedPointer(Call, true)) 2528 return isKnownNonZero(RP, Depth, Q); 2529 } 2530 } 2531 2532 if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT)) 2533 return true; 2534 2535 // Check for recursive pointer simplifications. 2536 if (V->getType()->isPointerTy()) { 2537 // Look through bitcast operations, GEPs, and int2ptr instructions as they 2538 // do not alter the value, or at least not the nullness property of the 2539 // value, e.g., int2ptr is allowed to zero/sign extend the value. 2540 // 2541 // Note that we have to take special care to avoid looking through 2542 // truncating casts, e.g., int2ptr/ptr2int with appropriate sizes, as well 2543 // as casts that can alter the value, e.g., AddrSpaceCasts. 2544 if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) 2545 return isGEPKnownNonNull(GEP, Depth, Q); 2546 2547 if (auto *BCO = dyn_cast<BitCastOperator>(V)) 2548 return isKnownNonZero(BCO->getOperand(0), Depth, Q); 2549 2550 if (auto *I2P = dyn_cast<IntToPtrInst>(V)) 2551 if (Q.DL.getTypeSizeInBits(I2P->getSrcTy()).getFixedSize() <= 2552 Q.DL.getTypeSizeInBits(I2P->getDestTy()).getFixedSize()) 2553 return isKnownNonZero(I2P->getOperand(0), Depth, Q); 2554 } 2555 2556 // Similar to int2ptr above, we can look through ptr2int here if the cast 2557 // is a no-op or an extend and not a truncate. 2558 if (auto *P2I = dyn_cast<PtrToIntInst>(V)) 2559 if (Q.DL.getTypeSizeInBits(P2I->getSrcTy()).getFixedSize() <= 2560 Q.DL.getTypeSizeInBits(P2I->getDestTy()).getFixedSize()) 2561 return isKnownNonZero(P2I->getOperand(0), Depth, Q); 2562 2563 unsigned BitWidth = getBitWidth(V->getType()->getScalarType(), Q.DL); 2564 2565 // X | Y != 0 if X != 0 or Y != 0. 2566 Value *X = nullptr, *Y = nullptr; 2567 if (match(V, m_Or(m_Value(X), m_Value(Y)))) 2568 return isKnownNonZero(X, DemandedElts, Depth, Q) || 2569 isKnownNonZero(Y, DemandedElts, Depth, Q); 2570 2571 // ext X != 0 if X != 0. 2572 if (isa<SExtInst>(V) || isa<ZExtInst>(V)) 2573 return isKnownNonZero(cast<Instruction>(V)->getOperand(0), Depth, Q); 2574 2575 // shl X, Y != 0 if X is odd. Note that the value of the shift is undefined 2576 // if the lowest bit is shifted off the end. 2577 if (match(V, m_Shl(m_Value(X), m_Value(Y)))) { 2578 // shl nuw can't remove any non-zero bits. 2579 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2580 if (Q.IIQ.hasNoUnsignedWrap(BO)) 2581 return isKnownNonZero(X, Depth, Q); 2582 2583 KnownBits Known(BitWidth); 2584 computeKnownBits(X, DemandedElts, Known, Depth, Q); 2585 if (Known.One[0]) 2586 return true; 2587 } 2588 // shr X, Y != 0 if X is negative. Note that the value of the shift is not 2589 // defined if the sign bit is shifted off the end. 2590 else if (match(V, m_Shr(m_Value(X), m_Value(Y)))) { 2591 // shr exact can only shift out zero bits. 2592 const PossiblyExactOperator *BO = cast<PossiblyExactOperator>(V); 2593 if (BO->isExact()) 2594 return isKnownNonZero(X, Depth, Q); 2595 2596 KnownBits Known = computeKnownBits(X, DemandedElts, Depth, Q); 2597 if (Known.isNegative()) 2598 return true; 2599 2600 // If the shifter operand is a constant, and all of the bits shifted 2601 // out are known to be zero, and X is known non-zero then at least one 2602 // non-zero bit must remain. 
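// Worked example (editor's addition): for i8 %v = lshr %x, 2 where the low
// two bits of %x are known zero, every set bit of %x sits at position 2 or
// higher and survives the shift, so %x != 0 implies %v != 0.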
2603 if (ConstantInt *Shift = dyn_cast<ConstantInt>(Y)) { 2604 auto ShiftVal = Shift->getLimitedValue(BitWidth - 1); 2605 // Is there a known one in the portion not shifted out? 2606 if (Known.countMaxLeadingZeros() < BitWidth - ShiftVal) 2607 return true; 2608 // Are all the bits to be shifted out known zero? 2609 if (Known.countMinTrailingZeros() >= ShiftVal) 2610 return isKnownNonZero(X, DemandedElts, Depth, Q); 2611 } 2612 } 2613 // div exact can only produce a zero if the dividend is zero. 2614 else if (match(V, m_Exact(m_IDiv(m_Value(X), m_Value())))) { 2615 return isKnownNonZero(X, DemandedElts, Depth, Q); 2616 } 2617 // X + Y. 2618 else if (match(V, m_Add(m_Value(X), m_Value(Y)))) { 2619 KnownBits XKnown = computeKnownBits(X, DemandedElts, Depth, Q); 2620 KnownBits YKnown = computeKnownBits(Y, DemandedElts, Depth, Q); 2621 2622 // If X and Y are both non-negative (as signed values) then their sum is not 2623 // zero unless both X and Y are zero. 2624 if (XKnown.isNonNegative() && YKnown.isNonNegative()) 2625 if (isKnownNonZero(X, DemandedElts, Depth, Q) || 2626 isKnownNonZero(Y, DemandedElts, Depth, Q)) 2627 return true; 2628 2629 // If X and Y are both negative (as signed values) then their sum is not 2630 // zero unless both X and Y equal INT_MIN. 2631 if (XKnown.isNegative() && YKnown.isNegative()) { 2632 APInt Mask = APInt::getSignedMaxValue(BitWidth); 2633 // The sign bit of X is set. If some other bit is set then X is not equal 2634 // to INT_MIN. 2635 if (XKnown.One.intersects(Mask)) 2636 return true; 2637 // The sign bit of Y is set. If some other bit is set then Y is not equal 2638 // to INT_MIN. 2639 if (YKnown.One.intersects(Mask)) 2640 return true; 2641 } 2642 2643 // The sum of a non-negative number and a power of two is not zero. 2644 if (XKnown.isNonNegative() && 2645 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ false, Depth, Q)) 2646 return true; 2647 if (YKnown.isNonNegative() && 2648 isKnownToBeAPowerOfTwo(X, /*OrZero*/ false, Depth, Q)) 2649 return true; 2650 } 2651 // X * Y. 2652 else if (match(V, m_Mul(m_Value(X), m_Value(Y)))) { 2653 const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V); 2654 // If X and Y are non-zero then so is X * Y as long as the multiplication 2655 // does not overflow. 2656 if ((Q.IIQ.hasNoSignedWrap(BO) || Q.IIQ.hasNoUnsignedWrap(BO)) && 2657 isKnownNonZero(X, DemandedElts, Depth, Q) && 2658 isKnownNonZero(Y, DemandedElts, Depth, Q)) 2659 return true; 2660 } 2661 // (C ? X : Y) != 0 if X != 0 and Y != 0. 2662 else if (const SelectInst *SI = dyn_cast<SelectInst>(V)) { 2663 if (isKnownNonZero(SI->getTrueValue(), DemandedElts, Depth, Q) && 2664 isKnownNonZero(SI->getFalseValue(), DemandedElts, Depth, Q)) 2665 return true; 2666 } 2667 // PHI 2668 else if (const PHINode *PN = dyn_cast<PHINode>(V)) { 2669 if (Q.IIQ.UseInstrInfo && isNonZeroRecurrence(PN)) 2670 return true; 2671 2672 // Check if all incoming values are non-zero using recursion. 
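// E.g. (editor's sketch, hypothetical IR): the canonical counting loop
//   %i = phi i32 [ 1, %entry ], [ %i.next, %loop ]
//   %i.next = add nuw i32 %i, 1
// is already caught by isNonZeroRecurrence above; the recursive walk below
// handles arbitrary incoming values instead.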
2673 Query RecQ = Q;
2674 unsigned NewDepth = std::max(Depth, MaxAnalysisRecursionDepth - 1);
2675 return llvm::all_of(PN->operands(), [&](const Use &U) {
2676 if (U.get() == PN)
2677 return true;
2678 RecQ.CxtI = PN->getIncomingBlock(U)->getTerminator();
2679 return isKnownNonZero(U.get(), DemandedElts, NewDepth, RecQ);
2680 });
2681 }
2682 // ExtractElement
2683 else if (const auto *EEI = dyn_cast<ExtractElementInst>(V)) {
2684 const Value *Vec = EEI->getVectorOperand();
2685 const Value *Idx = EEI->getIndexOperand();
2686 auto *CIdx = dyn_cast<ConstantInt>(Idx);
2687 if (auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType())) {
2688 unsigned NumElts = VecTy->getNumElements();
2689 APInt DemandedVecElts = APInt::getAllOnes(NumElts);
2690 if (CIdx && CIdx->getValue().ult(NumElts))
2691 DemandedVecElts = APInt::getOneBitSet(NumElts, CIdx->getZExtValue());
2692 return isKnownNonZero(Vec, DemandedVecElts, Depth, Q);
2693 }
2694 }
2695 // Freeze
2696 else if (const FreezeInst *FI = dyn_cast<FreezeInst>(V)) {
2697 auto *Op = FI->getOperand(0);
2698 if (isKnownNonZero(Op, Depth, Q) &&
2699 isGuaranteedNotToBePoison(Op, Q.AC, Q.CxtI, Q.DT, Depth))
2700 return true;
2701 } else if (const auto *II = dyn_cast<IntrinsicInst>(V)) {
2702 if (II->getIntrinsicID() == Intrinsic::vscale)
2703 return true;
2704 }
2705
2706 KnownBits Known(BitWidth);
2707 computeKnownBits(V, DemandedElts, Known, Depth, Q);
2708 return Known.One != 0;
2709 }
2710
2711 bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
2712 // FIXME: We currently have no way to represent the DemandedElts of a scalable
2713 // vector
2714 if (isa<ScalableVectorType>(V->getType()))
2715 return false;
2716
2717 auto *FVTy = dyn_cast<FixedVectorType>(V->getType());
2718 APInt DemandedElts =
2719 FVTy ? APInt::getAllOnes(FVTy->getNumElements()) : APInt(1, 1);
2720 return isKnownNonZero(V, DemandedElts, Depth, Q);
2721 }
2722
2723 /// If the pair of operators are the same invertible function, return the
2724 /// operands of the function corresponding to each input. Otherwise,
2725 /// return None. An invertible function is one that is 1-to-1 and maps
2726 /// every input value to exactly one output value. This is equivalent to
2727 /// saying that Op1 and Op2 are equal exactly when the specified pair of
2728 /// operands are equal (except that Op1 and Op2 may be poison more often).
2729 static Optional<std::pair<Value*, Value*>>
2730 getInvertibleOperands(const Operator *Op1,
2731 const Operator *Op2) {
2732 if (Op1->getOpcode() != Op2->getOpcode())
2733 return None;
2734
2735 auto getOperands = [&](unsigned OpNum) -> auto {
2736 return std::make_pair(Op1->getOperand(OpNum), Op2->getOperand(OpNum));
2737 };
2738
2739 switch (Op1->getOpcode()) {
2740 default:
2741 break;
2742 case Instruction::Add:
2743 case Instruction::Sub:
2744 if (Op1->getOperand(0) == Op2->getOperand(0))
2745 return getOperands(1);
2746 if (Op1->getOperand(1) == Op2->getOperand(1))
2747 return getOperands(0);
2748 break;
2749 case Instruction::Mul: {
2750 // Multiplication is invertible if A * B == (A * B) mod 2^N, where A and B
2751 // are integers and N is the bit width.
The nsw case is non-obvious, but proven by 2752 // alive2: https://alive2.llvm.org/ce/z/Z6D5qK 2753 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); 2754 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2); 2755 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && 2756 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) 2757 break; 2758 2759 // Assume operand order has been canonicalized 2760 if (Op1->getOperand(1) == Op2->getOperand(1) && 2761 isa<ConstantInt>(Op1->getOperand(1)) && 2762 !cast<ConstantInt>(Op1->getOperand(1))->isZero()) 2763 return getOperands(0); 2764 break; 2765 } 2766 case Instruction::Shl: { 2767 // Same as multiplies, with the difference that we don't need to check 2768 // for a non-zero multiply. Shifts always multiply by non-zero. 2769 auto *OBO1 = cast<OverflowingBinaryOperator>(Op1); 2770 auto *OBO2 = cast<OverflowingBinaryOperator>(Op2); 2771 if ((!OBO1->hasNoUnsignedWrap() || !OBO2->hasNoUnsignedWrap()) && 2772 (!OBO1->hasNoSignedWrap() || !OBO2->hasNoSignedWrap())) 2773 break; 2774 2775 if (Op1->getOperand(1) == Op2->getOperand(1)) 2776 return getOperands(0); 2777 break; 2778 } 2779 case Instruction::AShr: 2780 case Instruction::LShr: { 2781 auto *PEO1 = cast<PossiblyExactOperator>(Op1); 2782 auto *PEO2 = cast<PossiblyExactOperator>(Op2); 2783 if (!PEO1->isExact() || !PEO2->isExact()) 2784 break; 2785 2786 if (Op1->getOperand(1) == Op2->getOperand(1)) 2787 return getOperands(0); 2788 break; 2789 } 2790 case Instruction::SExt: 2791 case Instruction::ZExt: 2792 if (Op1->getOperand(0)->getType() == Op2->getOperand(0)->getType()) 2793 return getOperands(0); 2794 break; 2795 case Instruction::PHI: { 2796 const PHINode *PN1 = cast<PHINode>(Op1); 2797 const PHINode *PN2 = cast<PHINode>(Op2); 2798 2799 // If PN1 and PN2 are both recurrences, can we prove the entire recurrences 2800 // are a single invertible function of the start values? Note that repeated 2801 // application of an invertible function is also invertible 2802 BinaryOperator *BO1 = nullptr; 2803 Value *Start1 = nullptr, *Step1 = nullptr; 2804 BinaryOperator *BO2 = nullptr; 2805 Value *Start2 = nullptr, *Step2 = nullptr; 2806 if (PN1->getParent() != PN2->getParent() || 2807 !matchSimpleRecurrence(PN1, BO1, Start1, Step1) || 2808 !matchSimpleRecurrence(PN2, BO2, Start2, Step2)) 2809 break; 2810 2811 auto Values = getInvertibleOperands(cast<Operator>(BO1), 2812 cast<Operator>(BO2)); 2813 if (!Values) 2814 break; 2815 2816 // We have to be careful of mutually defined recurrences here. Ex: 2817 // * X_i = X_(i-1) OP Y_(i-1), and Y_i = X_(i-1) OP V 2818 // * X_i = Y_i = X_(i-1) OP Y_(i-1) 2819 // The invertibility of these is complicated, and not worth reasoning 2820 // about (yet?). 2821 if (Values->first != PN1 || Values->second != PN2) 2822 break; 2823 2824 return std::make_pair(Start1, Start2); 2825 } 2826 } 2827 return None; 2828 } 2829 2830 /// Return true if V2 == V1 + X, where X is known non-zero. 
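// (Editor's note: e.g. if %v2 = add i32 %v1, %x and %x is known non-zero,
// then %v1 != %v2, since adding a non-zero value always changes the result
// modulo 2^N.)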
2831 static bool isAddOfNonZero(const Value *V1, const Value *V2, unsigned Depth,
2832 const Query &Q) {
2833 const BinaryOperator *BO = dyn_cast<BinaryOperator>(V1);
2834 if (!BO || BO->getOpcode() != Instruction::Add)
2835 return false;
2836 Value *Op = nullptr;
2837 if (V2 == BO->getOperand(0))
2838 Op = BO->getOperand(1);
2839 else if (V2 == BO->getOperand(1))
2840 Op = BO->getOperand(0);
2841 else
2842 return false;
2843 return isKnownNonZero(Op, Depth + 1, Q);
2844 }
2845
2846 /// Return true if V2 == V1 * C, where V1 is known non-zero, C is not 0/1 and
2847 /// the multiplication is nuw or nsw.
2848 static bool isNonEqualMul(const Value *V1, const Value *V2, unsigned Depth,
2849 const Query &Q) {
2850 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2851 const APInt *C;
2852 return match(OBO, m_Mul(m_Specific(V1), m_APInt(C))) &&
2853 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2854 !C->isZero() && !C->isOne() && isKnownNonZero(V1, Depth + 1, Q);
2855 }
2856 return false;
2857 }
2858
2859 /// Return true if V2 == V1 << C, where V1 is known non-zero, C is not 0 and
2860 /// the shift is nuw or nsw.
2861 static bool isNonEqualShl(const Value *V1, const Value *V2, unsigned Depth,
2862 const Query &Q) {
2863 if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(V2)) {
2864 const APInt *C;
2865 return match(OBO, m_Shl(m_Specific(V1), m_APInt(C))) &&
2866 (OBO->hasNoUnsignedWrap() || OBO->hasNoSignedWrap()) &&
2867 !C->isZero() && isKnownNonZero(V1, Depth + 1, Q);
2868 }
2869 return false;
2870 }
2871
2872 static bool isNonEqualPHIs(const PHINode *PN1, const PHINode *PN2,
2873 unsigned Depth, const Query &Q) {
2874 // Check that the two PHIs are in the same block.
2875 if (PN1->getParent() != PN2->getParent())
2876 return false;
2877
2878 SmallPtrSet<const BasicBlock *, 8> VisitedBBs;
2879 bool UsedFullRecursion = false;
2880 for (const BasicBlock *IncomBB : PN1->blocks()) {
2881 if (!VisitedBBs.insert(IncomBB).second)
2882 continue; // Don't reprocess blocks that we have dealt with already.
2883 const Value *IV1 = PN1->getIncomingValueForBlock(IncomBB);
2884 const Value *IV2 = PN2->getIncomingValueForBlock(IncomBB);
2885 const APInt *C1, *C2;
2886 if (match(IV1, m_APInt(C1)) && match(IV2, m_APInt(C2)) && *C1 != *C2)
2887 continue;
2888
2889 // Only one pair of phi operands is allowed for full recursion.
2890 if (UsedFullRecursion)
2891 return false;
2892
2893 Query RecQ = Q;
2894 RecQ.CxtI = IncomBB->getTerminator();
2895 if (!isKnownNonEqual(IV1, IV2, Depth + 1, RecQ))
2896 return false;
2897 UsedFullRecursion = true;
2898 }
2899 return true;
2900 }
2901
2902 /// Return true if it is known that V1 != V2.
2903 static bool isKnownNonEqual(const Value *V1, const Value *V2, unsigned Depth,
2904 const Query &Q) {
2905 if (V1 == V2)
2906 return false;
2907 if (V1->getType() != V2->getType())
2908 // We can't look through casts yet.
2909 return false;
2910
2911 if (Depth >= MaxAnalysisRecursionDepth)
2912 return false;
2913
2914 // See if we can recurse through (exactly one of) our operands. This
2915 // requires that our operation be 1-to-1 and map every input value to
2916 // exactly one output value. Such an operation is invertible.
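// For instance (editor's illustration): X + C and Y + C are equal exactly
// when X == Y, so proving isKnownNonEqual(X, Y) also proves the two sums
// distinct.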
2917 auto *O1 = dyn_cast<Operator>(V1);
2918 auto *O2 = dyn_cast<Operator>(V2);
2919 if (O1 && O2 && O1->getOpcode() == O2->getOpcode()) {
2920 if (auto Values = getInvertibleOperands(O1, O2))
2921 return isKnownNonEqual(Values->first, Values->second, Depth + 1, Q);
2922
2923 if (const PHINode *PN1 = dyn_cast<PHINode>(V1)) {
2924 const PHINode *PN2 = cast<PHINode>(V2);
2925 // FIXME: This is missing a generalization to handle the case where one
2926 // is a PHI and the other one isn't.
2927 if (isNonEqualPHIs(PN1, PN2, Depth, Q))
2928 return true;
2929 }
2930 }
2931
2932 if (isAddOfNonZero(V1, V2, Depth, Q) || isAddOfNonZero(V2, V1, Depth, Q))
2933 return true;
2934
2935 if (isNonEqualMul(V1, V2, Depth, Q) || isNonEqualMul(V2, V1, Depth, Q))
2936 return true;
2937
2938 if (isNonEqualShl(V1, V2, Depth, Q) || isNonEqualShl(V2, V1, Depth, Q))
2939 return true;
2940
2941 if (V1->getType()->isIntOrIntVectorTy()) {
2942 // Are any known bits in V1 contradictory to known bits in V2? If V1
2943 // has a known zero where V2 has a known one, they must not be equal.
2944 KnownBits Known1 = computeKnownBits(V1, Depth, Q);
2945 KnownBits Known2 = computeKnownBits(V2, Depth, Q);
2946
2947 if (Known1.Zero.intersects(Known2.One) ||
2948 Known2.Zero.intersects(Known1.One))
2949 return true;
2950 }
2951 return false;
2952 }
2953
2954 /// Return true if 'V & Mask' is known to be zero. We use this predicate to
2955 /// simplify operations downstream. That is, every bit set in Mask must
2956 /// correspond to a bit of V that is known to be zero.
2957 ///
2958 /// This function is defined on values with integer type, values with pointer
2959 /// type, and vectors of integers. In the case where V is a vector, the mask,
2960 /// known zero, and known one values are the same width as the vector element,
2961 /// and the bit is set only if it is true for all of the elements in the
2962 /// vector.
2963 bool MaskedValueIsZero(const Value *V, const APInt &Mask, unsigned Depth,
2964 const Query &Q) {
2965 KnownBits Known(Mask.getBitWidth());
2966 computeKnownBits(V, Known, Depth, Q);
2967 return Mask.isSubsetOf(Known.Zero);
2968 }
2969
2970 // Match a signed min+max clamp pattern like smax(smin(In, CHigh), CLow).
2971 // Returns the input and lower/upper bounds.
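// E.g. (editor's sketch): for smax(smin(%x, 100), -50) the matcher below
// returns In = %x, CLow = -50, CHigh = 100, and the clamped result is known
// to lie in [-50, 100].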
2972 static bool isSignedMinMaxClamp(const Value *Select, const Value *&In, 2973 const APInt *&CLow, const APInt *&CHigh) { 2974 assert(isa<Operator>(Select) && 2975 cast<Operator>(Select)->getOpcode() == Instruction::Select && 2976 "Input should be a Select!"); 2977 2978 const Value *LHS = nullptr, *RHS = nullptr; 2979 SelectPatternFlavor SPF = matchSelectPattern(Select, LHS, RHS).Flavor; 2980 if (SPF != SPF_SMAX && SPF != SPF_SMIN) 2981 return false; 2982 2983 if (!match(RHS, m_APInt(CLow))) 2984 return false; 2985 2986 const Value *LHS2 = nullptr, *RHS2 = nullptr; 2987 SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor; 2988 if (getInverseMinMaxFlavor(SPF) != SPF2) 2989 return false; 2990 2991 if (!match(RHS2, m_APInt(CHigh))) 2992 return false; 2993 2994 if (SPF == SPF_SMIN) 2995 std::swap(CLow, CHigh); 2996 2997 In = LHS2; 2998 return CLow->sle(*CHigh); 2999 } 3000 3001 static bool isSignedMinMaxIntrinsicClamp(const IntrinsicInst *II, 3002 const APInt *&CLow, 3003 const APInt *&CHigh) { 3004 assert((II->getIntrinsicID() == Intrinsic::smin || 3005 II->getIntrinsicID() == Intrinsic::smax) && "Must be smin/smax"); 3006 3007 Intrinsic::ID InverseID = getInverseMinMaxIntrinsic(II->getIntrinsicID()); 3008 auto *InnerII = dyn_cast<IntrinsicInst>(II->getArgOperand(0)); 3009 if (!InnerII || InnerII->getIntrinsicID() != InverseID || 3010 !match(II->getArgOperand(1), m_APInt(CLow)) || 3011 !match(InnerII->getArgOperand(1), m_APInt(CHigh))) 3012 return false; 3013 3014 if (II->getIntrinsicID() == Intrinsic::smin) 3015 std::swap(CLow, CHigh); 3016 return CLow->sle(*CHigh); 3017 } 3018 3019 /// For vector constants, loop over the elements and find the constant with the 3020 /// minimum number of sign bits. Return 0 if the value is not a vector constant 3021 /// or if any element was not analyzed; otherwise, return the count for the 3022 /// element with the minimum number of sign bits. 3023 static unsigned computeNumSignBitsVectorConstant(const Value *V, 3024 const APInt &DemandedElts, 3025 unsigned TyBits) { 3026 const auto *CV = dyn_cast<Constant>(V); 3027 if (!CV || !isa<FixedVectorType>(CV->getType())) 3028 return 0; 3029 3030 unsigned MinSignBits = TyBits; 3031 unsigned NumElts = cast<FixedVectorType>(CV->getType())->getNumElements(); 3032 for (unsigned i = 0; i != NumElts; ++i) { 3033 if (!DemandedElts[i]) 3034 continue; 3035 // If we find a non-ConstantInt, bail out. 3036 auto *Elt = dyn_cast_or_null<ConstantInt>(CV->getAggregateElement(i)); 3037 if (!Elt) 3038 return 0; 3039 3040 MinSignBits = std::min(MinSignBits, Elt->getValue().getNumSignBits()); 3041 } 3042 3043 return MinSignBits; 3044 } 3045 3046 static unsigned ComputeNumSignBitsImpl(const Value *V, 3047 const APInt &DemandedElts, 3048 unsigned Depth, const Query &Q); 3049 3050 static unsigned ComputeNumSignBits(const Value *V, const APInt &DemandedElts, 3051 unsigned Depth, const Query &Q) { 3052 unsigned Result = ComputeNumSignBitsImpl(V, DemandedElts, Depth, Q); 3053 assert(Result > 0 && "At least one sign bit needs to be present!"); 3054 return Result; 3055 } 3056 3057 /// Return the number of times the sign bit of the register is replicated into 3058 /// the other bits. We know that at least 1 bit is always equal to the sign bit 3059 /// (itself), but other cases can give us information. For example, immediately 3060 /// after an "ashr X, 2", we know that the top 3 bits are all equal to each 3061 /// other, so we return 3. 
For vectors, return the number of sign bits for the
3062 /// vector element with the minimum number of known sign bits of the demanded
3063 /// elements in the vector specified by DemandedElts.
3064 static unsigned ComputeNumSignBitsImpl(const Value *V,
3065                                        const APInt &DemandedElts,
3066                                        unsigned Depth, const Query &Q) {
3067   Type *Ty = V->getType();
3068
3069   // FIXME: We currently have no way to represent the DemandedElts of a
3070   // scalable vector.
3071   if (isa<ScalableVectorType>(Ty))
3072     return 1;
3073
3074 #ifndef NDEBUG
3075   assert(Depth <= MaxAnalysisRecursionDepth && "Limit Search Depth");
3076
3077   if (auto *FVTy = dyn_cast<FixedVectorType>(Ty)) {
3078     assert(
3079         FVTy->getNumElements() == DemandedElts.getBitWidth() &&
3080         "DemandedElt width should equal the fixed vector number of elements");
3081   } else {
3082     assert(DemandedElts == APInt(1, 1) &&
3083            "DemandedElt width should be 1 for scalars");
3084   }
3085 #endif
3086
3087   // We return the minimum number of sign bits that are guaranteed to be
3088   // present in V, so for undef we have to conservatively return 1. We don't
3089   // have the same behavior for poison though -- that's a FIXME today.
3090
3091   Type *ScalarTy = Ty->getScalarType();
3092   unsigned TyBits = ScalarTy->isPointerTy() ?
3093     Q.DL.getPointerTypeSizeInBits(ScalarTy) :
3094     Q.DL.getTypeSizeInBits(ScalarTy);
3095
3096   unsigned Tmp, Tmp2;
3097   unsigned FirstAnswer = 1;
3098
3099   // Note that ConstantInt is handled by the general computeKnownBits case
3100   // below.
3101
3102   if (Depth == MaxAnalysisRecursionDepth)
3103     return 1;
3104
3105   if (auto *U = dyn_cast<Operator>(V)) {
3106     switch (Operator::getOpcode(V)) {
3107     default: break;
3108     case Instruction::SExt:
3109       Tmp = TyBits - U->getOperand(0)->getType()->getScalarSizeInBits();
3110       return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q) + Tmp;
3111
3112     case Instruction::SDiv: {
3113       const APInt *Denominator;
3114       // sdiv X, C -> adds log(C) sign bits.
3115       if (match(U->getOperand(1), m_APInt(Denominator))) {
3116
3117         // Ignore non-positive denominator.
3118         if (!Denominator->isStrictlyPositive())
3119           break;
3120
3121         // Calculate the incoming numerator bits.
3122         unsigned NumBits = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3123
3124         // Add floor(log(C)) bits to the numerator bits.
3125         return std::min(TyBits, NumBits + Denominator->logBase2());
3126       }
3127       break;
3128     }
3129
3130     case Instruction::SRem: {
3131       Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q);
3132
3133       const APInt *Denominator;
3134       // srem X, C -> we know that the result is within [-C+1,C) when C is a
3135       // positive constant. This lets us put a lower bound on the number of
3136       // sign bits.
3137       if (match(U->getOperand(1), m_APInt(Denominator))) {
3138
3139         // Ignore non-positive denominator.
3140         if (Denominator->isStrictlyPositive()) {
3141           // Calculate the leading sign bit constraints by examining the
3142           // denominator. Given that the denominator is positive, there are
3143           // two cases:
3144           //
3145           //  1. The numerator is positive. The result range is [0,C) and
3146           //     [0,C) u< (1 << ceilLogBase2(C)).
3147           //
3148           //  2. The numerator is negative. Then the result range is (-C,0]
3149           //     and integers in (-C,0] are either 0 or >u (-1 << ceilLogBase2(C)).
3150           //
3151           // Thus a lower bound on the number of sign bits is `TyBits -
3152           // ceilLogBase2(C)`.
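          //
          // For example (illustrative): for `srem i8 %X, 10`,
          // ceilLogBase2(10) == 4, so the result lies in (-10, 10) and has
          // at least 8 - 4 = 4 sign bits.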
3153 3154 unsigned ResBits = TyBits - Denominator->ceilLogBase2(); 3155 Tmp = std::max(Tmp, ResBits); 3156 } 3157 } 3158 return Tmp; 3159 } 3160 3161 case Instruction::AShr: { 3162 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3163 // ashr X, C -> adds C sign bits. Vectors too. 3164 const APInt *ShAmt; 3165 if (match(U->getOperand(1), m_APInt(ShAmt))) { 3166 if (ShAmt->uge(TyBits)) 3167 break; // Bad shift. 3168 unsigned ShAmtLimited = ShAmt->getZExtValue(); 3169 Tmp += ShAmtLimited; 3170 if (Tmp > TyBits) Tmp = TyBits; 3171 } 3172 return Tmp; 3173 } 3174 case Instruction::Shl: { 3175 const APInt *ShAmt; 3176 if (match(U->getOperand(1), m_APInt(ShAmt))) { 3177 // shl destroys sign bits. 3178 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3179 if (ShAmt->uge(TyBits) || // Bad shift. 3180 ShAmt->uge(Tmp)) break; // Shifted all sign bits out. 3181 Tmp2 = ShAmt->getZExtValue(); 3182 return Tmp - Tmp2; 3183 } 3184 break; 3185 } 3186 case Instruction::And: 3187 case Instruction::Or: 3188 case Instruction::Xor: // NOT is handled here. 3189 // Logical binary ops preserve the number of sign bits at the worst. 3190 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3191 if (Tmp != 1) { 3192 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3193 FirstAnswer = std::min(Tmp, Tmp2); 3194 // We computed what we know about the sign bits as our first 3195 // answer. Now proceed to the generic code that uses 3196 // computeKnownBits, and pick whichever answer is better. 3197 } 3198 break; 3199 3200 case Instruction::Select: { 3201 // If we have a clamp pattern, we know that the number of sign bits will 3202 // be the minimum of the clamp min/max range. 3203 const Value *X; 3204 const APInt *CLow, *CHigh; 3205 if (isSignedMinMaxClamp(U, X, CLow, CHigh)) 3206 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); 3207 3208 Tmp = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3209 if (Tmp == 1) break; 3210 Tmp2 = ComputeNumSignBits(U->getOperand(2), Depth + 1, Q); 3211 return std::min(Tmp, Tmp2); 3212 } 3213 3214 case Instruction::Add: 3215 // Add can have at most one carry bit. Thus we know that the output 3216 // is, at worst, one more bit than the inputs. 3217 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3218 if (Tmp == 1) break; 3219 3220 // Special case decrementing a value (ADD X, -1): 3221 if (const auto *CRHS = dyn_cast<Constant>(U->getOperand(1))) 3222 if (CRHS->isAllOnesValue()) { 3223 KnownBits Known(TyBits); 3224 computeKnownBits(U->getOperand(0), Known, Depth + 1, Q); 3225 3226 // If the input is known to be 0 or 1, the output is 0/-1, which is 3227 // all sign bits set. 3228 if ((Known.Zero | 1).isAllOnes()) 3229 return TyBits; 3230 3231 // If we are subtracting one from a positive number, there is no carry 3232 // out of the result. 3233 if (Known.isNonNegative()) 3234 return Tmp; 3235 } 3236 3237 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3238 if (Tmp2 == 1) break; 3239 return std::min(Tmp, Tmp2) - 1; 3240 3241 case Instruction::Sub: 3242 Tmp2 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3243 if (Tmp2 == 1) break; 3244 3245 // Handle NEG. 3246 if (const auto *CLHS = dyn_cast<Constant>(U->getOperand(0))) 3247 if (CLHS->isNullValue()) { 3248 KnownBits Known(TyBits); 3249 computeKnownBits(U->getOperand(1), Known, Depth + 1, Q); 3250 // If the input is known to be 0 or 1, the output is 0/-1, which is 3251 // all sign bits set. 
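        // For example (illustrative): in i8, 0 - 0 == 0x00 and 0 - 1 == 0xFF;
        // both results consist entirely of sign bits.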
3252 if ((Known.Zero | 1).isAllOnes()) 3253 return TyBits; 3254 3255 // If the input is known to be positive (the sign bit is known clear), 3256 // the output of the NEG has the same number of sign bits as the 3257 // input. 3258 if (Known.isNonNegative()) 3259 return Tmp2; 3260 3261 // Otherwise, we treat this like a SUB. 3262 } 3263 3264 // Sub can have at most one carry bit. Thus we know that the output 3265 // is, at worst, one more bit than the inputs. 3266 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3267 if (Tmp == 1) break; 3268 return std::min(Tmp, Tmp2) - 1; 3269 3270 case Instruction::Mul: { 3271 // The output of the Mul can be at most twice the valid bits in the 3272 // inputs. 3273 unsigned SignBitsOp0 = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3274 if (SignBitsOp0 == 1) break; 3275 unsigned SignBitsOp1 = ComputeNumSignBits(U->getOperand(1), Depth + 1, Q); 3276 if (SignBitsOp1 == 1) break; 3277 unsigned OutValidBits = 3278 (TyBits - SignBitsOp0 + 1) + (TyBits - SignBitsOp1 + 1); 3279 return OutValidBits > TyBits ? 1 : TyBits - OutValidBits + 1; 3280 } 3281 3282 case Instruction::PHI: { 3283 const PHINode *PN = cast<PHINode>(U); 3284 unsigned NumIncomingValues = PN->getNumIncomingValues(); 3285 // Don't analyze large in-degree PHIs. 3286 if (NumIncomingValues > 4) break; 3287 // Unreachable blocks may have zero-operand PHI nodes. 3288 if (NumIncomingValues == 0) break; 3289 3290 // Take the minimum of all incoming values. This can't infinitely loop 3291 // because of our depth threshold. 3292 Query RecQ = Q; 3293 Tmp = TyBits; 3294 for (unsigned i = 0, e = NumIncomingValues; i != e; ++i) { 3295 if (Tmp == 1) return Tmp; 3296 RecQ.CxtI = PN->getIncomingBlock(i)->getTerminator(); 3297 Tmp = std::min( 3298 Tmp, ComputeNumSignBits(PN->getIncomingValue(i), Depth + 1, RecQ)); 3299 } 3300 return Tmp; 3301 } 3302 3303 case Instruction::Trunc: 3304 // FIXME: it's tricky to do anything useful for this, but it is an 3305 // important case for targets like X86. 3306 break; 3307 3308 case Instruction::ExtractElement: 3309 // Look through extract element. At the moment we keep this simple and 3310 // skip tracking the specific element. But at least we might find 3311 // information valid for all elements of the vector (for example if vector 3312 // is sign extended, shifted, etc). 3313 return ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3314 3315 case Instruction::ShuffleVector: { 3316 // Collect the minimum number of sign bits that are shared by every vector 3317 // element referenced by the shuffle. 3318 auto *Shuf = dyn_cast<ShuffleVectorInst>(U); 3319 if (!Shuf) { 3320 // FIXME: Add support for shufflevector constant expressions. 3321 return 1; 3322 } 3323 APInt DemandedLHS, DemandedRHS; 3324 // For undef elements, we don't know anything about the common state of 3325 // the shuffle result. 3326 if (!getShuffleDemandedElts(Shuf, DemandedElts, DemandedLHS, DemandedRHS)) 3327 return 1; 3328 Tmp = std::numeric_limits<unsigned>::max(); 3329 if (!!DemandedLHS) { 3330 const Value *LHS = Shuf->getOperand(0); 3331 Tmp = ComputeNumSignBits(LHS, DemandedLHS, Depth + 1, Q); 3332 } 3333 // If we don't know anything, early out and try computeKnownBits 3334 // fall-back. 3335 if (Tmp == 1) 3336 break; 3337 if (!!DemandedRHS) { 3338 const Value *RHS = Shuf->getOperand(1); 3339 Tmp2 = ComputeNumSignBits(RHS, DemandedRHS, Depth + 1, Q); 3340 Tmp = std::min(Tmp, Tmp2); 3341 } 3342 // If we don't know anything, early out and try computeKnownBits 3343 // fall-back. 
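      // (A Tmp of 1 is the no-information answer: only the sign bit itself is
      // known to be a sign bit.)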
3344 if (Tmp == 1) 3345 break; 3346 assert(Tmp <= TyBits && "Failed to determine minimum sign bits"); 3347 return Tmp; 3348 } 3349 case Instruction::Call: { 3350 if (const auto *II = dyn_cast<IntrinsicInst>(U)) { 3351 switch (II->getIntrinsicID()) { 3352 default: break; 3353 case Intrinsic::abs: 3354 Tmp = ComputeNumSignBits(U->getOperand(0), Depth + 1, Q); 3355 if (Tmp == 1) break; 3356 3357 // Absolute value reduces number of sign bits by at most 1. 3358 return Tmp - 1; 3359 case Intrinsic::smin: 3360 case Intrinsic::smax: { 3361 const APInt *CLow, *CHigh; 3362 if (isSignedMinMaxIntrinsicClamp(II, CLow, CHigh)) 3363 return std::min(CLow->getNumSignBits(), CHigh->getNumSignBits()); 3364 } 3365 } 3366 } 3367 } 3368 } 3369 } 3370 3371 // Finally, if we can prove that the top bits of the result are 0's or 1's, 3372 // use this information. 3373 3374 // If we can examine all elements of a vector constant successfully, we're 3375 // done (we can't do any better than that). If not, keep trying. 3376 if (unsigned VecSignBits = 3377 computeNumSignBitsVectorConstant(V, DemandedElts, TyBits)) 3378 return VecSignBits; 3379 3380 KnownBits Known(TyBits); 3381 computeKnownBits(V, DemandedElts, Known, Depth, Q); 3382 3383 // If we know that the sign bit is either zero or one, determine the number of 3384 // identical bits in the top of the input value. 3385 return std::max(FirstAnswer, Known.countMinSignBits()); 3386 } 3387 3388 Intrinsic::ID llvm::getIntrinsicForCallSite(const CallBase &CB, 3389 const TargetLibraryInfo *TLI) { 3390 const Function *F = CB.getCalledFunction(); 3391 if (!F) 3392 return Intrinsic::not_intrinsic; 3393 3394 if (F->isIntrinsic()) 3395 return F->getIntrinsicID(); 3396 3397 // We are going to infer semantics of a library function based on mapping it 3398 // to an LLVM intrinsic. Check that the library function is available from 3399 // this callbase and in this environment. 
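  // For example (illustrative): a read-only call to sinf() that TLI
  // recognizes maps to Intrinsic::sin in the switch below.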
3400 LibFunc Func; 3401 if (F->hasLocalLinkage() || !TLI || !TLI->getLibFunc(CB, Func) || 3402 !CB.onlyReadsMemory()) 3403 return Intrinsic::not_intrinsic; 3404 3405 switch (Func) { 3406 default: 3407 break; 3408 case LibFunc_sin: 3409 case LibFunc_sinf: 3410 case LibFunc_sinl: 3411 return Intrinsic::sin; 3412 case LibFunc_cos: 3413 case LibFunc_cosf: 3414 case LibFunc_cosl: 3415 return Intrinsic::cos; 3416 case LibFunc_exp: 3417 case LibFunc_expf: 3418 case LibFunc_expl: 3419 return Intrinsic::exp; 3420 case LibFunc_exp2: 3421 case LibFunc_exp2f: 3422 case LibFunc_exp2l: 3423 return Intrinsic::exp2; 3424 case LibFunc_log: 3425 case LibFunc_logf: 3426 case LibFunc_logl: 3427 return Intrinsic::log; 3428 case LibFunc_log10: 3429 case LibFunc_log10f: 3430 case LibFunc_log10l: 3431 return Intrinsic::log10; 3432 case LibFunc_log2: 3433 case LibFunc_log2f: 3434 case LibFunc_log2l: 3435 return Intrinsic::log2; 3436 case LibFunc_fabs: 3437 case LibFunc_fabsf: 3438 case LibFunc_fabsl: 3439 return Intrinsic::fabs; 3440 case LibFunc_fmin: 3441 case LibFunc_fminf: 3442 case LibFunc_fminl: 3443 return Intrinsic::minnum; 3444 case LibFunc_fmax: 3445 case LibFunc_fmaxf: 3446 case LibFunc_fmaxl: 3447 return Intrinsic::maxnum; 3448 case LibFunc_copysign: 3449 case LibFunc_copysignf: 3450 case LibFunc_copysignl: 3451 return Intrinsic::copysign; 3452 case LibFunc_floor: 3453 case LibFunc_floorf: 3454 case LibFunc_floorl: 3455 return Intrinsic::floor; 3456 case LibFunc_ceil: 3457 case LibFunc_ceilf: 3458 case LibFunc_ceill: 3459 return Intrinsic::ceil; 3460 case LibFunc_trunc: 3461 case LibFunc_truncf: 3462 case LibFunc_truncl: 3463 return Intrinsic::trunc; 3464 case LibFunc_rint: 3465 case LibFunc_rintf: 3466 case LibFunc_rintl: 3467 return Intrinsic::rint; 3468 case LibFunc_nearbyint: 3469 case LibFunc_nearbyintf: 3470 case LibFunc_nearbyintl: 3471 return Intrinsic::nearbyint; 3472 case LibFunc_round: 3473 case LibFunc_roundf: 3474 case LibFunc_roundl: 3475 return Intrinsic::round; 3476 case LibFunc_roundeven: 3477 case LibFunc_roundevenf: 3478 case LibFunc_roundevenl: 3479 return Intrinsic::roundeven; 3480 case LibFunc_pow: 3481 case LibFunc_powf: 3482 case LibFunc_powl: 3483 return Intrinsic::pow; 3484 case LibFunc_sqrt: 3485 case LibFunc_sqrtf: 3486 case LibFunc_sqrtl: 3487 return Intrinsic::sqrt; 3488 } 3489 3490 return Intrinsic::not_intrinsic; 3491 } 3492 3493 /// Return true if we can prove that the specified FP value is never equal to 3494 /// -0.0. 3495 /// NOTE: Do not check 'nsz' here because that fast-math-flag does not guarantee 3496 /// that a value is not -0.0. It only guarantees that -0.0 may be treated 3497 /// the same as +0.0 in floating-point ops. 3498 bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI, 3499 unsigned Depth) { 3500 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3501 return !CFP->getValueAPF().isNegZero(); 3502 3503 if (Depth == MaxAnalysisRecursionDepth) 3504 return false; 3505 3506 auto *Op = dyn_cast<Operator>(V); 3507 if (!Op) 3508 return false; 3509 3510 // (fadd x, 0.0) is guaranteed to return +0.0, not -0.0. 3511 if (match(Op, m_FAdd(m_Value(), m_PosZeroFP()))) 3512 return true; 3513 3514 // sitofp and uitofp turn into +0.0 for zero. 3515 if (isa<SIToFPInst>(Op) || isa<UIToFPInst>(Op)) 3516 return true; 3517 3518 if (auto *Call = dyn_cast<CallInst>(Op)) { 3519 Intrinsic::ID IID = getIntrinsicForCallSite(*Call, TLI); 3520 switch (IID) { 3521 default: 3522 break; 3523 // sqrt(-0.0) = -0.0, no other negative results are possible. 
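    // (Illustrative: sqrt(4.0) == 2.0 and sqrt(-0.0) == -0.0, so the result
    // can be -0.0 only when the operand can be.)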
3524     case Intrinsic::sqrt:
3525     case Intrinsic::canonicalize:
3526       return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3527     case Intrinsic::experimental_constrained_sqrt: {
3528       // NOTE: This rounding mode restriction may be too strict.
3529       const auto *CI = cast<ConstrainedFPIntrinsic>(Call);
3530       if (CI->getRoundingMode() == RoundingMode::NearestTiesToEven)
3531         return CannotBeNegativeZero(Call->getArgOperand(0), TLI, Depth + 1);
3532       else
3533         return false;
3534     }
3535     // fabs(x) != -0.0
3536     case Intrinsic::fabs:
3537       return true;
3538     // sitofp and uitofp turn into +0.0 for zero.
3539     case Intrinsic::experimental_constrained_sitofp:
3540     case Intrinsic::experimental_constrained_uitofp:
3541       return true;
3542     }
3543   }
3544
3545   return false;
3546 }
3547
3548 /// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
3549 /// standard ordered compare: e.g., -0.0 is treated as ordered-less-than 0.0
3550 /// because its sign bit is set, even though the two compare equal.
3551 static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
3552                                             const TargetLibraryInfo *TLI,
3553                                             bool SignBitOnly,
3554                                             unsigned Depth) {
3555   // TODO: This function does not do the right thing when SignBitOnly is true
3556   // and we're lowering to a hypothetical IEEE 754-compliant-but-evil platform
3557   // which flips the sign bits of NaNs. See
3558   // https://llvm.org/bugs/show_bug.cgi?id=31702.
3559
3560   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
3561     return !CFP->getValueAPF().isNegative() ||
3562            (!SignBitOnly && CFP->getValueAPF().isZero());
3563   }
3564
3565   // Handle a vector of constants.
3566   if (auto *CV = dyn_cast<Constant>(V)) {
3567     if (auto *CVFVTy = dyn_cast<FixedVectorType>(CV->getType())) {
3568       unsigned NumElts = CVFVTy->getNumElements();
3569       for (unsigned i = 0; i != NumElts; ++i) {
3570         auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
3571         if (!CFP)
3572           return false;
3573         if (CFP->getValueAPF().isNegative() &&
3574             (SignBitOnly || !CFP->getValueAPF().isZero()))
3575           return false;
3576       }
3577
3578       // All non-negative ConstantFPs.
3579       return true;
3580     }
3581   }
3582
3583   if (Depth == MaxAnalysisRecursionDepth)
3584     return false;
3585
3586   const Operator *I = dyn_cast<Operator>(V);
3587   if (!I)
3588     return false;
3589
3590   switch (I->getOpcode()) {
3591   default:
3592     break;
3593   // Unsigned integers are always nonnegative.
3594   case Instruction::UIToFP:
3595     return true;
3596   case Instruction::FMul:
3597   case Instruction::FDiv:
3598     // X * X is always non-negative or a NaN.
3599     // X / X is always exactly 1.0 or a NaN.
3600     if (I->getOperand(0) == I->getOperand(1) &&
3601         (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
3602       return true;
3603
3604     LLVM_FALLTHROUGH;
3605   case Instruction::FAdd:
3606   case Instruction::FRem:
3607     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3608                                            Depth + 1) &&
3609            cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3610                                            Depth + 1);
3611   case Instruction::Select:
3612     return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
3613                                            Depth + 1) &&
3614            cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
3615                                            Depth + 1);
3616   case Instruction::FPExt:
3617   case Instruction::FPTrunc:
3618     // Widening/narrowing never change sign.
3619     return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
3620                                            Depth + 1);
3621   case Instruction::ExtractElement:
3622     // Look through extract element.
At the moment we keep this simple and skip 3623 // tracking the specific element. But at least we might find information 3624 // valid for all elements of the vector. 3625 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3626 Depth + 1); 3627 case Instruction::Call: 3628 const auto *CI = cast<CallInst>(I); 3629 Intrinsic::ID IID = getIntrinsicForCallSite(*CI, TLI); 3630 switch (IID) { 3631 default: 3632 break; 3633 case Intrinsic::maxnum: { 3634 Value *V0 = I->getOperand(0), *V1 = I->getOperand(1); 3635 auto isPositiveNum = [&](Value *V) { 3636 if (SignBitOnly) { 3637 // With SignBitOnly, this is tricky because the result of 3638 // maxnum(+0.0, -0.0) is unspecified. Just check if the operand is 3639 // a constant strictly greater than 0.0. 3640 const APFloat *C; 3641 return match(V, m_APFloat(C)) && 3642 *C > APFloat::getZero(C->getSemantics()); 3643 } 3644 3645 // -0.0 compares equal to 0.0, so if this operand is at least -0.0, 3646 // maxnum can't be ordered-less-than-zero. 3647 return isKnownNeverNaN(V, TLI) && 3648 cannotBeOrderedLessThanZeroImpl(V, TLI, false, Depth + 1); 3649 }; 3650 3651 // TODO: This could be improved. We could also check that neither operand 3652 // has its sign bit set (and at least 1 is not-NAN?). 3653 return isPositiveNum(V0) || isPositiveNum(V1); 3654 } 3655 3656 case Intrinsic::maximum: 3657 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3658 Depth + 1) || 3659 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3660 Depth + 1); 3661 case Intrinsic::minnum: 3662 case Intrinsic::minimum: 3663 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3664 Depth + 1) && 3665 cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly, 3666 Depth + 1); 3667 case Intrinsic::exp: 3668 case Intrinsic::exp2: 3669 case Intrinsic::fabs: 3670 return true; 3671 3672 case Intrinsic::sqrt: 3673 // sqrt(x) is always >= -0 or NaN. Moreover, sqrt(x) == -0 iff x == -0. 3674 if (!SignBitOnly) 3675 return true; 3676 return CI->hasNoNaNs() && (CI->hasNoSignedZeros() || 3677 CannotBeNegativeZero(CI->getOperand(0), TLI)); 3678 3679 case Intrinsic::powi: 3680 if (ConstantInt *Exponent = dyn_cast<ConstantInt>(I->getOperand(1))) { 3681 // powi(x,n) is non-negative if n is even. 3682 if (Exponent->getBitWidth() <= 64 && Exponent->getSExtValue() % 2u == 0) 3683 return true; 3684 } 3685 // TODO: This is not correct. Given that exp is an integer, here are the 3686 // ways that pow can return a negative value: 3687 // 3688 // pow(x, exp) --> negative if exp is odd and x is negative. 3689 // pow(-0, exp) --> -inf if exp is negative odd. 3690 // pow(-0, exp) --> -0 if exp is positive odd. 3691 // pow(-inf, exp) --> -0 if exp is negative odd. 3692 // pow(-inf, exp) --> -inf if exp is positive odd. 3693 // 3694 // Therefore, if !SignBitOnly, we can return true if x >= +0 or x is NaN, 3695 // but we must return false if x == -0. Unfortunately we do not currently 3696 // have a way of expressing this constraint. See details in 3697 // https://llvm.org/bugs/show_bug.cgi?id=31702. 3698 return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly, 3699 Depth + 1); 3700 3701 case Intrinsic::fma: 3702 case Intrinsic::fmuladd: 3703 // x*x+y is non-negative if y is non-negative. 
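      // For example (illustrative): fma(%x, %x, %y) cannot be
      // ordered-less-than-zero when %y cannot be.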
3704 return I->getOperand(0) == I->getOperand(1) && 3705 (!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) && 3706 cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly, 3707 Depth + 1); 3708 } 3709 break; 3710 } 3711 return false; 3712 } 3713 3714 bool llvm::CannotBeOrderedLessThanZero(const Value *V, 3715 const TargetLibraryInfo *TLI) { 3716 return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0); 3717 } 3718 3719 bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) { 3720 return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0); 3721 } 3722 3723 bool llvm::isKnownNeverInfinity(const Value *V, const TargetLibraryInfo *TLI, 3724 unsigned Depth) { 3725 assert(V->getType()->isFPOrFPVectorTy() && "Querying for Inf on non-FP type"); 3726 3727 // If we're told that infinities won't happen, assume they won't. 3728 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) 3729 if (FPMathOp->hasNoInfs()) 3730 return true; 3731 3732 // Handle scalar constants. 3733 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3734 return !CFP->isInfinity(); 3735 3736 if (Depth == MaxAnalysisRecursionDepth) 3737 return false; 3738 3739 if (auto *Inst = dyn_cast<Instruction>(V)) { 3740 switch (Inst->getOpcode()) { 3741 case Instruction::Select: { 3742 return isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1) && 3743 isKnownNeverInfinity(Inst->getOperand(2), TLI, Depth + 1); 3744 } 3745 case Instruction::SIToFP: 3746 case Instruction::UIToFP: { 3747 // Get width of largest magnitude integer (remove a bit if signed). 3748 // This still works for a signed minimum value because the largest FP 3749 // value is scaled by some fraction close to 2.0 (1.0 + 0.xxxx). 3750 int IntSize = Inst->getOperand(0)->getType()->getScalarSizeInBits(); 3751 if (Inst->getOpcode() == Instruction::SIToFP) 3752 --IntSize; 3753 3754 // If the exponent of the largest finite FP value can hold the largest 3755 // integer, the result of the cast must be finite. 3756 Type *FPTy = Inst->getType()->getScalarType(); 3757 return ilogb(APFloat::getLargest(FPTy->getFltSemantics())) >= IntSize; 3758 } 3759 default: 3760 break; 3761 } 3762 } 3763 3764 // try to handle fixed width vector constants 3765 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); 3766 if (VFVTy && isa<Constant>(V)) { 3767 // For vectors, verify that each element is not infinity. 3768 unsigned NumElts = VFVTy->getNumElements(); 3769 for (unsigned i = 0; i != NumElts; ++i) { 3770 Constant *Elt = cast<Constant>(V)->getAggregateElement(i); 3771 if (!Elt) 3772 return false; 3773 if (isa<UndefValue>(Elt)) 3774 continue; 3775 auto *CElt = dyn_cast<ConstantFP>(Elt); 3776 if (!CElt || CElt->isInfinity()) 3777 return false; 3778 } 3779 // All elements were confirmed non-infinity or undefined. 3780 return true; 3781 } 3782 3783 // was not able to prove that V never contains infinity 3784 return false; 3785 } 3786 3787 bool llvm::isKnownNeverNaN(const Value *V, const TargetLibraryInfo *TLI, 3788 unsigned Depth) { 3789 assert(V->getType()->isFPOrFPVectorTy() && "Querying for NaN on non-FP type"); 3790 3791 // If we're told that NaNs won't happen, assume they won't. 3792 if (auto *FPMathOp = dyn_cast<FPMathOperator>(V)) 3793 if (FPMathOp->hasNoNaNs()) 3794 return true; 3795 3796 // Handle scalar constants. 
3797 if (auto *CFP = dyn_cast<ConstantFP>(V)) 3798 return !CFP->isNaN(); 3799 3800 if (Depth == MaxAnalysisRecursionDepth) 3801 return false; 3802 3803 if (auto *Inst = dyn_cast<Instruction>(V)) { 3804 switch (Inst->getOpcode()) { 3805 case Instruction::FAdd: 3806 case Instruction::FSub: 3807 // Adding positive and negative infinity produces NaN. 3808 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && 3809 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && 3810 (isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) || 3811 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1)); 3812 3813 case Instruction::FMul: 3814 // Zero multiplied with infinity produces NaN. 3815 // FIXME: If neither side can be zero fmul never produces NaN. 3816 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1) && 3817 isKnownNeverInfinity(Inst->getOperand(0), TLI, Depth + 1) && 3818 isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && 3819 isKnownNeverInfinity(Inst->getOperand(1), TLI, Depth + 1); 3820 3821 case Instruction::FDiv: 3822 case Instruction::FRem: 3823 // FIXME: Only 0/0, Inf/Inf, Inf REM x and x REM 0 produce NaN. 3824 return false; 3825 3826 case Instruction::Select: { 3827 return isKnownNeverNaN(Inst->getOperand(1), TLI, Depth + 1) && 3828 isKnownNeverNaN(Inst->getOperand(2), TLI, Depth + 1); 3829 } 3830 case Instruction::SIToFP: 3831 case Instruction::UIToFP: 3832 return true; 3833 case Instruction::FPTrunc: 3834 case Instruction::FPExt: 3835 return isKnownNeverNaN(Inst->getOperand(0), TLI, Depth + 1); 3836 default: 3837 break; 3838 } 3839 } 3840 3841 if (const auto *II = dyn_cast<IntrinsicInst>(V)) { 3842 switch (II->getIntrinsicID()) { 3843 case Intrinsic::canonicalize: 3844 case Intrinsic::fabs: 3845 case Intrinsic::copysign: 3846 case Intrinsic::exp: 3847 case Intrinsic::exp2: 3848 case Intrinsic::floor: 3849 case Intrinsic::ceil: 3850 case Intrinsic::trunc: 3851 case Intrinsic::rint: 3852 case Intrinsic::nearbyint: 3853 case Intrinsic::round: 3854 case Intrinsic::roundeven: 3855 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1); 3856 case Intrinsic::sqrt: 3857 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) && 3858 CannotBeOrderedLessThanZero(II->getArgOperand(0), TLI); 3859 case Intrinsic::minnum: 3860 case Intrinsic::maxnum: 3861 // If either operand is not NaN, the result is not NaN. 3862 return isKnownNeverNaN(II->getArgOperand(0), TLI, Depth + 1) || 3863 isKnownNeverNaN(II->getArgOperand(1), TLI, Depth + 1); 3864 default: 3865 return false; 3866 } 3867 } 3868 3869 // Try to handle fixed width vector constants 3870 auto *VFVTy = dyn_cast<FixedVectorType>(V->getType()); 3871 if (VFVTy && isa<Constant>(V)) { 3872 // For vectors, verify that each element is not NaN. 3873 unsigned NumElts = VFVTy->getNumElements(); 3874 for (unsigned i = 0; i != NumElts; ++i) { 3875 Constant *Elt = cast<Constant>(V)->getAggregateElement(i); 3876 if (!Elt) 3877 return false; 3878 if (isa<UndefValue>(Elt)) 3879 continue; 3880 auto *CElt = dyn_cast<ConstantFP>(Elt); 3881 if (!CElt || CElt->isNaN()) 3882 return false; 3883 } 3884 // All elements were confirmed not-NaN or undefined. 3885 return true; 3886 } 3887 3888 // Was not able to prove that V never contains NaN 3889 return false; 3890 } 3891 3892 Value *llvm::isBytewiseValue(Value *V, const DataLayout &DL) { 3893 3894 // All byte-wide stores are splatable, even of arbitrary variables. 
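  // For example (illustrative): any i8 value %x is trivially its own byte
  // splat, so it can be returned directly below.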
3895   if (V->getType()->isIntegerTy(8))
3896     return V;
3897
3898   LLVMContext &Ctx = V->getContext();
3899
3900   // Undef: any byte value works, so return an i8 undef.
3901   auto *UndefInt8 = UndefValue::get(Type::getInt8Ty(Ctx));
3902   if (isa<UndefValue>(V))
3903     return UndefInt8;
3904
3905   // Return Undef for a zero-sized type.
3906   if (!DL.getTypeStoreSize(V->getType()).isNonZero())
3907     return UndefInt8;
3908
3909   Constant *C = dyn_cast<Constant>(V);
3910   if (!C) {
3911     // Conceptually, we could handle things like:
3912     //   %a = zext i8 %X to i16
3913     //   %b = shl i16 %a, 8
3914     //   %c = or i16 %a, %b
3915     // but until there is an example that actually needs this, it doesn't seem
3916     // worth worrying about.
3917     return nullptr;
3918   }
3919
3920   // Handle 'null' (ConstantAggregateZero etc.).
3921   if (C->isNullValue())
3922     return Constant::getNullValue(Type::getInt8Ty(Ctx));
3923
3924   // Constant floating-point values can be handled as integer values if the
3925   // corresponding integer value is "byteable". An important case is 0.0.
3926   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
3927     Type *Ty = nullptr;
3928     if (CFP->getType()->isHalfTy())
3929       Ty = Type::getInt16Ty(Ctx);
3930     else if (CFP->getType()->isFloatTy())
3931       Ty = Type::getInt32Ty(Ctx);
3932     else if (CFP->getType()->isDoubleTy())
3933       Ty = Type::getInt64Ty(Ctx);
3934     // Don't handle long double formats, which have strange constraints.
3935     return Ty ? isBytewiseValue(ConstantExpr::getBitCast(CFP, Ty), DL)
3936               : nullptr;
3937   }
3938
3939   // We can handle constant integers whose width is a multiple of 8 bits.
3940   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
3941     if (CI->getBitWidth() % 8 == 0) {
3942       assert(CI->getBitWidth() > 8 && "8 bits should be handled above!");
3943       if (!CI->getValue().isSplat(8))
3944         return nullptr;
3945       return ConstantInt::get(Ctx, CI->getValue().trunc(8));
3946     }
3947   }
3948
3949   if (auto *CE = dyn_cast<ConstantExpr>(C)) {
3950     if (CE->getOpcode() == Instruction::IntToPtr) {
3951       if (auto *PtrTy = dyn_cast<PointerType>(CE->getType())) {
3952         unsigned BitWidth = DL.getPointerSizeInBits(PtrTy->getAddressSpace());
3953         return isBytewiseValue(
3954             ConstantExpr::getIntegerCast(CE->getOperand(0),
3955                                          Type::getIntNTy(Ctx, BitWidth), false),
3956             DL);
3957       }
3958     }
3959   }
3960
3961   auto Merge = [&](Value *LHS, Value *RHS) -> Value * {
3962     if (LHS == RHS)
3963       return LHS;
3964     if (!LHS || !RHS)
3965       return nullptr;
3966     if (LHS == UndefInt8)
3967       return RHS;
3968     if (RHS == UndefInt8)
3969       return LHS;
3970     return nullptr;
3971   };
3972
3973   if (ConstantDataSequential *CA = dyn_cast<ConstantDataSequential>(C)) {
3974     Value *Val = UndefInt8;
3975     for (unsigned I = 0, E = CA->getNumElements(); I != E; ++I)
3976       if (!(Val = Merge(Val, isBytewiseValue(CA->getElementAsConstant(I), DL))))
3977         return nullptr;
3978     return Val;
3979   }
3980
3981   if (isa<ConstantAggregate>(C)) {
3982     Value *Val = UndefInt8;
3983     for (unsigned I = 0, E = C->getNumOperands(); I != E; ++I)
3984       if (!(Val = Merge(Val, isBytewiseValue(C->getOperand(I), DL))))
3985         return nullptr;
3986     return Val;
3987   }
3988
3989   // Don't try to handle the handful of other constants.
3990   return nullptr;
3991 }
3992
3993 // This is the recursive version of BuildSubAggregate. It takes a few different
3994 // arguments. Idxs is the index within the nested struct From that we are
3995 // looking at now (which is of type IndexedType). IdxSkip is the number of
3996 // indices from Idxs that should be left out when inserting into the resulting
3997 // struct.
To is the result struct built so far, new insertvalue instructions
3998 // build on that.
3999 static Value *BuildSubAggregate(Value *From, Value *To, Type *IndexedType,
4000                                 SmallVectorImpl<unsigned> &Idxs,
4001                                 unsigned IdxSkip,
4002                                 Instruction *InsertBefore) {
4003   StructType *STy = dyn_cast<StructType>(IndexedType);
4004   if (STy) {
4005     // Save the original To argument so we can modify it
4006     Value *OrigTo = To;
4007     // General case, the type indexed by Idxs is a struct
4008     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4009       // Process each struct element recursively
4010       Idxs.push_back(i);
4011       Value *PrevTo = To;
4012       To = BuildSubAggregate(From, To, STy->getElementType(i), Idxs, IdxSkip,
4013                              InsertBefore);
4014       Idxs.pop_back();
4015       if (!To) {
4016         // Couldn't find any inserted value for this index? Cleanup
4017         while (PrevTo != OrigTo) {
4018           InsertValueInst *Del = cast<InsertValueInst>(PrevTo);
4019           PrevTo = Del->getAggregateOperand();
4020           Del->eraseFromParent();
4021         }
4022         // Stop processing elements
4023         break;
4024       }
4025     }
4026     // If we successfully found a value for each of our subaggregates
4027     if (To)
4028       return To;
4029   }
4030   // Base case, the type indexed by Idxs is not a struct, or not all of
4031   // the struct's elements had a value that was inserted directly. In the
4032   // latter case, perhaps we can't determine each of the subelements
4033   // individually, but we might be able to find the complete struct somewhere.
4034
4035   // Find the value that is at that particular spot
4036   Value *V = FindInsertedValue(From, Idxs);
4037
4038   if (!V)
4039     return nullptr;
4040
4041   // Insert the value in the new (sub) aggregate
4042   return InsertValueInst::Create(To, V, makeArrayRef(Idxs).slice(IdxSkip),
4043                                  "tmp", InsertBefore);
4044 }
4045
4046 // This helper takes a nested struct and extracts a part of it (which is again
4047 // a struct) into a new value. For example, given the struct:
4048 //   { a, { b, { c, d }, e } }
4049 // and the indices "1, 1" this returns
4050 //   { c, d }.
4051 //
4052 // It does this by inserting an insertvalue for each element in the resulting
4053 // struct, as opposed to just inserting a single struct. This will only work if
4054 // each of the elements of the substruct are known (i.e., inserted into From
4055 // by an insertvalue instruction somewhere).
4056 //
4057 // All inserted insertvalue instructions are inserted before InsertBefore.
4058 static Value *BuildSubAggregate(Value *From, ArrayRef<unsigned> idx_range,
4059                                 Instruction *InsertBefore) {
4060   assert(InsertBefore && "Must have someplace to insert!");
4061   Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
4062                                                        idx_range);
4063   Value *To = UndefValue::get(IndexedType);
4064   SmallVector<unsigned, 10> Idxs(idx_range.begin(), idx_range.end());
4065   unsigned IdxSkip = Idxs.size();
4066
4067   return BuildSubAggregate(From, To, IndexedType, Idxs, IdxSkip, InsertBefore);
4068 }
4069
4070 /// Given an aggregate and a sequence of indices, see if the scalar value
4071 /// indexed is already around as a register, for example if it was inserted
4072 /// directly into the aggregate.
4073 ///
4074 /// If InsertBefore is not null, this function will duplicate (modified)
4075 /// insertvalues when a part of a nested struct is extracted.
4076 Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
4077                                Instruction *InsertBefore) {
4078   // Nothing to index? Just return V then (this is useful at the end of our
4079   // recursion).
4080   if (idx_range.empty())
4081     return V;
4082   // We have indices, so V should have an indexable type.
4083   assert((V->getType()->isStructTy() || V->getType()->isArrayTy()) &&
4084          "Not looking at a struct or array?");
4085   assert(ExtractValueInst::getIndexedType(V->getType(), idx_range) &&
4086          "Invalid indices for type?");
4087
4088   if (Constant *C = dyn_cast<Constant>(V)) {
4089     C = C->getAggregateElement(idx_range[0]);
4090     if (!C) return nullptr;
4091     return FindInsertedValue(C, idx_range.slice(1), InsertBefore);
4092   }
4093
4094   if (InsertValueInst *I = dyn_cast<InsertValueInst>(V)) {
4095     // Loop over the indices of the insertvalue instruction in parallel with
4096     // the requested indices.
4097     const unsigned *req_idx = idx_range.begin();
4098     for (const unsigned *i = I->idx_begin(), *e = I->idx_end();
4099          i != e; ++i, ++req_idx) {
4100       if (req_idx == idx_range.end()) {
4101         // We can't handle this without inserting insertvalues
4102         if (!InsertBefore)
4103           return nullptr;
4104
4105         // The requested index identifies a part of a nested aggregate. Handle
4106         // this specially. For example,
4107         //   %A = insertvalue { i32, {i32, i32 } } undef, i32 10, 1, 0
4108         //   %B = insertvalue { i32, {i32, i32 } } %A, i32 11, 1, 1
4109         //   %C = extractvalue {i32, { i32, i32 } } %B, 1
4110         // This can be changed into
4111         //   %A = insertvalue {i32, i32 } undef, i32 10, 0
4112         //   %C = insertvalue {i32, i32 } %A, i32 11, 1
4113         // which allows the unused 0,0 element from the nested struct to be
4114         // removed.
4115         return BuildSubAggregate(V, makeArrayRef(idx_range.begin(), req_idx),
4116                                  InsertBefore);
4117       }
4118
4119       // This insertvalue inserts something other than what we are looking
4120       // for. See if the (aggregate) value inserted into has the value we are
4121       // looking for, then.
4122       if (*req_idx != *i)
4123         return FindInsertedValue(I->getAggregateOperand(), idx_range,
4124                                  InsertBefore);
4125     }
4126     // If we end up here, the indices of the insertvalue match with those
4127     // requested (though possibly only partially). Now we recursively look at
4128     // the inserted value, passing any remaining indices.
4129     return FindInsertedValue(I->getInsertedValueOperand(),
4130                              makeArrayRef(req_idx, idx_range.end()),
4131                              InsertBefore);
4132   }
4133
4134   if (ExtractValueInst *I = dyn_cast<ExtractValueInst>(V)) {
4135     // If we're extracting a value from an aggregate that was extracted from
4136     // something else, we can extract from that something else directly instead.
4137     // However, we will need to chain I's indices with the requested indices.
4138
4139     // Calculate the number of indices required.
4140     unsigned size = I->getNumIndices() + idx_range.size();
4141     // Allocate some space to put the new indices in.
4142     SmallVector<unsigned, 5> Idxs;
4143     Idxs.reserve(size);
4144     // Add indices from the extract value instruction.
4145     Idxs.append(I->idx_begin(), I->idx_end());
4146
4147     // Add requested indices.
4148     Idxs.append(idx_range.begin(), idx_range.end());
4149
4150     assert(Idxs.size() == size &&
4151            "Number of indices added not correct?");
4152
4153     return FindInsertedValue(I->getAggregateOperand(), Idxs, InsertBefore);
4154   }
4155   // Otherwise, we don't know (e.g., extracting from a function return value
4156   // or a load instruction).
4157   return nullptr;
4158 }
4159
4160 bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
4161                                        unsigned CharSize) {
4162   // Make sure the GEP has exactly three arguments.
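  // For example (illustrative):
  //   getelementptr [8 x i8], [8 x i8]* @str, i64 0, i64 %i
  // has a pointer operand plus two indices, so three operands in total.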
4163   if (GEP->getNumOperands() != 3)
4164     return false;
4165
4166   // Make sure the index-ee is a pointer to an array of \p CharSize
4167   // integers.
4168   ArrayType *AT = dyn_cast<ArrayType>(GEP->getSourceElementType());
4169   if (!AT || !AT->getElementType()->isIntegerTy(CharSize))
4170     return false;
4171
4172   // Check to make sure that the first operand of the GEP is an integer and
4173   // has value 0 so that we are sure we're indexing into the initializer.
4174   const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
4175   if (!FirstIdx || !FirstIdx->isZero())
4176     return false;
4177
4178   return true;
4179 }
4180
4181 // If V refers to an initialized global constant, set Slice either to
4182 // its initializer if the size of its elements equals ElementSize, or,
4183 // for ElementSize == 8, to its representation as an array of unsigned
4184 // char. Return true on success.
4185 bool llvm::getConstantDataArrayInfo(const Value *V,
4186                                     ConstantDataArraySlice &Slice,
4187                                     unsigned ElementSize, uint64_t Offset) {
4188   assert(V);
4189
4190   // Drill down into the pointer expression V, ignoring any intervening
4191   // casts, and determine the identity of the object it references along
4192   // with the cumulative byte offset into it.
4193   const GlobalVariable *GV =
4194       dyn_cast<GlobalVariable>(getUnderlyingObject(V));
4195   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
4196     // Fail if V is not based on a constant global object.
4197     return false;
4198
4199   const DataLayout &DL = GV->getParent()->getDataLayout();
4200   APInt Off(DL.getIndexTypeSizeInBits(V->getType()), 0);
4201
4202   if (GV != V->stripAndAccumulateConstantOffsets(DL, Off,
4203                                                  /*AllowNonInbounds*/ true))
4204     // Fail if a constant offset could not be determined.
4205     return false;
4206
4207   uint64_t StartIdx = Off.getLimitedValue();
4208   if (StartIdx == UINT64_MAX)
4209     // Fail if the constant offset is excessive.
4210     return false;
4211
4212   Offset += StartIdx;
4213
4214   ConstantDataArray *Array = nullptr;
4215   ArrayType *ArrayTy = nullptr;
4216
4217   if (GV->getInitializer()->isNullValue()) {
4218     Type *GVTy = GV->getValueType();
4219     uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
4220     uint64_t Length = SizeInBytes / (ElementSize / 8);
4221
4222     Slice.Array = nullptr;
4223     Slice.Offset = 0;
4224     // Return an empty Slice for undersized constants to let callers
4225     // transform even undefined library calls into simpler, well-defined
4226     // expressions. This is preferable to making the calls, although it
4227     // prevents sanitizers from detecting such calls.
4228     Slice.Length = Length < Offset ? 0 : Length - Offset;
4229     return true;
4230   }
4231
4232   auto *Init = const_cast<Constant *>(GV->getInitializer());
4233   if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
4234     Type *InitElTy = ArrayInit->getElementType();
4235     if (InitElTy->isIntegerTy(ElementSize)) {
4236       // If Init is an initializer for an array of the expected type
4237       // and size, use it as is.
4238       Array = ArrayInit;
4239       ArrayTy = ArrayInit->getType();
4240     }
4241   }
4242
4243   if (!Array) {
4244     if (ElementSize != 8)
4245       // TODO: Handle conversions to larger integral types.
4246       return false;
4247
4248     // Otherwise extract the portion of the initializer starting
4249     // at Offset as an array of bytes, and reset Offset.
4250 Init = ReadByteArrayFromGlobal(GV, Offset); 4251 if (!Init) 4252 return false; 4253 4254 Offset = 0; 4255 Array = dyn_cast<ConstantDataArray>(Init); 4256 ArrayTy = dyn_cast<ArrayType>(Init->getType()); 4257 } 4258 4259 uint64_t NumElts = ArrayTy->getArrayNumElements(); 4260 if (Offset > NumElts) 4261 return false; 4262 4263 Slice.Array = Array; 4264 Slice.Offset = Offset; 4265 Slice.Length = NumElts - Offset; 4266 return true; 4267 } 4268 4269 /// Extract bytes from the initializer of the constant array V, which need 4270 /// not be a nul-terminated string. On success, store the bytes in Str and 4271 /// return true. When TrimAtNul is set, Str will contain only the bytes up 4272 /// to but not including the first nul. Return false on failure. 4273 bool llvm::getConstantStringInfo(const Value *V, StringRef &Str, 4274 uint64_t Offset, bool TrimAtNul) { 4275 ConstantDataArraySlice Slice; 4276 if (!getConstantDataArrayInfo(V, Slice, 8, Offset)) 4277 return false; 4278 4279 if (Slice.Array == nullptr) { 4280 if (TrimAtNul) { 4281 // Return a nul-terminated string even for an empty Slice. This is 4282 // safe because all existing SimplifyLibcalls callers require string 4283 // arguments and the behavior of the functions they fold is undefined 4284 // otherwise. Folding the calls this way is preferable to making 4285 // the undefined library calls, even though it prevents sanitizers 4286 // from reporting such calls. 4287 Str = StringRef(); 4288 return true; 4289 } 4290 if (Slice.Length == 1) { 4291 Str = StringRef("", 1); 4292 return true; 4293 } 4294 // We cannot instantiate a StringRef as we do not have an appropriate string 4295 // of 0s at hand. 4296 return false; 4297 } 4298 4299 // Start out with the entire array in the StringRef. 4300 Str = Slice.Array->getAsString(); 4301 // Skip over 'offset' bytes. 4302 Str = Str.substr(Slice.Offset); 4303 4304 if (TrimAtNul) { 4305 // Trim off the \0 and anything after it. If the array is not nul 4306 // terminated, we just return the whole end of string. The client may know 4307 // some other way that the string is length-bound. 4308 Str = Str.substr(0, Str.find('\0')); 4309 } 4310 return true; 4311 } 4312 4313 // These next two are very similar to the above, but also look through PHI 4314 // nodes. 4315 // TODO: See if we can integrate these two together. 4316 4317 /// If we can compute the length of the string pointed to by 4318 /// the specified pointer, return 'len+1'. If we can't, return 0. 4319 static uint64_t GetStringLengthH(const Value *V, 4320 SmallPtrSetImpl<const PHINode*> &PHIs, 4321 unsigned CharSize) { 4322 // Look through noop bitcast instructions. 4323 V = V->stripPointerCasts(); 4324 4325 // If this is a PHI node, there are two cases: either we have already seen it 4326 // or we haven't. 4327 if (const PHINode *PN = dyn_cast<PHINode>(V)) { 4328 if (!PHIs.insert(PN).second) 4329 return ~0ULL; // already in the set. 4330 4331 // If it was new, see if all the input strings are the same length. 4332 uint64_t LenSoFar = ~0ULL; 4333 for (Value *IncValue : PN->incoming_values()) { 4334 uint64_t Len = GetStringLengthH(IncValue, PHIs, CharSize); 4335 if (Len == 0) return 0; // Unknown length -> unknown. 4336 4337 if (Len == ~0ULL) continue; 4338 4339 if (Len != LenSoFar && LenSoFar != ~0ULL) 4340 return 0; // Disagree -> unknown. 4341 LenSoFar = Len; 4342 } 4343 4344 // Success, all agree. 
4345     return LenSoFar;
4346   }
4347
4348   // strlen(select(c,x,y)) -> strlen(x) if strlen(x) == strlen(y); else unknown.
4349   if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
4350     uint64_t Len1 = GetStringLengthH(SI->getTrueValue(), PHIs, CharSize);
4351     if (Len1 == 0) return 0;
4352     uint64_t Len2 = GetStringLengthH(SI->getFalseValue(), PHIs, CharSize);
4353     if (Len2 == 0) return 0;
4354     if (Len1 == ~0ULL) return Len2;
4355     if (Len2 == ~0ULL) return Len1;
4356     if (Len1 != Len2) return 0;
4357     return Len1;
4358   }
4359
4360   // Otherwise, see if we can read the string.
4361   ConstantDataArraySlice Slice;
4362   if (!getConstantDataArrayInfo(V, Slice, CharSize))
4363     return 0;
4364
4365   if (Slice.Array == nullptr)
4366     // Zeroinitializer (including an empty one).
4367     return 1;
4368
4369   // Search for the first nul character. Return a conservative result even
4370   // when there is no nul. This is safe since otherwise the string function
4371   // being folded, such as strlen, is undefined, and this can be preferable
4372   // to making the undefined library call.
4373   unsigned NullIndex = 0;
4374   for (unsigned E = Slice.Length; NullIndex < E; ++NullIndex) {
4375     if (Slice.Array->getElementAsInteger(Slice.Offset + NullIndex) == 0)
4376       break;
4377   }
4378
4379   return NullIndex + 1;
4380 }
4381
4382 /// If we can compute the length of the string pointed to by
4383 /// the specified pointer, return 'len+1'. If we can't, return 0.
4384 uint64_t llvm::GetStringLength(const Value *V, unsigned CharSize) {
4385   if (!V->getType()->isPointerTy())
4386     return 0;
4387
4388   SmallPtrSet<const PHINode*, 32> PHIs;
4389   uint64_t Len = GetStringLengthH(V, PHIs, CharSize);
4390   // If Len is ~0ULL, we had an infinite phi cycle: this is dead code, so
4391   // return 1, the length of an empty string.
4392   return Len == ~0ULL ? 1 : Len;
4393 }
4394
4395 const Value *
4396 llvm::getArgumentAliasingToReturnedPointer(const CallBase *Call,
4397                                            bool MustPreserveNullness) {
4398   assert(Call &&
4399          "getArgumentAliasingToReturnedPointer only works on nonnull calls");
4400   if (const Value *RV = Call->getReturnedArgOperand())
4401     return RV;
4402   // This can be used only as an aliasing property.
4403   if (isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4404           Call, MustPreserveNullness))
4405     return Call->getArgOperand(0);
4406   return nullptr;
4407 }
4408
4409 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
4410     const CallBase *Call, bool MustPreserveNullness) {
4411   switch (Call->getIntrinsicID()) {
4412   case Intrinsic::launder_invariant_group:
4413   case Intrinsic::strip_invariant_group:
4414   case Intrinsic::aarch64_irg:
4415   case Intrinsic::aarch64_tagp:
4416     return true;
4417   case Intrinsic::ptrmask:
4418     return !MustPreserveNullness;
4419   default:
4420     return false;
4421   }
4422 }
4423
4424 /// \p PN defines a loop-variant pointer to an object. Check if the
4425 /// previous iteration of the loop was referring to the same object as \p PN.
4426 static bool isSameUnderlyingObjectInLoop(const PHINode *PN,
4427                                          const LoopInfo *LI) {
4428   // Find the loop-defined value.
4429   Loop *L = LI->getLoopFor(PN->getParent());
4430   if (PN->getNumIncomingValues() != 2)
4431     return true;
4432
4433   // Find the value from the previous iteration.
4434   auto *PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(0));
4435   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4436     PrevValue = dyn_cast<Instruction>(PN->getIncomingValue(1));
4437   if (!PrevValue || LI->getLoopFor(PrevValue->getParent()) != L)
4438     return true;
4439
4440   // If a new pointer is loaded in the loop, the pointer references a different
4441   // object in every iteration. E.g.:
4442   //   for (i)
4443   //     int *p = a[i];
4444   //     ...
4445   if (auto *Load = dyn_cast<LoadInst>(PrevValue))
4446     if (!L->isLoopInvariant(Load->getPointerOperand()))
4447       return false;
4448   return true;
4449 }
4450
4451 const Value *llvm::getUnderlyingObject(const Value *V, unsigned MaxLookup) {
4452   if (!V->getType()->isPointerTy())
4453     return V;
4454   for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
4455     if (auto *GEP = dyn_cast<GEPOperator>(V)) {
4456       V = GEP->getPointerOperand();
4457     } else if (Operator::getOpcode(V) == Instruction::BitCast ||
4458                Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
4459       V = cast<Operator>(V)->getOperand(0);
4460       if (!V->getType()->isPointerTy())
4461         return V;
4462     } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
4463       if (GA->isInterposable())
4464         return V;
4465       V = GA->getAliasee();
4466     } else {
4467       if (auto *PHI = dyn_cast<PHINode>(V)) {
4468         // Look through single-arg phi nodes created by LCSSA.
4469         if (PHI->getNumIncomingValues() == 1) {
4470           V = PHI->getIncomingValue(0);
4471           continue;
4472         }
4473       } else if (auto *Call = dyn_cast<CallBase>(V)) {
4474         // CaptureTracking knows about special capturing properties of
4475         // some intrinsics, like launder.invariant.group, that can't be
4476         // expressed with attributes, e.g. returning a pointer that aliases
4477         // an argument. Because an analysis may assume that a nocapture
4478         // pointer is never returned from a special intrinsic (the function
4479         // would otherwise have to be marked with a 'returned' attribute),
4480         // it is crucial to use this helper, which is kept in sync with
4481         // CaptureTracking. Not using it may cause weird miscompilations
4482         // where two aliasing pointers are assumed to be noalias.
4483         if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
4484           V = RP;
4485           continue;
4486         }
4487       }
4488
4489       return V;
4490     }
4491     assert(V->getType()->isPointerTy() && "Unexpected operand type!");
4492   }
4493   return V;
4494 }
4495
4496 void llvm::getUnderlyingObjects(const Value *V,
4497                                 SmallVectorImpl<const Value *> &Objects,
4498                                 LoopInfo *LI, unsigned MaxLookup) {
4499   SmallPtrSet<const Value *, 4> Visited;
4500   SmallVector<const Value *, 4> Worklist;
4501   Worklist.push_back(V);
4502   do {
4503     const Value *P = Worklist.pop_back_val();
4504     P = getUnderlyingObject(P, MaxLookup);
4505
4506     if (!Visited.insert(P).second)
4507       continue;
4508
4509     if (auto *SI = dyn_cast<SelectInst>(P)) {
4510       Worklist.push_back(SI->getTrueValue());
4511       Worklist.push_back(SI->getFalseValue());
4512       continue;
4513     }
4514
4515     if (auto *PN = dyn_cast<PHINode>(P)) {
4516       // If this PHI changes the underlying object in every iteration of the
4517       // loop, don't look through it. Consider:
4518       //   int **A;
4519       //   for (i) {
4520       //     Prev = Curr;     // Prev = PHI (Prev_0, Curr)
4521       //     Curr = A[i];
4522       //     *Prev, *Curr;
4523       //   }
4524       // Prev is tracking Curr one iteration behind so they refer to different
4525       // underlying objects.
4526       if (!LI || !LI->isLoopHeader(PN->getParent()) ||
4527           isSameUnderlyingObjectInLoop(PN, LI))
4528         append_range(Worklist, PN->incoming_values());
4529       continue;
4530     }
4531
4532     Objects.push_back(P);
4533   } while (!Worklist.empty());
4534 }
4535
4536 /// This is the function that does the work of looking through basic
4537 /// ptrtoint+arithmetic+inttoptr sequences.
4538 static const Value *getUnderlyingObjectFromInt(const Value *V) {
4539   do {
4540     if (const Operator *U = dyn_cast<Operator>(V)) {
4541       // If we find a ptrtoint, we can transfer control back to the
4542       // regular pointer-based getUnderlyingObjects.
4543       if (U->getOpcode() == Instruction::PtrToInt)
4544         return U->getOperand(0);
4545       // If we find an add of a constant, a multiplied value, or a phi, it's
4546       // likely that the other operand will lead us to the base
4547       // object. We don't have to worry about the case where the
4548       // object address is somehow being computed by the multiply,
4549       // because our callers only care when the result is an
4550       // identifiable object.
4551       if (U->getOpcode() != Instruction::Add ||
4552           (!isa<ConstantInt>(U->getOperand(1)) &&
4553            Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
4554            !isa<PHINode>(U->getOperand(1))))
4555         return V;
4556       V = U->getOperand(0);
4557     } else {
4558       return V;
4559     }
4560     assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
4561   } while (true);
4562 }
4563
4564 /// This is a wrapper around getUnderlyingObjects that adds support for basic
4565 /// ptrtoint+arithmetic+inttoptr sequences.
4566 /// It returns false if an unidentified object is found by getUnderlyingObjects.
4567 bool llvm::getUnderlyingObjectsForCodeGen(const Value *V,
4568                                           SmallVectorImpl<Value *> &Objects) {
4569   SmallPtrSet<const Value *, 16> Visited;
4570   SmallVector<const Value *, 4> Working(1, V);
4571   do {
4572     V = Working.pop_back_val();
4573
4574     SmallVector<const Value *, 4> Objs;
4575     getUnderlyingObjects(V, Objs);
4576
4577     for (const Value *V : Objs) {
4578       if (!Visited.insert(V).second)
4579         continue;
4580       if (Operator::getOpcode(V) == Instruction::IntToPtr) {
4581         const Value *O =
4582             getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
4583         if (O->getType()->isPointerTy()) {
4584           Working.push_back(O);
4585           continue;
4586         }
4587       }
4588       // If getUnderlyingObjects fails to find an identifiable object,
4589       // getUnderlyingObjectsForCodeGen also fails for safety.
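      // For example (illustrative): a pointer returned by an unknown external
      // call is not an identified object (unlike an alloca or a global), so we
      // conservatively give up.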
4590 if (!isIdentifiedObject(V)) { 4591 Objects.clear(); 4592 return false; 4593 } 4594 Objects.push_back(const_cast<Value *>(V)); 4595 } 4596 } while (!Working.empty()); 4597 return true; 4598 } 4599 4600 AllocaInst *llvm::findAllocaForValue(Value *V, bool OffsetZero) { 4601 AllocaInst *Result = nullptr; 4602 SmallPtrSet<Value *, 4> Visited; 4603 SmallVector<Value *, 4> Worklist; 4604 4605 auto AddWork = [&](Value *V) { 4606 if (Visited.insert(V).second) 4607 Worklist.push_back(V); 4608 }; 4609 4610 AddWork(V); 4611 do { 4612 V = Worklist.pop_back_val(); 4613 assert(Visited.count(V)); 4614 4615 if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) { 4616 if (Result && Result != AI) 4617 return nullptr; 4618 Result = AI; 4619 } else if (CastInst *CI = dyn_cast<CastInst>(V)) { 4620 AddWork(CI->getOperand(0)); 4621 } else if (PHINode *PN = dyn_cast<PHINode>(V)) { 4622 for (Value *IncValue : PN->incoming_values()) 4623 AddWork(IncValue); 4624 } else if (auto *SI = dyn_cast<SelectInst>(V)) { 4625 AddWork(SI->getTrueValue()); 4626 AddWork(SI->getFalseValue()); 4627 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(V)) { 4628 if (OffsetZero && !GEP->hasAllZeroIndices()) 4629 return nullptr; 4630 AddWork(GEP->getPointerOperand()); 4631 } else if (CallBase *CB = dyn_cast<CallBase>(V)) { 4632 Value *Returned = CB->getReturnedArgOperand(); 4633 if (Returned) 4634 AddWork(Returned); 4635 else 4636 return nullptr; 4637 } else { 4638 return nullptr; 4639 } 4640 } while (!Worklist.empty()); 4641 4642 return Result; 4643 } 4644 4645 static bool onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 4646 const Value *V, bool AllowLifetime, bool AllowDroppable) { 4647 for (const User *U : V->users()) { 4648 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); 4649 if (!II) 4650 return false; 4651 4652 if (AllowLifetime && II->isLifetimeStartOrEnd()) 4653 continue; 4654 4655 if (AllowDroppable && II->isDroppable()) 4656 continue; 4657 4658 return false; 4659 } 4660 return true; 4661 } 4662 4663 bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { 4664 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 4665 V, /* AllowLifetime */ true, /* AllowDroppable */ false); 4666 } 4667 bool llvm::onlyUsedByLifetimeMarkersOrDroppableInsts(const Value *V) { 4668 return onlyUsedByLifetimeMarkersOrDroppableInstsHelper( 4669 V, /* AllowLifetime */ true, /* AllowDroppable */ true); 4670 } 4671 4672 bool llvm::mustSuppressSpeculation(const LoadInst &LI) { 4673 if (!LI.isUnordered()) 4674 return true; 4675 const Function &F = *LI.getFunction(); 4676 // Speculative load may create a race that did not exist in the source. 4677 return F.hasFnAttribute(Attribute::SanitizeThread) || 4678 // Speculative load may load data from dirty regions. 4679 F.hasFnAttribute(Attribute::SanitizeAddress) || 4680 F.hasFnAttribute(Attribute::SanitizeHWAddress); 4681 } 4682 4683 bool llvm::isSafeToSpeculativelyExecute(const Instruction *Inst, 4684 const Instruction *CtxI, 4685 const DominatorTree *DT, 4686 const TargetLibraryInfo *TLI) { 4687 return isSafeToSpeculativelyExecuteWithOpcode(Inst->getOpcode(), Inst, CtxI, 4688 DT, TLI); 4689 } 4690 4691 bool llvm::isSafeToSpeculativelyExecuteWithOpcode( 4692 unsigned Opcode, const Instruction *Inst, const Instruction *CtxI, 4693 const DominatorTree *DT, const TargetLibraryInfo *TLI) { 4694 #ifndef NDEBUG 4695 if (Inst->getOpcode() != Opcode) { 4696 // Check that the operands are actually compatible with the Opcode override. 
4697 auto hasEqualReturnAndLeadingOperandTypes =
4698 [](const Instruction *Inst, unsigned NumLeadingOperands) {
4699 if (Inst->getNumOperands() < NumLeadingOperands)
4700 return false;
4701 const Type *ExpectedType = Inst->getType();
4702 for (unsigned ItOp = 0; ItOp < NumLeadingOperands; ++ItOp)
4703 if (Inst->getOperand(ItOp)->getType() != ExpectedType)
4704 return false;
4705 return true;
4706 };
4707 assert(!Instruction::isBinaryOp(Opcode) ||
4708 hasEqualReturnAndLeadingOperandTypes(Inst, 2));
4709 assert(!Instruction::isUnaryOp(Opcode) ||
4710 hasEqualReturnAndLeadingOperandTypes(Inst, 1));
4711 }
4712 #endif
4713
4714 switch (Opcode) {
4715 default:
4716 return true;
4717 case Instruction::UDiv:
4718 case Instruction::URem: {
4719 // x / y is undefined if y == 0.
4720 const APInt *V;
4721 if (match(Inst->getOperand(1), m_APInt(V)))
4722 return *V != 0;
4723 return false;
4724 }
4725 case Instruction::SDiv:
4726 case Instruction::SRem: {
4727 // x / y is undefined if y == 0, or if x == INT_MIN and y == -1.
4728 const APInt *Numerator, *Denominator;
4729 if (!match(Inst->getOperand(1), m_APInt(Denominator)))
4730 return false;
4731 // We cannot hoist this division if the denominator is 0.
4732 if (*Denominator == 0)
4733 return false;
4734 // It's safe to hoist if the denominator is not 0 or -1.
4735 if (!Denominator->isAllOnes())
4736 return true;
4737 // At this point we know that the denominator is -1. It is safe to hoist as
4738 // long as we know that the numerator is not INT_MIN.
4739 if (match(Inst->getOperand(0), m_APInt(Numerator)))
4740 return !Numerator->isMinSignedValue();
4741 // The numerator *might* be MinSignedValue.
4742 return false;
4743 }
4744 case Instruction::Load: {
4745 const LoadInst *LI = dyn_cast<LoadInst>(Inst);
4746 if (!LI)
4747 return false;
4748 if (mustSuppressSpeculation(*LI))
4749 return false;
4750 const DataLayout &DL = LI->getModule()->getDataLayout();
4751 return isDereferenceableAndAlignedPointer(
4752 LI->getPointerOperand(), LI->getType(), LI->getAlign(), DL, CtxI, DT,
4753 TLI);
4754 }
4755 case Instruction::Call: {
4756 auto *CI = dyn_cast<const CallInst>(Inst);
4757 if (!CI)
4758 return false;
4759 const Function *Callee = CI->getCalledFunction();
4760
4761 // The called function could have undefined behavior or side-effects, even
4762 // if marked readnone nounwind.
4763 return Callee && Callee->isSpeculatable();
4764 }
4765 case Instruction::VAArg:
4766 case Instruction::Alloca:
4767 case Instruction::Invoke:
4768 case Instruction::CallBr:
4769 case Instruction::PHI:
4770 case Instruction::Store:
4771 case Instruction::Ret:
4772 case Instruction::Br:
4773 case Instruction::IndirectBr:
4774 case Instruction::Switch:
4775 case Instruction::Unreachable:
4776 case Instruction::Fence:
4777 case Instruction::AtomicRMW:
4778 case Instruction::AtomicCmpXchg:
4779 case Instruction::LandingPad:
4780 case Instruction::Resume:
4781 case Instruction::CatchSwitch:
4782 case Instruction::CatchPad:
4783 case Instruction::CatchRet:
4784 case Instruction::CleanupPad:
4785 case Instruction::CleanupRet:
4786 return false; // Misc instructions which have effects
4787 }
4788 }
4789
4790 bool llvm::mayHaveNonDefUseDependency(const Instruction &I) {
4791 if (I.mayReadOrWriteMemory())
4792 // Memory dependency possible
4793 return true;
4794 if (!isSafeToSpeculativelyExecute(&I))
4795 // Can't move above a maythrow call or infinite loop. Or if an
4796 // inalloca alloca, above a stacksave call.
4797 return true;
4798 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
4799 // 1) Can't reorder two inf-loop calls, even if readonly
4800 // 2) Also can't reorder an inf-loop call below an instruction which isn't
4801 // safe to speculatively execute. (Inverse of above)
4802 return true;
4803 return false;
4804 }
4805
4806 /// Convert ConstantRange OverflowResult into ValueTracking OverflowResult.
4807 static OverflowResult mapOverflowResult(ConstantRange::OverflowResult OR) {
4808 switch (OR) {
4809 case ConstantRange::OverflowResult::MayOverflow:
4810 return OverflowResult::MayOverflow;
4811 case ConstantRange::OverflowResult::AlwaysOverflowsLow:
4812 return OverflowResult::AlwaysOverflowsLow;
4813 case ConstantRange::OverflowResult::AlwaysOverflowsHigh:
4814 return OverflowResult::AlwaysOverflowsHigh;
4815 case ConstantRange::OverflowResult::NeverOverflows:
4816 return OverflowResult::NeverOverflows;
4817 }
4818 llvm_unreachable("Unknown OverflowResult");
4819 }
4820
4821 /// Combine constant ranges from computeConstantRange() and computeKnownBits().
4822 static ConstantRange computeConstantRangeIncludingKnownBits(
4823 const Value *V, bool ForSigned, const DataLayout &DL, unsigned Depth,
4824 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4825 OptimizationRemarkEmitter *ORE = nullptr, bool UseInstrInfo = true) {
4826 KnownBits Known = computeKnownBits(
4827 V, DL, Depth, AC, CxtI, DT, ORE, UseInstrInfo);
4828 ConstantRange CR1 = ConstantRange::fromKnownBits(Known, ForSigned);
4829 ConstantRange CR2 = computeConstantRange(V, UseInstrInfo);
4830 ConstantRange::PreferredRangeType RangeType =
4831 ForSigned ? ConstantRange::Signed : ConstantRange::Unsigned;
4832 return CR1.intersectWith(CR2, RangeType);
4833 }
4834
4835 OverflowResult llvm::computeOverflowForUnsignedMul(
4836 const Value *LHS, const Value *RHS, const DataLayout &DL,
4837 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4838 bool UseInstrInfo) {
4839 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4840 nullptr, UseInstrInfo);
4841 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4842 nullptr, UseInstrInfo);
4843 ConstantRange LHSRange = ConstantRange::fromKnownBits(LHSKnown, false);
4844 ConstantRange RHSRange = ConstantRange::fromKnownBits(RHSKnown, false);
4845 return mapOverflowResult(LHSRange.unsignedMulMayOverflow(RHSRange));
4846 }
4847
4848 OverflowResult
4849 llvm::computeOverflowForSignedMul(const Value *LHS, const Value *RHS,
4850 const DataLayout &DL, AssumptionCache *AC,
4851 const Instruction *CxtI,
4852 const DominatorTree *DT, bool UseInstrInfo) {
4853 // Multiplying n * m significant bits yields a result of n + m significant
4854 // bits. If the total number of significant bits does not exceed the
4855 // result bit width (minus 1), there is no overflow.
4856 // This means if we have enough leading sign bits in the operands
4857 // we can guarantee that the result does not overflow.
4858 // Ref: "Hacker's Delight" by Henry Warren
4859 unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
4860
4861 // Note that underestimating the number of sign bits gives a more
4862 // conservative answer.
4863 unsigned SignBits = ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) +
4864 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT);
4865
4866 // First handle the easy case: if we have enough sign bits there's
4867 // definitely no overflow.
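// As a worked instance of this rule: for a mul of two i16 values that each
// have at least 9 sign bits, SignBits >= 18 > 16 + 1. Each operand then fits
// in 8 signed bits, i.e. lies in [-128, 127], so the true product has
// magnitude at most 16384 and always fits in i16.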
4868 if (SignBits > BitWidth + 1)
4869 return OverflowResult::NeverOverflows;
4870
4871 // There are two ambiguous cases where there can be no overflow:
4872 // SignBits == BitWidth + 1 and
4873 // SignBits == BitWidth
4874 // The second case is difficult to check, therefore we only handle the
4875 // first case.
4876 if (SignBits == BitWidth + 1) {
4877 // It overflows only when both arguments are negative and the true
4878 // product is exactly the minimum negative number.
4879 // E.g. mul i16 with 17 sign bits: 0xff00 * 0xff80 = 0x8000
4880 // For simplicity we just check if at least one side is not negative.
4881 KnownBits LHSKnown = computeKnownBits(LHS, DL, /*Depth=*/0, AC, CxtI, DT,
4882 nullptr, UseInstrInfo);
4883 KnownBits RHSKnown = computeKnownBits(RHS, DL, /*Depth=*/0, AC, CxtI, DT,
4884 nullptr, UseInstrInfo);
4885 if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative())
4886 return OverflowResult::NeverOverflows;
4887 }
4888 return OverflowResult::MayOverflow;
4889 }
4890
4891 OverflowResult llvm::computeOverflowForUnsignedAdd(
4892 const Value *LHS, const Value *RHS, const DataLayout &DL,
4893 AssumptionCache *AC, const Instruction *CxtI, const DominatorTree *DT,
4894 bool UseInstrInfo) {
4895 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4896 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4897 nullptr, UseInstrInfo);
4898 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4899 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT,
4900 nullptr, UseInstrInfo);
4901 return mapOverflowResult(LHSRange.unsignedAddMayOverflow(RHSRange));
4902 }
4903
4904 static OverflowResult computeOverflowForSignedAdd(const Value *LHS,
4905 const Value *RHS,
4906 const AddOperator *Add,
4907 const DataLayout &DL,
4908 AssumptionCache *AC,
4909 const Instruction *CxtI,
4910 const DominatorTree *DT) {
4911 if (Add && Add->hasNoSignedWrap()) {
4912 return OverflowResult::NeverOverflows;
4913 }
4914
4915 // If LHS and RHS each have at least two sign bits, the addition will look
4916 // like
4917 //
4918 // XX..... +
4919 // YY.....
4920 //
4921 // If the carry into the most significant position is 0, X and Y can't both
4922 // be 1 and therefore the carry out of the addition is also 0.
4923 //
4924 // If the carry into the most significant position is 1, X and Y can't both
4925 // be 0 and therefore the carry out of the addition is also 1.
4926 //
4927 // Since the carry into the most significant position is always equal to
4928 // the carry out of the addition, there is no signed overflow.
4929 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
4930 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
4931 return OverflowResult::NeverOverflows;
4932
4933 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
4934 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4935 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
4936 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
4937 OverflowResult OR =
4938 mapOverflowResult(LHSRange.signedAddMayOverflow(RHSRange));
4939 if (OR != OverflowResult::MayOverflow)
4940 return OR;
4941
4942 // The remaining code needs Add to be available. Return early if it is not.
4943 if (!Add)
4944 return OverflowResult::MayOverflow;
4945
4946 // If the sign of Add is the same as at least one of the operands, this add
4947 // CANNOT overflow. If this can be determined from the known bits of the
4948 // operands the above signedAddMayOverflow() check will have already done so.
4949 // The only other way to improve on the known bits is from an assumption, so 4950 // call computeKnownBitsFromAssume() directly. 4951 bool LHSOrRHSKnownNonNegative = 4952 (LHSRange.isAllNonNegative() || RHSRange.isAllNonNegative()); 4953 bool LHSOrRHSKnownNegative = 4954 (LHSRange.isAllNegative() || RHSRange.isAllNegative()); 4955 if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) { 4956 KnownBits AddKnown(LHSRange.getBitWidth()); 4957 computeKnownBitsFromAssume( 4958 Add, AddKnown, /*Depth=*/0, Query(DL, AC, CxtI, DT, true)); 4959 if ((AddKnown.isNonNegative() && LHSOrRHSKnownNonNegative) || 4960 (AddKnown.isNegative() && LHSOrRHSKnownNegative)) 4961 return OverflowResult::NeverOverflows; 4962 } 4963 4964 return OverflowResult::MayOverflow; 4965 } 4966 4967 OverflowResult llvm::computeOverflowForUnsignedSub(const Value *LHS, 4968 const Value *RHS, 4969 const DataLayout &DL, 4970 AssumptionCache *AC, 4971 const Instruction *CxtI, 4972 const DominatorTree *DT) { 4973 // X - (X % ?) 4974 // The remainder of a value can't have greater magnitude than itself, 4975 // so the subtraction can't overflow. 4976 4977 // X - (X -nuw ?) 4978 // In the minimal case, this would simplify to "?", so there's no subtract 4979 // at all. But if this analysis is used to peek through casts, for example, 4980 // then determining no-overflow may allow other transforms. 4981 4982 // TODO: There are other patterns like this. 4983 // See simplifyICmpWithBinOpOnLHS() for candidates. 4984 if (match(RHS, m_URem(m_Specific(LHS), m_Value())) || 4985 match(RHS, m_NUWSub(m_Specific(LHS), m_Value()))) 4986 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT)) 4987 return OverflowResult::NeverOverflows; 4988 4989 // Checking for conditions implied by dominating conditions may be expensive. 4990 // Limit it to usub_with_overflow calls for now. 4991 if (match(CxtI, 4992 m_Intrinsic<Intrinsic::usub_with_overflow>(m_Value(), m_Value()))) 4993 if (auto C = 4994 isImpliedByDomCondition(CmpInst::ICMP_UGE, LHS, RHS, CxtI, DL)) { 4995 if (*C) 4996 return OverflowResult::NeverOverflows; 4997 return OverflowResult::AlwaysOverflowsLow; 4998 } 4999 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits( 5000 LHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); 5001 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits( 5002 RHS, /*ForSigned=*/false, DL, /*Depth=*/0, AC, CxtI, DT); 5003 return mapOverflowResult(LHSRange.unsignedSubMayOverflow(RHSRange)); 5004 } 5005 5006 OverflowResult llvm::computeOverflowForSignedSub(const Value *LHS, 5007 const Value *RHS, 5008 const DataLayout &DL, 5009 AssumptionCache *AC, 5010 const Instruction *CxtI, 5011 const DominatorTree *DT) { 5012 // X - (X % ?) 5013 // The remainder of a value can't have greater magnitude than itself, 5014 // so the subtraction can't overflow. 5015 5016 // X - (X -nsw ?) 5017 // In the minimal case, this would simplify to "?", so there's no subtract 5018 // at all. But if this analysis is used to peek through casts, for example, 5019 // then determining no-overflow may allow other transforms. 5020 if (match(RHS, m_SRem(m_Specific(LHS), m_Value())) || 5021 match(RHS, m_NSWSub(m_Specific(LHS), m_Value()))) 5022 if (isGuaranteedNotToBeUndefOrPoison(LHS, AC, CxtI, DT)) 5023 return OverflowResult::NeverOverflows; 5024 5025 // If LHS and RHS each have at least two sign bits, the subtraction 5026 // cannot overflow. 
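// For example, with two sign bits in an i8 subtraction both operands lie in
// [-64, 63], so the difference lies in [-127, 127] and always fits.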
5027 if (ComputeNumSignBits(LHS, DL, 0, AC, CxtI, DT) > 1 &&
5028 ComputeNumSignBits(RHS, DL, 0, AC, CxtI, DT) > 1)
5029 return OverflowResult::NeverOverflows;
5030
5031 ConstantRange LHSRange = computeConstantRangeIncludingKnownBits(
5032 LHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5033 ConstantRange RHSRange = computeConstantRangeIncludingKnownBits(
5034 RHS, /*ForSigned=*/true, DL, /*Depth=*/0, AC, CxtI, DT);
5035 return mapOverflowResult(LHSRange.signedSubMayOverflow(RHSRange));
5036 }
5037
5038 bool llvm::isOverflowIntrinsicNoWrap(const WithOverflowInst *WO,
5039 const DominatorTree &DT) {
5040 SmallVector<const BranchInst *, 2> GuardingBranches;
5041 SmallVector<const ExtractValueInst *, 2> Results;
5042
5043 for (const User *U : WO->users()) {
5044 if (const auto *EVI = dyn_cast<ExtractValueInst>(U)) {
5045 assert(EVI->getNumIndices() == 1 && "Obvious from CI's type");
5046
5047 if (EVI->getIndices()[0] == 0)
5048 Results.push_back(EVI);
5049 else {
5050 assert(EVI->getIndices()[0] == 1 && "Obvious from CI's type");
5051
5052 for (const auto *U : EVI->users())
5053 if (const auto *B = dyn_cast<BranchInst>(U)) {
5054 assert(B->isConditional() && "How else is it using an i1?");
5055 GuardingBranches.push_back(B);
5056 }
5057 }
5058 } else {
5059 // We are using the aggregate directly in a way we don't want to analyze
5060 // here (storing it to a global, say).
5061 return false;
5062 }
5063 }
5064
5065 auto AllUsesGuardedByBranch = [&](const BranchInst *BI) {
5066 BasicBlockEdge NoWrapEdge(BI->getParent(), BI->getSuccessor(1));
5067 if (!NoWrapEdge.isSingleEdge())
5068 return false;
5069
5070 // Check if all users of the add are provably no-wrap.
5071 for (const auto *Result : Results) {
5072 // If the extractvalue itself is not executed on overflow, then we don't
5073 // need to check each use separately, since domination is transitive.
5074 if (DT.dominates(NoWrapEdge, Result->getParent()))
5075 continue;
5076
5077 for (const auto &RU : Result->uses())
5078 if (!DT.dominates(NoWrapEdge, RU))
5079 return false;
5080 }
5081
5082 return true;
5083 };
5084
5085 return llvm::any_of(GuardingBranches, AllUsesGuardedByBranch);
5086 }
5087
5088 static bool canCreateUndefOrPoison(const Operator *Op, bool PoisonOnly,
5089 bool ConsiderFlags) {
5090
5091 if (ConsiderFlags && Op->hasPoisonGeneratingFlags())
5092 return true;
5093
5094 unsigned Opcode = Op->getOpcode();
5095
5096 // Check whether opcode is a poison/undef-generating operation
5097 switch (Opcode) {
5098 case Instruction::Shl:
5099 case Instruction::AShr:
5100 case Instruction::LShr: {
5101 // Shifts return poison if the shift amount equals or exceeds the bit width.
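// E.g. (shl i32 %x, 33) is poison for every %x, and for vectors the check
// is per-lane: in (lshr <2 x i8> %v, <i8 1, i8 8>) the second lane's shift
// amount equals the bit width, so that lane is poison.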
5102 if (auto *C = dyn_cast<Constant>(Op->getOperand(1))) { 5103 SmallVector<Constant *, 4> ShiftAmounts; 5104 if (auto *FVTy = dyn_cast<FixedVectorType>(C->getType())) { 5105 unsigned NumElts = FVTy->getNumElements(); 5106 for (unsigned i = 0; i < NumElts; ++i) 5107 ShiftAmounts.push_back(C->getAggregateElement(i)); 5108 } else if (isa<ScalableVectorType>(C->getType())) 5109 return true; // Can't tell, just return true to be safe 5110 else 5111 ShiftAmounts.push_back(C); 5112 5113 bool Safe = llvm::all_of(ShiftAmounts, [](Constant *C) { 5114 auto *CI = dyn_cast_or_null<ConstantInt>(C); 5115 return CI && CI->getValue().ult(C->getType()->getIntegerBitWidth()); 5116 }); 5117 return !Safe; 5118 } 5119 return true; 5120 } 5121 case Instruction::FPToSI: 5122 case Instruction::FPToUI: 5123 // fptosi/ui yields poison if the resulting value does not fit in the 5124 // destination type. 5125 return true; 5126 case Instruction::Call: 5127 if (auto *II = dyn_cast<IntrinsicInst>(Op)) { 5128 switch (II->getIntrinsicID()) { 5129 // TODO: Add more intrinsics. 5130 case Intrinsic::ctpop: 5131 case Intrinsic::sadd_with_overflow: 5132 case Intrinsic::ssub_with_overflow: 5133 case Intrinsic::smul_with_overflow: 5134 case Intrinsic::uadd_with_overflow: 5135 case Intrinsic::usub_with_overflow: 5136 case Intrinsic::umul_with_overflow: 5137 return false; 5138 } 5139 } 5140 LLVM_FALLTHROUGH; 5141 case Instruction::CallBr: 5142 case Instruction::Invoke: { 5143 const auto *CB = cast<CallBase>(Op); 5144 return !CB->hasRetAttr(Attribute::NoUndef); 5145 } 5146 case Instruction::InsertElement: 5147 case Instruction::ExtractElement: { 5148 // If index exceeds the length of the vector, it returns poison 5149 auto *VTy = cast<VectorType>(Op->getOperand(0)->getType()); 5150 unsigned IdxOp = Op->getOpcode() == Instruction::InsertElement ? 2 : 1; 5151 auto *Idx = dyn_cast<ConstantInt>(Op->getOperand(IdxOp)); 5152 if (!Idx || Idx->getValue().uge(VTy->getElementCount().getKnownMinValue())) 5153 return true; 5154 return false; 5155 } 5156 case Instruction::ShuffleVector: { 5157 // shufflevector may return undef. 5158 if (PoisonOnly) 5159 return false; 5160 ArrayRef<int> Mask = isa<ConstantExpr>(Op) 5161 ? cast<ConstantExpr>(Op)->getShuffleMask() 5162 : cast<ShuffleVectorInst>(Op)->getShuffleMask(); 5163 return is_contained(Mask, UndefMaskElem); 5164 } 5165 case Instruction::FNeg: 5166 case Instruction::PHI: 5167 case Instruction::Select: 5168 case Instruction::URem: 5169 case Instruction::SRem: 5170 case Instruction::ExtractValue: 5171 case Instruction::InsertValue: 5172 case Instruction::Freeze: 5173 case Instruction::ICmp: 5174 case Instruction::FCmp: 5175 return false; 5176 case Instruction::GetElementPtr: 5177 // inbounds is handled above 5178 // TODO: what about inrange on constexpr? 5179 return false; 5180 default: { 5181 const auto *CE = dyn_cast<ConstantExpr>(Op); 5182 if (isa<CastInst>(Op) || (CE && CE->isCast())) 5183 return false; 5184 else if (Instruction::isBinaryOp(Opcode)) 5185 return false; 5186 // Be conservative and return true. 
5187 return true; 5188 } 5189 } 5190 } 5191 5192 bool llvm::canCreateUndefOrPoison(const Operator *Op, bool ConsiderFlags) { 5193 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/false, ConsiderFlags); 5194 } 5195 5196 bool llvm::canCreatePoison(const Operator *Op, bool ConsiderFlags) { 5197 return ::canCreateUndefOrPoison(Op, /*PoisonOnly=*/true, ConsiderFlags); 5198 } 5199 5200 static bool directlyImpliesPoison(const Value *ValAssumedPoison, 5201 const Value *V, unsigned Depth) { 5202 if (ValAssumedPoison == V) 5203 return true; 5204 5205 const unsigned MaxDepth = 2; 5206 if (Depth >= MaxDepth) 5207 return false; 5208 5209 if (const auto *I = dyn_cast<Instruction>(V)) { 5210 if (propagatesPoison(cast<Operator>(I))) 5211 return any_of(I->operands(), [=](const Value *Op) { 5212 return directlyImpliesPoison(ValAssumedPoison, Op, Depth + 1); 5213 }); 5214 5215 // 'select ValAssumedPoison, _, _' is poison. 5216 if (const auto *SI = dyn_cast<SelectInst>(I)) 5217 return directlyImpliesPoison(ValAssumedPoison, SI->getCondition(), 5218 Depth + 1); 5219 // V = extractvalue V0, idx 5220 // V2 = extractvalue V0, idx2 5221 // V0's elements are all poison or not. (e.g., add_with_overflow) 5222 const WithOverflowInst *II; 5223 if (match(I, m_ExtractValue(m_WithOverflowInst(II))) && 5224 (match(ValAssumedPoison, m_ExtractValue(m_Specific(II))) || 5225 llvm::is_contained(II->args(), ValAssumedPoison))) 5226 return true; 5227 } 5228 return false; 5229 } 5230 5231 static bool impliesPoison(const Value *ValAssumedPoison, const Value *V, 5232 unsigned Depth) { 5233 if (isGuaranteedNotToBeUndefOrPoison(ValAssumedPoison)) 5234 return true; 5235 5236 if (directlyImpliesPoison(ValAssumedPoison, V, /* Depth */ 0)) 5237 return true; 5238 5239 const unsigned MaxDepth = 2; 5240 if (Depth >= MaxDepth) 5241 return false; 5242 5243 const auto *I = dyn_cast<Instruction>(ValAssumedPoison); 5244 if (I && !canCreatePoison(cast<Operator>(I))) { 5245 return all_of(I->operands(), [=](const Value *Op) { 5246 return impliesPoison(Op, V, Depth + 1); 5247 }); 5248 } 5249 return false; 5250 } 5251 5252 bool llvm::impliesPoison(const Value *ValAssumedPoison, const Value *V) { 5253 return ::impliesPoison(ValAssumedPoison, V, /* Depth */ 0); 5254 } 5255 5256 static bool programUndefinedIfUndefOrPoison(const Value *V, 5257 bool PoisonOnly); 5258 5259 static bool isGuaranteedNotToBeUndefOrPoison(const Value *V, 5260 AssumptionCache *AC, 5261 const Instruction *CtxI, 5262 const DominatorTree *DT, 5263 unsigned Depth, bool PoisonOnly) { 5264 if (Depth >= MaxAnalysisRecursionDepth) 5265 return false; 5266 5267 if (isa<MetadataAsValue>(V)) 5268 return false; 5269 5270 if (const auto *A = dyn_cast<Argument>(V)) { 5271 if (A->hasAttribute(Attribute::NoUndef)) 5272 return true; 5273 } 5274 5275 if (auto *C = dyn_cast<Constant>(V)) { 5276 if (isa<UndefValue>(C)) 5277 return PoisonOnly && !isa<PoisonValue>(C); 5278 5279 if (isa<ConstantInt>(C) || isa<GlobalVariable>(C) || isa<ConstantFP>(V) || 5280 isa<ConstantPointerNull>(C) || isa<Function>(C)) 5281 return true; 5282 5283 if (C->getType()->isVectorTy() && !isa<ConstantExpr>(C)) 5284 return (PoisonOnly ? !C->containsPoisonElement() 5285 : !C->containsUndefOrPoisonElement()) && 5286 !C->containsConstantExpression(); 5287 } 5288 5289 // Strip cast operations from a pointer value. 5290 // Note that stripPointerCastsSameRepresentation can strip off getelementptr 5291 // inbounds with zero offset. 
To guarantee that the result isn't poison, the
5292 // stripped pointer is checked: it has to point into an allocated object
5293 // or be null, so that `inbounds` getelementptrs with a zero offset cannot
5294 // produce poison.
5295 // It can also strip off addrspacecasts that do not change the bit
5296 // representation; we treat such an addrspacecast as equivalent to a no-op.
5297 auto *StrippedV = V->stripPointerCastsSameRepresentation();
5298 if (isa<AllocaInst>(StrippedV) || isa<GlobalVariable>(StrippedV) ||
5299 isa<Function>(StrippedV) || isa<ConstantPointerNull>(StrippedV))
5300 return true;
5301
5302 auto OpCheck = [&](const Value *V) {
5303 return isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth + 1,
5304 PoisonOnly);
5305 };
5306
5307 if (auto *Opr = dyn_cast<Operator>(V)) {
5308 // If the value is a freeze instruction, then it can never
5309 // be undef or poison.
5310 if (isa<FreezeInst>(V))
5311 return true;
5312
5313 if (const auto *CB = dyn_cast<CallBase>(V)) {
5314 if (CB->hasRetAttr(Attribute::NoUndef))
5315 return true;
5316 }
5317
5318 if (const auto *PN = dyn_cast<PHINode>(V)) {
5319 unsigned Num = PN->getNumIncomingValues();
5320 bool IsWellDefined = true;
5321 for (unsigned i = 0; i < Num; ++i) {
5322 auto *TI = PN->getIncomingBlock(i)->getTerminator();
5323 if (!isGuaranteedNotToBeUndefOrPoison(PN->getIncomingValue(i), AC, TI,
5324 DT, Depth + 1, PoisonOnly)) {
5325 IsWellDefined = false;
5326 break;
5327 }
5328 }
5329 if (IsWellDefined)
5330 return true;
5331 } else if (!canCreateUndefOrPoison(Opr) && all_of(Opr->operands(), OpCheck))
5332 return true;
5333 }
5334
5335 if (auto *I = dyn_cast<LoadInst>(V))
5336 if (I->hasMetadata(LLVMContext::MD_noundef) ||
5337 I->hasMetadata(LLVMContext::MD_dereferenceable) ||
5338 I->hasMetadata(LLVMContext::MD_dereferenceable_or_null))
5339 return true;
5340
5341 if (programUndefinedIfUndefOrPoison(V, PoisonOnly))
5342 return true;
5343
5344 // CtxI may be null or a cloned instruction.
5345 if (!CtxI || !CtxI->getParent() || !DT)
5346 return false;
5347
5348 auto *DNode = DT->getNode(CtxI->getParent());
5349 if (!DNode)
5350 // Unreachable block
5351 return false;
5352
5353 // If V is used as a branch condition before reaching CtxI, V cannot be
5354 // undef or poison.
5355 // br V, BB1, BB2 5356 // BB1: 5357 // CtxI ; V cannot be undef or poison here 5358 auto *Dominator = DNode->getIDom(); 5359 while (Dominator) { 5360 auto *TI = Dominator->getBlock()->getTerminator(); 5361 5362 Value *Cond = nullptr; 5363 if (auto BI = dyn_cast_or_null<BranchInst>(TI)) { 5364 if (BI->isConditional()) 5365 Cond = BI->getCondition(); 5366 } else if (auto SI = dyn_cast_or_null<SwitchInst>(TI)) { 5367 Cond = SI->getCondition(); 5368 } 5369 5370 if (Cond) { 5371 if (Cond == V) 5372 return true; 5373 else if (PoisonOnly && isa<Operator>(Cond)) { 5374 // For poison, we can analyze further 5375 auto *Opr = cast<Operator>(Cond); 5376 if (propagatesPoison(Opr) && is_contained(Opr->operand_values(), V)) 5377 return true; 5378 } 5379 } 5380 5381 Dominator = Dominator->getIDom(); 5382 } 5383 5384 if (getKnowledgeValidInContext(V, {Attribute::NoUndef}, CtxI, DT, AC)) 5385 return true; 5386 5387 return false; 5388 } 5389 5390 bool llvm::isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC, 5391 const Instruction *CtxI, 5392 const DominatorTree *DT, 5393 unsigned Depth) { 5394 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, false); 5395 } 5396 5397 bool llvm::isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC, 5398 const Instruction *CtxI, 5399 const DominatorTree *DT, unsigned Depth) { 5400 return ::isGuaranteedNotToBeUndefOrPoison(V, AC, CtxI, DT, Depth, true); 5401 } 5402 5403 OverflowResult llvm::computeOverflowForSignedAdd(const AddOperator *Add, 5404 const DataLayout &DL, 5405 AssumptionCache *AC, 5406 const Instruction *CxtI, 5407 const DominatorTree *DT) { 5408 return ::computeOverflowForSignedAdd(Add->getOperand(0), Add->getOperand(1), 5409 Add, DL, AC, CxtI, DT); 5410 } 5411 5412 OverflowResult llvm::computeOverflowForSignedAdd(const Value *LHS, 5413 const Value *RHS, 5414 const DataLayout &DL, 5415 AssumptionCache *AC, 5416 const Instruction *CxtI, 5417 const DominatorTree *DT) { 5418 return ::computeOverflowForSignedAdd(LHS, RHS, nullptr, DL, AC, CxtI, DT); 5419 } 5420 5421 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const Instruction *I) { 5422 // Note: An atomic operation isn't guaranteed to return in a reasonable amount 5423 // of time because it's possible for another thread to interfere with it for an 5424 // arbitrary length of time, but programs aren't allowed to rely on that. 5425 5426 // If there is no successor, then execution can't transfer to it. 5427 if (isa<ReturnInst>(I)) 5428 return false; 5429 if (isa<UnreachableInst>(I)) 5430 return false; 5431 5432 // Note: Do not add new checks here; instead, change Instruction::mayThrow or 5433 // Instruction::willReturn. 5434 // 5435 // FIXME: Move this check into Instruction::willReturn. 5436 if (isa<CatchPadInst>(I)) { 5437 switch (classifyEHPersonality(I->getFunction()->getPersonalityFn())) { 5438 default: 5439 // A catchpad may invoke exception object constructors and such, which 5440 // in some languages can be arbitrary code, so be conservative by default. 5441 return false; 5442 case EHPersonality::CoreCLR: 5443 // For CoreCLR, it just involves a type test. 5444 return true; 5445 } 5446 } 5447 5448 // An instruction that returns without throwing must transfer control flow 5449 // to a successor. 
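// For instance, under this final check a call that is both 'nounwind' and
// 'willreturn' is considered to transfer execution, while a plain external
// call (which may throw or loop forever) is not.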
5450 return !I->mayThrow() && I->willReturn();
5451 }
5452
5453 bool llvm::isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB) {
5454 // TODO: This is slightly conservative for invoke instructions since exiting
5455 // via an exception *is* normal control flow for them.
5456 for (const Instruction &I : *BB)
5457 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5458 return false;
5459 return true;
5460 }
5461
5462 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5463 BasicBlock::const_iterator Begin, BasicBlock::const_iterator End,
5464 unsigned ScanLimit) {
5465 return isGuaranteedToTransferExecutionToSuccessor(make_range(Begin, End),
5466 ScanLimit);
5467 }
5468
5469 bool llvm::isGuaranteedToTransferExecutionToSuccessor(
5470 iterator_range<BasicBlock::const_iterator> Range, unsigned ScanLimit) {
5471 assert(ScanLimit && "scan limit must be non-zero");
5472 for (const Instruction &I : Range) {
5473 if (isa<DbgInfoIntrinsic>(I))
5474 continue;
5475 if (--ScanLimit == 0)
5476 return false;
5477 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5478 return false;
5479 }
5480 return true;
5481 }
5482
5483 bool llvm::isGuaranteedToExecuteForEveryIteration(const Instruction *I,
5484 const Loop *L) {
5485 // The loop header is guaranteed to be executed for every iteration.
5486 //
5487 // FIXME: Relax this constraint to cover all basic blocks that are
5488 // guaranteed to be executed at every iteration.
5489 if (I->getParent() != L->getHeader()) return false;
5490
5491 for (const Instruction &LI : *L->getHeader()) {
5492 if (&LI == I) return true;
5493 if (!isGuaranteedToTransferExecutionToSuccessor(&LI)) return false;
5494 }
5495 llvm_unreachable("Instruction not contained in its own parent basic block.");
5496 }
5497
5498 bool llvm::propagatesPoison(const Operator *I) {
5499 switch (I->getOpcode()) {
5500 case Instruction::Freeze:
5501 case Instruction::Select:
5502 case Instruction::PHI:
5503 case Instruction::Invoke:
5504 return false;
5505 case Instruction::Call:
5506 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
5507 switch (II->getIntrinsicID()) {
5508 // TODO: Add more intrinsics.
5509 case Intrinsic::sadd_with_overflow:
5510 case Intrinsic::ssub_with_overflow:
5511 case Intrinsic::smul_with_overflow:
5512 case Intrinsic::uadd_with_overflow:
5513 case Intrinsic::usub_with_overflow:
5514 case Intrinsic::umul_with_overflow:
5515 // If an input is a vector containing a poison element, the
5516 // corresponding lanes of both result vectors (the calculated
5517 // values and the overflow bits) are poison.
5518 return true;
5519 case Intrinsic::ctpop:
5520 return true;
5521 }
5522 }
5523 return false;
5524 case Instruction::ICmp:
5525 case Instruction::FCmp:
5526 case Instruction::GetElementPtr:
5527 return true;
5528 default:
5529 if (isa<BinaryOperator>(I) || isa<UnaryOperator>(I) || isa<CastInst>(I))
5530 return true;
5531
5532 // Be conservative and return false.
5533 return false;
5534 }
5535 }
5536
5537 void llvm::getGuaranteedWellDefinedOps(
5538 const Instruction *I, SmallPtrSetImpl<const Value *> &Operands) {
5539 switch (I->getOpcode()) {
5540 case Instruction::Store:
5541 Operands.insert(cast<StoreInst>(I)->getPointerOperand());
5542 break;
5543
5544 case Instruction::Load:
5545 Operands.insert(cast<LoadInst>(I)->getPointerOperand());
5546 break;
5547
5548 // Since the dereferenceable attribute implies noundef, atomic operations
5549 // also implicitly have noundef pointers.
5550 case Instruction::AtomicCmpXchg:
5551 Operands.insert(cast<AtomicCmpXchgInst>(I)->getPointerOperand());
5552 break;
5553
5554 case Instruction::AtomicRMW:
5555 Operands.insert(cast<AtomicRMWInst>(I)->getPointerOperand());
5556 break;
5557
5558 case Instruction::Call:
5559 case Instruction::Invoke: {
5560 const CallBase *CB = cast<CallBase>(I);
5561 if (CB->isIndirectCall())
5562 Operands.insert(CB->getCalledOperand());
5563 for (unsigned i = 0; i < CB->arg_size(); ++i) {
5564 if (CB->paramHasAttr(i, Attribute::NoUndef) ||
5565 CB->paramHasAttr(i, Attribute::Dereferenceable))
5566 Operands.insert(CB->getArgOperand(i));
5567 }
5568 break;
5569 }
5570 case Instruction::Ret:
5571 if (I->getFunction()->hasRetAttribute(Attribute::NoUndef))
5572 Operands.insert(I->getOperand(0));
5573 break;
5574 default:
5575 break;
5576 }
5577 }
5578
5579 void llvm::getGuaranteedNonPoisonOps(const Instruction *I,
5580 SmallPtrSetImpl<const Value *> &Operands) {
5581 getGuaranteedWellDefinedOps(I, Operands);
5582 switch (I->getOpcode()) {
5583 // Divisors of these operations may be partially undef, but must not be poison.
5584 case Instruction::UDiv:
5585 case Instruction::SDiv:
5586 case Instruction::URem:
5587 case Instruction::SRem:
5588 Operands.insert(I->getOperand(1));
5589 break;
5590 case Instruction::Switch:
5591 if (BranchOnPoisonAsUB)
5592 Operands.insert(cast<SwitchInst>(I)->getCondition());
5593 break;
5594 case Instruction::Br: {
5595 auto *BR = cast<BranchInst>(I);
5596 if (BranchOnPoisonAsUB && BR->isConditional())
5597 Operands.insert(BR->getCondition());
5598 break;
5599 }
5600 default:
5601 break;
5602 }
5603 }
5604
5605 bool llvm::mustTriggerUB(const Instruction *I,
5606 const SmallSet<const Value *, 16>& KnownPoison) {
5607 SmallPtrSet<const Value *, 4> NonPoisonOps;
5608 getGuaranteedNonPoisonOps(I, NonPoisonOps);
5609
5610 for (const auto *V : NonPoisonOps)
5611 if (KnownPoison.count(V))
5612 return true;
5613
5614 return false;
5615 }
5616
5617 static bool programUndefinedIfUndefOrPoison(const Value *V,
5618 bool PoisonOnly) {
5619 // We currently only look for uses of values within the same basic
5620 // block, as that makes it easier to guarantee that the uses will be
5621 // executed given that V is executed.
5622 //
5623 // FIXME: Expand this to consider uses beyond the same basic block. To do
5624 // this, look out for the distinction between post-dominance and strong
5625 // post-dominance.
5626 const BasicBlock *BB = nullptr;
5627 BasicBlock::const_iterator Begin;
5628 if (const auto *Inst = dyn_cast<Instruction>(V)) {
5629 BB = Inst->getParent();
5630 Begin = Inst->getIterator();
5631 Begin++;
5632 } else if (const auto *Arg = dyn_cast<Argument>(V)) {
5633 BB = &Arg->getParent()->getEntryBlock();
5634 Begin = BB->begin();
5635 } else {
5636 return false;
5637 }
5638
5639 // Limit number of instructions we look at, to avoid scanning through large
5640 // blocks. The current limit is chosen arbitrarily.
5641 unsigned ScanLimit = 32;
5642 BasicBlock::const_iterator End = BB->end();
5643
5644 if (!PoisonOnly) {
5645 // Since undef does not propagate eagerly, be conservative and just check
5646 // whether a value is directly passed to an instruction that must take
5647 // well-defined operands.
5648
5649 for (const auto &I : make_range(Begin, End)) {
5650 if (isa<DbgInfoIntrinsic>(I))
5651 continue;
5652 if (--ScanLimit == 0)
5653 break;
5654
5655 SmallPtrSet<const Value *, 4> WellDefinedOps;
5656 getGuaranteedWellDefinedOps(&I, WellDefinedOps);
5657 if (WellDefinedOps.contains(V))
5658 return true;
5659
5660 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5661 break;
5662 }
5663 return false;
5664 }
5665
5666 // Set of instructions that we have proved will yield poison if V
5667 // does.
5668 SmallSet<const Value *, 16> YieldsPoison;
5669 SmallSet<const BasicBlock *, 4> Visited;
5670
5671 YieldsPoison.insert(V);
5672 auto Propagate = [&](const User *User) {
5673 if (propagatesPoison(cast<Operator>(User)))
5674 YieldsPoison.insert(User);
5675 };
5676 for_each(V->users(), Propagate);
5677 Visited.insert(BB);
5678
5679 while (true) {
5680 for (const auto &I : make_range(Begin, End)) {
5681 if (isa<DbgInfoIntrinsic>(I))
5682 continue;
5683 if (--ScanLimit == 0)
5684 return false;
5685 if (mustTriggerUB(&I, YieldsPoison))
5686 return true;
5687 if (!isGuaranteedToTransferExecutionToSuccessor(&I))
5688 return false;
5689
5690 // Mark poison that propagates from I through uses of I.
5691 if (YieldsPoison.count(&I))
5692 for_each(I.users(), Propagate);
5693 }
5694
5695 BB = BB->getSingleSuccessor();
5696 if (!BB || !Visited.insert(BB).second)
5697 break;
5698
5699 Begin = BB->getFirstNonPHI()->getIterator();
5700 End = BB->end();
5701 }
5702 return false;
5703 }
5704
5705 bool llvm::programUndefinedIfUndefOrPoison(const Instruction *Inst) {
5706 return ::programUndefinedIfUndefOrPoison(Inst, false);
5707 }
5708
5709 bool llvm::programUndefinedIfPoison(const Instruction *Inst) {
5710 return ::programUndefinedIfUndefOrPoison(Inst, true);
5711 }
5712
5713 static bool isKnownNonNaN(const Value *V, FastMathFlags FMF) {
5714 if (FMF.noNaNs())
5715 return true;
5716
5717 if (auto *C = dyn_cast<ConstantFP>(V))
5718 return !C->isNaN();
5719
5720 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5721 if (!C->getElementType()->isFloatingPointTy())
5722 return false;
5723 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5724 if (C->getElementAsAPFloat(I).isNaN())
5725 return false;
5726 }
5727 return true;
5728 }
5729
5730 if (isa<ConstantAggregateZero>(V))
5731 return true;
5732
5733 return false;
5734 }
5735
5736 static bool isKnownNonZero(const Value *V) {
5737 if (auto *C = dyn_cast<ConstantFP>(V))
5738 return !C->isZero();
5739
5740 if (auto *C = dyn_cast<ConstantDataVector>(V)) {
5741 if (!C->getElementType()->isFloatingPointTy())
5742 return false;
5743 for (unsigned I = 0, E = C->getNumElements(); I < E; ++I) {
5744 if (C->getElementAsAPFloat(I).isZero())
5745 return false;
5746 }
5747 return true;
5748 }
5749
5750 return false;
5751 }
5752
5753 /// Match a clamp pattern for float types, without regard for NaNs or signed
5754 /// zeros. Given a non-min/max outer cmp/select from the clamp pattern, this
5755 /// function recognizes if it can be substituted by a "canonical" min/max
5756 /// pattern.
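/// For example (a sketch, ignoring NaN and signed-zero concerns, which is
/// what makes this "fast"): with a float %x,
///   %x < 1.0 ? 1.0 : fmin(%x, 2.0)  -->  fmax(1.0, fmin(%x, 2.0))
/// i.e. the outer select is recognized as the max half of a clamp.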
5757 static SelectPatternResult matchFastFloatClamp(CmpInst::Predicate Pred, 5758 Value *CmpLHS, Value *CmpRHS, 5759 Value *TrueVal, Value *FalseVal, 5760 Value *&LHS, Value *&RHS) { 5761 // Try to match 5762 // X < C1 ? C1 : Min(X, C2) --> Max(C1, Min(X, C2)) 5763 // X > C1 ? C1 : Max(X, C2) --> Min(C1, Max(X, C2)) 5764 // and return description of the outer Max/Min. 5765 5766 // First, check if select has inverse order: 5767 if (CmpRHS == FalseVal) { 5768 std::swap(TrueVal, FalseVal); 5769 Pred = CmpInst::getInversePredicate(Pred); 5770 } 5771 5772 // Assume success now. If there's no match, callers should not use these anyway. 5773 LHS = TrueVal; 5774 RHS = FalseVal; 5775 5776 const APFloat *FC1; 5777 if (CmpRHS != TrueVal || !match(CmpRHS, m_APFloat(FC1)) || !FC1->isFinite()) 5778 return {SPF_UNKNOWN, SPNB_NA, false}; 5779 5780 const APFloat *FC2; 5781 switch (Pred) { 5782 case CmpInst::FCMP_OLT: 5783 case CmpInst::FCMP_OLE: 5784 case CmpInst::FCMP_ULT: 5785 case CmpInst::FCMP_ULE: 5786 if (match(FalseVal, 5787 m_CombineOr(m_OrdFMin(m_Specific(CmpLHS), m_APFloat(FC2)), 5788 m_UnordFMin(m_Specific(CmpLHS), m_APFloat(FC2)))) && 5789 *FC1 < *FC2) 5790 return {SPF_FMAXNUM, SPNB_RETURNS_ANY, false}; 5791 break; 5792 case CmpInst::FCMP_OGT: 5793 case CmpInst::FCMP_OGE: 5794 case CmpInst::FCMP_UGT: 5795 case CmpInst::FCMP_UGE: 5796 if (match(FalseVal, 5797 m_CombineOr(m_OrdFMax(m_Specific(CmpLHS), m_APFloat(FC2)), 5798 m_UnordFMax(m_Specific(CmpLHS), m_APFloat(FC2)))) && 5799 *FC1 > *FC2) 5800 return {SPF_FMINNUM, SPNB_RETURNS_ANY, false}; 5801 break; 5802 default: 5803 break; 5804 } 5805 5806 return {SPF_UNKNOWN, SPNB_NA, false}; 5807 } 5808 5809 /// Recognize variations of: 5810 /// CLAMP(v,l,h) ==> ((v) < (l) ? (l) : ((v) > (h) ? (h) : (v))) 5811 static SelectPatternResult matchClamp(CmpInst::Predicate Pred, 5812 Value *CmpLHS, Value *CmpRHS, 5813 Value *TrueVal, Value *FalseVal) { 5814 // Swap the select operands and predicate to match the patterns below. 5815 if (CmpRHS != TrueVal) { 5816 Pred = ICmpInst::getSwappedPredicate(Pred); 5817 std::swap(TrueVal, FalseVal); 5818 } 5819 const APInt *C1; 5820 if (CmpRHS == TrueVal && match(CmpRHS, m_APInt(C1))) { 5821 const APInt *C2; 5822 // (X <s C1) ? C1 : SMIN(X, C2) ==> SMAX(SMIN(X, C2), C1) 5823 if (match(FalseVal, m_SMin(m_Specific(CmpLHS), m_APInt(C2))) && 5824 C1->slt(*C2) && Pred == CmpInst::ICMP_SLT) 5825 return {SPF_SMAX, SPNB_NA, false}; 5826 5827 // (X >s C1) ? C1 : SMAX(X, C2) ==> SMIN(SMAX(X, C2), C1) 5828 if (match(FalseVal, m_SMax(m_Specific(CmpLHS), m_APInt(C2))) && 5829 C1->sgt(*C2) && Pred == CmpInst::ICMP_SGT) 5830 return {SPF_SMIN, SPNB_NA, false}; 5831 5832 // (X <u C1) ? C1 : UMIN(X, C2) ==> UMAX(UMIN(X, C2), C1) 5833 if (match(FalseVal, m_UMin(m_Specific(CmpLHS), m_APInt(C2))) && 5834 C1->ult(*C2) && Pred == CmpInst::ICMP_ULT) 5835 return {SPF_UMAX, SPNB_NA, false}; 5836 5837 // (X >u C1) ? C1 : UMAX(X, C2) ==> UMIN(UMAX(X, C2), C1) 5838 if (match(FalseVal, m_UMax(m_Specific(CmpLHS), m_APInt(C2))) && 5839 C1->ugt(*C2) && Pred == CmpInst::ICMP_UGT) 5840 return {SPF_UMIN, SPNB_NA, false}; 5841 } 5842 return {SPF_UNKNOWN, SPNB_NA, false}; 5843 } 5844 5845 /// Recognize variations of: 5846 /// a < c ? min(a,b) : min(b,c) ==> min(min(a,b),min(b,c)) 5847 static SelectPatternResult matchMinMaxOfMinMax(CmpInst::Predicate Pred, 5848 Value *CmpLHS, Value *CmpRHS, 5849 Value *TVal, Value *FVal, 5850 unsigned Depth) { 5851 // TODO: Allow FP min/max with nnan/nsz. 
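// A concrete instance of the pattern in the doc comment, with b as the
// common operand:
//   (a <s c) ? smin(a, b) : smin(c, b) --> smin(smin(a, b), smin(c, b))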
5852 assert(CmpInst::isIntPredicate(Pred) && "Expected integer comparison"); 5853 5854 Value *A = nullptr, *B = nullptr; 5855 SelectPatternResult L = matchSelectPattern(TVal, A, B, nullptr, Depth + 1); 5856 if (!SelectPatternResult::isMinOrMax(L.Flavor)) 5857 return {SPF_UNKNOWN, SPNB_NA, false}; 5858 5859 Value *C = nullptr, *D = nullptr; 5860 SelectPatternResult R = matchSelectPattern(FVal, C, D, nullptr, Depth + 1); 5861 if (L.Flavor != R.Flavor) 5862 return {SPF_UNKNOWN, SPNB_NA, false}; 5863 5864 // We have something like: x Pred y ? min(a, b) : min(c, d). 5865 // Try to match the compare to the min/max operations of the select operands. 5866 // First, make sure we have the right compare predicate. 5867 switch (L.Flavor) { 5868 case SPF_SMIN: 5869 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) { 5870 Pred = ICmpInst::getSwappedPredicate(Pred); 5871 std::swap(CmpLHS, CmpRHS); 5872 } 5873 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) 5874 break; 5875 return {SPF_UNKNOWN, SPNB_NA, false}; 5876 case SPF_SMAX: 5877 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SLE) { 5878 Pred = ICmpInst::getSwappedPredicate(Pred); 5879 std::swap(CmpLHS, CmpRHS); 5880 } 5881 if (Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE) 5882 break; 5883 return {SPF_UNKNOWN, SPNB_NA, false}; 5884 case SPF_UMIN: 5885 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) { 5886 Pred = ICmpInst::getSwappedPredicate(Pred); 5887 std::swap(CmpLHS, CmpRHS); 5888 } 5889 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) 5890 break; 5891 return {SPF_UNKNOWN, SPNB_NA, false}; 5892 case SPF_UMAX: 5893 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_ULE) { 5894 Pred = ICmpInst::getSwappedPredicate(Pred); 5895 std::swap(CmpLHS, CmpRHS); 5896 } 5897 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) 5898 break; 5899 return {SPF_UNKNOWN, SPNB_NA, false}; 5900 default: 5901 return {SPF_UNKNOWN, SPNB_NA, false}; 5902 } 5903 5904 // If there is a common operand in the already matched min/max and the other 5905 // min/max operands match the compare operands (either directly or inverted), 5906 // then this is min/max of the same flavor. 5907 5908 // a pred c ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 5909 // ~c pred ~a ? m(a, b) : m(c, b) --> m(m(a, b), m(c, b)) 5910 if (D == B) { 5911 if ((CmpLHS == A && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 5912 match(A, m_Not(m_Specific(CmpRHS))))) 5913 return {L.Flavor, SPNB_NA, false}; 5914 } 5915 // a pred d ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 5916 // ~d pred ~a ? m(a, b) : m(b, d) --> m(m(a, b), m(b, d)) 5917 if (C == B) { 5918 if ((CmpLHS == A && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 5919 match(A, m_Not(m_Specific(CmpRHS))))) 5920 return {L.Flavor, SPNB_NA, false}; 5921 } 5922 // b pred c ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 5923 // ~c pred ~b ? m(a, b) : m(c, a) --> m(m(a, b), m(c, a)) 5924 if (D == A) { 5925 if ((CmpLHS == B && CmpRHS == C) || (match(C, m_Not(m_Specific(CmpLHS))) && 5926 match(B, m_Not(m_Specific(CmpRHS))))) 5927 return {L.Flavor, SPNB_NA, false}; 5928 } 5929 // b pred d ? m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 5930 // ~d pred ~b ? 
m(a, b) : m(a, d) --> m(m(a, b), m(a, d)) 5931 if (C == A) { 5932 if ((CmpLHS == B && CmpRHS == D) || (match(D, m_Not(m_Specific(CmpLHS))) && 5933 match(B, m_Not(m_Specific(CmpRHS))))) 5934 return {L.Flavor, SPNB_NA, false}; 5935 } 5936 5937 return {SPF_UNKNOWN, SPNB_NA, false}; 5938 } 5939 5940 /// If the input value is the result of a 'not' op, constant integer, or vector 5941 /// splat of a constant integer, return the bitwise-not source value. 5942 /// TODO: This could be extended to handle non-splat vector integer constants. 5943 static Value *getNotValue(Value *V) { 5944 Value *NotV; 5945 if (match(V, m_Not(m_Value(NotV)))) 5946 return NotV; 5947 5948 const APInt *C; 5949 if (match(V, m_APInt(C))) 5950 return ConstantInt::get(V->getType(), ~(*C)); 5951 5952 return nullptr; 5953 } 5954 5955 /// Match non-obvious integer minimum and maximum sequences. 5956 static SelectPatternResult matchMinMax(CmpInst::Predicate Pred, 5957 Value *CmpLHS, Value *CmpRHS, 5958 Value *TrueVal, Value *FalseVal, 5959 Value *&LHS, Value *&RHS, 5960 unsigned Depth) { 5961 // Assume success. If there's no match, callers should not use these anyway. 5962 LHS = TrueVal; 5963 RHS = FalseVal; 5964 5965 SelectPatternResult SPR = matchClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal); 5966 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 5967 return SPR; 5968 5969 SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth); 5970 if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN) 5971 return SPR; 5972 5973 // Look through 'not' ops to find disguised min/max. 5974 // (X > Y) ? ~X : ~Y ==> (~X < ~Y) ? ~X : ~Y ==> MIN(~X, ~Y) 5975 // (X < Y) ? ~X : ~Y ==> (~X > ~Y) ? ~X : ~Y ==> MAX(~X, ~Y) 5976 if (CmpLHS == getNotValue(TrueVal) && CmpRHS == getNotValue(FalseVal)) { 5977 switch (Pred) { 5978 case CmpInst::ICMP_SGT: return {SPF_SMIN, SPNB_NA, false}; 5979 case CmpInst::ICMP_SLT: return {SPF_SMAX, SPNB_NA, false}; 5980 case CmpInst::ICMP_UGT: return {SPF_UMIN, SPNB_NA, false}; 5981 case CmpInst::ICMP_ULT: return {SPF_UMAX, SPNB_NA, false}; 5982 default: break; 5983 } 5984 } 5985 5986 // (X > Y) ? ~Y : ~X ==> (~X < ~Y) ? ~Y : ~X ==> MAX(~Y, ~X) 5987 // (X < Y) ? ~Y : ~X ==> (~X > ~Y) ? ~Y : ~X ==> MIN(~Y, ~X) 5988 if (CmpLHS == getNotValue(FalseVal) && CmpRHS == getNotValue(TrueVal)) { 5989 switch (Pred) { 5990 case CmpInst::ICMP_SGT: return {SPF_SMAX, SPNB_NA, false}; 5991 case CmpInst::ICMP_SLT: return {SPF_SMIN, SPNB_NA, false}; 5992 case CmpInst::ICMP_UGT: return {SPF_UMAX, SPNB_NA, false}; 5993 case CmpInst::ICMP_ULT: return {SPF_UMIN, SPNB_NA, false}; 5994 default: break; 5995 } 5996 } 5997 5998 if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT) 5999 return {SPF_UNKNOWN, SPNB_NA, false}; 6000 6001 const APInt *C1; 6002 if (!match(CmpRHS, m_APInt(C1))) 6003 return {SPF_UNKNOWN, SPNB_NA, false}; 6004 6005 // An unsigned min/max can be written with a signed compare. 6006 const APInt *C2; 6007 if ((CmpLHS == TrueVal && match(FalseVal, m_APInt(C2))) || 6008 (CmpLHS == FalseVal && match(TrueVal, m_APInt(C2)))) { 6009 // Is the sign bit set? 6010 // (X <s 0) ? X : MAXVAL ==> (X >u MAXVAL) ? X : MAXVAL ==> UMAX 6011 // (X <s 0) ? MAXVAL : X ==> (X >u MAXVAL) ? MAXVAL : X ==> UMIN 6012 if (Pred == CmpInst::ICMP_SLT && C1->isZero() && C2->isMaxSignedValue()) 6013 return {CmpLHS == TrueVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false}; 6014 6015 // Is the sign bit clear? 6016 // (X >s -1) ? MINVAL : X ==> (X <u MINVAL) ? MINVAL : X ==> UMAX 6017 // (X >s -1) ? X : MINVAL ==> (X <u MINVAL) ? 
X : MINVAL ==> UMIN
6018 if (Pred == CmpInst::ICMP_SGT && C1->isAllOnes() && C2->isMinSignedValue())
6019 return {CmpLHS == FalseVal ? SPF_UMAX : SPF_UMIN, SPNB_NA, false};
6020 }
6021
6022 return {SPF_UNKNOWN, SPNB_NA, false};
6023 }
6024
6025 bool llvm::isKnownNegation(const Value *X, const Value *Y, bool NeedNSW) {
6026 assert(X && Y && "Invalid operand");
6027
6028 // X = sub (0, Y) || X = sub nsw (0, Y)
6029 if ((!NeedNSW && match(X, m_Sub(m_ZeroInt(), m_Specific(Y)))) ||
6030 (NeedNSW && match(X, m_NSWSub(m_ZeroInt(), m_Specific(Y)))))
6031 return true;
6032
6033 // Y = sub (0, X) || Y = sub nsw (0, X)
6034 if ((!NeedNSW && match(Y, m_Sub(m_ZeroInt(), m_Specific(X)))) ||
6035 (NeedNSW && match(Y, m_NSWSub(m_ZeroInt(), m_Specific(X)))))
6036 return true;
6037
6038 // X = sub (A, B), Y = sub (B, A) || X = sub nsw (A, B), Y = sub nsw (B, A)
6039 Value *A, *B;
6040 return (!NeedNSW && (match(X, m_Sub(m_Value(A), m_Value(B))) &&
6041 match(Y, m_Sub(m_Specific(B), m_Specific(A))))) ||
6042 (NeedNSW && (match(X, m_NSWSub(m_Value(A), m_Value(B))) &&
6043 match(Y, m_NSWSub(m_Specific(B), m_Specific(A)))));
6044 }
6045
6046 static SelectPatternResult matchSelectPattern(CmpInst::Predicate Pred,
6047 FastMathFlags FMF,
6048 Value *CmpLHS, Value *CmpRHS,
6049 Value *TrueVal, Value *FalseVal,
6050 Value *&LHS, Value *&RHS,
6051 unsigned Depth) {
6052 if (CmpInst::isFPPredicate(Pred)) {
6053 // IEEE-754 ignores the sign of 0.0 in comparisons. So if the select has one
6054 // 0.0 operand, set the compare's 0.0 operands to that same value for the
6055 // purpose of identifying min/max. Disregard vector constants with undefined
6056 // elements because those cannot be back-propagated for analysis.
6057 Value *OutputZeroVal = nullptr;
6058 if (match(TrueVal, m_AnyZeroFP()) && !match(FalseVal, m_AnyZeroFP()) &&
6059 !cast<Constant>(TrueVal)->containsUndefOrPoisonElement())
6060 OutputZeroVal = TrueVal;
6061 else if (match(FalseVal, m_AnyZeroFP()) && !match(TrueVal, m_AnyZeroFP()) &&
6062 !cast<Constant>(FalseVal)->containsUndefOrPoisonElement())
6063 OutputZeroVal = FalseVal;
6064
6065 if (OutputZeroVal) {
6066 if (match(CmpLHS, m_AnyZeroFP()))
6067 CmpLHS = OutputZeroVal;
6068 if (match(CmpRHS, m_AnyZeroFP()))
6069 CmpRHS = OutputZeroVal;
6070 }
6071 }
6072
6073 LHS = CmpLHS;
6074 RHS = CmpRHS;
6075
6076 // Operations on signed zero may return inconsistent results between implementations.
6077 // (0.0 <= -0.0) ? 0.0 : -0.0 // Returns 0.0
6078 // minNum(0.0, -0.0) // May return -0.0 or 0.0 (IEEE 754-2008 5.3.1)
6079 // Therefore, we behave conservatively and only proceed if at least one of the
6080 // operands is known to not be zero or if we don't care about signed zero.
6081 switch (Pred) {
6082 default: break;
6083 // FIXME: Include OGT/OLT/UGT/ULT.
6084 case CmpInst::FCMP_OGE: case CmpInst::FCMP_OLE:
6085 case CmpInst::FCMP_UGE: case CmpInst::FCMP_ULE:
6086 if (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6087 !isKnownNonZero(CmpRHS))
6088 return {SPF_UNKNOWN, SPNB_NA, false};
6089 }
6090
6091 SelectPatternNaNBehavior NaNBehavior = SPNB_NA;
6092 bool Ordered = false;
6093
6094 // When given one NaN and one non-NaN input:
6095 // - maxnum/minnum (C99 fmaxf()/fminf()) return the non-NaN input.
6096 // - A simple C99 (a < b ? a : b) construction will return 'b' (as the
6097 // ordered comparison fails), which could be NaN or non-NaN.
6098 // So here we discover exactly what NaN behavior is required/accepted.
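// For example, for 'a < b ? a : b' (FCMP_OLT): if b is NaN, the ordered
// compare is false and b (the NaN) is returned; so if only the LHS is known
// non-NaN, the pattern behaves as SPNB_RETURNS_NAN.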
6099 if (CmpInst::isFPPredicate(Pred)) { 6100 bool LHSSafe = isKnownNonNaN(CmpLHS, FMF); 6101 bool RHSSafe = isKnownNonNaN(CmpRHS, FMF); 6102 6103 if (LHSSafe && RHSSafe) { 6104 // Both operands are known non-NaN. 6105 NaNBehavior = SPNB_RETURNS_ANY; 6106 } else if (CmpInst::isOrdered(Pred)) { 6107 // An ordered comparison will return false when given a NaN, so it 6108 // returns the RHS. 6109 Ordered = true; 6110 if (LHSSafe) 6111 // LHS is non-NaN, so if RHS is NaN then NaN will be returned. 6112 NaNBehavior = SPNB_RETURNS_NAN; 6113 else if (RHSSafe) 6114 NaNBehavior = SPNB_RETURNS_OTHER; 6115 else 6116 // Completely unsafe. 6117 return {SPF_UNKNOWN, SPNB_NA, false}; 6118 } else { 6119 Ordered = false; 6120 // An unordered comparison will return true when given a NaN, so it 6121 // returns the LHS. 6122 if (LHSSafe) 6123 // LHS is non-NaN, so if RHS is NaN then non-NaN will be returned. 6124 NaNBehavior = SPNB_RETURNS_OTHER; 6125 else if (RHSSafe) 6126 NaNBehavior = SPNB_RETURNS_NAN; 6127 else 6128 // Completely unsafe. 6129 return {SPF_UNKNOWN, SPNB_NA, false}; 6130 } 6131 } 6132 6133 if (TrueVal == CmpRHS && FalseVal == CmpLHS) { 6134 std::swap(CmpLHS, CmpRHS); 6135 Pred = CmpInst::getSwappedPredicate(Pred); 6136 if (NaNBehavior == SPNB_RETURNS_NAN) 6137 NaNBehavior = SPNB_RETURNS_OTHER; 6138 else if (NaNBehavior == SPNB_RETURNS_OTHER) 6139 NaNBehavior = SPNB_RETURNS_NAN; 6140 Ordered = !Ordered; 6141 } 6142 6143 // ([if]cmp X, Y) ? X : Y 6144 if (TrueVal == CmpLHS && FalseVal == CmpRHS) { 6145 switch (Pred) { 6146 default: return {SPF_UNKNOWN, SPNB_NA, false}; // Equality. 6147 case ICmpInst::ICMP_UGT: 6148 case ICmpInst::ICMP_UGE: return {SPF_UMAX, SPNB_NA, false}; 6149 case ICmpInst::ICMP_SGT: 6150 case ICmpInst::ICMP_SGE: return {SPF_SMAX, SPNB_NA, false}; 6151 case ICmpInst::ICMP_ULT: 6152 case ICmpInst::ICMP_ULE: return {SPF_UMIN, SPNB_NA, false}; 6153 case ICmpInst::ICMP_SLT: 6154 case ICmpInst::ICMP_SLE: return {SPF_SMIN, SPNB_NA, false}; 6155 case FCmpInst::FCMP_UGT: 6156 case FCmpInst::FCMP_UGE: 6157 case FCmpInst::FCMP_OGT: 6158 case FCmpInst::FCMP_OGE: return {SPF_FMAXNUM, NaNBehavior, Ordered}; 6159 case FCmpInst::FCMP_ULT: 6160 case FCmpInst::FCMP_ULE: 6161 case FCmpInst::FCMP_OLT: 6162 case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered}; 6163 } 6164 } 6165 6166 if (isKnownNegation(TrueVal, FalseVal)) { 6167 // Sign-extending LHS does not change its sign, so TrueVal/FalseVal can 6168 // match against either LHS or sext(LHS). 6169 auto MaybeSExtCmpLHS = 6170 m_CombineOr(m_Specific(CmpLHS), m_SExt(m_Specific(CmpLHS))); 6171 auto ZeroOrAllOnes = m_CombineOr(m_ZeroInt(), m_AllOnes()); 6172 auto ZeroOrOne = m_CombineOr(m_ZeroInt(), m_One()); 6173 if (match(TrueVal, MaybeSExtCmpLHS)) { 6174 // Set the return values. If the compare uses the negated value (-X >s 0), 6175 // swap the return values because the negated value is always 'RHS'. 6176 LHS = TrueVal; 6177 RHS = FalseVal; 6178 if (match(CmpLHS, m_Neg(m_Specific(FalseVal)))) 6179 std::swap(LHS, RHS); 6180 6181 // (X >s 0) ? X : -X or (X >s -1) ? X : -X --> ABS(X) 6182 // (-X >s 0) ? -X : X or (-X >s -1) ? -X : X --> ABS(X) 6183 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes)) 6184 return {SPF_ABS, SPNB_NA, false}; 6185 6186 // (X >=s 0) ? X : -X or (X >=s 1) ? X : -X --> ABS(X) 6187 if (Pred == ICmpInst::ICMP_SGE && match(CmpRHS, ZeroOrOne)) 6188 return {SPF_ABS, SPNB_NA, false}; 6189 6190 // (X <s 0) ? X : -X or (X <s 1) ? X : -X --> NABS(X) 6191 // (-X <s 0) ? -X : X or (-X <s 1) ? 
-X : X --> NABS(X)
6192 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6193 return {SPF_NABS, SPNB_NA, false};
6194 }
6195 else if (match(FalseVal, MaybeSExtCmpLHS)) {
6196 // Set the return values. If the compare uses the negated value (-X >s 0),
6197 // swap the return values because the negated value is always 'RHS'.
6198 LHS = FalseVal;
6199 RHS = TrueVal;
6200 if (match(CmpLHS, m_Neg(m_Specific(TrueVal))))
6201 std::swap(LHS, RHS);
6202
6203 // (X >s 0) ? -X : X or (X >s -1) ? -X : X --> NABS(X)
6204 // (-X >s 0) ? X : -X or (-X >s -1) ? X : -X --> NABS(X)
6205 if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, ZeroOrAllOnes))
6206 return {SPF_NABS, SPNB_NA, false};
6207
6208 // (X <s 0) ? -X : X or (X <s 1) ? -X : X --> ABS(X)
6209 // (-X <s 0) ? X : -X or (-X <s 1) ? X : -X --> ABS(X)
6210 if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, ZeroOrOne))
6211 return {SPF_ABS, SPNB_NA, false};
6212 }
6213 }
6214
6215 if (CmpInst::isIntPredicate(Pred))
6216 return matchMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS, Depth);
6217
6218 // According to (IEEE 754-2008 5.3.1), minNum(0.0, -0.0) and similar
6219 // may return either -0.0 or 0.0, so an fcmp/select pair has stricter
6220 // semantics than minNum. Be conservative in such a case.
6221 if (NaNBehavior != SPNB_RETURNS_ANY ||
6222 (!FMF.noSignedZeros() && !isKnownNonZero(CmpLHS) &&
6223 !isKnownNonZero(CmpRHS)))
6224 return {SPF_UNKNOWN, SPNB_NA, false};
6225
6226 return matchFastFloatClamp(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, LHS, RHS);
6227 }
6228
6229 /// Helps to match a select pattern in case of a type mismatch.
6230 ///
6231 /// The function handles the case when the types of the true and false values
6232 /// of a select instruction differ from the types of the cmp instruction's
6233 /// operands because of a cast instruction. It checks whether it is legal to
6234 /// move the cast operation after the "select". If so, it returns the new
6235 /// second value of the "select" (with the assumption that the cast is moved):
6236 /// 1. As the operand of the cast instruction when both values of the "select"
6237 /// are the same cast instruction.
6238 /// 2. As a restored constant (by applying the reverse cast operation) when the
6239 /// first value of the "select" is a cast operation and the second value is a
6240 /// constant.
6241 /// NOTE: We return only the new second value because the first value can be
6242 /// accessed as the operand of the cast instruction.
6243 static Value *lookThroughCast(CmpInst *CmpI, Value *V1, Value *V2,
6244 Instruction::CastOps *CastOp) {
6245 auto *Cast1 = dyn_cast<CastInst>(V1);
6246 if (!Cast1)
6247 return nullptr;
6248
6249 *CastOp = Cast1->getOpcode();
6250 Type *SrcTy = Cast1->getSrcTy();
6251 if (auto *Cast2 = dyn_cast<CastInst>(V2)) {
6252 // If V1 and V2 are both the same cast from the same type, look through V1.
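// For example, if both select values are 'zext i8 ... to i32', the select
// pattern can then be matched on the narrow i8 operands instead.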
6253 if (*CastOp == Cast2->getOpcode() && SrcTy == Cast2->getSrcTy())
6254 return Cast2->getOperand(0);
6255 return nullptr;
6256 }
6257
6258 auto *C = dyn_cast<Constant>(V2);
6259 if (!C)
6260 return nullptr;
6261
6262 Constant *CastedTo = nullptr;
6263 switch (*CastOp) {
6264 case Instruction::ZExt:
6265 if (CmpI->isUnsigned())
6266 CastedTo = ConstantExpr::getTrunc(C, SrcTy);
6267 break;
6268 case Instruction::SExt:
6269 if (CmpI->isSigned())
6270 CastedTo = ConstantExpr::getTrunc(C, SrcTy, true);
6271 break;
6272 case Instruction::Trunc:
6273 Constant *CmpConst;
6274 if (match(CmpI->getOperand(1), m_Constant(CmpConst)) &&
6275 CmpConst->getType() == SrcTy) {
6276 // Here we have the following case:
6277 //
6278 // %cond = cmp iN %x, CmpConst
6279 // %tr = trunc iN %x to iK
6280 // %narrowsel = select i1 %cond, iK %tr, iK C
6281 //
6282 // We can always move the trunc after the select operation:
6283 //
6284 // %cond = cmp iN %x, CmpConst
6285 // %widesel = select i1 %cond, iN %x, iN CmpConst
6286 // %tr = trunc iN %widesel to iK
6287 //
6288 // Note that C can be extended in any way because we don't care about
6289 // the upper bits after truncation. It can't be an abs pattern, because
6290 // that would look like:
6291 //
6292 // select i1 %cond, x, -x.
6293 //
6294 // So only a min/max pattern can be matched. Such a match requires the
6295 // widened C to equal CmpConst. That is why we set the widened C to
6296 // CmpConst; the condition "trunc CmpConst == C" is checked below.
6297 CastedTo = CmpConst;
6298 } else {
6299 CastedTo = ConstantExpr::getIntegerCast(C, SrcTy, CmpI->isSigned());
6300 }
6301 break;
6302 case Instruction::FPTrunc:
6303 CastedTo = ConstantExpr::getFPExtend(C, SrcTy, true);
6304 break;
6305 case Instruction::FPExt:
6306 CastedTo = ConstantExpr::getFPTrunc(C, SrcTy, true);
6307 break;
6308 case Instruction::FPToUI:
6309 CastedTo = ConstantExpr::getUIToFP(C, SrcTy, true);
6310 break;
6311 case Instruction::FPToSI:
6312 CastedTo = ConstantExpr::getSIToFP(C, SrcTy, true);
6313 break;
6314 case Instruction::UIToFP:
6315 CastedTo = ConstantExpr::getFPToUI(C, SrcTy, true);
6316 break;
6317 case Instruction::SIToFP:
6318 CastedTo = ConstantExpr::getFPToSI(C, SrcTy, true);
6319 break;
6320 default:
6321 break;
6322 }
6323
6324 if (!CastedTo)
6325 return nullptr;
6326
6327 // Make sure the cast doesn't lose any information.
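// For example (illustrative, assuming *CastOp == ZExt with an i16 -> i8
// reverse trunc): for C = i16 384, the trunc yields i8 128, but zext'ing
// 128 back gives i16 128 != 384, so the round-trip check below rejects it.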
6328 Constant *CastedBack = 6329 ConstantExpr::getCast(*CastOp, CastedTo, C->getType(), true); 6330 if (CastedBack != C) 6331 return nullptr; 6332 6333 return CastedTo; 6334 } 6335 6336 SelectPatternResult llvm::matchSelectPattern(Value *V, Value *&LHS, Value *&RHS, 6337 Instruction::CastOps *CastOp, 6338 unsigned Depth) { 6339 if (Depth >= MaxAnalysisRecursionDepth) 6340 return {SPF_UNKNOWN, SPNB_NA, false}; 6341 6342 SelectInst *SI = dyn_cast<SelectInst>(V); 6343 if (!SI) return {SPF_UNKNOWN, SPNB_NA, false}; 6344 6345 CmpInst *CmpI = dyn_cast<CmpInst>(SI->getCondition()); 6346 if (!CmpI) return {SPF_UNKNOWN, SPNB_NA, false}; 6347 6348 Value *TrueVal = SI->getTrueValue(); 6349 Value *FalseVal = SI->getFalseValue(); 6350 6351 return llvm::matchDecomposedSelectPattern(CmpI, TrueVal, FalseVal, LHS, RHS, 6352 CastOp, Depth); 6353 } 6354 6355 SelectPatternResult llvm::matchDecomposedSelectPattern( 6356 CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, 6357 Instruction::CastOps *CastOp, unsigned Depth) { 6358 CmpInst::Predicate Pred = CmpI->getPredicate(); 6359 Value *CmpLHS = CmpI->getOperand(0); 6360 Value *CmpRHS = CmpI->getOperand(1); 6361 FastMathFlags FMF; 6362 if (isa<FPMathOperator>(CmpI)) 6363 FMF = CmpI->getFastMathFlags(); 6364 6365 // Bail out early. 6366 if (CmpI->isEquality()) 6367 return {SPF_UNKNOWN, SPNB_NA, false}; 6368 6369 // Deal with type mismatches. 6370 if (CastOp && CmpLHS->getType() != TrueVal->getType()) { 6371 if (Value *C = lookThroughCast(CmpI, TrueVal, FalseVal, CastOp)) { 6372 // If this is a potential fmin/fmax with a cast to integer, then ignore 6373 // -0.0 because there is no corresponding integer value. 6374 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) 6375 FMF.setNoSignedZeros(); 6376 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 6377 cast<CastInst>(TrueVal)->getOperand(0), C, 6378 LHS, RHS, Depth); 6379 } 6380 if (Value *C = lookThroughCast(CmpI, FalseVal, TrueVal, CastOp)) { 6381 // If this is a potential fmin/fmax with a cast to integer, then ignore 6382 // -0.0 because there is no corresponding integer value. 6383 if (*CastOp == Instruction::FPToSI || *CastOp == Instruction::FPToUI) 6384 FMF.setNoSignedZeros(); 6385 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, 6386 C, cast<CastInst>(FalseVal)->getOperand(0), 6387 LHS, RHS, Depth); 6388 } 6389 } 6390 return ::matchSelectPattern(Pred, FMF, CmpLHS, CmpRHS, TrueVal, FalseVal, 6391 LHS, RHS, Depth); 6392 } 6393 6394 CmpInst::Predicate llvm::getMinMaxPred(SelectPatternFlavor SPF, bool Ordered) { 6395 if (SPF == SPF_SMIN) return ICmpInst::ICMP_SLT; 6396 if (SPF == SPF_UMIN) return ICmpInst::ICMP_ULT; 6397 if (SPF == SPF_SMAX) return ICmpInst::ICMP_SGT; 6398 if (SPF == SPF_UMAX) return ICmpInst::ICMP_UGT; 6399 if (SPF == SPF_FMINNUM) 6400 return Ordered ? FCmpInst::FCMP_OLT : FCmpInst::FCMP_ULT; 6401 if (SPF == SPF_FMAXNUM) 6402 return Ordered ? 
FCmpInst::FCMP_OGT : FCmpInst::FCMP_UGT; 6403 llvm_unreachable("unhandled!"); 6404 } 6405 6406 SelectPatternFlavor llvm::getInverseMinMaxFlavor(SelectPatternFlavor SPF) { 6407 if (SPF == SPF_SMIN) return SPF_SMAX; 6408 if (SPF == SPF_UMIN) return SPF_UMAX; 6409 if (SPF == SPF_SMAX) return SPF_SMIN; 6410 if (SPF == SPF_UMAX) return SPF_UMIN; 6411 llvm_unreachable("unhandled!"); 6412 } 6413 6414 Intrinsic::ID llvm::getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID) { 6415 switch (MinMaxID) { 6416 case Intrinsic::smax: return Intrinsic::smin; 6417 case Intrinsic::smin: return Intrinsic::smax; 6418 case Intrinsic::umax: return Intrinsic::umin; 6419 case Intrinsic::umin: return Intrinsic::umax; 6420 default: llvm_unreachable("Unexpected intrinsic"); 6421 } 6422 } 6423 6424 CmpInst::Predicate llvm::getInverseMinMaxPred(SelectPatternFlavor SPF) { 6425 return getMinMaxPred(getInverseMinMaxFlavor(SPF)); 6426 } 6427 6428 APInt llvm::getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth) { 6429 switch (SPF) { 6430 case SPF_SMAX: return APInt::getSignedMaxValue(BitWidth); 6431 case SPF_SMIN: return APInt::getSignedMinValue(BitWidth); 6432 case SPF_UMAX: return APInt::getMaxValue(BitWidth); 6433 case SPF_UMIN: return APInt::getMinValue(BitWidth); 6434 default: llvm_unreachable("Unexpected flavor"); 6435 } 6436 } 6437 6438 std::pair<Intrinsic::ID, bool> 6439 llvm::canConvertToMinOrMaxIntrinsic(ArrayRef<Value *> VL) { 6440 // Check if VL contains select instructions that can be folded into a min/max 6441 // vector intrinsic and return the intrinsic if it is possible. 6442 // TODO: Support floating point min/max. 6443 bool AllCmpSingleUse = true; 6444 SelectPatternResult SelectPattern; 6445 SelectPattern.Flavor = SPF_UNKNOWN; 6446 if (all_of(VL, [&SelectPattern, &AllCmpSingleUse](Value *I) { 6447 Value *LHS, *RHS; 6448 auto CurrentPattern = matchSelectPattern(I, LHS, RHS); 6449 if (!SelectPatternResult::isMinOrMax(CurrentPattern.Flavor) || 6450 CurrentPattern.Flavor == SPF_FMINNUM || 6451 CurrentPattern.Flavor == SPF_FMAXNUM || 6452 !I->getType()->isIntOrIntVectorTy()) 6453 return false; 6454 if (SelectPattern.Flavor != SPF_UNKNOWN && 6455 SelectPattern.Flavor != CurrentPattern.Flavor) 6456 return false; 6457 SelectPattern = CurrentPattern; 6458 AllCmpSingleUse &= 6459 match(I, m_Select(m_OneUse(m_Value()), m_Value(), m_Value())); 6460 return true; 6461 })) { 6462 switch (SelectPattern.Flavor) { 6463 case SPF_SMIN: 6464 return {Intrinsic::smin, AllCmpSingleUse}; 6465 case SPF_UMIN: 6466 return {Intrinsic::umin, AllCmpSingleUse}; 6467 case SPF_SMAX: 6468 return {Intrinsic::smax, AllCmpSingleUse}; 6469 case SPF_UMAX: 6470 return {Intrinsic::umax, AllCmpSingleUse}; 6471 default: 6472 llvm_unreachable("unexpected select pattern flavor"); 6473 } 6474 } 6475 return {Intrinsic::not_intrinsic, false}; 6476 } 6477 6478 bool llvm::matchSimpleRecurrence(const PHINode *P, BinaryOperator *&BO, 6479 Value *&Start, Value *&Step) { 6480 // Handle the case of a simple two-predecessor recurrence PHI. 6481 // There's a lot more that could theoretically be done here, but 6482 // this is sufficient to catch some interesting cases. 
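// A sketch of the kind of IR this matches (names are illustrative):
//
//   loop:
//     %iv = phi i32 [ %start, %entry ], [ %iv.next, %loop ]
//     %iv.next = add i32 %iv, %step
//
// On success, BO is %iv.next, Start is %start, and Step is %step.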
6483 if (P->getNumIncomingValues() != 2)
6484 return false;
6485
6486 for (unsigned i = 0; i != 2; ++i) {
6487 Value *L = P->getIncomingValue(i);
6488 Value *R = P->getIncomingValue(!i);
6489 Operator *LU = dyn_cast<Operator>(L);
6490 if (!LU)
6491 continue;
6492 unsigned Opcode = LU->getOpcode();
6493
6494 switch (Opcode) {
6495 default:
6496 continue;
6497 // TODO: Expand list -- xor, div, gep, uaddo, etc.
6498 case Instruction::LShr:
6499 case Instruction::AShr:
6500 case Instruction::Shl:
6501 case Instruction::Add:
6502 case Instruction::Sub:
6503 case Instruction::And:
6504 case Instruction::Or:
6505 case Instruction::Mul: {
6506 Value *LL = LU->getOperand(0);
6507 Value *LR = LU->getOperand(1);
6508 // Find a recurrence.
6509 if (LL == P)
6510 L = LR;
6511 else if (LR == P)
6512 L = LL;
6513 else
6514 continue; // Check for recurrence with L and R flipped.
6515
6516 break; // Match!
6517 }
6518 }
6519
6520 // We have matched a recurrence of the form:
6521 // %iv = phi [R, %entry], [%iv.next, %backedge]
6522 // %iv.next = binop %iv, L
6523 // OR
6524 // %iv = phi [R, %entry], [%iv.next, %backedge]
6525 // %iv.next = binop L, %iv
6526 BO = cast<BinaryOperator>(LU);
6527 Start = R;
6528 Step = L;
6529 return true;
6530 }
6531 return false;
6532 }
6533
6534 bool llvm::matchSimpleRecurrence(const BinaryOperator *I, PHINode *&P,
6535 Value *&Start, Value *&Step) {
6536 BinaryOperator *BO = nullptr;
6537 P = dyn_cast<PHINode>(I->getOperand(0));
6538 if (!P)
6539 P = dyn_cast<PHINode>(I->getOperand(1));
6540 return P && matchSimpleRecurrence(P, BO, Start, Step) && BO == I;
6541 }
6542
6543 /// Return true if "icmp Pred LHS RHS" is always true.
6544 static bool isTruePredicate(CmpInst::Predicate Pred, const Value *LHS,
6545 const Value *RHS, const DataLayout &DL,
6546 unsigned Depth) {
6547 if (ICmpInst::isTrueWhenEqual(Pred) && LHS == RHS)
6548 return true;
6549
6550 switch (Pred) {
6551 default:
6552 return false;
6553
6554 case CmpInst::ICMP_SLE: {
6555 const APInt *C;
6556
6557 // LHS s<= LHS +_{nsw} C if C >= 0
6558 if (match(RHS, m_NSWAdd(m_Specific(LHS), m_APInt(C))))
6559 return !C->isNegative();
6560 return false;
6561 }
6562
6563 case CmpInst::ICMP_ULE: {
6564 const APInt *C;
6565
6566 // LHS u<= LHS +_{nuw} C for any C
6567 if (match(RHS, m_NUWAdd(m_Specific(LHS), m_APInt(C))))
6568 return true;
6569
6570 // Match A to (X +_{nuw} CA) and B to (X +_{nuw} CB)
6571 auto MatchNUWAddsToSameValue = [&](const Value *A, const Value *B,
6572 const Value *&X,
6573 const APInt *&CA, const APInt *&CB) {
6574 if (match(A, m_NUWAdd(m_Value(X), m_APInt(CA))) &&
6575 match(B, m_NUWAdd(m_Specific(X), m_APInt(CB))))
6576 return true;
6577
6578 // If X & C == 0 then (X | C) == X +_{nuw} C
6579 if (match(A, m_Or(m_Value(X), m_APInt(CA))) &&
6580 match(B, m_Or(m_Specific(X), m_APInt(CB)))) {
6581 KnownBits Known(CA->getBitWidth());
6582 computeKnownBits(X, Known, DL, Depth + 1, /*AC*/ nullptr,
6583 /*CxtI*/ nullptr, /*DT*/ nullptr);
6584 if (CA->isSubsetOf(Known.Zero) && CB->isSubsetOf(Known.Zero))
6585 return true;
6586 }
6587
6588 return false;
6589 };
6590
6591 const Value *X;
6592 const APInt *CLHS, *CRHS;
6593 if (MatchNUWAddsToSameValue(LHS, RHS, X, CLHS, CRHS))
6594 return CLHS->ule(*CRHS);
6595
6596 return false;
6597 }
6598 }
6599 }
6600
6601 /// Return true if "icmp Pred BLHS BRHS" is true whenever "icmp Pred
6602 /// ALHS ARHS" is true. Otherwise, return None.
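/// For example (illustrative): "x u<= y" implies "x u<= %t" when
/// %t = add nuw i32 %y, 5, because y u<= y + 5 whenever the add cannot
/// wrap.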
6603 static Optional<bool> 6604 isImpliedCondOperands(CmpInst::Predicate Pred, const Value *ALHS, 6605 const Value *ARHS, const Value *BLHS, const Value *BRHS, 6606 const DataLayout &DL, unsigned Depth) { 6607 switch (Pred) { 6608 default: 6609 return None; 6610 6611 case CmpInst::ICMP_SLT: 6612 case CmpInst::ICMP_SLE: 6613 if (isTruePredicate(CmpInst::ICMP_SLE, BLHS, ALHS, DL, Depth) && 6614 isTruePredicate(CmpInst::ICMP_SLE, ARHS, BRHS, DL, Depth)) 6615 return true; 6616 return None; 6617 6618 case CmpInst::ICMP_ULT: 6619 case CmpInst::ICMP_ULE: 6620 if (isTruePredicate(CmpInst::ICMP_ULE, BLHS, ALHS, DL, Depth) && 6621 isTruePredicate(CmpInst::ICMP_ULE, ARHS, BRHS, DL, Depth)) 6622 return true; 6623 return None; 6624 } 6625 } 6626 6627 /// Return true if the operands of the two compares match. IsSwappedOps is true 6628 /// when the operands match, but are swapped. 6629 static bool isMatchingOps(const Value *ALHS, const Value *ARHS, 6630 const Value *BLHS, const Value *BRHS, 6631 bool &IsSwappedOps) { 6632 6633 bool IsMatchingOps = (ALHS == BLHS && ARHS == BRHS); 6634 IsSwappedOps = (ALHS == BRHS && ARHS == BLHS); 6635 return IsMatchingOps || IsSwappedOps; 6636 } 6637 6638 /// Return true if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is true. 6639 /// Return false if "icmp1 APred X, Y" implies "icmp2 BPred X, Y" is false. 6640 /// Otherwise, return None if we can't infer anything. 6641 static Optional<bool> isImpliedCondMatchingOperands(CmpInst::Predicate APred, 6642 CmpInst::Predicate BPred, 6643 bool AreSwappedOps) { 6644 // Canonicalize the predicate as if the operands were not commuted. 6645 if (AreSwappedOps) 6646 BPred = ICmpInst::getSwappedPredicate(BPred); 6647 6648 if (CmpInst::isImpliedTrueByMatchingCmp(APred, BPred)) 6649 return true; 6650 if (CmpInst::isImpliedFalseByMatchingCmp(APred, BPred)) 6651 return false; 6652 6653 return None; 6654 } 6655 6656 /// Return true if "icmp APred X, C1" implies "icmp BPred X, C2" is true. 6657 /// Return false if "icmp APred X, C1" implies "icmp BPred X, C2" is false. 6658 /// Otherwise, return None if we can't infer anything. 6659 static Optional<bool> isImpliedCondMatchingImmOperands(CmpInst::Predicate APred, 6660 const APInt &C1, 6661 CmpInst::Predicate BPred, 6662 const APInt &C2) { 6663 ConstantRange DomCR = ConstantRange::makeExactICmpRegion(APred, C1); 6664 ConstantRange CR = ConstantRange::makeExactICmpRegion(BPred, C2); 6665 ConstantRange Intersection = DomCR.intersectWith(CR); 6666 ConstantRange Difference = DomCR.difference(CR); 6667 if (Intersection.isEmptySet()) 6668 return false; 6669 if (Difference.isEmptySet()) 6670 return true; 6671 return None; 6672 } 6673 6674 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is 6675 /// false. Otherwise, return None if we can't infer anything. 6676 static Optional<bool> isImpliedCondICmps(const ICmpInst *LHS, 6677 CmpInst::Predicate BPred, 6678 const Value *BLHS, const Value *BRHS, 6679 const DataLayout &DL, bool LHSIsTrue, 6680 unsigned Depth) { 6681 Value *ALHS = LHS->getOperand(0); 6682 Value *ARHS = LHS->getOperand(1); 6683 6684 // The rest of the logic assumes the LHS condition is true. If that's not the 6685 // case, invert the predicate to make it so. 6686 CmpInst::Predicate APred = 6687 LHSIsTrue ? LHS->getPredicate() : LHS->getInversePredicate(); 6688 6689 // Can we infer anything when the two compares have matching operands? 
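// For example, "x s> y" being true implies "x s>= y" is true and implies
// "x s< y" is false. Swapped operands ("y s< x") are handled by
// canonicalizing the second predicate first.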
6690 bool AreSwappedOps;
6691 if (isMatchingOps(ALHS, ARHS, BLHS, BRHS, AreSwappedOps)) {
6692 if (Optional<bool> Implication = isImpliedCondMatchingOperands(
6693 APred, BPred, AreSwappedOps))
6694 return Implication;
6695 // No amount of additional analysis will infer the second condition, so
6696 // early exit.
6697 return None;
6698 }
6699
6700 // Can we infer anything when the LHS operands match and the RHS operands are
6701 // constants (not necessarily matching)?
6702 const APInt *AC, *BC;
6703 if (ALHS == BLHS && match(ARHS, m_APInt(AC)) && match(BRHS, m_APInt(BC)))
6704 return isImpliedCondMatchingImmOperands(APred, *AC, BPred, *BC);
6705
6706 if (APred == BPred)
6707 return isImpliedCondOperands(APred, ALHS, ARHS, BLHS, BRHS, DL, Depth);
6708 return None;
6709 }
6710
6711 /// Return true if LHS implies RHS is true. Return false if LHS implies RHS is
6712 /// false. Otherwise, return None if we can't infer anything. We expect the
6713 /// RHS to be an icmp and the LHS to be an 'and', 'or', or a 'select' instruction.
6714 static Optional<bool>
6715 isImpliedCondAndOr(const Instruction *LHS, CmpInst::Predicate RHSPred,
6716 const Value *RHSOp0, const Value *RHSOp1,
6717 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6718 // The LHS must be an 'or', 'and', or a 'select' instruction.
6719 assert((LHS->getOpcode() == Instruction::And ||
6720 LHS->getOpcode() == Instruction::Or ||
6721 LHS->getOpcode() == Instruction::Select) &&
6722 "Expected LHS to be 'and', 'or', or 'select'.");
6723
6724 assert(Depth <= MaxAnalysisRecursionDepth && "Hit recursion limit");
6725
6726 // If the result of an 'or' is false, then we know both legs of the 'or' are
6727 // false. Similarly, if the result of an 'and' is true, then we know both
6728 // legs of the 'and' are true.
6729 const Value *ALHS, *ARHS;
6730 if ((!LHSIsTrue && match(LHS, m_LogicalOr(m_Value(ALHS), m_Value(ARHS)))) ||
6731 (LHSIsTrue && match(LHS, m_LogicalAnd(m_Value(ALHS), m_Value(ARHS))))) {
6732 // FIXME: Make this non-recursive.
6733 if (Optional<bool> Implication = isImpliedCondition(
6734 ALHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6735 return Implication;
6736 if (Optional<bool> Implication = isImpliedCondition(
6737 ARHS, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue, Depth + 1))
6738 return Implication;
6739 return None;
6740 }
6741 return None;
6742 }
6743
6744 Optional<bool>
6745 llvm::isImpliedCondition(const Value *LHS, CmpInst::Predicate RHSPred,
6746 const Value *RHSOp0, const Value *RHSOp1,
6747 const DataLayout &DL, bool LHSIsTrue, unsigned Depth) {
6748 // Bail out when we hit the limit.
6749 if (Depth == MaxAnalysisRecursionDepth)
6750 return None;
6751
6752 // A mismatch occurs when we compare a scalar cmp to a vector cmp, for
6753 // example.
6754 if (RHSOp0->getType()->isVectorTy() != LHS->getType()->isVectorTy())
6755 return None;
6756
6757 assert(LHS->getType()->isIntOrIntVectorTy(1) &&
6758 "Expected integer type only!");
6759
6760 // Both LHS and RHS are icmps.
6761 const ICmpInst *LHSCmp = dyn_cast<ICmpInst>(LHS);
6762 if (LHSCmp)
6763 return isImpliedCondICmps(LHSCmp, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6764 Depth);
6765
6766 // The LHS should be an 'or', 'and', or a 'select' instruction. We expect
6767 // the RHS to be an icmp.
6768 // FIXME: Add support for and/or/select on the RHS.
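// For example, if LHS is "%lhs = and i1 %c1, %c2" and %lhs is known true,
// then both %c1 and %c2 must be true, so it suffices for either one to
// imply the RHS condition.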
6769 if (const Instruction *LHSI = dyn_cast<Instruction>(LHS)) {
6770 if ((LHSI->getOpcode() == Instruction::And ||
6771 LHSI->getOpcode() == Instruction::Or ||
6772 LHSI->getOpcode() == Instruction::Select))
6773 return isImpliedCondAndOr(LHSI, RHSPred, RHSOp0, RHSOp1, DL, LHSIsTrue,
6774 Depth);
6775 }
6776 return None;
6777 }
6778
6779 Optional<bool> llvm::isImpliedCondition(const Value *LHS, const Value *RHS,
6780 const DataLayout &DL, bool LHSIsTrue,
6781 unsigned Depth) {
6782 // LHS ==> RHS by definition
6783 if (LHS == RHS)
6784 return LHSIsTrue;
6785
6786 if (const ICmpInst *RHSCmp = dyn_cast<ICmpInst>(RHS))
6787 return isImpliedCondition(LHS, RHSCmp->getPredicate(),
6788 RHSCmp->getOperand(0), RHSCmp->getOperand(1), DL,
6789 LHSIsTrue, Depth);
6790
6791 if (Depth == MaxAnalysisRecursionDepth)
6792 return None;
6793
6794 // LHS ==> (RHS1 || RHS2) if LHS ==> RHS1 or LHS ==> RHS2
6795 // LHS ==> !(RHS1 && RHS2) if LHS ==> !RHS1 or LHS ==> !RHS2
6796 const Value *RHS1, *RHS2;
6797 if (match(RHS, m_LogicalOr(m_Value(RHS1), m_Value(RHS2)))) {
6798 if (Optional<bool> Imp =
6799 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
6800 if (*Imp == true)
6801 return true;
6802 if (Optional<bool> Imp =
6803 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
6804 if (*Imp == true)
6805 return true;
6806 }
6807 if (match(RHS, m_LogicalAnd(m_Value(RHS1), m_Value(RHS2)))) {
6808 if (Optional<bool> Imp =
6809 isImpliedCondition(LHS, RHS1, DL, LHSIsTrue, Depth + 1))
6810 if (*Imp == false)
6811 return false;
6812 if (Optional<bool> Imp =
6813 isImpliedCondition(LHS, RHS2, DL, LHSIsTrue, Depth + 1))
6814 if (*Imp == false)
6815 return false;
6816 }
6817
6818 return None;
6819 }
6820
6821 // Returns a pair (Condition, ConditionIsTrue), where Condition is a branch
6822 // condition dominating ContextI, or nullptr if no condition is found.
6823 static std::pair<Value *, bool>
6824 getDomPredecessorCondition(const Instruction *ContextI) {
6825 if (!ContextI || !ContextI->getParent())
6826 return {nullptr, false};
6827
6828 // TODO: This is a poor/cheap way to determine dominance. Should we use a
6829 // dominator tree (e.g., from a SimplifyQuery) instead?
6830 const BasicBlock *ContextBB = ContextI->getParent();
6831 const BasicBlock *PredBB = ContextBB->getSinglePredecessor();
6832 if (!PredBB)
6833 return {nullptr, false};
6834
6835 // We need a conditional branch in the predecessor.
6836 Value *PredCond;
6837 BasicBlock *TrueBB, *FalseBB;
6838 if (!match(PredBB->getTerminator(), m_Br(m_Value(PredCond), TrueBB, FalseBB)))
6839 return {nullptr, false};
6840
6841 // A branch with identical successors should get simplified away; don't bother analyzing its condition.
6842 if (TrueBB == FalseBB)
6843 return {nullptr, false};
6844
6845 assert((TrueBB == ContextBB || FalseBB == ContextBB) &&
6846 "Predecessor block does not point to successor?");
6847
6848 // Is this condition implied by the predecessor condition?
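// The boolean records whether ContextBB is reached over the true edge,
// i.e. whether PredCond held when control entered ContextBB.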
6849 return {PredCond, TrueBB == ContextBB}; 6850 } 6851 6852 Optional<bool> llvm::isImpliedByDomCondition(const Value *Cond, 6853 const Instruction *ContextI, 6854 const DataLayout &DL) { 6855 assert(Cond->getType()->isIntOrIntVectorTy(1) && "Condition must be bool"); 6856 auto PredCond = getDomPredecessorCondition(ContextI); 6857 if (PredCond.first) 6858 return isImpliedCondition(PredCond.first, Cond, DL, PredCond.second); 6859 return None; 6860 } 6861 6862 Optional<bool> llvm::isImpliedByDomCondition(CmpInst::Predicate Pred, 6863 const Value *LHS, const Value *RHS, 6864 const Instruction *ContextI, 6865 const DataLayout &DL) { 6866 auto PredCond = getDomPredecessorCondition(ContextI); 6867 if (PredCond.first) 6868 return isImpliedCondition(PredCond.first, Pred, LHS, RHS, DL, 6869 PredCond.second); 6870 return None; 6871 } 6872 6873 static void setLimitsForBinOp(const BinaryOperator &BO, APInt &Lower, 6874 APInt &Upper, const InstrInfoQuery &IIQ, 6875 bool PreferSignedRange) { 6876 unsigned Width = Lower.getBitWidth(); 6877 const APInt *C; 6878 switch (BO.getOpcode()) { 6879 case Instruction::Add: 6880 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) { 6881 bool HasNSW = IIQ.hasNoSignedWrap(&BO); 6882 bool HasNUW = IIQ.hasNoUnsignedWrap(&BO); 6883 6884 // If the caller expects a signed compare, then try to use a signed range. 6885 // Otherwise if both no-wraps are set, use the unsigned range because it 6886 // is never larger than the signed range. Example: 6887 // "add nuw nsw i8 X, -2" is unsigned [254,255] vs. signed [-128, 125]. 6888 if (PreferSignedRange && HasNSW && HasNUW) 6889 HasNUW = false; 6890 6891 if (HasNUW) { 6892 // 'add nuw x, C' produces [C, UINT_MAX]. 6893 Lower = *C; 6894 } else if (HasNSW) { 6895 if (C->isNegative()) { 6896 // 'add nsw x, -C' produces [SINT_MIN, SINT_MAX - C]. 6897 Lower = APInt::getSignedMinValue(Width); 6898 Upper = APInt::getSignedMaxValue(Width) + *C + 1; 6899 } else { 6900 // 'add nsw x, +C' produces [SINT_MIN + C, SINT_MAX]. 6901 Lower = APInt::getSignedMinValue(Width) + *C; 6902 Upper = APInt::getSignedMaxValue(Width) + 1; 6903 } 6904 } 6905 } 6906 break; 6907 6908 case Instruction::And: 6909 if (match(BO.getOperand(1), m_APInt(C))) 6910 // 'and x, C' produces [0, C]. 6911 Upper = *C + 1; 6912 break; 6913 6914 case Instruction::Or: 6915 if (match(BO.getOperand(1), m_APInt(C))) 6916 // 'or x, C' produces [C, UINT_MAX]. 6917 Lower = *C; 6918 break; 6919 6920 case Instruction::AShr: 6921 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { 6922 // 'ashr x, C' produces [INT_MIN >> C, INT_MAX >> C]. 6923 Lower = APInt::getSignedMinValue(Width).ashr(*C); 6924 Upper = APInt::getSignedMaxValue(Width).ashr(*C) + 1; 6925 } else if (match(BO.getOperand(0), m_APInt(C))) { 6926 unsigned ShiftAmount = Width - 1; 6927 if (!C->isZero() && IIQ.isExact(&BO)) 6928 ShiftAmount = C->countTrailingZeros(); 6929 if (C->isNegative()) { 6930 // 'ashr C, x' produces [C, C >> (Width-1)] 6931 Lower = *C; 6932 Upper = C->ashr(ShiftAmount) + 1; 6933 } else { 6934 // 'ashr C, x' produces [C >> (Width-1), C] 6935 Lower = C->ashr(ShiftAmount); 6936 Upper = *C + 1; 6937 } 6938 } 6939 break; 6940 6941 case Instruction::LShr: 6942 if (match(BO.getOperand(1), m_APInt(C)) && C->ult(Width)) { 6943 // 'lshr x, C' produces [0, UINT_MAX >> C]. 6944 Upper = APInt::getAllOnes(Width).lshr(*C) + 1; 6945 } else if (match(BO.getOperand(0), m_APInt(C))) { 6946 // 'lshr C, x' produces [C >> (Width-1), C]. 
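// For example (illustrative), 'lshr i8 12, x' produces [0, 12]. With the
// exact flag no set bits may be shifted out, so the shift amount is at
// most countTrailingZeros(12) = 2 and the range tightens to [3, 12].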
6947 unsigned ShiftAmount = Width - 1;
6948 if (!C->isZero() && IIQ.isExact(&BO))
6949 ShiftAmount = C->countTrailingZeros();
6950 Lower = C->lshr(ShiftAmount);
6951 Upper = *C + 1;
6952 }
6953 break;
6954
6955 case Instruction::Shl:
6956 if (match(BO.getOperand(0), m_APInt(C))) {
6957 if (IIQ.hasNoUnsignedWrap(&BO)) {
6958 // 'shl nuw C, x' produces [C, C << CLZ(C)]
6959 Lower = *C;
6960 Upper = Lower.shl(Lower.countLeadingZeros()) + 1;
6961 } else if (BO.hasNoSignedWrap()) { // TODO: What if both nuw+nsw?
6962 if (C->isNegative()) {
6963 // 'shl nsw C, x' produces [C << CLO(C)-1, C]
6964 unsigned ShiftAmount = C->countLeadingOnes() - 1;
6965 Lower = C->shl(ShiftAmount);
6966 Upper = *C + 1;
6967 } else {
6968 // 'shl nsw C, x' produces [C, C << CLZ(C)-1]
6969 unsigned ShiftAmount = C->countLeadingZeros() - 1;
6970 Lower = *C;
6971 Upper = C->shl(ShiftAmount) + 1;
6972 }
6973 }
6974 }
6975 break;
6976
6977 case Instruction::SDiv:
6978 if (match(BO.getOperand(1), m_APInt(C))) {
6979 APInt IntMin = APInt::getSignedMinValue(Width);
6980 APInt IntMax = APInt::getSignedMaxValue(Width);
6981 if (C->isAllOnes()) {
6982 // 'sdiv x, -1' produces [INT_MIN + 1, INT_MAX]
6983 // (INT_MIN is excluded because INT_MIN / -1 overflows).
6984 Lower = IntMin + 1;
6985 Upper = IntMax + 1;
6986 } else if (C->countLeadingZeros() < Width - 1) {
6987 // 'sdiv x, C' produces [INT_MIN / C, INT_MAX / C]
6988 // where C != -1 and C != 0 and C != 1
6989 Lower = IntMin.sdiv(*C);
6990 Upper = IntMax.sdiv(*C);
6991 if (Lower.sgt(Upper))
6992 std::swap(Lower, Upper);
6993 Upper = Upper + 1;
6994 assert(Upper != Lower && "Upper part of range has wrapped!");
6995 }
6996 } else if (match(BO.getOperand(0), m_APInt(C))) {
6997 if (C->isMinSignedValue()) {
6998 // 'sdiv INT_MIN, x' produces [INT_MIN, INT_MIN / -2].
6999 Lower = *C;
7000 Upper = Lower.lshr(1) + 1;
7001 } else {
7002 // 'sdiv C, x' produces [-|C|, |C|].
7003 Upper = C->abs() + 1;
7004 Lower = (-Upper) + 1;
7005 }
7006 }
7007 break;
7008
7009 case Instruction::UDiv:
7010 if (match(BO.getOperand(1), m_APInt(C)) && !C->isZero()) {
7011 // 'udiv x, C' produces [0, UINT_MAX / C].
7012 Upper = APInt::getMaxValue(Width).udiv(*C) + 1;
7013 } else if (match(BO.getOperand(0), m_APInt(C))) {
7014 // 'udiv C, x' produces [0, C].
7015 Upper = *C + 1;
7016 }
7017 break;
7018
7019 case Instruction::SRem:
7020 if (match(BO.getOperand(1), m_APInt(C))) {
7021 // 'srem x, C' produces (-|C|, |C|).
7022 Upper = C->abs();
7023 Lower = (-Upper) + 1;
7024 }
7025 break;
7026
7027 case Instruction::URem:
7028 if (match(BO.getOperand(1), m_APInt(C)))
7029 // 'urem x, C' produces [0, C).
7030 Upper = *C;
7031 break;
7032
7033 default:
7034 break;
7035 }
7036 }
7037
7038 static void setLimitsForIntrinsic(const IntrinsicInst &II, APInt &Lower,
7039 APInt &Upper) {
7040 unsigned Width = Lower.getBitWidth();
7041 const APInt *C;
7042 switch (II.getIntrinsicID()) {
7043 case Intrinsic::ctpop:
7044 case Intrinsic::ctlz:
7045 case Intrinsic::cttz:
7046 // Maximum of set/clear bits is the bit width.
7047 assert(Lower == 0 && "Expected lower bound to be zero");
7048 Upper = Width + 1;
7049 break;
7050 case Intrinsic::uadd_sat:
7051 // uadd.sat(x, C) produces [C, UINT_MAX].
7052 if (match(II.getOperand(0), m_APInt(C)) ||
7053 match(II.getOperand(1), m_APInt(C)))
7054 Lower = *C;
7055 break;
7056 case Intrinsic::sadd_sat:
7057 if (match(II.getOperand(0), m_APInt(C)) ||
7058 match(II.getOperand(1), m_APInt(C))) {
7059 if (C->isNegative()) {
7060 // sadd.sat(x, -C) produces [SINT_MIN, SINT_MAX + (-C)].
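// For example, sadd.sat(x, i8 -3) produces [-128, 124].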
7061 Lower = APInt::getSignedMinValue(Width); 7062 Upper = APInt::getSignedMaxValue(Width) + *C + 1; 7063 } else { 7064 // sadd.sat(x, +C) produces [SINT_MIN + C, SINT_MAX]. 7065 Lower = APInt::getSignedMinValue(Width) + *C; 7066 Upper = APInt::getSignedMaxValue(Width) + 1; 7067 } 7068 } 7069 break; 7070 case Intrinsic::usub_sat: 7071 // usub.sat(C, x) produces [0, C]. 7072 if (match(II.getOperand(0), m_APInt(C))) 7073 Upper = *C + 1; 7074 // usub.sat(x, C) produces [0, UINT_MAX - C]. 7075 else if (match(II.getOperand(1), m_APInt(C))) 7076 Upper = APInt::getMaxValue(Width) - *C + 1; 7077 break; 7078 case Intrinsic::ssub_sat: 7079 if (match(II.getOperand(0), m_APInt(C))) { 7080 if (C->isNegative()) { 7081 // ssub.sat(-C, x) produces [SINT_MIN, -SINT_MIN + (-C)]. 7082 Lower = APInt::getSignedMinValue(Width); 7083 Upper = *C - APInt::getSignedMinValue(Width) + 1; 7084 } else { 7085 // ssub.sat(+C, x) produces [-SINT_MAX + C, SINT_MAX]. 7086 Lower = *C - APInt::getSignedMaxValue(Width); 7087 Upper = APInt::getSignedMaxValue(Width) + 1; 7088 } 7089 } else if (match(II.getOperand(1), m_APInt(C))) { 7090 if (C->isNegative()) { 7091 // ssub.sat(x, -C) produces [SINT_MIN - (-C), SINT_MAX]: 7092 Lower = APInt::getSignedMinValue(Width) - *C; 7093 Upper = APInt::getSignedMaxValue(Width) + 1; 7094 } else { 7095 // ssub.sat(x, +C) produces [SINT_MIN, SINT_MAX - C]. 7096 Lower = APInt::getSignedMinValue(Width); 7097 Upper = APInt::getSignedMaxValue(Width) - *C + 1; 7098 } 7099 } 7100 break; 7101 case Intrinsic::umin: 7102 case Intrinsic::umax: 7103 case Intrinsic::smin: 7104 case Intrinsic::smax: 7105 if (!match(II.getOperand(0), m_APInt(C)) && 7106 !match(II.getOperand(1), m_APInt(C))) 7107 break; 7108 7109 switch (II.getIntrinsicID()) { 7110 case Intrinsic::umin: 7111 Upper = *C + 1; 7112 break; 7113 case Intrinsic::umax: 7114 Lower = *C; 7115 break; 7116 case Intrinsic::smin: 7117 Lower = APInt::getSignedMinValue(Width); 7118 Upper = *C + 1; 7119 break; 7120 case Intrinsic::smax: 7121 Lower = *C; 7122 Upper = APInt::getSignedMaxValue(Width) + 1; 7123 break; 7124 default: 7125 llvm_unreachable("Must be min/max intrinsic"); 7126 } 7127 break; 7128 case Intrinsic::abs: 7129 // If abs of SIGNED_MIN is poison, then the result is [0..SIGNED_MAX], 7130 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN. 7131 if (match(II.getOperand(1), m_One())) 7132 Upper = APInt::getSignedMaxValue(Width) + 1; 7133 else 7134 Upper = APInt::getSignedMinValue(Width) + 1; 7135 break; 7136 default: 7137 break; 7138 } 7139 } 7140 7141 static void setLimitsForSelectPattern(const SelectInst &SI, APInt &Lower, 7142 APInt &Upper, const InstrInfoQuery &IIQ) { 7143 const Value *LHS = nullptr, *RHS = nullptr; 7144 SelectPatternResult R = matchSelectPattern(&SI, LHS, RHS); 7145 if (R.Flavor == SPF_UNKNOWN) 7146 return; 7147 7148 unsigned BitWidth = SI.getType()->getScalarSizeInBits(); 7149 7150 if (R.Flavor == SelectPatternFlavor::SPF_ABS) { 7151 // If the negation part of the abs (in RHS) has the NSW flag, 7152 // then the result of abs(X) is [0..SIGNED_MAX], 7153 // otherwise it is [0..SIGNED_MIN], as -SIGNED_MIN == SIGNED_MIN. 7154 Lower = APInt::getZero(BitWidth); 7155 if (match(RHS, m_Neg(m_Specific(LHS))) && 7156 IIQ.hasNoSignedWrap(cast<Instruction>(RHS))) 7157 Upper = APInt::getSignedMaxValue(BitWidth) + 1; 7158 else 7159 Upper = APInt::getSignedMinValue(BitWidth) + 1; 7160 return; 7161 } 7162 7163 if (R.Flavor == SelectPatternFlavor::SPF_NABS) { 7164 // The result of -abs(X) is <= 0. 
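// That is, the range is [SIGNED_MIN, 0]; SIGNED_MIN stays included because
// -abs(SIGNED_MIN) == SIGNED_MIN when the negation wraps.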
7165 Lower = APInt::getSignedMinValue(BitWidth); 7166 Upper = APInt(BitWidth, 1); 7167 return; 7168 } 7169 7170 const APInt *C; 7171 if (!match(LHS, m_APInt(C)) && !match(RHS, m_APInt(C))) 7172 return; 7173 7174 switch (R.Flavor) { 7175 case SPF_UMIN: 7176 Upper = *C + 1; 7177 break; 7178 case SPF_UMAX: 7179 Lower = *C; 7180 break; 7181 case SPF_SMIN: 7182 Lower = APInt::getSignedMinValue(BitWidth); 7183 Upper = *C + 1; 7184 break; 7185 case SPF_SMAX: 7186 Lower = *C; 7187 Upper = APInt::getSignedMaxValue(BitWidth) + 1; 7188 break; 7189 default: 7190 break; 7191 } 7192 } 7193 7194 static void setLimitForFPToI(const Instruction *I, APInt &Lower, APInt &Upper) { 7195 // The maximum representable value of a half is 65504. For floats the maximum 7196 // value is 3.4e38 which requires roughly 129 bits. 7197 unsigned BitWidth = I->getType()->getScalarSizeInBits(); 7198 if (!I->getOperand(0)->getType()->getScalarType()->isHalfTy()) 7199 return; 7200 if (isa<FPToSIInst>(I) && BitWidth >= 17) { 7201 Lower = APInt(BitWidth, -65504); 7202 Upper = APInt(BitWidth, 65505); 7203 } 7204 7205 if (isa<FPToUIInst>(I) && BitWidth >= 16) { 7206 // For a fptoui the lower limit is left as 0. 7207 Upper = APInt(BitWidth, 65505); 7208 } 7209 } 7210 7211 ConstantRange llvm::computeConstantRange(const Value *V, bool ForSigned, 7212 bool UseInstrInfo, AssumptionCache *AC, 7213 const Instruction *CtxI, 7214 const DominatorTree *DT, 7215 unsigned Depth) { 7216 assert(V->getType()->isIntOrIntVectorTy() && "Expected integer instruction"); 7217 7218 if (Depth == MaxAnalysisRecursionDepth) 7219 return ConstantRange::getFull(V->getType()->getScalarSizeInBits()); 7220 7221 const APInt *C; 7222 if (match(V, m_APInt(C))) 7223 return ConstantRange(*C); 7224 7225 InstrInfoQuery IIQ(UseInstrInfo); 7226 unsigned BitWidth = V->getType()->getScalarSizeInBits(); 7227 APInt Lower = APInt(BitWidth, 0); 7228 APInt Upper = APInt(BitWidth, 0); 7229 if (auto *BO = dyn_cast<BinaryOperator>(V)) 7230 setLimitsForBinOp(*BO, Lower, Upper, IIQ, ForSigned); 7231 else if (auto *II = dyn_cast<IntrinsicInst>(V)) 7232 setLimitsForIntrinsic(*II, Lower, Upper); 7233 else if (auto *SI = dyn_cast<SelectInst>(V)) 7234 setLimitsForSelectPattern(*SI, Lower, Upper, IIQ); 7235 else if (isa<FPToUIInst>(V) || isa<FPToSIInst>(V)) 7236 setLimitForFPToI(cast<Instruction>(V), Lower, Upper); 7237 7238 ConstantRange CR = ConstantRange::getNonEmpty(Lower, Upper); 7239 7240 if (auto *I = dyn_cast<Instruction>(V)) 7241 if (auto *Range = IIQ.getMetadata(I, LLVMContext::MD_range)) 7242 CR = CR.intersectWith(getConstantRangeFromMetadata(*Range)); 7243 7244 if (CtxI && AC) { 7245 // Try to restrict the range based on information from assumptions. 7246 for (auto &AssumeVH : AC->assumptionsFor(V)) { 7247 if (!AssumeVH) 7248 continue; 7249 CallInst *I = cast<CallInst>(AssumeVH); 7250 assert(I->getParent()->getParent() == CtxI->getParent()->getParent() && 7251 "Got assumption for the wrong function!"); 7252 assert(I->getCalledFunction()->getIntrinsicID() == Intrinsic::assume && 7253 "must be an assume intrinsic"); 7254 7255 if (!isValidAssumeForContext(I, CtxI, DT)) 7256 continue; 7257 Value *Arg = I->getArgOperand(0); 7258 ICmpInst *Cmp = dyn_cast<ICmpInst>(Arg); 7259 // Currently we just use information from comparisons. 7260 if (!Cmp || Cmp->getOperand(0) != V) 7261 continue; 7262 // TODO: Set "ForSigned" parameter via Cmp->isSigned()? 
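// For example (illustrative), 'call void @llvm.assume(i1 %c)' with
// %c = icmp ult i32 %v, 8 restricts the range of %v to [0, 7], which is
// intersected into CR below.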
7263 ConstantRange RHS =
7264 computeConstantRange(Cmp->getOperand(1), /* ForSigned */ false,
7265 UseInstrInfo, AC, I, DT, Depth + 1);
7266 CR = CR.intersectWith(
7267 ConstantRange::makeAllowedICmpRegion(Cmp->getPredicate(), RHS));
7268 }
7269 }
7270
7271 return CR;
7272 }
7273
7274 static Optional<int64_t>
7275 getOffsetFromIndex(const GEPOperator *GEP, unsigned Idx, const DataLayout &DL) {
7276 // Skip over the indices before position Idx.
7277 gep_type_iterator GTI = gep_type_begin(GEP);
7278 for (unsigned i = 1; i != Idx; ++i, ++GTI)
7279 /*skip along*/;
7280
7281 // Compute the offset implied by the rest of the indices.
7282 int64_t Offset = 0;
7283 for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
7284 ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
7285 if (!OpC)
7286 return None;
7287 if (OpC->isZero())
7288 continue; // No offset.
7289
7290 // Handle struct indices, which add their field offset to the pointer.
7291 if (StructType *STy = GTI.getStructTypeOrNull()) {
7292 Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
7293 continue;
7294 }
7295
7296 // Otherwise, we have a sequential type like an array or fixed-length
7297 // vector. Multiply the index by the element size.
7298 TypeSize Size = DL.getTypeAllocSize(GTI.getIndexedType());
7299 if (Size.isScalable())
7300 return None;
7301 Offset += Size.getFixedSize() * OpC->getSExtValue();
7302 }
7303
7304 return Offset;
7305 }
7306
7307 Optional<int64_t> llvm::isPointerOffset(const Value *Ptr1, const Value *Ptr2,
7308 const DataLayout &DL) {
7309 APInt Offset1(DL.getIndexTypeSizeInBits(Ptr1->getType()), 0);
7310 APInt Offset2(DL.getIndexTypeSizeInBits(Ptr2->getType()), 0);
7311 Ptr1 = Ptr1->stripAndAccumulateConstantOffsets(DL, Offset1, true);
7312 Ptr2 = Ptr2->stripAndAccumulateConstantOffsets(DL, Offset2, true);
7313
7314 // Handle the trivial case first.
7315 if (Ptr1 == Ptr2)
7316 return Offset2.getSExtValue() - Offset1.getSExtValue();
7317
7318 const GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
7319 const GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);
7320
7321 // Right now we handle the case where Ptr1/Ptr2 are both GEPs with an
7322 // identical base. After that base, they may share some number of common
7323 // (and potentially variable) indices. Beyond those, they may differ only
7324 // by constant indices, which determine the constant offset between the
7325 // two pointers. We handle no other case.
7326 if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0) ||
7327 GEP1->getSourceElementType() != GEP2->getSourceElementType())
7328 return None;
7329
7330 // Skip any common indices and track the GEP types.
7331 unsigned Idx = 1;
7332 for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
7333 if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
7334 break;
7335
7336 auto IOffset1 = getOffsetFromIndex(GEP1, Idx, DL);
7337 auto IOffset2 = getOffsetFromIndex(GEP2, Idx, DL);
7338 if (!IOffset1 || !IOffset2)
7339 return None;
7340 return *IOffset2 - *IOffset1 + Offset2.getSExtValue() -
7341 Offset1.getSExtValue();
7342 }
7343